Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * (C) 2001 Clemson University and The University of Chicago
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * Copyright 2018 Omnibond Systems, L.L.C.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * See COPYING in top-level directory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  *  Linux VFS inode operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/bvec.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include "protocol.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include "orangefs-kernel.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include "orangefs-bufmap.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) 
/*
 * Write the dirty range attached to @page back to the server.
 *
 * The caller must hold the page lock; this function sets (but does not
 * end) writeback on the page.  The span to write normally comes from
 * the orangefs_write_range stored in page_private, clamped to i_size.
 * The write range is detached and freed whether the I/O succeeds or
 * not.  Returns 0 on success or a negative error from
 * wait_for_direct_io().
 */
static int orangefs_writepage_locked(struct page *page,
    struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct orangefs_write_range *wr = NULL;
	struct iov_iter iter;
	struct bio_vec bv;
	size_t len, wlen;
	ssize_t ret;
	loff_t off;

	set_page_writeback(page);

	len = i_size_read(inode);
	if (PagePrivate(page)) {
		/* Normal case: a write range was attached in write_begin. */
		wr = (struct orangefs_write_range *)page_private(page);
		WARN_ON(wr->pos >= len);
		off = wr->pos;
		/* Clamp so the write never extends past EOF. */
		if (off + wr->len > len)
			wlen = len - off;
		else
			wlen = wr->len;
	} else {
		/*
		 * Should not happen: every dirty page is expected to carry
		 * a write range.  Fall back to writing the whole page.
		 */
		WARN_ON(1);
		off = page_offset(page);
		if (off + PAGE_SIZE > len)
			wlen = len - off;
		else
			wlen = PAGE_SIZE;
	}
	/* Should've been handled in orangefs_invalidatepage. */
	WARN_ON(off == len || off + wlen > len);

	/* Describe the in-page span [off % PAGE_SIZE, +wlen) to write. */
	bv.bv_page = page;
	bv.bv_len = wlen;
	bv.bv_offset = off % PAGE_SIZE;
	WARN_ON(wlen == 0);
	iov_iter_bvec(&iter, WRITE, &bv, 1, wlen);

	ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, wlen,
	    len, wr, NULL, NULL);
	if (ret < 0) {
		SetPageError(page);
		mapping_set_error(page->mapping, ret);
	} else {
		ret = 0;
	}
	/* The write range is consumed either way; free it. */
	kfree(detach_page_private(page));
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 
/*
 * ->writepage: write one locked page, then unlock it and end writeback.
 * Returns the result of orangefs_writepage_locked().
 */
static int orangefs_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	err = orangefs_writepage_locked(page, wbc);
	unlock_page(page);
	end_page_writeback(page);
	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 
/*
 * Accumulator used by orangefs_writepages to batch contiguous dirty
 * pages that share a writer (uid/gid) into a single server write.
 */
struct orangefs_writepages {
	loff_t off;		/* file offset of the batched range */
	size_t len;		/* length of the batched range in bytes */
	kuid_t uid;		/* uid the batched write ranges were dirtied under */
	kgid_t gid;		/* gid the batched write ranges were dirtied under */
	int maxpages;		/* capacity of pages[] and bv[] */
	int npages;		/* number of pages currently accumulated */
	struct page **pages;
	struct bio_vec *bv;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) static int orangefs_writepages_work(struct orangefs_writepages *ow,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90)     struct writeback_control *wbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 	struct inode *inode = ow->pages[0]->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 	struct orangefs_write_range *wrp, wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 	struct iov_iter iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 	ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 	size_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 	loff_t off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 	len = i_size_read(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 	for (i = 0; i < ow->npages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 		set_page_writeback(ow->pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 		ow->bv[i].bv_page = ow->pages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 		ow->bv[i].bv_len = min(page_offset(ow->pages[i]) + PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 		    ow->off + ow->len) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 		    max(ow->off, page_offset(ow->pages[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 		if (i == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 			ow->bv[i].bv_offset = ow->off -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 			    page_offset(ow->pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 			ow->bv[i].bv_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 	iov_iter_bvec(&iter, WRITE, ow->bv, ow->npages, ow->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 	WARN_ON(ow->off >= len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 	if (ow->off + ow->len > len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 		ow->len = len - ow->off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 	off = ow->off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 	wr.uid = ow->uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 	wr.gid = ow->gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) 	ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, ow->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 	    0, &wr, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 		for (i = 0; i < ow->npages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 			SetPageError(ow->pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 			mapping_set_error(ow->pages[i]->mapping, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 			if (PagePrivate(ow->pages[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 				wrp = (struct orangefs_write_range *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 				    page_private(ow->pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 				ClearPagePrivate(ow->pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 				put_page(ow->pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 				kfree(wrp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 			end_page_writeback(ow->pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 			unlock_page(ow->pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 		for (i = 0; i < ow->npages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 			if (PagePrivate(ow->pages[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 				wrp = (struct orangefs_write_range *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 				    page_private(ow->pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 				ClearPagePrivate(ow->pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 				put_page(ow->pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 				kfree(wrp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 			end_page_writeback(ow->pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 			unlock_page(ow->pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) static int orangefs_writepages_callback(struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157)     struct writeback_control *wbc, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 	struct orangefs_writepages *ow = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 	struct orangefs_write_range *wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 	if (!PagePrivate(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 		unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 		/* It's not private so there's nothing to write, right? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 		printk("writepages_callback not private!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 	wr = (struct orangefs_write_range *)page_private(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 	ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 	if (ow->npages == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 		ow->off = wr->pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 		ow->len = wr->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 		ow->uid = wr->uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 		ow->gid = wr->gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 		ow->pages[ow->npages++] = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 	if (!uid_eq(ow->uid, wr->uid) || !gid_eq(ow->gid, wr->gid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 		orangefs_writepages_work(ow, wbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 		ow->npages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 		ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 	if (ow->off + ow->len == wr->pos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 		ow->len += wr->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 		ow->pages[ow->npages++] = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 	if (ret == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 		if (ow->npages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 			orangefs_writepages_work(ow, wbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 			ow->npages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 		ret = orangefs_writepage_locked(page, wbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 		mapping_set_error(page->mapping, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 		unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 		end_page_writeback(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 		if (ow->npages == ow->maxpages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 			orangefs_writepages_work(ow, wbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 			ow->npages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) static int orangefs_writepages(struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214)     struct writeback_control *wbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 	struct orangefs_writepages *ow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 	struct blk_plug plug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 	ow = kzalloc(sizeof(struct orangefs_writepages), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 	if (!ow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 	ow->maxpages = orangefs_bufmap_size_query()/PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 	ow->pages = kcalloc(ow->maxpages, sizeof(struct page *), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 	if (!ow->pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 		kfree(ow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 	ow->bv = kcalloc(ow->maxpages, sizeof(struct bio_vec), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 	if (!ow->bv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 		kfree(ow->pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 		kfree(ow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 	blk_start_plug(&plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 	ret = write_cache_pages(mapping, wbc, orangefs_writepages_callback, ow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 	if (ow->npages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 		ret = orangefs_writepages_work(ow, wbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 	blk_finish_plug(&plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 	kfree(ow->pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 	kfree(ow->bv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 	kfree(ow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) static int orangefs_launder_page(struct page *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 
/*
 * ->readpage: fill @page from the server, then opportunistically copy
 * any extra data the server returned (up to read_size bytes, held in
 * the orangefs shared-memory slot buffer_index) into the following
 * pages of the mapping.
 *
 * The page arrives locked and is unlocked here on every path.
 * Returns 0 on success or the negative error from wait_for_direct_io().
 */
static int orangefs_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct iov_iter iter;
	struct bio_vec bv;
	ssize_t ret;
	loff_t off; /* offset into this page */
	pgoff_t index; /* which page */
	struct page *next_page;
	char *kaddr;
	loff_t read_size;
	int buffer_index = -1; /* orangefs shared memory slot */
	int slot_index;   /* index into slot */
	int remaining;

	/*
	 * Get up to this many bytes from Orangefs at a time and try
	 * to fill them into the page cache at once. Tests with dd made
	 * this seem like a reasonable static number, if there was
	 * interest perhaps this number could be made setable through
	 * sysfs...
	 */
	read_size = 524288;

	/* Push any pending dirty data out before reading over it. */
	if (PageDirty(page))
		orangefs_launder_page(page);

	off = page_offset(page);
	index = off >> PAGE_SHIFT;
	bv.bv_page = page;
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;
	iov_iter_bvec(&iter, READ, &bv, 1, PAGE_SIZE);

	/*
	 * Ask for read_size bytes; only the first PAGE_SIZE land in the
	 * iter/page — the rest stay in the shared-memory slot returned
	 * through buffer_index for the read-ahead loop below.
	 */
	ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, &off, &iter,
	    read_size, inode->i_size, NULL, &buffer_index, file);
	remaining = ret;
	/* this will only zero remaining unread portions of the page data */
	iov_iter_zero(~0U, &iter);
	/* takes care of potential aliasing */
	flush_dcache_page(page);
	if (ret < 0) {
		SetPageError(page);
		unlock_page(page);
		goto out;
	} else {
		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);
		ret = 0;
	}
	/* unlock the page after the ->readpage() routine completes */
	unlock_page(page);

	/* Spill any surplus bytes from the slot into subsequent pages. */
	if (remaining > PAGE_SIZE) {
		slot_index = 0;
		while ((remaining - PAGE_SIZE) >= PAGE_SIZE) {
			remaining -= PAGE_SIZE;
			/*
			 * It is an optimization to try and fill more than one
			 * page... by now we've already gotten the single
			 * page we were after, if stuff doesn't seem to
			 * be going our way at this point just return
			 * and hope for the best.
			 *
			 * If we look for pages and they're already there is
			 * one reason to give up, and if they're not there
			 * and we can't create them is another reason.
			 */

			index++;
			slot_index++;
			next_page = find_get_page(inode->i_mapping, index);
			if (next_page) {
				gossip_debug(GOSSIP_FILE_DEBUG,
					"%s: found next page, quitting\n",
					__func__);
				put_page(next_page);
				goto out;
			}
			next_page = find_or_create_page(inode->i_mapping,
							index,
							GFP_KERNEL);
			/*
			 * I've never hit this, leave it as a printk for
			 * now so it will be obvious.
			 */
			if (!next_page) {
				printk("%s: can't create next page, quitting\n",
					__func__);
				goto out;
			}
			/* Copy one page's worth out of the shared slot. */
			kaddr = kmap_atomic(next_page);
			orangefs_bufmap_page_fill(kaddr,
						buffer_index,
						slot_index);
			kunmap_atomic(kaddr);
			SetPageUptodate(next_page);
			unlock_page(next_page);
			put_page(next_page);
		}
	}

out:
	/* Release the shared-memory slot if the read handed us one. */
	if (buffer_index != -1)
		orangefs_bufmap_put(buffer_index);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 
/*
 * ->write_begin: lock the page covering @pos and make sure it carries
 * an orangefs_write_range describing the pending write.
 *
 * An existing range is extended in place when the new write is
 * byte-contiguous and made by the same fsuid/fsgid; otherwise the page
 * is laundered first and a fresh range [pos, pos+len) is attached.
 * On success the locked page is returned through *pagep.
 *
 * NOTE(review): the error returns after grab_cache_page_write_begin()
 * (launder failure, kmalloc failure) leave the page locked and
 * referenced — this looks like a page-lock/reference leak; confirm
 * against the ->write_begin contract before relying on these paths.
 */
static int orangefs_write_begin(struct file *file,
    struct address_space *mapping,
    loff_t pos, unsigned len, unsigned flags, struct page **pagep,
    void **fsdata)
{
	struct orangefs_write_range *wr;
	struct page *page;
	pgoff_t index;
	int ret;

	index = pos >> PAGE_SHIFT;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	*pagep = page;

	if (PageDirty(page) && !PagePrivate(page)) {
		/*
		 * Should be impossible.  If it happens, launder the page
		 * since we don't know what's dirty.  This will WARN in
		 * orangefs_writepage_locked.
		 */
		ret = orangefs_launder_page(page);
		if (ret)
			return ret;
	}
	if (PagePrivate(page)) {
		struct orangefs_write_range *wr;
		wr = (struct orangefs_write_range *)page_private(page);
		if (wr->pos + wr->len == pos &&
		    uid_eq(wr->uid, current_fsuid()) &&
		    gid_eq(wr->gid, current_fsgid())) {
			/* Contiguous same-owner write: grow the range. */
			wr->len += len;
			goto okay;
		} else {
			/* Incompatible range: write it out first. */
			ret = orangefs_launder_page(page);
			if (ret)
				return ret;
		}
	}

	/* Attach a fresh write range for this write. */
	wr = kmalloc(sizeof *wr, GFP_KERNEL);
	if (!wr)
		return -ENOMEM;

	wr->pos = pos;
	wr->len = len;
	wr->uid = current_fsuid();
	wr->gid = current_fsgid();
	attach_page_private(page, wr);
okay:
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 
/*
 * ->write_end: finish the write started by orangefs_write_begin.
 *
 * Updates i_size when the write extended the file, zero-fills the
 * stale tail of a not-uptodate page after a short copy, dirties and
 * releases the page, and marks the inode dirty.  Returns the number of
 * bytes actually copied.
 */
static int orangefs_write_end(struct file *file, struct address_space *mapping,
    loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	loff_t last_pos = pos + copied;

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size)
		i_size_write(inode, last_pos);

	/* zero the stale part of the page if we did a short copy */
	if (!PageUptodate(page)) {
		unsigned from = pos & (PAGE_SIZE - 1);
		if (copied < len) {
			zero_user(page, from + copied, len - copied);
		}
		/* Set fully written pages uptodate. */
		if (pos == page_offset(page) &&
		    (len == PAGE_SIZE || pos + len == inode->i_size)) {
			/* Page is fully described; zero the rest and
			 * declare it uptodate. */
			zero_user_segment(page, from + copied, PAGE_SIZE);
			SetPageUptodate(page);
		}
	}

	set_page_dirty(page);
	unlock_page(page);
	put_page(page);

	mark_inode_dirty_sync(file_inode(file));
	return copied;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) static void orangefs_invalidatepage(struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 				 unsigned int offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 				 unsigned int length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 	struct orangefs_write_range *wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	wr = (struct orangefs_write_range *)page_private(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	if (offset == 0 && length == PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 		kfree(detach_page_private(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	/* write range entirely within invalidate range (or equal) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	} else if (page_offset(page) + offset <= wr->pos &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	    wr->pos + wr->len <= page_offset(page) + offset + length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 		kfree(detach_page_private(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 		/* XXX is this right? only caller in fs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 		cancel_dirty_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	/* invalidate range chops off end of write range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	} else if (wr->pos < page_offset(page) + offset &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	    wr->pos + wr->len <= page_offset(page) + offset + length &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	     page_offset(page) + offset < wr->pos + wr->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 		size_t x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 		x = wr->pos + wr->len - (page_offset(page) + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 		WARN_ON(x > wr->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 		wr->len -= x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 		wr->uid = current_fsuid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 		wr->gid = current_fsgid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	/* invalidate range chops off beginning of write range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	} else if (page_offset(page) + offset <= wr->pos &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	    page_offset(page) + offset + length < wr->pos + wr->len &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	    wr->pos < page_offset(page) + offset + length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 		size_t x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 		x = page_offset(page) + offset + length - wr->pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 		WARN_ON(x > wr->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 		wr->pos += x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 		wr->len -= x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 		wr->uid = current_fsuid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 		wr->gid = current_fsgid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	/* invalidate range entirely within write range (punch hole) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	} else if (wr->pos < page_offset(page) + offset &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	    page_offset(page) + offset + length < wr->pos + wr->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 		/* XXX what do we do here... should not WARN_ON */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 		WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 		/* punch hole */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 		 * should we just ignore this and write it out anyway?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 		 * it hardly makes sense
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	/* non-overlapping ranges */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 		/* WARN if they do overlap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 		if (!((page_offset(page) + offset + length <= wr->pos) ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 		    (wr->pos + wr->len <= page_offset(page) + offset))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 			WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 			printk("invalidate range offset %llu length %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 			    page_offset(page) + offset, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 			printk("write range offset %llu length %zu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 			    wr->pos, wr->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	 * Above there are returns where wr is freed or where we WARN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	 * Thus the following runs if wr was modified above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	orangefs_launder_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) static int orangefs_releasepage(struct page *page, gfp_t foo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	return !PagePrivate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) static void orangefs_freepage(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	kfree(detach_page_private(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) static int orangefs_launder_page(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	int r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	struct writeback_control wbc = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 		.sync_mode = WB_SYNC_ALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 		.nr_to_write = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	wait_on_page_writeback(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	if (clear_page_dirty_for_io(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 		r = orangefs_writepage_locked(page, &wbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 		end_page_writeback(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) static ssize_t orangefs_direct_IO(struct kiocb *iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 				  struct iov_iter *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	 * Comment from original do_readv_writev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 	 * Common entry point for read/write/readv/writev
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	 * This function will dispatch it to either the direct I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	 * or buffered I/O path depending on the mount options and/or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	 * augmented/extended metadata attached to the file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	 * Note: File extended attributes override any mount options.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	struct file *file = iocb->ki_filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	loff_t pos = iocb->ki_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	enum ORANGEFS_io_type type = iov_iter_rw(iter) == WRITE ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557)             ORANGEFS_IO_WRITE : ORANGEFS_IO_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	loff_t *offset = &pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 	struct inode *inode = file->f_mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	struct orangefs_khandle *handle = &orangefs_inode->refn.khandle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	size_t count = iov_iter_count(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	ssize_t total_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	ssize_t ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	gossip_debug(GOSSIP_FILE_DEBUG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 		"%s-BEGIN(%pU): count(%d) after estimate_max_iovecs.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 		__func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 		handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 		(int)count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	if (type == ORANGEFS_IO_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 		gossip_debug(GOSSIP_FILE_DEBUG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 			     "%s(%pU): proceeding with offset : %llu, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 			     "size %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 			     __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 			     handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 			     llu(*offset),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 			     (int)count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	if (count == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	while (iov_iter_count(iter)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 		size_t each_count = iov_iter_count(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 		size_t amt_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 		i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 		/* how much to transfer in this loop iteration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 		if (each_count > orangefs_bufmap_size_query())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 			each_count = orangefs_bufmap_size_query();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 		gossip_debug(GOSSIP_FILE_DEBUG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 			     "%s(%pU): size of each_count(%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 			     __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 			     handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 			     (int)each_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 		gossip_debug(GOSSIP_FILE_DEBUG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 			     "%s(%pU): BEFORE wait_for_io: offset is %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 			     __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 			     handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 			     (int)*offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 		ret = wait_for_direct_io(type, inode, offset, iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 				each_count, 0, NULL, NULL, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 		gossip_debug(GOSSIP_FILE_DEBUG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 			     "%s(%pU): return from wait_for_io:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 			     __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 			     handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 			     (int)ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 		*offset += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 		total_count += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 		amt_complete = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 		gossip_debug(GOSSIP_FILE_DEBUG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 			     "%s(%pU): AFTER wait_for_io: offset is %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 			     __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 			     handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 			     (int)*offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 		 * if we got a short I/O operations,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 		 * fall out and return what we got so far
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 		if (amt_complete < each_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	} /*end while */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	if (total_count > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 		ret = total_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 		if (type == ORANGEFS_IO_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 			file_accessed(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 			file_update_time(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 			if (*offset > i_size_read(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 				i_size_write(inode, *offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	gossip_debug(GOSSIP_FILE_DEBUG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 		     "%s(%pU): Value(%d) returned.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 		     __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 		     handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 		     (int)ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) /** ORANGEFS2 implementation of address space operations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) static const struct address_space_operations orangefs_address_operations = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	.writepage = orangefs_writepage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	.readpage = orangefs_readpage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	.writepages = orangefs_writepages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	.set_page_dirty = __set_page_dirty_nobuffers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	.write_begin = orangefs_write_begin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 	.write_end = orangefs_write_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	.invalidatepage = orangefs_invalidatepage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	.releasepage = orangefs_releasepage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	.freepage = orangefs_freepage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	.launder_page = orangefs_launder_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	.direct_IO = orangefs_direct_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) vm_fault_t orangefs_page_mkwrite(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	struct page *page = vmf->page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	struct inode *inode = file_inode(vmf->vma->vm_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	unsigned long *bitlock = &orangefs_inode->bitlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	vm_fault_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 	struct orangefs_write_range *wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	sb_start_pagefault(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	if (wait_on_bit(bitlock, 1, TASK_KILLABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 		ret = VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	lock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	if (PageDirty(page) && !PagePrivate(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 		 * Should be impossible.  If it happens, launder the page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 		 * since we don't know what's dirty.  This will WARN in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 		 * orangefs_writepage_locked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 		if (orangefs_launder_page(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 			ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	if (PagePrivate(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 		wr = (struct orangefs_write_range *)page_private(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 		if (uid_eq(wr->uid, current_fsuid()) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 		    gid_eq(wr->gid, current_fsgid())) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 			wr->pos = page_offset(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 			wr->len = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 			goto okay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 			if (orangefs_launder_page(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 				ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	wr = kmalloc(sizeof *wr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	if (!wr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 		ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	wr->pos = page_offset(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	wr->len = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	wr->uid = current_fsuid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	wr->gid = current_fsgid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	attach_page_private(page, wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) okay:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	file_update_time(vmf->vma->vm_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	if (page->mapping != inode->i_mapping) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 		unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		ret = VM_FAULT_LOCKED|VM_FAULT_NOPAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	 * We mark the page dirty already here so that when freeze is in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	 * progress, we are guaranteed that writeback during freezing will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	 * see the dirty page and writeprotect it again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	set_page_dirty(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	wait_for_stable_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	ret = VM_FAULT_LOCKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	sb_end_pagefault(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) static int orangefs_setattr_size(struct inode *inode, struct iattr *iattr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	struct orangefs_kernel_op_s *new_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	loff_t orig_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	gossip_debug(GOSSIP_INODE_DEBUG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 		     "%s: %pU: Handle is %pU | fs_id %d | size is %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 		     __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		     get_khandle_from_ino(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		     &orangefs_inode->refn.khandle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 		     orangefs_inode->refn.fs_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 		     iattr->ia_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	/* Ensure that we have a up to date size, so we know if it changed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	ret = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	if (ret == -ESTALE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 		ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		gossip_err("%s: orangefs_inode_getattr failed, ret:%d:.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		    __func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	orig_size = i_size_read(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	/* This is truncate_setsize in a different order. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	truncate_pagecache(inode, iattr->ia_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	i_size_write(inode, iattr->ia_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	if (iattr->ia_size > orig_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		pagecache_isize_extended(inode, orig_size, iattr->ia_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	new_op = op_alloc(ORANGEFS_VFS_OP_TRUNCATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	if (!new_op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	new_op->upcall.req.truncate.refn = orangefs_inode->refn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	new_op->upcall.req.truncate.size = (__s64) iattr->ia_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	ret = service_operation(new_op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		__func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		get_interruptible_flag(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	 * the truncate has no downcall members to retrieve, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	 * the status value tells us if it went through ok or not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	gossip_debug(GOSSIP_INODE_DEBUG, "%s: ret:%d:\n", __func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	op_release(new_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	if (orig_size != i_size_read(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) int __orangefs_setattr(struct inode *inode, struct iattr *iattr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	if (iattr->ia_valid & ATTR_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		if (iattr->ia_mode & (S_ISVTX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 			if (is_root_handle(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 				 * allow sticky bit to be set on root (since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 				 * it shows up that way by default anyhow),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 				 * but don't show it to the server
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 				iattr->ia_mode -= S_ISVTX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 				gossip_debug(GOSSIP_UTILS_DEBUG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 					     "User attempted to set sticky bit on non-root directory; returning EINVAL.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 				ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		if (iattr->ia_mode & (S_ISUID)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 			gossip_debug(GOSSIP_UTILS_DEBUG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 				     "Attempting to set setuid bit (not supported); returning EINVAL.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	if (iattr->ia_valid & ATTR_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		ret = orangefs_setattr_size(inode, iattr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	spin_lock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	if (ORANGEFS_I(inode)->attr_valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		if (uid_eq(ORANGEFS_I(inode)->attr_uid, current_fsuid()) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		    gid_eq(ORANGEFS_I(inode)->attr_gid, current_fsgid())) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 			ORANGEFS_I(inode)->attr_valid = iattr->ia_valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 			spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 			write_inode_now(inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 			goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		ORANGEFS_I(inode)->attr_valid = iattr->ia_valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		ORANGEFS_I(inode)->attr_uid = current_fsuid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		ORANGEFS_I(inode)->attr_gid = current_fsgid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	setattr_copy(inode, iattr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	mark_inode_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	if (iattr->ia_valid & ATTR_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		/* change mod on a file that has ACLs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		ret = posix_acl_chmod(inode, inode->i_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872)  * Change attributes of an object referenced by dentry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) int orangefs_setattr(struct dentry *dentry, struct iattr *iattr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	gossip_debug(GOSSIP_INODE_DEBUG, "__orangefs_setattr: called on %pd\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	    dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	ret = setattr_prepare(dentry, iattr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	        goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	ret = __orangefs_setattr(d_inode(dentry), iattr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	sync_inode_metadata(d_inode(dentry), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_setattr: returning %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	    ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891)  * Obtain attributes of an object given a dentry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) int orangefs_getattr(const struct path *path, struct kstat *stat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		     u32 request_mask, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	struct inode *inode = path->dentry->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	gossip_debug(GOSSIP_INODE_DEBUG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		     "orangefs_getattr: called on %pd mask %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		     path->dentry, request_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	ret = orangefs_inode_getattr(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	    request_mask & STATX_SIZE ? ORANGEFS_GETATTR_SIZE : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		generic_fillattr(inode, stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		/* override block size reported to stat */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		if (!(request_mask & STATX_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 			stat->result_mask &= ~STATX_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		stat->attributes_mask = STATX_ATTR_IMMUTABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		    STATX_ATTR_APPEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		if (inode->i_flags & S_IMMUTABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 			stat->attributes |= STATX_ATTR_IMMUTABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		if (inode->i_flags & S_APPEND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 			stat->attributes |= STATX_ATTR_APPEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) int orangefs_permission(struct inode *inode, int mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	if (mask & MAY_NOT_BLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		return -ECHILD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	gossip_debug(GOSSIP_INODE_DEBUG, "%s: refreshing\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	/* Make sure the permission (and other common attrs) are up to date. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	ret = orangefs_inode_getattr(inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	return generic_permission(inode, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) int orangefs_update_time(struct inode *inode, struct timespec64 *time, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	struct iattr iattr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_update_time: %pU\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	    get_khandle_from_ino(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	generic_update_time(inode, time, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	memset(&iattr, 0, sizeof iattr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946)         if (flags & S_ATIME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		iattr.ia_valid |= ATTR_ATIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	if (flags & S_CTIME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		iattr.ia_valid |= ATTR_CTIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	if (flags & S_MTIME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		iattr.ia_valid |= ATTR_MTIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	return __orangefs_setattr(inode, &iattr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) /* ORANGEFS2 implementation of VFS inode operations for files */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) static const struct inode_operations orangefs_file_inode_operations = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	.get_acl = orangefs_get_acl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	.set_acl = orangefs_set_acl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	.setattr = orangefs_setattr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	.getattr = orangefs_getattr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	.listxattr = orangefs_listxattr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	.permission = orangefs_permission,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	.update_time = orangefs_update_time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) static int orangefs_init_iops(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	inode->i_mapping->a_ops = &orangefs_address_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	switch (inode->i_mode & S_IFMT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	case S_IFREG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		inode->i_op = &orangefs_file_inode_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		inode->i_fop = &orangefs_file_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	case S_IFLNK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		inode->i_op = &orangefs_symlink_inode_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	case S_IFDIR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		inode->i_op = &orangefs_dir_inode_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		inode->i_fop = &orangefs_dir_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		gossip_debug(GOSSIP_INODE_DEBUG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 			     "%s: unsupported mode\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 			     __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993)  * Given an ORANGEFS object identifier (fsid, handle), convert it into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994)  * a ino_t type that will be used as a hash-index from where the handle will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995)  * be searched for in the VFS hash table of inodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) static inline ino_t orangefs_handle_hash(struct orangefs_object_kref *ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	if (!ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	return orangefs_khandle_to_ino(&(ref->khandle));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)  * Called to set up an inode from iget5_locked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) static int orangefs_set_inode(struct inode *inode, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	struct orangefs_object_kref *ref = (struct orangefs_object_kref *) data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	ORANGEFS_I(inode)->refn.fs_id = ref->fs_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	ORANGEFS_I(inode)->refn.khandle = ref->khandle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	ORANGEFS_I(inode)->attr_valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	hash_init(ORANGEFS_I(inode)->xattr_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	ORANGEFS_I(inode)->mapping_time = jiffies - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	ORANGEFS_I(inode)->bitlock = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)  * Called to determine if handles match.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) static int orangefs_test_inode(struct inode *inode, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	struct orangefs_object_kref *ref = (struct orangefs_object_kref *) data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	struct orangefs_inode_s *orangefs_inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	orangefs_inode = ORANGEFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	/* test handles and fs_ids... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	return (!ORANGEFS_khandle_cmp(&(orangefs_inode->refn.khandle),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 				&(ref->khandle)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 			orangefs_inode->refn.fs_id == ref->fs_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)  * Front-end to lookup the inode-cache maintained by the VFS using the ORANGEFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)  * file handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)  * @sb: the file system super block instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)  * @ref: The ORANGEFS object for which we are trying to locate an inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) struct inode *orangefs_iget(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		struct orangefs_object_kref *ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	struct inode *inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	unsigned long hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	hash = orangefs_handle_hash(ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	inode = iget5_locked(sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 			hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 			orangefs_test_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 			orangefs_set_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 			ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	if (!inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	if (!(inode->i_state & I_NEW))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		return inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	error = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_NEW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		iget_failed(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		return ERR_PTR(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	inode->i_ino = hash;	/* needed for stat etc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	orangefs_init_iops(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	unlock_new_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	gossip_debug(GOSSIP_INODE_DEBUG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		     "iget handle %pU, fsid %d hash %ld i_ino %lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		     &ref->khandle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		     ref->fs_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		     hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		     inode->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	return inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)  * Allocate an inode for a newly created file and insert it into the inode hash.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) struct inode *orangefs_new_inode(struct super_block *sb, struct inode *dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		int mode, dev_t dev, struct orangefs_object_kref *ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	unsigned long hash = orangefs_handle_hash(ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	gossip_debug(GOSSIP_INODE_DEBUG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		     "%s:(sb is %p | MAJOR(dev)=%u | MINOR(dev)=%u mode=%o)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		     __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		     sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		     MAJOR(dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		     MINOR(dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		     mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	inode = new_inode(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	if (!inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	orangefs_set_inode(inode, ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	inode->i_ino = hash;	/* needed for stat etc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	error = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_NEW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		goto out_iput;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	orangefs_init_iops(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	inode->i_rdev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	error = insert_inode_locked4(inode, hash, orangefs_test_inode, ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	if (error < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		goto out_iput;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	gossip_debug(GOSSIP_INODE_DEBUG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		     "Initializing ACL's for inode %pU\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		     get_khandle_from_ino(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	orangefs_init_acl(inode, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	return inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) out_iput:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	return ERR_PTR(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }