// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/file.h>
#include <linux/fsverity.h>
#include <linux/gfp.h>
#include <linux/kobject.h>
#include <linux/ktime.h>
#include <linux/lz4.h>
#include <linux/mm.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "data_mgmt.h"
#include "format.h"
#include "integrity.h"
#include "sysfs.h"
#include "verity.h"

static int incfs_scan_metadata_chain(struct data_file *df);

static void log_wake_up_all(struct work_struct *work)
{
	struct delayed_work *dw = container_of(work, struct delayed_work, work);
	struct read_log *rl = container_of(dw, struct read_log, ml_wakeup_work);

	wake_up_all(&rl->ml_notif_wq);
}

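/*
 * Delayed-work callback that releases the shared zstd decompression
 * workspace. It is (re)scheduled from zstd_decompress_safe(), so the
 * workspace is only freed after a period of decompression inactivity.
 */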
static void zstd_free_workspace(struct work_struct *work)
{
	struct delayed_work *dw = container_of(work, struct delayed_work, work);
	struct mount_info *mi =
		container_of(dw, struct mount_info, mi_zstd_cleanup_work);

	mutex_lock(&mi->mi_zstd_workspace_mutex);
	kvfree(mi->mi_zstd_workspace);
	mi->mi_zstd_workspace = NULL;
	mi->mi_zstd_stream = NULL;
	mutex_unlock(&mi->mi_zstd_workspace_mutex);
}

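/*
 * Allocate and initialise a mount_info for a new incfs mount: take
 * references on the backing directory path and the mounting credentials,
 * set up locks, wait queues and delayed work, create the optional sysfs
 * node, and apply the initial mount options via incfs_realloc_mount_info().
 */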
struct mount_info *incfs_alloc_mount_info(struct super_block *sb,
					  struct mount_options *options,
					  struct path *backing_dir_path)
{
	struct mount_info *mi = NULL;
	int error = 0;
	struct incfs_sysfs_node *node;

	mi = kzalloc(sizeof(*mi), GFP_NOFS);
	if (!mi)
		return ERR_PTR(-ENOMEM);

	mi->mi_sb = sb;
	mi->mi_backing_dir_path = *backing_dir_path;
	mi->mi_owner = get_current_cred();
	path_get(&mi->mi_backing_dir_path);
	mutex_init(&mi->mi_dir_struct_mutex);
	init_waitqueue_head(&mi->mi_pending_reads_notif_wq);
	init_waitqueue_head(&mi->mi_log.ml_notif_wq);
	init_waitqueue_head(&mi->mi_blocks_written_notif_wq);
	atomic_set(&mi->mi_blocks_written, 0);
	INIT_DELAYED_WORK(&mi->mi_log.ml_wakeup_work, log_wake_up_all);
	spin_lock_init(&mi->mi_log.rl_lock);
	spin_lock_init(&mi->pending_read_lock);
	INIT_LIST_HEAD(&mi->mi_reads_list_head);
	spin_lock_init(&mi->mi_per_uid_read_timeouts_lock);
	mutex_init(&mi->mi_zstd_workspace_mutex);
	INIT_DELAYED_WORK(&mi->mi_zstd_cleanup_work, zstd_free_workspace);
	mutex_init(&mi->mi_le_mutex);

	node = incfs_add_sysfs_node(options->sysfs_name, mi);
	if (IS_ERR(node)) {
		error = PTR_ERR(node);
		goto err;
	}
	mi->mi_sysfs_node = node;

	error = incfs_realloc_mount_info(mi, options);
	if (error)
		goto err;

	return mi;

err:
	incfs_free_mount_info(mi);
	return ERR_PTR(error);
}

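/*
 * Apply (possibly updated) mount options to an existing mount_info.
 * Resizes the read-log ring buffer and adds, removes or renames the
 * sysfs node as needed before storing the new options.
 */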
int incfs_realloc_mount_info(struct mount_info *mi,
			     struct mount_options *options)
{
	void *new_buffer = NULL;
	void *old_buffer;
	size_t new_buffer_size = 0;

	if (options->read_log_pages != mi->mi_options.read_log_pages) {
		struct read_log_state log_state;
		/*
		 * Even though having two buffers allocated at once isn't
		 * usually good, allocating a multipage buffer under a spinlock
		 * is even worse, so let's optimize for the shorter lock
		 * duration. It's not the end of the world if we fail to
		 * increase the buffer size anyway.
		 */
		if (options->read_log_pages > 0) {
			new_buffer_size = PAGE_SIZE * options->read_log_pages;
			new_buffer = kzalloc(new_buffer_size, GFP_NOFS);
			if (!new_buffer)
				return -ENOMEM;
		}

		spin_lock(&mi->mi_log.rl_lock);
		old_buffer = mi->mi_log.rl_ring_buf;
		mi->mi_log.rl_ring_buf = new_buffer;
		mi->mi_log.rl_size = new_buffer_size;
		log_state = (struct read_log_state){
			.generation_id = mi->mi_log.rl_head.generation_id + 1,
		};
		mi->mi_log.rl_head = log_state;
		mi->mi_log.rl_tail = log_state;
		spin_unlock(&mi->mi_log.rl_lock);

		kfree(old_buffer);
	}

	if (options->sysfs_name && !mi->mi_sysfs_node)
		mi->mi_sysfs_node = incfs_add_sysfs_node(options->sysfs_name,
							 mi);
	else if (!options->sysfs_name && mi->mi_sysfs_node) {
		incfs_free_sysfs_node(mi->mi_sysfs_node);
		mi->mi_sysfs_node = NULL;
	} else if (options->sysfs_name &&
		   strcmp(options->sysfs_name,
			  kobject_name(&mi->mi_sysfs_node->isn_sysfs_node))) {
		incfs_free_sysfs_node(mi->mi_sysfs_node);
		mi->mi_sysfs_node = incfs_add_sysfs_node(options->sysfs_name,
							 mi);
	}

	if (IS_ERR(mi->mi_sysfs_node)) {
		int err = PTR_ERR(mi->mi_sysfs_node);

		mi->mi_sysfs_node = NULL;
		return err;
	}

	mi->mi_options = *options;
	return 0;
}

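/*
 * Tear down a mount_info: flush pending delayed work, drop directory and
 * path references, release the owner credentials and free all per-mount
 * buffers.
 */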
void incfs_free_mount_info(struct mount_info *mi)
{
	int i;

	if (!mi)
		return;

	flush_delayed_work(&mi->mi_log.ml_wakeup_work);
	flush_delayed_work(&mi->mi_zstd_cleanup_work);

	dput(mi->mi_index_dir);
	dput(mi->mi_incomplete_dir);
	path_put(&mi->mi_backing_dir_path);
	mutex_destroy(&mi->mi_dir_struct_mutex);
	mutex_destroy(&mi->mi_zstd_workspace_mutex);
	put_cred(mi->mi_owner);
	kfree(mi->mi_log.rl_ring_buf);
	for (i = 0; i < ARRAY_SIZE(mi->pseudo_file_xattr); ++i)
		kfree(mi->pseudo_file_xattr[i].data);
	kfree(mi->mi_per_uid_read_timeouts);
	incfs_free_sysfs_node(mi->mi_sysfs_node);
	kfree(mi);
}

static void data_file_segment_init(struct data_file_segment *segment)
{
	init_waitqueue_head(&segment->new_data_arrival_wq);
	init_rwsem(&segment->rwsem);
	INIT_LIST_HEAD(&segment->reads_list_head);
}

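/*
 * Convert a file ID to a newly allocated NUL-terminated hex string.
 * The caller is responsible for kfree()ing the result.
 */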
char *file_id_to_str(incfs_uuid_t id)
{
	char *result = kmalloc(1 + sizeof(id.bytes) * 2, GFP_NOFS);
	char *end;

	if (!result)
		return NULL;

	end = bin2hex(result, id.bytes, sizeof(id.bytes));
	*end = 0;
	return result;
}

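/*
 * Look up @name under @parent in the backing filesystem, taking the
 * parent inode lock around lookup_one_len(). Returns a dentry reference
 * or an ERR_PTR on failure.
 */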
struct dentry *incfs_lookup_dentry(struct dentry *parent, const char *name)
{
	struct inode *inode;
	struct dentry *result = NULL;

	if (!parent)
		return ERR_PTR(-EFAULT);

	inode = d_inode(parent);
	inode_lock_nested(inode, I_MUTEX_PARENT);
	result = lookup_one_len(name, parent, strlen(name));
	inode_unlock(inode);

	if (IS_ERR(result))
		pr_warn("%s err:%ld\n", __func__, PTR_ERR(result));

	return result;
}

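/*
 * Handle a file flagged INCFS_FILE_MAPPED: look up the underlying file in
 * the .index directory by ID, open it with the mount owner's credentials
 * and build a data_file for it. The original header's metadata offset is
 * reused as the offset of the mapped range within that backing file.
 */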
static struct data_file *handle_mapped_file(struct mount_info *mi,
					    struct data_file *df)
{
	char *file_id_str;
	struct dentry *index_file_dentry;
	struct path path;
	struct file *bf;
	struct data_file *result = NULL;
	const struct cred *old_cred;

	file_id_str = file_id_to_str(df->df_id);
	if (!file_id_str)
		return ERR_PTR(-ENOENT);

	index_file_dentry = incfs_lookup_dentry(mi->mi_index_dir,
						file_id_str);
	kfree(file_id_str);
	if (!index_file_dentry)
		return ERR_PTR(-ENOENT);
	if (IS_ERR(index_file_dentry))
		return (struct data_file *)index_file_dentry;
	if (!d_really_is_positive(index_file_dentry)) {
		result = ERR_PTR(-ENOENT);
		goto out;
	}

	path = (struct path) {
		.mnt = mi->mi_backing_dir_path.mnt,
		.dentry = index_file_dentry
	};

	old_cred = override_creds(mi->mi_owner);
	bf = dentry_open(&path, O_RDWR | O_NOATIME | O_LARGEFILE,
			 current_cred());
	revert_creds(old_cred);

	if (IS_ERR(bf)) {
		result = (struct data_file *)bf;
		goto out;
	}

	result = incfs_open_data_file(mi, bf);
	fput(bf);
	if (IS_ERR(result))
		goto out;

	result->df_mapped_offset = df->df_metadata_off;

out:
	dput(index_file_dentry);
	return result;
}

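/*
 * Build a data_file for an opened backing file: read and validate the
 * incfs header, initialise the per-segment state, redirect mapped files
 * to their index file, and hand off to incfs_scan_metadata_chain() to
 * load the remaining on-disk metadata. Returns the data_file or an
 * ERR_PTR on failure.
 */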
struct data_file *incfs_open_data_file(struct mount_info *mi, struct file *bf)
{
	struct data_file *df = NULL;
	struct backing_file_context *bfc = NULL;
	int md_records;
	u64 size;
	int error = 0;
	int i;

	if (!bf || !mi)
		return ERR_PTR(-EFAULT);

	if (!S_ISREG(bf->f_inode->i_mode))
		return ERR_PTR(-EBADF);

	bfc = incfs_alloc_bfc(mi, bf);
	if (IS_ERR(bfc))
		return ERR_CAST(bfc);

	df = kzalloc(sizeof(*df), GFP_NOFS);
	if (!df) {
		error = -ENOMEM;
		goto out;
	}

	mutex_init(&df->df_enable_verity);

	df->df_backing_file_context = bfc;
	df->df_mount_info = mi;
	for (i = 0; i < ARRAY_SIZE(df->df_segments); i++)
		data_file_segment_init(&df->df_segments[i]);

	error = incfs_read_file_header(bfc, &df->df_metadata_off, &df->df_id,
				       &size, &df->df_header_flags);

	if (error)
		goto out;

	df->df_size = size;
	if (size > 0)
		df->df_data_block_count = get_blocks_count_for_size(size);

	if (df->df_header_flags & INCFS_FILE_MAPPED) {
		struct data_file *mapped_df = handle_mapped_file(mi, df);

		incfs_free_data_file(df);
		return mapped_df;
	}

	md_records = incfs_scan_metadata_chain(df);
	if (md_records < 0)
		error = md_records;

out:
	if (error) {
		incfs_free_bfc(bfc);
		if (df)
			df->df_backing_file_context = NULL;
		incfs_free_data_file(df);
		return ERR_PTR(error);
	}
	return df;
}

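/*
 * Free a data_file. If the number of written data or hash blocks changed
 * while the file was open, try to persist the new counts to the backing
 * file's status record first (best effort; a failure only logs a warning).
 */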
void incfs_free_data_file(struct data_file *df)
{
	u32 data_blocks_written, hash_blocks_written;

	if (!df)
		return;

	data_blocks_written = atomic_read(&df->df_data_blocks_written);
	hash_blocks_written = atomic_read(&df->df_hash_blocks_written);

	if (data_blocks_written != df->df_initial_data_blocks_written ||
	    hash_blocks_written != df->df_initial_hash_blocks_written) {
		struct backing_file_context *bfc = df->df_backing_file_context;
		int error = -1;

		if (bfc && !mutex_lock_interruptible(&bfc->bc_mutex)) {
			error = incfs_write_status_to_backing_file(
						df->df_backing_file_context,
						df->df_status_offset,
						data_blocks_written,
						hash_blocks_written);
			mutex_unlock(&bfc->bc_mutex);
		}

		if (error)
			/* Nothing can be done, just warn */
			pr_warn("incfs: failed to write status to backing file\n");
	}

	incfs_free_mtree(df->df_hash_tree);
	incfs_free_bfc(df->df_backing_file_context);
	kfree(df->df_signature);
	kfree(df->df_verity_file_digest.data);
	kfree(df->df_verity_signature);
	mutex_destroy(&df->df_enable_verity);
	kfree(df);
}

int make_inode_ready_for_data_ops(struct mount_info *mi,
				  struct inode *inode,
				  struct file *backing_file)
{
	struct inode_info *node = get_incfs_node(inode);
	struct data_file *df = NULL;
	int err = 0;

	inode_lock(inode);
	if (S_ISREG(inode->i_mode)) {
		if (!node->n_file) {
			df = incfs_open_data_file(mi, backing_file);

			if (IS_ERR(df))
				err = PTR_ERR(df);
			else
				node->n_file = df;
		}
	} else
		err = -EBADF;
	inode_unlock(inode);
	return err;
}

struct dir_file *incfs_open_dir_file(struct mount_info *mi, struct file *bf)
{
	struct dir_file *dir = NULL;

	if (!S_ISDIR(bf->f_inode->i_mode))
		return ERR_PTR(-EBADF);

	dir = kzalloc(sizeof(*dir), GFP_NOFS);
	if (!dir)
		return ERR_PTR(-ENOMEM);

	dir->backing_dir = get_file(bf);
	dir->mount_info = mi;
	return dir;
}

void incfs_free_dir_file(struct dir_file *dir)
{
	if (!dir)
		return;
	if (dir->backing_dir)
		fput(dir->backing_dir);
	kfree(dir);
}

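/*
 * Decompress one zstd-compressed block. The decompression stream and its
 * workspace are allocated lazily under mi_zstd_workspace_mutex and freed
 * by delayed work (zstd_free_workspace) once the mount has been idle for
 * a few seconds.
 */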
static ssize_t zstd_decompress_safe(struct mount_info *mi,
				    struct mem_range src, struct mem_range dst)
{
	ssize_t result;
	ZSTD_inBuffer inbuf = {.src = src.data, .size = src.len};
	ZSTD_outBuffer outbuf = {.dst = dst.data, .size = dst.len};

	result = mutex_lock_interruptible(&mi->mi_zstd_workspace_mutex);
	if (result)
		return result;

	if (!mi->mi_zstd_stream) {
		unsigned int workspace_size = ZSTD_DStreamWorkspaceBound(
						INCFS_DATA_FILE_BLOCK_SIZE);
		void *workspace = kvmalloc(workspace_size, GFP_NOFS);
		ZSTD_DStream *stream;

		if (!workspace) {
			result = -ENOMEM;
			goto out;
		}

		stream = ZSTD_initDStream(INCFS_DATA_FILE_BLOCK_SIZE, workspace,
					  workspace_size);
		if (!stream) {
			kvfree(workspace);
			result = -EIO;
			goto out;
		}

		mi->mi_zstd_workspace = workspace;
		mi->mi_zstd_stream = stream;
	}

	result = ZSTD_decompressStream(mi->mi_zstd_stream, &outbuf, &inbuf) ?
		-EBADMSG : outbuf.pos;

	mod_delayed_work(system_wq, &mi->mi_zstd_cleanup_work,
			 msecs_to_jiffies(5000));

out:
	mutex_unlock(&mi->mi_zstd_workspace_mutex);
	return result;
}

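/*
 * Dispatch decompression of a single block according to the compression
 * algorithm recorded in its block map entry (LZ4 or zstd).
 */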
static ssize_t decompress(struct mount_info *mi,
			  struct mem_range src, struct mem_range dst, int alg)
{
	int result;

	switch (alg) {
	case INCFS_BLOCK_COMPRESSED_LZ4:
		result = LZ4_decompress_safe(src.data, dst.data, src.len,
					     dst.len);
		if (result < 0)
			return -EBADMSG;
		return result;

	case INCFS_BLOCK_COMPRESSED_ZSTD:
		return zstd_decompress_safe(mi, src, dst);

	default:
		WARN_ON(true);
		return -EOPNOTSUPP;
	}
}

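/*
 * Decode one record at rs->next_offset and fold it into rs->base_record,
 * advancing the state to the following record and wrapping to the start
 * of the ring buffer when fewer than sizeof(union log_record) bytes
 * remain.
 */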
static void log_read_one_record(struct read_log *rl, struct read_log_state *rs)
{
	union log_record *record =
		(union log_record *)((u8 *)rl->rl_ring_buf + rs->next_offset);
	size_t record_size;

	switch (record->full_record.type) {
	case FULL:
		rs->base_record = record->full_record;
		record_size = sizeof(record->full_record);
		break;

	case SAME_FILE:
		rs->base_record.block_index =
			record->same_file.block_index;
		rs->base_record.absolute_ts_us +=
			record->same_file.relative_ts_us;
		rs->base_record.uid = record->same_file.uid;
		record_size = sizeof(record->same_file);
		break;

	case SAME_FILE_CLOSE_BLOCK:
		rs->base_record.block_index +=
			record->same_file_close_block.block_index_delta;
		rs->base_record.absolute_ts_us +=
			record->same_file_close_block.relative_ts_us;
		record_size = sizeof(record->same_file_close_block);
		break;

	case SAME_FILE_CLOSE_BLOCK_SHORT:
		rs->base_record.block_index +=
			record->same_file_close_block_short.block_index_delta;
		rs->base_record.absolute_ts_us +=
			record->same_file_close_block_short.relative_ts_tens_us * 10;
		record_size = sizeof(record->same_file_close_block_short);
		break;

	case SAME_FILE_NEXT_BLOCK:
		++rs->base_record.block_index;
		rs->base_record.absolute_ts_us +=
			record->same_file_next_block.relative_ts_us;
		record_size = sizeof(record->same_file_next_block);
		break;

	case SAME_FILE_NEXT_BLOCK_SHORT:
		++rs->base_record.block_index;
		rs->base_record.absolute_ts_us +=
			record->same_file_next_block_short.relative_ts_tens_us * 10;
		record_size = sizeof(record->same_file_next_block_short);
		break;
	}

	rs->next_offset += record_size;
	if (rs->next_offset > rl->rl_size - sizeof(*record)) {
		rs->next_offset = 0;
		++rs->current_pass_no;
	}
	++rs->current_record_no;
}

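/*
 * Append a block-read event to the mount's read log. The most compact
 * record type that still captures the delta from the previous event
 * (same file and uid, block index delta, elapsed time) is chosen, the
 * tail is advanced past any records about to be overwritten, and log
 * readers are woken via delayed work.
 */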
static void log_block_read(struct mount_info *mi, incfs_uuid_t *id,
			   int block_index)
{
	struct read_log *log = &mi->mi_log;
	struct read_log_state *head, *tail;
	s64 now_us;
	s64 relative_us;
	union log_record record;
	size_t record_size;
	uid_t uid = current_uid().val;
	int block_delta;
	bool same_file, same_uid;
	bool next_block, close_block, very_close_block;
	bool close_time, very_close_time, very_very_close_time;

	/*
	 * This may read a stale value, but it's OK if logging starts a
	 * little late right after a configuration update.
	 */
	if (READ_ONCE(log->rl_size) == 0)
		return;

	now_us = ktime_to_us(ktime_get());

	spin_lock(&log->rl_lock);
	if (log->rl_size == 0) {
		spin_unlock(&log->rl_lock);
		return;
	}

	head = &log->rl_head;
	tail = &log->rl_tail;
	relative_us = now_us - head->base_record.absolute_ts_us;

	same_file = !memcmp(id, &head->base_record.file_id,
			    sizeof(incfs_uuid_t));
	same_uid = uid == head->base_record.uid;

	block_delta = block_index - head->base_record.block_index;
	next_block = block_delta == 1;
	very_close_block = block_delta >= S8_MIN && block_delta <= S8_MAX;
	close_block = block_delta >= S16_MIN && block_delta <= S16_MAX;

	very_very_close_time = relative_us < (1 << 5) * 10;
	very_close_time = relative_us < (1 << 13);
	close_time = relative_us < (1 << 16);

	if (same_file && same_uid && next_block && very_very_close_time) {
		record.same_file_next_block_short =
			(struct same_file_next_block_short){
				.type = SAME_FILE_NEXT_BLOCK_SHORT,
				.relative_ts_tens_us = div_s64(relative_us, 10),
			};
		record_size = sizeof(struct same_file_next_block_short);
	} else if (same_file && same_uid && next_block && very_close_time) {
		record.same_file_next_block = (struct same_file_next_block){
			.type = SAME_FILE_NEXT_BLOCK,
			.relative_ts_us = relative_us,
		};
		record_size = sizeof(struct same_file_next_block);
	} else if (same_file && same_uid && very_close_block &&
		   very_very_close_time) {
		record.same_file_close_block_short =
			(struct same_file_close_block_short){
				.type = SAME_FILE_CLOSE_BLOCK_SHORT,
				.relative_ts_tens_us = div_s64(relative_us, 10),
				.block_index_delta = block_delta,
			};
		record_size = sizeof(struct same_file_close_block_short);
	} else if (same_file && same_uid && close_block && very_close_time) {
		record.same_file_close_block = (struct same_file_close_block){
			.type = SAME_FILE_CLOSE_BLOCK,
			.relative_ts_us = relative_us,
			.block_index_delta = block_delta,
		};
		record_size = sizeof(struct same_file_close_block);
	} else if (same_file && close_time) {
		record.same_file = (struct same_file){
			.type = SAME_FILE,
			.block_index = block_index,
			.relative_ts_us = relative_us,
			.uid = uid,
		};
		record_size = sizeof(struct same_file);
	} else {
		record.full_record = (struct full_record){
			.type = FULL,
			.block_index = block_index,
			.file_id = *id,
			.absolute_ts_us = now_us,
			.uid = uid,
		};
		head->base_record.file_id = *id;
		record_size = sizeof(struct full_record);
	}

	head->base_record.block_index = block_index;
	head->base_record.absolute_ts_us = now_us;

	/* Advance tail beyond area we are going to overwrite */
	while (tail->current_pass_no < head->current_pass_no &&
	       tail->next_offset < head->next_offset + record_size)
		log_read_one_record(log, tail);

	memcpy(((u8 *)log->rl_ring_buf) + head->next_offset, &record,
	       record_size);
	head->next_offset += record_size;
	if (head->next_offset > log->rl_size - sizeof(record)) {
		head->next_offset = 0;
		++head->current_pass_no;
	}
	++head->current_record_no;

	spin_unlock(&log->rl_lock);
	schedule_delayed_work(&log->ml_wakeup_work, msecs_to_jiffies(16));
}

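/*
 * Verify one data block against the file's Merkle tree. Walks the tree
 * from the root towards the leaf level, using hash pages that are already
 * cached and marked PageChecked where possible and reading and verifying
 * missing hash blocks from the backing file otherwise, then compares the
 * leaf-level hash against the digest of the block data itself.
 */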
static int validate_hash_tree(struct backing_file_context *bfc, struct file *f,
			      int block_index, struct mem_range data, u8 *buf)
{
	struct data_file *df = get_incfs_data_file(f);
	u8 stored_digest[INCFS_MAX_HASH_SIZE] = {};
	u8 calculated_digest[INCFS_MAX_HASH_SIZE] = {};
	struct mtree *tree = NULL;
	struct incfs_df_signature *sig = NULL;
	int digest_size;
	int hash_block_index = block_index;
	int lvl;
	int res;
	loff_t hash_block_offset[INCFS_MAX_MTREE_LEVELS];
	size_t hash_offset_in_block[INCFS_MAX_MTREE_LEVELS];
	int hash_per_block;
	pgoff_t file_pages;

	/*
	 * Memory barrier to make sure the hash tree is fully visible here
	 * if it was attached later via enable verity.
	 */
	tree = smp_load_acquire(&df->df_hash_tree);
	sig = df->df_signature;
	if (!tree || !sig)
		return 0;

	digest_size = tree->alg->digest_size;
	hash_per_block = INCFS_DATA_FILE_BLOCK_SIZE / digest_size;
	for (lvl = 0; lvl < tree->depth; lvl++) {
		loff_t lvl_off = tree->hash_level_suboffset[lvl];

		hash_block_offset[lvl] =
			lvl_off + round_down(hash_block_index * digest_size,
					     INCFS_DATA_FILE_BLOCK_SIZE);
		hash_offset_in_block[lvl] = hash_block_index * digest_size %
			INCFS_DATA_FILE_BLOCK_SIZE;
		hash_block_index /= hash_per_block;
	}

	memcpy(stored_digest, tree->root_hash, digest_size);

	file_pages = DIV_ROUND_UP(df->df_size, INCFS_DATA_FILE_BLOCK_SIZE);
	for (lvl = tree->depth - 1; lvl >= 0; lvl--) {
		pgoff_t hash_page =
			file_pages +
			hash_block_offset[lvl] / INCFS_DATA_FILE_BLOCK_SIZE;
		struct page *page = find_get_page_flags(
			f->f_inode->i_mapping, hash_page, FGP_ACCESSED);

		if (page && PageChecked(page)) {
			u8 *addr = kmap_atomic(page);

			memcpy(stored_digest, addr + hash_offset_in_block[lvl],
			       digest_size);
			kunmap_atomic(addr);
			put_page(page);
			continue;
		}

		if (page)
			put_page(page);

		res = incfs_kread(bfc, buf, INCFS_DATA_FILE_BLOCK_SIZE,
				  hash_block_offset[lvl] + sig->hash_offset);
		if (res < 0)
			return res;
		if (res != INCFS_DATA_FILE_BLOCK_SIZE)
			return -EIO;
		res = incfs_calc_digest(tree->alg,
					range(buf, INCFS_DATA_FILE_BLOCK_SIZE),
					range(calculated_digest, digest_size));
		if (res)
			return res;

		if (memcmp(stored_digest, calculated_digest, digest_size)) {
			int i;
			bool zero = true;

			pr_warn("incfs: Hash mismatch lvl:%d blk:%d\n",
				lvl, block_index);
			for (i = 0; i < digest_size; i++)
				if (stored_digest[i]) {
					zero = false;
					break;
				}

			if (zero)
				pr_debug("Note saved_digest all zero - did you forget to load the hashes?\n");
			return -EBADMSG;
		}

		memcpy(stored_digest, buf + hash_offset_in_block[lvl],
		       digest_size);

		page = grab_cache_page(f->f_inode->i_mapping, hash_page);
		if (page) {
			u8 *addr = kmap_atomic(page);

			memcpy(addr, buf, INCFS_DATA_FILE_BLOCK_SIZE);
			kunmap_atomic(addr);
			SetPageChecked(page);
			unlock_page(page);
			put_page(page);
		}
	}

	res = incfs_calc_digest(tree->alg, data,
				range(calculated_digest, digest_size));
	if (res)
		return res;

	if (memcmp(stored_digest, calculated_digest, digest_size)) {
		pr_debug("Leaf hash mismatch blk:%d\n", block_index);
		return -EBADMSG;
	}

	return 0;
}

static struct data_file_segment *get_file_segment(struct data_file *df,
						  int block_index)
{
	int seg_idx = block_index % ARRAY_SIZE(df->df_segments);

	return &df->df_segments[seg_idx];
}

static bool is_data_block_present(struct data_file_block *block)
{
	return (block->db_backing_file_data_offset != 0) &&
	       (block->db_stored_size != 0);
}

static void convert_data_file_block(struct incfs_blockmap_entry *bme,
				    struct data_file_block *res_block)
{
	u16 flags = le16_to_cpu(bme->me_flags);

	res_block->db_backing_file_data_offset =
		le16_to_cpu(bme->me_data_offset_hi);
	res_block->db_backing_file_data_offset <<= 32;
	res_block->db_backing_file_data_offset |=
		le32_to_cpu(bme->me_data_offset_lo);
	res_block->db_stored_size = le16_to_cpu(bme->me_data_size);
	res_block->db_comp_alg = flags & INCFS_BLOCK_COMPRESSED_MASK;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) static int get_data_file_block(struct data_file *df, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) struct data_file_block *res_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) struct incfs_blockmap_entry bme = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) struct backing_file_context *bfc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) loff_t blockmap_off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) if (!df || !res_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) blockmap_off = df->df_blockmap_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) bfc = df->df_backing_file_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (index < 0 || blockmap_off == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) error = incfs_read_blockmap_entry(bfc, index, blockmap_off, &bme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) convert_data_file_block(&bme, res_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) static int check_room_for_one_range(u32 size, u32 size_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (size_out + sizeof(struct incfs_filled_range) > size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
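/*
 * Append one filled-range record at offset *size_out in the user buffer,
 * then advance *size_out. Fails with -ERANGE if the buffer has no room
 * left and -EFAULT if the copy to user space fails.
 */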
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) static int copy_one_range(struct incfs_filled_range *range, void __user *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) u32 size, u32 *size_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) int error = check_room_for_one_range(size, *size_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (copy_to_user(((char __user *)buffer) + *size_out, range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) sizeof(*range)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) *size_out += sizeof(*range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
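/*
 * Report ranges of filled blocks to user space. The blockmap is scanned in
 * batches of READ_BLOCKMAP_ENTRIES entries, and every maximal run of present
 * blocks in [start_index, end_index) is emitted as one incfs_filled_range.
 * Per-fd progress is tracked so that, once the whole file has been scanned,
 * the blocks-written counters can be refreshed and later calls can take the
 * fast path for fully downloaded files.
 */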
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) #define READ_BLOCKMAP_ENTRIES 512
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) int incfs_get_filled_blocks(struct data_file *df,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) struct incfs_file_data *fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) struct incfs_get_filled_blocks_args *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) bool in_range = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) struct incfs_filled_range range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) void __user *buffer = u64_to_user_ptr(arg->range_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) u32 size = arg->range_buffer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) u32 end_index =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) arg->end_index ? arg->end_index : df->df_total_block_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) u32 *size_out = &arg->range_buffer_size_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) int i = READ_BLOCKMAP_ENTRIES - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) int entries_read = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) struct incfs_blockmap_entry *bme;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) int data_blocks_filled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) int hash_blocks_filled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) *size_out = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (end_index > df->df_total_block_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) end_index = df->df_total_block_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) arg->total_blocks_out = df->df_total_block_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) arg->data_blocks_out = df->df_data_block_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (atomic_read(&df->df_data_blocks_written) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) df->df_data_block_count) {
		pr_debug("File marked full, fast get_filled_blocks\n");
		arg->index_out = arg->start_index;
		if (arg->start_index > end_index)
			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) error = check_room_for_one_range(size, *size_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) range = (struct incfs_filled_range){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) .begin = arg->start_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) .end = end_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) error = copy_one_range(&range, buffer, size, size_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) arg->index_out = end_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) bme = kzalloc(sizeof(*bme) * READ_BLOCKMAP_ENTRIES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) GFP_NOFS | __GFP_COMP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) if (!bme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) for (arg->index_out = arg->start_index; arg->index_out < end_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) ++arg->index_out) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) struct data_file_block dfb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (++i == READ_BLOCKMAP_ENTRIES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) entries_read = incfs_read_blockmap_entries(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) df->df_backing_file_context, bme,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) arg->index_out, READ_BLOCKMAP_ENTRIES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) df->df_blockmap_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) if (entries_read < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) error = entries_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) if (i >= entries_read) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) convert_data_file_block(bme + i, &dfb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) if (is_data_block_present(&dfb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (arg->index_out >= df->df_data_block_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) ++hash_blocks_filled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) ++data_blocks_filled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (is_data_block_present(&dfb) == in_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) if (!in_range) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) error = check_room_for_one_range(size, *size_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) in_range = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) range.begin = arg->index_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) range.end = arg->index_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) error = copy_one_range(&range, buffer, size, size_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (error) {
				/*
				 * The last open range is copied once more
				 * after the loop; index_out is reset there
				 * if that copy also fails.
				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) in_range = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) if (in_range) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) range.end = arg->index_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) error = copy_one_range(&range, buffer, size, size_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) arg->index_out = range.begin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (arg->start_index == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) fd->fd_get_block_pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) fd->fd_filled_data_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) fd->fd_filled_hash_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (arg->start_index == fd->fd_get_block_pos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) fd->fd_get_block_pos = arg->index_out + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) fd->fd_filled_data_blocks += data_blocks_filled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) fd->fd_filled_hash_blocks += hash_blocks_filled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if (fd->fd_get_block_pos == df->df_total_block_count + 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (fd->fd_filled_data_blocks >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) atomic_read(&df->df_data_blocks_written))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) atomic_set(&df->df_data_blocks_written,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) fd->fd_filled_data_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (fd->fd_filled_hash_blocks >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) atomic_read(&df->df_hash_blocks_written))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) atomic_set(&df->df_hash_blocks_written,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) fd->fd_filled_hash_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) kfree(bme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) static bool is_read_done(struct pending_read *read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) return atomic_read_acquire(&read->done) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) static void set_read_done(struct pending_read *read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) atomic_set_release(&read->done, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
/*
 * Notifies a given data file about a pending read from a given block.
 * Returns the new pending read entry, or NULL if it could not be allocated.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) static struct pending_read *add_pending_read(struct data_file *df,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) int block_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) struct pending_read *result = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) struct data_file_segment *segment = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) struct mount_info *mi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) segment = get_file_segment(df, block_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) mi = df->df_mount_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) result = kzalloc(sizeof(*result), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) if (!result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) result->file_id = df->df_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) result->block_index = block_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) result->timestamp_us = ktime_to_us(ktime_get());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) result->uid = current_uid().val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) spin_lock(&mi->pending_read_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) result->serial_number = ++mi->mi_last_pending_read_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) mi->mi_pending_reads_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) list_add_rcu(&result->mi_reads_list, &mi->mi_reads_list_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) list_add_rcu(&result->segment_reads_list, &segment->reads_list_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) spin_unlock(&mi->pending_read_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) wake_up_all(&mi->mi_pending_reads_notif_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) static void free_pending_read_entry(struct rcu_head *entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) struct pending_read *read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) read = container_of(entry, struct pending_read, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) kfree(read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
/* Notifies a given data file that a pending read has completed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) static void remove_pending_read(struct data_file *df, struct pending_read *read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) struct mount_info *mi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (!df || !read) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) WARN_ON(!df);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) WARN_ON(!read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) mi = df->df_mount_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) spin_lock(&mi->pending_read_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) list_del_rcu(&read->mi_reads_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) list_del_rcu(&read->segment_reads_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) mi->mi_pending_reads_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) spin_unlock(&mi->pending_read_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
	/* Don't free immediately; wait for RCU readers to finish. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) call_rcu(&read->rcu, free_pending_read_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
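/*
 * Mark every pending read waiting for this block as done and wake the
 * per-segment waiters, then bump the mount-wide blocks-written counter and
 * wake its listeners.
 */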
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) static void notify_pending_reads(struct mount_info *mi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) struct data_file_segment *segment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) struct pending_read *entry = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) /* Notify pending reads waiting for this block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) list_for_each_entry_rcu(entry, &segment->reads_list_head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) segment_reads_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (entry->block_index == index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) set_read_done(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) wake_up_all(&segment->new_data_arrival_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) atomic_inc(&mi->mi_blocks_written);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) wake_up_all(&mi->mi_blocks_written_notif_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) static int usleep_interruptible(u32 us)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) /* See:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) * https://www.kernel.org/doc/Documentation/timers/timers-howto.txt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) * for explanation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if (us < 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) udelay(us);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) } else if (us < 20000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) usleep_range(us, us + us / 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) return 0;
	}
	return msleep_interruptible(us / 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
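/*
 * Look up a data block, waiting for it to arrive if necessary. If the block
 * is already present it is returned immediately, after an optional
 * min_time_us delay. Otherwise a pending read is registered and the caller
 * sleeps for up to max_pending_time_us waiting for the block to be written,
 * after which the blockmap is re-read. Any artificial delays are accounted
 * in the mount's delay statistics.
 */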
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) static int wait_for_data_block(struct data_file *df, int block_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) struct data_file_block *res_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) struct incfs_read_data_file_timeouts *timeouts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) struct data_file_block block = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) struct data_file_segment *segment = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) struct pending_read *read = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) struct mount_info *mi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) int wait_res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) unsigned int delayed_pending_us = 0, delayed_min_us = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) bool delayed_pending = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (!df || !res_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if (block_index < 0 || block_index >= df->df_data_block_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) if (df->df_blockmap_off <= 0 || !df->df_mount_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) return -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) mi = df->df_mount_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) segment = get_file_segment(df, block_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) error = down_read_killable(&segment->rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) /* Look up the given block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) error = get_data_file_block(df, block_index, &block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) up_read(&segment->rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) /* If the block was found, just return it. No need to wait. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) if (is_data_block_present(&block)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) *res_block = block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) if (timeouts && timeouts->min_time_us) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) delayed_min_us = timeouts->min_time_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) error = usleep_interruptible(delayed_min_us);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) /* If it's not found, create a pending read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) if (timeouts && timeouts->max_pending_time_us) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) read = add_pending_read(df, block_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (!read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) log_block_read(mi, &df->df_id, block_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) return -ETIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) /* Rest of function only applies if timeouts != NULL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (!timeouts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) pr_warn("incfs: timeouts unexpectedly NULL\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
	/* Wait for a notification of the block's arrival */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) wait_res =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) wait_event_interruptible_timeout(segment->new_data_arrival_wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) (is_read_done(read)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) usecs_to_jiffies(timeouts->max_pending_time_us));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
	/* Woke up; the pending read is no longer needed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) remove_pending_read(df, read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if (wait_res == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) /* Wait has timed out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) log_block_read(mi, &df->df_id, block_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) return -ETIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (wait_res < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) * Only ERESTARTSYS is really expected here when a signal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * comes while we wait.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) return wait_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) delayed_pending = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) delayed_pending_us = timeouts->max_pending_time_us -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) jiffies_to_usecs(wait_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) if (timeouts->min_pending_time_us > delayed_pending_us) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) delayed_min_us = timeouts->min_pending_time_us -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) delayed_pending_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) error = usleep_interruptible(delayed_min_us);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) error = down_read_killable(&segment->rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * Re-read blocks info now, it has just arrived and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) * should be available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) error = get_data_file_block(df, block_index, &block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) if (is_data_block_present(&block))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) *res_block = block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) else {
			/*
			 * The wait completed successfully, yet the block
			 * still cannot be found. This should not happen.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) pr_warn("incfs: Wait succeeded but block not found.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) error = -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) up_read(&segment->rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) if (delayed_pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) mi->mi_reads_delayed_pending++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) mi->mi_reads_delayed_pending_us +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) delayed_pending_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) if (delayed_min_us) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) mi->mi_reads_delayed_min++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) mi->mi_reads_delayed_min_us += delayed_min_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
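/*
 * Remember the most recent read error (file id, page index, errno, uid and
 * timestamp) so it can be reported through sysfs. Successful reads are
 * ignored.
 */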
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) static int incfs_update_sysfs_error(struct file *file, int index, int result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) struct mount_info *mi, struct data_file *df)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) if (result >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) error = mutex_lock_interruptible(&mi->mi_le_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) mi->mi_le_file_id = df->df_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) mi->mi_le_time_us = ktime_to_us(ktime_get());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) mi->mi_le_page = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) mi->mi_le_errno = result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) mi->mi_le_uid = current_uid().val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) mutex_unlock(&mi->mi_le_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
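/*
 * Read one data block of the file into dst. Uncompressed blocks are read
 * straight from the backing file; compressed blocks are first read into the
 * tmp scratch buffer (which must hold at least two blocks) and then
 * decompressed into dst. Successfully read data is run through hash tree
 * validation, the read is logged, and failures are reflected in the mount's
 * error statistics and sysfs error record.
 */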
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) ssize_t incfs_read_data_file_block(struct mem_range dst, struct file *f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) int index, struct mem_range tmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) struct incfs_read_data_file_timeouts *timeouts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) loff_t pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) ssize_t result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) size_t bytes_to_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) struct mount_info *mi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) struct backing_file_context *bfc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) struct data_file_block block = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) struct data_file *df = get_incfs_data_file(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (!dst.data || !df || !tmp.data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) if (tmp.len < 2 * INCFS_DATA_FILE_BLOCK_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) mi = df->df_mount_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) bfc = df->df_backing_file_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) result = wait_for_data_block(df, index, &block, timeouts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (result < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) pos = block.db_backing_file_data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) if (block.db_comp_alg == COMPRESSION_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) bytes_to_read = min(dst.len, block.db_stored_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) result = incfs_kread(bfc, dst.data, bytes_to_read, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) /* Some data was read, but not enough */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) if (result >= 0 && result != bytes_to_read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) result = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) bytes_to_read = min(tmp.len, block.db_stored_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) result = incfs_kread(bfc, tmp.data, bytes_to_read, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) if (result == bytes_to_read) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) result =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) decompress(mi, range(tmp.data, bytes_to_read),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) dst, block.db_comp_alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) if (result < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) const char *name =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) bfc->bc_file->f_path.dentry->d_name.name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
				pr_warn_once("incfs: Decompression error. %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) } else if (result >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) /* Some data was read, but not enough */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) result = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) if (result > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) int err = validate_hash_tree(bfc, f, index, dst, tmp.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) result = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (result >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) log_block_read(mi, &df->df_id, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) if (result == -ETIME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) mi->mi_reads_failed_timed_out++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) else if (result == -EBADMSG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) mi->mi_reads_failed_hash_verification++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) else if (result < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) mi->mi_reads_failed_other++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) incfs_update_sysfs_error(f, index, result, mi, df);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
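/*
 * Copy up to dst.len bytes of the stored Merkle tree into dst, starting at
 * the given offset within the hash area described by the file signature.
 * Reads past the end of the hash area are truncated.
 */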
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) ssize_t incfs_read_merkle_tree_blocks(struct mem_range dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) struct data_file *df, size_t offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) struct backing_file_context *bfc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) struct incfs_df_signature *sig = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) size_t to_read = dst.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) if (!dst.data || !df)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) sig = df->df_signature;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) bfc = df->df_backing_file_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) if (offset > sig->hash_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) if (offset + to_read > sig->hash_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) to_read = sig->hash_size - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) return incfs_kread(bfc, dst.data, to_read, sig->hash_offset + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)
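/*
 * Accept a newly arrived data block: write it to the backing file and update
 * the blockmap entry under the segment's write lock, then wake any pending
 * reads waiting for this block. Blocks that are already present are silently
 * ignored.
 */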
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) int incfs_process_new_data_block(struct data_file *df,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) struct incfs_fill_block *block, u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) struct mount_info *mi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) struct backing_file_context *bfc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) struct data_file_segment *segment = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) struct data_file_block existing_block = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) u16 flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) if (!df || !block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) bfc = df->df_backing_file_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) mi = df->df_mount_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) if (block->block_index >= df->df_data_block_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) segment = get_file_segment(df, block->block_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) if (!segment)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) if (block->compression == COMPRESSION_LZ4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) flags |= INCFS_BLOCK_COMPRESSED_LZ4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) else if (block->compression == COMPRESSION_ZSTD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) flags |= INCFS_BLOCK_COMPRESSED_ZSTD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) else if (block->compression)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) error = down_read_killable(&segment->rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) error = get_data_file_block(df, block->block_index, &existing_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) up_read(&segment->rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) if (is_data_block_present(&existing_block)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) /* Block is already present, nothing to do here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) error = down_write_killable(&segment->rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) error = mutex_lock_interruptible(&bfc->bc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) error = incfs_write_data_block_to_backing_file(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) bfc, range(data, block->data_len), block->block_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) df->df_blockmap_off, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) mutex_unlock(&bfc->bc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) notify_pending_reads(mi, segment, block->block_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) atomic_inc(&df->df_data_blocks_written);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) up_write(&segment->rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (error)
		pr_debug("incfs: error writing block %d: %d\n",
			 block->block_index, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
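/*
 * Copy the stored signature blob into dst. Returns the number of bytes
 * copied, 0 if the file has no signature, or a negative error code.
 */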
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) int incfs_read_file_signature(struct data_file *df, struct mem_range dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) struct backing_file_context *bfc = df->df_backing_file_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) struct incfs_df_signature *sig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) int read_res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) if (!dst.data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) sig = df->df_signature;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) if (!sig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) if (dst.len < sig->sig_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) read_res = incfs_kread(bfc, dst.data, sig->sig_size, sig->sig_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) if (read_res < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) return read_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) if (read_res != sig->sig_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) return read_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
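/*
 * Accept a newly arrived hash (Merkle tree) block and write it into the
 * hash area of the backing file, after checking that it fits within the
 * area described by the file signature.
 */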
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) int incfs_process_new_hash_block(struct data_file *df,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) struct incfs_fill_block *block, u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) struct backing_file_context *bfc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) struct mount_info *mi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) struct mtree *hash_tree = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) struct incfs_df_signature *sig = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) loff_t hash_area_base = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) loff_t hash_area_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) if (!df || !block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) if (!(block->flags & INCFS_BLOCK_FLAGS_HASH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) bfc = df->df_backing_file_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) mi = df->df_mount_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) hash_tree = df->df_hash_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) sig = df->df_signature;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) if (!hash_tree || !sig || sig->hash_offset == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) hash_area_base = sig->hash_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) hash_area_size = sig->hash_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) if (hash_area_size < block->block_index * INCFS_DATA_FILE_BLOCK_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) + block->data_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) /* Hash block goes beyond dedicated hash area of this file. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) error = mutex_lock_interruptible(&bfc->bc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) error = incfs_write_hash_block_to_backing_file(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) bfc, range(data, block->data_len), block->block_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) hash_area_base, df->df_blockmap_off, df->df_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) mutex_unlock(&bfc->bc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) atomic_inc(&df->df_hash_blocks_written);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
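/*
 * The handlers below process the individual metadata records found while
 * scanning the backing file's metadata chain; each receives the data_file
 * being set up through handler->context.
 */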
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) static int process_blockmap_md(struct incfs_blockmap *bm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) struct metadata_handler *handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) struct data_file *df = handler->context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) loff_t base_off = le64_to_cpu(bm->m_base_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) u32 block_count = le32_to_cpu(bm->m_block_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) if (!df)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if (df->df_data_block_count > block_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) return -EBADMSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) df->df_total_block_count = block_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) df->df_blockmap_off = base_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) static int process_file_signature_md(struct incfs_file_signature *sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) struct metadata_handler *handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) struct data_file *df = handler->context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) struct mtree *hash_tree = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) struct incfs_df_signature *signature =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) kzalloc(sizeof(*signature), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) void *buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) ssize_t read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) if (!signature)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) if (!df || !df->df_backing_file_context ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) !df->df_backing_file_context->bc_file) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) error = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) signature->hash_offset = le64_to_cpu(sg->sg_hash_tree_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) signature->hash_size = le32_to_cpu(sg->sg_hash_tree_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) signature->sig_offset = le64_to_cpu(sg->sg_sig_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) signature->sig_size = le32_to_cpu(sg->sg_sig_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) buf = kzalloc(signature->sig_size, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) if (!buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) read = incfs_kread(df->df_backing_file_context, buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) signature->sig_size, signature->sig_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) if (read < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) error = read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) if (read != signature->sig_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) hash_tree = incfs_alloc_mtree(range(buf, signature->sig_size),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) df->df_data_block_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) if (IS_ERR(hash_tree)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) error = PTR_ERR(hash_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) hash_tree = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) if (hash_tree->hash_tree_area_size != signature->hash_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) if (signature->hash_size > 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) handler->md_record_offset <= signature->hash_offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) if (handler->md_record_offset <= signature->sig_offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) df->df_hash_tree = hash_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) hash_tree = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) df->df_signature = signature;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) signature = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) incfs_free_mtree(hash_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) kfree(signature);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) static int process_status_md(struct incfs_status *is,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) struct metadata_handler *handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) struct data_file *df = handler->context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) df->df_initial_data_blocks_written =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) le32_to_cpu(is->is_data_blocks_written);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) atomic_set(&df->df_data_blocks_written,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) df->df_initial_data_blocks_written);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) df->df_initial_hash_blocks_written =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) le32_to_cpu(is->is_hash_blocks_written);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) atomic_set(&df->df_hash_blocks_written,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) df->df_initial_hash_blocks_written);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) df->df_status_offset = handler->md_record_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
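/*
 * Record where the fs-verity signature lives in the backing file, rejecting
 * sizes above FS_VERITY_MAX_SIGNATURE_SIZE.
 */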
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) static int process_file_verity_signature_md(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) struct incfs_file_verity_signature *vs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) struct metadata_handler *handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) struct data_file *df = handler->context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) struct incfs_df_verity_signature *verity_signature;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) if (!df)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) verity_signature = kzalloc(sizeof(*verity_signature), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) if (!verity_signature)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) verity_signature->offset = le64_to_cpu(vs->vs_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) verity_signature->size = le32_to_cpu(vs->vs_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) if (verity_signature->size > FS_VERITY_MAX_SIGNATURE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) kfree(verity_signature);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) df->df_verity_signature = verity_signature;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
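/*
 * Walk the backing file's metadata chain, dispatching each record to the
 * handlers above. Returns the number of records processed or a negative
 * error. Finally, sanity-check that any blocks beyond the data blocks are
 * exactly the hash-tree blocks (or zero for files whose tree was added
 * later via FS_IOC_ENABLE_VERITY).
 */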
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) static int incfs_scan_metadata_chain(struct data_file *df)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) struct metadata_handler *handler = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) int result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) int records_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) struct backing_file_context *bfc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) int nondata_block_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) if (!df || !df->df_backing_file_context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) bfc = df->df_backing_file_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) handler = kzalloc(sizeof(*handler), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) if (!handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) handler->md_record_offset = df->df_metadata_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) handler->context = df;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) handler->handle_blockmap = process_blockmap_md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) handler->handle_signature = process_file_signature_md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) handler->handle_status = process_status_md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) handler->handle_verity_signature = process_file_verity_signature_md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
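	/* Follow the metadata chain; a zero offset terminates it. */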
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) while (handler->md_record_offset > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) error = incfs_read_next_metadata_record(bfc, handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) if (error) {
			pr_warn("incfs: Error while reading incfs-metadata record. Offset: %lld Record #%d Error code: %d\n",
				handler->md_record_offset, records_count + 1,
				-error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) records_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) pr_warn("incfs: Error %d after reading %d incfs-metadata records.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) -error, records_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) result = error;
	} else {
		result = records_count;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) nondata_block_count = df->df_total_block_count -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) df->df_data_block_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) if (df->df_hash_tree) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) int hash_block_count = get_blocks_count_for_size(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) df->df_hash_tree->hash_tree_area_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) * Files that were created with a hash tree have the hash tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) * included in the block map, i.e. nondata_block_count ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) * hash_block_count. Files whose hash tree was added by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) * FS_IOC_ENABLE_VERITY will still have the original block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) * count, i.e. nondata_block_count == 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) if (nondata_block_count != hash_block_count &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) nondata_block_count != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) result = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) } else if (nondata_block_count != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) result = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) kfree(handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) * Quickly checks if there are pending reads with a serial number larger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) * than a given one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) bool incfs_fresh_pending_reads_exist(struct mount_info *mi, int last_number)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) bool result = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) spin_lock(&mi->pending_read_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) result = (mi->mi_last_pending_read_number > last_number) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) (mi->mi_pending_reads_count > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) spin_unlock(&mi->pending_read_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
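/*
 * Copy pending reads with serial numbers above sn_lowerbound into the
 * caller's buffer(s). reads and reads2 select the v1 and v2 record layouts
 * and either may be NULL; *new_max_sn is raised to the largest serial
 * number seen. Returns the number of reads reported.
 */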
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) int incfs_collect_pending_reads(struct mount_info *mi, int sn_lowerbound,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) struct incfs_pending_read_info *reads,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) struct incfs_pending_read_info2 *reads2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) int reads_size, int *new_max_sn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) int reported_reads = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) struct pending_read *entry = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) if (!mi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) if (reads_size <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) if (!incfs_fresh_pending_reads_exist(mi, sn_lowerbound))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)
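	/*
	 * Walk the pending-read list under RCU; concurrent additions and
	 * removals are safe, and the copy is bounded by reads_size.
	 */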
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) list_for_each_entry_rcu(entry, &mi->mi_reads_list_head, mi_reads_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) if (entry->serial_number <= sn_lowerbound)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) if (reads) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) reads[reported_reads].file_id = entry->file_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) reads[reported_reads].block_index = entry->block_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) reads[reported_reads].serial_number =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) entry->serial_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) reads[reported_reads].timestamp_us =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) entry->timestamp_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) if (reads2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) reads2[reported_reads].file_id = entry->file_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) reads2[reported_reads].block_index = entry->block_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) reads2[reported_reads].serial_number =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) entry->serial_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) reads2[reported_reads].timestamp_us =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) entry->timestamp_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) reads2[reported_reads].uid = entry->uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) if (entry->serial_number > *new_max_sn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) *new_max_sn = entry->serial_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) reported_reads++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) if (reported_reads >= reads_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) return reported_reads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
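/* Snapshot the current head of the read log under its spinlock. */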
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) struct read_log_state incfs_get_log_state(struct mount_info *mi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) struct read_log *log = &mi->mi_log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) struct read_log_state result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) spin_lock(&log->rl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) result = log->rl_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) spin_unlock(&log->rl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
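/*
 * Return how many log records the reader described by *state has not yet
 * collected. If the log was restarted (generation changed), everything
 * between tail and head counts as uncollected.
 */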
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) int incfs_get_uncollected_logs_count(struct mount_info *mi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) const struct read_log_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) struct read_log *log = &mi->mi_log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) u32 generation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) u64 head_no, tail_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) spin_lock(&log->rl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) tail_no = log->rl_tail.current_record_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) head_no = log->rl_head.current_record_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) generation = log->rl_head.generation_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) spin_unlock(&log->rl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
	if (generation != state->generation_id)
		return head_no - tail_no;

	return head_no - max_t(u64, tail_no, state->current_record_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
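/*
 * Drain logged reads from *state up to the current head into the caller's
 * buffer(s). A state from an older generation is reset to the current one,
 * and a reader that has fallen behind the tail is advanced to the tail
 * before copying. Returns the number of records copied.
 */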
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) int incfs_collect_logged_reads(struct mount_info *mi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) struct read_log_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) struct incfs_pending_read_info *reads,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) struct incfs_pending_read_info2 *reads2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) int reads_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) int dst_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) struct read_log *log = &mi->mi_log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) struct read_log_state *head, *tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) spin_lock(&log->rl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) head = &log->rl_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) tail = &log->rl_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) if (state->generation_id != head->generation_id) {
		pr_debug("read ptr is wrong generation: %u/%u\n",
			 state->generation_id, head->generation_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) *state = (struct read_log_state){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) .generation_id = head->generation_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) if (state->current_record_no < tail->current_record_no) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) pr_debug("read ptr is behind, moving: %u/%u -> %u/%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) (u32)state->next_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) (u32)state->current_pass_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) (u32)tail->next_offset, (u32)tail->current_pass_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) *state = *tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
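	/* Copy records until we reach the head or fill the caller's buffer. */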
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) for (dst_idx = 0; dst_idx < reads_size; dst_idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) if (state->current_record_no == head->current_record_no)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) log_read_one_record(log, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) if (reads)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) reads[dst_idx] = (struct incfs_pending_read_info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) .file_id = state->base_record.file_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) .block_index = state->base_record.block_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) .serial_number = state->current_record_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) .timestamp_us =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) state->base_record.absolute_ts_us,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) if (reads2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) reads2[dst_idx] = (struct incfs_pending_read_info2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) .file_id = state->base_record.file_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) .block_index = state->base_record.block_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) .serial_number = state->current_record_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) .timestamp_us =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) state->base_record.absolute_ts_us,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) .uid = state->base_record.uid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) spin_unlock(&log->rl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) return dst_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)