// SPDX-License-Identifier: GPL-2.0-or-later
/* FS-Cache object state machine handler
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * See Documentation/filesystems/caching/object.rst for a description of the
 * object state machine and the in-kernel representations.
 */

#define FSCACHE_DEBUG_LEVEL COOKIE
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include "internal.h"

static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *, int);
static const struct fscache_state *fscache_kill_dependents(struct fscache_object *, int);
static const struct fscache_state *fscache_drop_object(struct fscache_object *, int);
static const struct fscache_state *fscache_initialise_object(struct fscache_object *, int);
static const struct fscache_state *fscache_invalidate_object(struct fscache_object *, int);
static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *, int);
static const struct fscache_state *fscache_kill_object(struct fscache_object *, int);
static const struct fscache_state *fscache_lookup_failure(struct fscache_object *, int);
static const struct fscache_state *fscache_look_up_object(struct fscache_object *, int);
static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
static const struct fscache_state *fscache_object_dead(struct fscache_object *, int);

#define __STATE_NAME(n) fscache_osm_##n
#define STATE(n) (&__STATE_NAME(n))

/*
 * Define a work state. Work states are execution states. No event processing
 * is performed by them. The function attached to a work state returns a
 * pointer indicating the next state to which the state machine should
 * transition. Returning NO_TRANSIT repeats the current state, but goes back
 * to the scheduler first.
 */
#define WORK_STATE(n, sn, f) \
	const struct fscache_state __STATE_NAME(n) = { \
		.name = #n, \
		.short_name = sn, \
		.work = f \
	}

/*
 * Returns from work states.
 */
#define transit_to(state) ({ prefetch(&STATE(state)->work); STATE(state); })

#define NO_TRANSIT ((struct fscache_state *)NULL)
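
/*
 * Illustrative sketch (the handler below is hypothetical and not one of the
 * states defined in this file): a function plugged in through WORK_STATE()
 * does whatever work the state requires and then either names the next state
 * with transit_to() or returns NO_TRANSIT to be requeued in the same state:
 *
 *	static const struct fscache_state *fscache_example_work(
 *		struct fscache_object *object, int event)
 *	{
 *		if (fscache_example_still_busy(object))
 *			return NO_TRANSIT;
 *		return transit_to(DROP_OBJECT);
 *	}
 *
 * where fscache_example_still_busy() stands in for whatever condition the
 * state needs to test.
 */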

/*
 * Define a wait state. Wait states are event processing states. No execution
 * is performed by them. Wait states are just tables of "if event X occurs,
 * clear it and transition to state Y". The dispatcher returns to the
 * scheduler if none of the events in which the wait state has an interest are
 * currently pending.
 */
#define WAIT_STATE(n, sn, ...) \
	const struct fscache_state __STATE_NAME(n) = { \
		.name = #n, \
		.short_name = sn, \
		.work = NULL, \
		.transitions = { __VA_ARGS__, { 0, NULL } } \
	}

#define TRANSIT_TO(state, emask) \
	{ .events = (emask), .transit_to = STATE(state) }

/*
 * The object state machine.
 */
static WORK_STATE(INIT_OBJECT, "INIT", fscache_initialise_object);
static WORK_STATE(PARENT_READY, "PRDY", fscache_parent_ready);
static WORK_STATE(ABORT_INIT, "ABRT", fscache_abort_initialisation);
static WORK_STATE(LOOK_UP_OBJECT, "LOOK", fscache_look_up_object);
static WORK_STATE(CREATE_OBJECT, "CRTO", fscache_look_up_object);
static WORK_STATE(OBJECT_AVAILABLE, "AVBL", fscache_object_available);
static WORK_STATE(JUMPSTART_DEPS, "JUMP", fscache_jumpstart_dependents);

static WORK_STATE(INVALIDATE_OBJECT, "INVL", fscache_invalidate_object);
static WORK_STATE(UPDATE_OBJECT, "UPDT", fscache_update_object);

static WORK_STATE(LOOKUP_FAILURE, "LCFL", fscache_lookup_failure);
static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object);
static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents);
static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object);
static WORK_STATE(OBJECT_DEAD, "DEAD", fscache_object_dead);

static WAIT_STATE(WAIT_FOR_INIT, "?INI",
		  TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));

static WAIT_STATE(WAIT_FOR_PARENT, "?PRN",
		  TRANSIT_TO(PARENT_READY, 1 << FSCACHE_OBJECT_EV_PARENT_READY));

static WAIT_STATE(WAIT_FOR_CMD, "?CMD",
		  TRANSIT_TO(INVALIDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_INVALIDATE),
		  TRANSIT_TO(UPDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_UPDATE),
		  TRANSIT_TO(JUMPSTART_DEPS, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));

static WAIT_STATE(WAIT_FOR_CLEARANCE, "?CLR",
		  TRANSIT_TO(KILL_OBJECT, 1 << FSCACHE_OBJECT_EV_CLEARED));
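
/*
 * For example (illustrative only): whilst the machine is parked in
 * WAIT_FOR_CMD, an update request delivered with
 *
 *	fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
 *
 * is matched by the table above, the event bit is cleared and the machine
 * transits to UPDATE_OBJECT.
 */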

/*
 * Out-of-band event transition tables. These are for handling unexpected
 * events, such as an I/O error. If an OOB event occurs, the state machine
 * clears and disables the event and forces a transition to the nominated work
 * state (a currently executing work state will complete first).
 *
 * In such a situation, object->state remembers the state the machine should
 * have been in/gone to and returning NO_TRANSIT returns to that.
 */
static const struct fscache_transition fscache_osm_init_oob[] = {
	TRANSIT_TO(ABORT_INIT,
		   (1 << FSCACHE_OBJECT_EV_ERROR) |
		   (1 << FSCACHE_OBJECT_EV_KILL)),
	{ 0, NULL }
};

static const struct fscache_transition fscache_osm_lookup_oob[] = {
	TRANSIT_TO(LOOKUP_FAILURE,
		   (1 << FSCACHE_OBJECT_EV_ERROR) |
		   (1 << FSCACHE_OBJECT_EV_KILL)),
	{ 0, NULL }
};

static const struct fscache_transition fscache_osm_run_oob[] = {
	TRANSIT_TO(KILL_OBJECT,
		   (1 << FSCACHE_OBJECT_EV_ERROR) |
		   (1 << FSCACHE_OBJECT_EV_KILL)),
	{ 0, NULL }
};
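
/*
 * For example (illustrative only): a cache backend that hits a fatal
 * backing-store error can raise
 *
 *	fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
 *
 * and whichever of the tables above is currently installed forces the machine
 * into ABORT_INIT, LOOKUP_FAILURE or KILL_OBJECT once any currently executing
 * work state has returned to the dispatcher.
 */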

static int fscache_get_object(struct fscache_object *,
			      enum fscache_obj_ref_trace);
static void fscache_put_object(struct fscache_object *,
			       enum fscache_obj_ref_trace);
static bool fscache_enqueue_dependents(struct fscache_object *, int);
static void fscache_dequeue_object(struct fscache_object *);
static void fscache_update_aux_data(struct fscache_object *);

/*
 * we need to notify the parent when an op completes that we had outstanding
 * upon it
 */
static inline void fscache_done_parent_op(struct fscache_object *object)
{
	struct fscache_object *parent = object->parent;

	_enter("OBJ%x {OBJ%x,%x}",
	       object->debug_id, parent->debug_id, parent->n_ops);

	spin_lock_nested(&parent->lock, 1);
	parent->n_obj_ops--;
	parent->n_ops--;
	if (parent->n_ops == 0)
		fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
	spin_unlock(&parent->lock);
}

/*
 * Object state machine dispatcher.
 */
static void fscache_object_sm_dispatcher(struct fscache_object *object)
{
	const struct fscache_transition *t;
	const struct fscache_state *state, *new_state;
	unsigned long events, event_mask;
	bool oob;
	int event = -1;

	ASSERT(object != NULL);

	_enter("{OBJ%x,%s,%lx}",
	       object->debug_id, object->state->name, object->events);

	event_mask = object->event_mask;
restart:
	object->event_mask = 0; /* Mask normal event handling */
	state = object->state;
restart_masked:
	events = object->events;

	/* Handle any out-of-band events (typically an error) */
	if (events & object->oob_event_mask) {
		_debug("{OBJ%x} oob %lx",
		       object->debug_id, events & object->oob_event_mask);
		oob = true;
		for (t = object->oob_table; t->events; t++) {
			if (events & t->events) {
				state = t->transit_to;
				ASSERT(state->work != NULL);
				event = fls(events & t->events) - 1;
				__clear_bit(event, &object->oob_event_mask);
				clear_bit(event, &object->events);
				goto execute_work_state;
			}
		}
	}
	oob = false;

	/* Wait states are just transition tables */
	if (!state->work) {
		if (events & event_mask) {
			for (t = state->transitions; t->events; t++) {
				if (events & t->events) {
					new_state = t->transit_to;
					event = fls(events & t->events) - 1;
					trace_fscache_osm(object, state,
							  true, false, event);
					clear_bit(event, &object->events);
					_debug("{OBJ%x} ev %d: %s -> %s",
					       object->debug_id, event,
					       state->name, new_state->name);
					object->state = state = new_state;
					goto execute_work_state;
				}
			}

			/* The event mask didn't include all the tabled bits */
			BUG();
		}
		/* Randomly woke up */
		goto unmask_events;
	}

execute_work_state:
	_debug("{OBJ%x} exec %s", object->debug_id, state->name);

	trace_fscache_osm(object, state, false, oob, event);
	new_state = state->work(object, event);
	event = -1;
	if (new_state == NO_TRANSIT) {
		_debug("{OBJ%x} %s notrans", object->debug_id, state->name);
		if (unlikely(state == STATE(OBJECT_DEAD))) {
			_leave(" [dead]");
			return;
		}
		fscache_enqueue_object(object);
		event_mask = object->oob_event_mask;
		goto unmask_events;
	}

	_debug("{OBJ%x} %s -> %s",
	       object->debug_id, state->name, new_state->name);
	object->state = state = new_state;

	if (state->work) {
		if (unlikely(state == STATE(OBJECT_DEAD))) {
			_leave(" [dead]");
			return;
		}
		goto restart_masked;
	}

	/* Transited to wait state */
	event_mask = object->oob_event_mask;
	for (t = state->transitions; t->events; t++)
		event_mask |= t->events;

unmask_events:
	object->event_mask = event_mask;
	smp_mb();
	events = object->events;
	if (events & event_mask)
		goto restart;
	_leave(" [msk %lx]", event_mask);
}
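
/*
 * Note on the smp_mb() above: fscache_raise_event() does roughly
 *
 *	if (!test_and_set_bit(event, &object->events) &&
 *	    test_bit(event, &object->event_mask))
 *		fscache_enqueue_object(object);
 *
 * so the barrier orders the restoration of object->event_mask against the
 * re-read of object->events. An event raised whilst the mask was zeroed is
 * then caught either by the re-check here or by the raiser's own test of the
 * mask, and the object is requeued rather than the event being lost.
 */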

/*
 * execute an object
 */
static void fscache_object_work_func(struct work_struct *work)
{
	struct fscache_object *object =
		container_of(work, struct fscache_object, work);
	unsigned long start;

	_enter("{OBJ%x}", object->debug_id);

	start = jiffies;
	fscache_object_sm_dispatcher(object);
	fscache_hist(fscache_objs_histogram, start);
	fscache_put_object(object, fscache_obj_put_work);
}

/**
 * fscache_object_init - Initialise a cache object description
 * @object: Object description
 * @cookie: Cookie object will be attached to
 * @cache: Cache in which backing object will be found
 *
 * Initialise a cache object description to its basic values.
 *
 * See Documentation/filesystems/caching/backend-api.rst for a complete
 * description.
 */
void fscache_object_init(struct fscache_object *object,
			 struct fscache_cookie *cookie,
			 struct fscache_cache *cache)
{
	const struct fscache_transition *t;

	atomic_inc(&cache->object_count);

	object->state = STATE(WAIT_FOR_INIT);
	object->oob_table = fscache_osm_init_oob;
	object->flags = 1 << FSCACHE_OBJECT_IS_LIVE;
	spin_lock_init(&object->lock);
	INIT_LIST_HEAD(&object->cache_link);
	INIT_HLIST_NODE(&object->cookie_link);
	INIT_WORK(&object->work, fscache_object_work_func);
	INIT_LIST_HEAD(&object->dependents);
	INIT_LIST_HEAD(&object->dep_link);
	INIT_LIST_HEAD(&object->pending_ops);
	object->n_children = 0;
	object->n_ops = object->n_in_progress = object->n_exclusive = 0;
	object->events = 0;
	object->store_limit = 0;
	object->store_limit_l = 0;
	object->cache = cache;
	object->cookie = cookie;
	fscache_cookie_get(cookie, fscache_cookie_get_attach_object);
	object->parent = NULL;
#ifdef CONFIG_FSCACHE_OBJECT_LIST
	RB_CLEAR_NODE(&object->objlist_link);
#endif

	object->oob_event_mask = 0;
	for (t = object->oob_table; t->events; t++)
		object->oob_event_mask |= t->events;
	object->event_mask = object->oob_event_mask;
	for (t = object->state->transitions; t->events; t++)
		object->event_mask |= t->events;
}
EXPORT_SYMBOL(fscache_object_init);
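
/*
 * Illustrative sketch of how a cache backend is expected to call the above
 * from its ->alloc_object() method (struct my_cache_object, my_object_jar and
 * my_alloc_object() are hypothetical names; cachefiles does something of this
 * shape):
 *
 *	struct my_cache_object {
 *		struct fscache_object	fscache;
 *		...backend-private state...
 *	};
 *
 *	static struct fscache_object *my_alloc_object(struct fscache_cache *cache,
 *						      struct fscache_cookie *cookie)
 *	{
 *		struct my_cache_object *obj;
 *
 *		obj = kmem_cache_zalloc(my_object_jar, GFP_KERNEL);
 *		if (!obj)
 *			return NULL;
 *		fscache_object_init(&obj->fscache, cookie, cache);
 *		return &obj->fscache;
 *	}
 */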

/*
 * Mark the object as no longer being live, making sure that we synchronise
 * against op submission.
 */
static inline void fscache_mark_object_dead(struct fscache_object *object)
{
	spin_lock(&object->lock);
	clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
	spin_unlock(&object->lock);
}

/*
 * Abort object initialisation before we start it.
 */
static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *object,
								int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	object->oob_event_mask = 0;
	fscache_dequeue_object(object);
	return transit_to(KILL_OBJECT);
}

/*
 * initialise an object
 * - check the specified object's parent to see if we can make use of it
 *   immediately to do a creation
 * - we may need to start the process of creating a parent and we need to wait
 *   for the parent's lookup and creation to complete if it's not there yet
 */
static const struct fscache_state *fscache_initialise_object(struct fscache_object *object,
							      int event)
{
	struct fscache_object *parent;
	bool success;

	_enter("{OBJ%x},%d", object->debug_id, event);

	ASSERT(list_empty(&object->dep_link));

	parent = object->parent;
	if (!parent) {
		_leave(" [no parent]");
		return transit_to(DROP_OBJECT);
	}

	_debug("parent: %s of:%lx", parent->state->name, parent->flags);

	if (fscache_object_is_dying(parent)) {
		_leave(" [bad parent]");
		return transit_to(DROP_OBJECT);
	}

	if (fscache_object_is_available(parent)) {
		_leave(" [ready]");
		return transit_to(PARENT_READY);
	}

	_debug("wait");

	spin_lock(&parent->lock);
	fscache_stat(&fscache_n_cop_grab_object);
	success = false;
	if (fscache_object_is_live(parent) &&
	    object->cache->ops->grab_object(object, fscache_obj_get_add_to_deps)) {
		list_add(&object->dep_link, &parent->dependents);
		success = true;
	}
	fscache_stat_d(&fscache_n_cop_grab_object);
	spin_unlock(&parent->lock);
	if (!success) {
		_leave(" [grab failed]");
		return transit_to(DROP_OBJECT);
	}

	/* fscache_acquire_non_index_cookie() uses this
	 * to wake the chain up */
	fscache_raise_event(parent, FSCACHE_OBJECT_EV_NEW_CHILD);
	_leave(" [wait]");
	return transit_to(WAIT_FOR_PARENT);
}

/*
 * Once the parent object is ready, we should kick off our lookup op.
 */
static const struct fscache_state *fscache_parent_ready(struct fscache_object *object,
							 int event)
{
	struct fscache_object *parent = object->parent;

	_enter("{OBJ%x},%d", object->debug_id, event);

	ASSERT(parent != NULL);

	spin_lock(&parent->lock);
	parent->n_ops++;
	parent->n_obj_ops++;
	object->lookup_jif = jiffies;
	spin_unlock(&parent->lock);

	_leave("");
	return transit_to(LOOK_UP_OBJECT);
}

/*
 * look an object up in the cache from which it was allocated
 * - we hold an "access lock" on the parent object, so the parent object cannot
 *   be withdrawn by either party till we've finished
 */
static const struct fscache_state *fscache_look_up_object(struct fscache_object *object,
							   int event)
{
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_object *parent = object->parent;
	int ret;

	_enter("{OBJ%x},%d", object->debug_id, event);

	object->oob_table = fscache_osm_lookup_oob;

	ASSERT(parent != NULL);
	ASSERTCMP(parent->n_ops, >, 0);
	ASSERTCMP(parent->n_obj_ops, >, 0);

	/* make sure the parent is still available */
	ASSERT(fscache_object_is_available(parent));

	if (fscache_object_is_dying(parent) ||
	    test_bit(FSCACHE_IOERROR, &object->cache->flags) ||
	    !fscache_use_cookie(object)) {
		_leave(" [unavailable]");
		return transit_to(LOOKUP_FAILURE);
	}

	_debug("LOOKUP \"%s\" in \"%s\"",
	       cookie->def->name, object->cache->tag->name);

	fscache_stat(&fscache_n_object_lookups);
	fscache_stat(&fscache_n_cop_lookup_object);
	ret = object->cache->ops->lookup_object(object);
	fscache_stat_d(&fscache_n_cop_lookup_object);

	fscache_unuse_cookie(object);

	if (ret == -ETIMEDOUT) {
		/* probably stuck behind another object, so move this one to
		 * the back of the queue */
		fscache_stat(&fscache_n_object_lookups_timed_out);
		_leave(" [timeout]");
		return NO_TRANSIT;
	}

	if (ret < 0) {
		_leave(" [error]");
		return transit_to(LOOKUP_FAILURE);
	}

	_leave(" [ok]");
	return transit_to(OBJECT_AVAILABLE);
}

/**
 * fscache_object_lookup_negative - Note negative cookie lookup
 * @object: Object pointing to cookie to mark
 *
 * Note negative lookup, permitting those waiting to read data from an already
 * existing backing object to continue as there's no data for them to read.
 */
void fscache_object_lookup_negative(struct fscache_object *object)
{
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x,%s}", object->debug_id, object->state->name);

	if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
		fscache_stat(&fscache_n_object_lookups_negative);

		/* Allow write requests to begin stacking up and read requests to begin
		 * returning ENODATA.
		 */
		set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
		clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);

		_debug("wake up lookup %p", &cookie->flags);
		clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
	}
	_leave("");
}
EXPORT_SYMBOL(fscache_object_lookup_negative);

/**
 * fscache_obtained_object - Note successful object lookup or creation
 * @object: Object pointing to cookie to mark
 *
 * Note successful lookup and/or creation, permitting those waiting to write
 * data to a backing object to continue.
 *
 * Note that after calling this, an object's cookie may be relinquished by the
 * netfs, and so must be accessed with object lock held.
 */
void fscache_obtained_object(struct fscache_object *object)
{
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x,%s}", object->debug_id, object->state->name);

	/* if we were still looking up, then we must have a positive lookup
	 * result, in which case there may be data available */
	if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
		fscache_stat(&fscache_n_object_lookups_positive);

		/* We do (presumably) have data */
		clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
		clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);

		/* Allow write requests to begin stacking up and read requests
		 * to begin shovelling data.
		 */
		clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
	} else {
		fscache_stat(&fscache_n_object_created);
	}

	set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
	_leave("");
}
EXPORT_SYMBOL(fscache_obtained_object);
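
/*
 * Illustrative sketch of how a backend's ->lookup_object() method ties into
 * the two helpers above (my_lookup_on_disk() and my_create_on_disk() are
 * hypothetical stand-ins for the backend's own storage operations; cachefiles
 * follows roughly this pattern):
 *
 *	static int my_lookup_object(struct fscache_object *object)
 *	{
 *		int ret = my_lookup_on_disk(object);
 *
 *		if (ret == -ENODATA) {
 *			fscache_object_lookup_negative(object);
 *			ret = my_create_on_disk(object);
 *		}
 *		if (ret == 0)
 *			fscache_obtained_object(object);
 *		return ret;
 *	}
 */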

/*
 * handle an object that has just become available
 */
static const struct fscache_state *fscache_object_available(struct fscache_object *object,
							     int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	object->oob_table = fscache_osm_run_oob;

	spin_lock(&object->lock);

	fscache_done_parent_op(object);
	if (object->n_in_progress == 0) {
		if (object->n_ops > 0) {
			ASSERTCMP(object->n_ops, >=, object->n_obj_ops);
			fscache_start_operations(object);
		} else {
			ASSERT(list_empty(&object->pending_ops));
		}
	}
	spin_unlock(&object->lock);

	fscache_stat(&fscache_n_cop_lookup_complete);
	object->cache->ops->lookup_complete(object);
	fscache_stat_d(&fscache_n_cop_lookup_complete);

	fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
	fscache_stat(&fscache_n_object_avail);

	_leave("");
	return transit_to(JUMPSTART_DEPS);
}

/*
 * Wake up this object's dependent objects now that we've become available.
 */
static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *object,
								int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_PARENT_READY))
		return NO_TRANSIT; /* Not finished; requeue */
	return transit_to(WAIT_FOR_CMD);
}

/*
 * Handle lookup or creation failure.
 */
static const struct fscache_state *fscache_lookup_failure(struct fscache_object *object,
							   int event)
{
	struct fscache_cookie *cookie;

	_enter("{OBJ%x},%d", object->debug_id, event);

	object->oob_event_mask = 0;

	fscache_stat(&fscache_n_cop_lookup_complete);
	object->cache->ops->lookup_complete(object);
	fscache_stat_d(&fscache_n_cop_lookup_complete);

	set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags);

	cookie = object->cookie;
	set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
	if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);

	fscache_done_parent_op(object);
	return transit_to(KILL_OBJECT);
}

/*
 * Wait for completion of all active operations on this object and the death of
 * all child objects of this object.
 */
static const struct fscache_state *fscache_kill_object(struct fscache_object *object,
						       int event)
{
	_enter("{OBJ%x,%d,%d},%d",
	       object->debug_id, object->n_ops, object->n_children, event);

	fscache_mark_object_dead(object);
	object->oob_event_mask = 0;

	if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) {
		/* Reject any new read/write ops and abort any that are pending. */
		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
		fscache_cancel_all_ops(object);
	}

	if (list_empty(&object->dependents) &&
	    object->n_ops == 0 &&
	    object->n_children == 0)
		return transit_to(DROP_OBJECT);

	if (object->n_in_progress == 0) {
		spin_lock(&object->lock);
		if (object->n_ops > 0 && object->n_in_progress == 0)
			fscache_start_operations(object);
		spin_unlock(&object->lock);
	}

	if (!list_empty(&object->dependents))
		return transit_to(KILL_DEPENDENTS);

	return transit_to(WAIT_FOR_CLEARANCE);
}

/*
 * Kill dependent objects.
 */
static const struct fscache_state *fscache_kill_dependents(struct fscache_object *object,
							    int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_KILL))
		return NO_TRANSIT; /* Not finished */
	return transit_to(WAIT_FOR_CLEARANCE);
}

/*
 * Drop an object's attachments
 */
static const struct fscache_state *fscache_drop_object(struct fscache_object *object,
						       int event)
{
	struct fscache_object *parent = object->parent;
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_cache *cache = object->cache;
	bool awaken = false;

	_enter("{OBJ%x,%d},%d", object->debug_id, object->n_children, event);

	ASSERT(cookie != NULL);
	ASSERT(!hlist_unhashed(&object->cookie_link));

	if (test_bit(FSCACHE_COOKIE_AUX_UPDATED, &cookie->flags)) {
		_debug("final update");
		fscache_update_aux_data(object);
	}

	/* Make sure the cookie no longer points here and that the netfs isn't
	 * waiting for us.
	 */
	spin_lock(&cookie->lock);
	hlist_del_init(&object->cookie_link);
	if (hlist_empty(&cookie->backing_objects) &&
	    test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
		awaken = true;
	spin_unlock(&cookie->lock);

	if (awaken)
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
	if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);

	/* Prevent a race with our last child, which has to signal EV_CLEARED
	 * before dropping our spinlock.
	 */
	spin_lock(&object->lock);
	spin_unlock(&object->lock);

	/* Discard from the cache's collection of objects */
	spin_lock(&cache->object_list_lock);
	list_del_init(&object->cache_link);
	spin_unlock(&cache->object_list_lock);

	fscache_stat(&fscache_n_cop_drop_object);
	cache->ops->drop_object(object);
	fscache_stat_d(&fscache_n_cop_drop_object);

	/* The parent object wants to know when all its dependents have gone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) if (parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) _debug("release parent OBJ%x {%d}",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) parent->debug_id, parent->n_children);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) spin_lock(&parent->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) parent->n_children--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) if (parent->n_children == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) spin_unlock(&parent->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) object->parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) /* this just shifts the object release to the work processor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) fscache_put_object(object, fscache_obj_put_drop_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) fscache_stat(&fscache_n_object_dead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) _leave("");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) return transit_to(OBJECT_DEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) }

/*
 * Get a ref on an object.
 */
static int fscache_get_object(struct fscache_object *object,
			      enum fscache_obj_ref_trace why)
{
	int ret;

	fscache_stat(&fscache_n_cop_grab_object);
	ret = object->cache->ops->grab_object(object, why) ? 0 : -EAGAIN;
	fscache_stat_d(&fscache_n_cop_grab_object);
	return ret;
}

/*
 * Discard a ref on an object.
 */
static void fscache_put_object(struct fscache_object *object,
			       enum fscache_obj_ref_trace why)
{
	fscache_stat(&fscache_n_cop_put_object);
	object->cache->ops->put_object(object, why);
	fscache_stat_d(&fscache_n_cop_put_object);
}
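
/* A brief, hypothetical sketch of the backend operations that the two wrappers
 * above invoke.  Nothing below is part of this file; the my_cache_* names and
 * the usage counter are illustrative assumptions only.  A cache backend embeds
 * a struct fscache_object in its own object record and refcounts that record.
 */
#if 0	/* illustrative only, not compiled */
struct my_cache_object {
	struct fscache_object	fscache;	/* fscache's view of this object */
	atomic_t		usage;		/* hypothetical reference count */
};

static struct fscache_object *my_cache_grab_object(struct fscache_object *_object,
						   enum fscache_obj_ref_trace why)
{
	struct my_cache_object *object =
		container_of(_object, struct my_cache_object, fscache);

	atomic_inc(&object->usage);
	return &object->fscache;	/* non-NULL => fscache_get_object() returns 0 */
}

static void my_cache_put_object(struct fscache_object *_object,
				enum fscache_obj_ref_trace why)
{
	struct my_cache_object *object =
		container_of(_object, struct my_cache_object, fscache);

	if (atomic_dec_and_test(&object->usage))
		my_cache_free_object(object);	/* hypothetical release helper, sketched below */
}
#endif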

/**
 * fscache_object_destroy - Note that a cache object is about to be destroyed
 * @object: The object to be destroyed
 *
 * Note the imminent destruction and deallocation of a cache object record.
 */
void fscache_object_destroy(struct fscache_object *object)
{
	fscache_objlist_remove(object);

	/* We can get rid of the cookie now */
	fscache_cookie_put(object->cookie, fscache_cookie_put_object);
	object->cookie = NULL;
}
EXPORT_SYMBOL(fscache_object_destroy);
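
/* A hypothetical sketch of where fscache_object_destroy() fits in a backend's
 * object-release path: it is called once the backend is finally freeing its
 * object record, after which the memory can be returned to the slab.  The
 * my_cache_free_object and my_cache_object_jar names are illustrative
 * assumptions, not part of this file.
 */
#if 0	/* illustrative only, not compiled */
static void my_cache_free_object(struct my_cache_object *object)
{
	fscache_object_destroy(&object->fscache);
	kmem_cache_free(my_cache_object_jar, object);	/* hypothetical slab cache */
}
#endif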

/*
 * Enqueue an object for metadata-type processing.
 */
void fscache_enqueue_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	if (fscache_get_object(object, fscache_obj_get_queue) >= 0) {
		wait_queue_head_t *cong_wq =
			&get_cpu_var(fscache_object_cong_wait);

		if (queue_work(fscache_object_wq, &object->work)) {
			if (fscache_object_congested())
				wake_up(cong_wq);
		} else
			fscache_put_object(object, fscache_obj_put_queue);

		put_cpu_var(fscache_object_cong_wait);
	}
}

/**
 * fscache_object_sleep_till_congested - Sleep until object wq is congested
 * @timeoutp: Scheduler sleep timeout
 *
 * Allow an object handler to sleep until the object workqueue is congested.
 *
 * The caller must set up a wake-up event before calling this and must have set
 * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own
 * condition before calling this function, as no test is made here.
 *
 * %true is returned if the object wq is congested, %false otherwise.
 */
bool fscache_object_sleep_till_congested(signed long *timeoutp)
{
	wait_queue_head_t *cong_wq = this_cpu_ptr(&fscache_object_cong_wait);
	DEFINE_WAIT(wait);

	if (fscache_object_congested())
		return true;

	add_wait_queue_exclusive(cong_wq, &wait);
	if (!fscache_object_congested())
		*timeoutp = schedule_timeout(*timeoutp);
	finish_wait(cong_wq, &wait);

	return fscache_object_congested();
}
EXPORT_SYMBOL_GPL(fscache_object_sleep_till_congested);
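
/* A hypothetical sketch of the calling convention documented above: the caller
 * prepares to wait on its own wake-up event, re-tests its own condition, and
 * only then lets fscache_object_sleep_till_congested() put it to sleep.  The
 * loop ends when the condition comes true, the timeout expires or the object
 * workqueue becomes congested (in which case the caller should requeue its
 * work instead of waiting).  All my_cache_* names are illustrative assumptions.
 */
#if 0	/* illustrative only, not compiled */
static bool my_cache_wait_for_old_object(struct my_cache_object *xobject)
{
	signed long timeout = 60 * HZ;
	wait_queue_head_t *wq = &xobject->wait;		/* hypothetical wait queue */
	DEFINE_WAIT(wait);
	bool requeue = false;

	do {
		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		if (my_cache_old_object_gone(xobject))	/* hypothetical condition test */
			break;
		requeue = fscache_object_sleep_till_congested(&timeout);
	} while (timeout > 0 && !requeue);
	finish_wait(wq, &wait);

	return !requeue;
}
#endif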

/*
 * Enqueue the dependents of an object for metadata-type processing.
 *
 * If we don't manage to finish the list before the scheduler wants to run
 * again then return false immediately.  We return true if the list was
 * cleared.
 */
static bool fscache_enqueue_dependents(struct fscache_object *object, int event)
{
	struct fscache_object *dep;
	bool ret = true;

	_enter("{OBJ%x}", object->debug_id);

	if (list_empty(&object->dependents))
		return true;

	spin_lock(&object->lock);

	while (!list_empty(&object->dependents)) {
		dep = list_entry(object->dependents.next,
				 struct fscache_object, dep_link);
		list_del_init(&dep->dep_link);

		fscache_raise_event(dep, event);
		fscache_put_object(dep, fscache_obj_put_enq_dep);

		if (!list_empty(&object->dependents) && need_resched()) {
			ret = false;
			break;
		}
	}

	spin_unlock(&object->lock);
	return ret;
}

/*
 * Remove an object from whatever queue it's waiting on.
 */
static void fscache_dequeue_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	if (!list_empty(&object->dep_link)) {
		spin_lock(&object->parent->lock);
		list_del_init(&object->dep_link);
		spin_unlock(&object->parent->lock);
	}

	_leave("");
}

/**
 * fscache_check_aux - Ask the netfs whether an object on disk is still valid
 * @object: The object to ask about
 * @data: The auxiliary data for the object
 * @datalen: The size of the auxiliary data
 * @object_size: The size of the object, as passed to the netfs's check_aux op
 *
 * This function consults the netfs about the coherency state of an object.
 * The caller must be holding a ref on cookie->n_active (held by
 * fscache_look_up_object() on behalf of the cache backend during object lookup
 * and creation).
 */
enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
					const void *data, uint16_t datalen,
					loff_t object_size)
{
	enum fscache_checkaux result;

	if (!object->cookie->def->check_aux) {
		fscache_stat(&fscache_n_checkaux_none);
		return FSCACHE_CHECKAUX_OKAY;
	}

	result = object->cookie->def->check_aux(object->cookie->netfs_data,
						data, datalen, object_size);
	switch (result) {
		/* entry okay as is */
	case FSCACHE_CHECKAUX_OKAY:
		fscache_stat(&fscache_n_checkaux_okay);
		break;

		/* entry requires update */
	case FSCACHE_CHECKAUX_NEEDS_UPDATE:
		fscache_stat(&fscache_n_checkaux_update);
		break;

		/* entry requires deletion */
	case FSCACHE_CHECKAUX_OBSOLETE:
		fscache_stat(&fscache_n_checkaux_obsolete);
		break;

	default:
		BUG();
	}

	return result;
}
EXPORT_SYMBOL(fscache_check_aux);
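
/* A hypothetical sketch of the netfs side of the exchange above: a check_aux
 * callback supplied through the cookie definition's check_aux pointer, with
 * the signature this file calls it by.  The structure layout and the
 * my_netfs_* names are illustrative assumptions; a real netfs compares
 * whatever coherency data it recorded when the object was created, and may
 * also consult object_size.
 */
#if 0	/* illustrative only, not compiled */
struct my_netfs_aux {
	__be64	data_version;	/* hypothetical coherency data */
	__be64	mtime;
};

static enum fscache_checkaux my_netfs_check_aux(void *cookie_netfs_data,
						const void *data,
						uint16_t datalen,
						loff_t object_size)
{
	struct my_netfs_inode *inode = cookie_netfs_data;
	const struct my_netfs_aux *aux = data;

	if (datalen != sizeof(*aux))
		return FSCACHE_CHECKAUX_OBSOLETE;	/* unrecognised format: discard */
	if (be64_to_cpu(aux->data_version) != inode->data_version)
		return FSCACHE_CHECKAUX_OBSOLETE;	/* contents changed on the server */
	if (be64_to_cpu(aux->mtime) != inode->remote_mtime)
		return FSCACHE_CHECKAUX_NEEDS_UPDATE;	/* data usable, aux needs rewriting */
	return FSCACHE_CHECKAUX_OKAY;
}
#endif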

/*
 * Asynchronously invalidate an object.
 */
static const struct fscache_state *_fscache_invalidate_object(struct fscache_object *object,
							       int event)
{
	struct fscache_operation *op;
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x},%d", object->debug_id, event);

	/* We're going to need the cookie.  If the cookie is not available then
	 * retire the object instead.
	 */
	if (!fscache_use_cookie(object)) {
		ASSERT(radix_tree_empty(&object->cookie->stores));
		set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
		_leave(" [no cookie]");
		return transit_to(KILL_OBJECT);
	}

	/* Reject any new read/write ops and abort any that are pending. */
	fscache_invalidate_writes(cookie);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	fscache_cancel_all_ops(object);

	/* Now we have to wait for in-progress reads and writes */
	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op)
		goto nomem;

	fscache_operation_init(cookie, op, object->cache->ops->invalidate_object,
			       NULL, NULL);
	op->flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_EXCLUSIVE) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);
	trace_fscache_page_op(cookie, NULL, op, fscache_page_op_invalidate);

	spin_lock(&cookie->lock);
	if (fscache_submit_exclusive_op(object, op) < 0)
		goto submit_op_failed;
	spin_unlock(&cookie->lock);
	fscache_put_operation(op);

	/* Once we've completed the invalidation, we know there will be no data
	 * stored in the cache and thus we can reinstate the data-check-skip
	 * optimisation.
	 */
	set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* We can allow read and write requests to come in once again.  They'll
	 * queue up behind our exclusive invalidation operation.
	 */
	if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
	_leave(" [ok]");
	return transit_to(UPDATE_OBJECT);

nomem:
	fscache_mark_object_dead(object);
	fscache_unuse_cookie(object);
	_leave(" [ENOMEM]");
	return transit_to(KILL_OBJECT);

submit_op_failed:
	fscache_mark_object_dead(object);
	spin_unlock(&cookie->lock);
	fscache_unuse_cookie(object);
	kfree(op);
	_leave(" [EIO]");
	return transit_to(KILL_OBJECT);
}

static const struct fscache_state *fscache_invalidate_object(struct fscache_object *object,
							      int event)
{
	const struct fscache_state *s;

	fscache_stat(&fscache_n_invalidates_run);
	fscache_stat(&fscache_n_cop_invalidate_object);
	s = _fscache_invalidate_object(object, event);
	fscache_stat_d(&fscache_n_cop_invalidate_object);
	return s;
}

/*
 * Update auxiliary data.
 */
static void fscache_update_aux_data(struct fscache_object *object)
{
	fscache_stat(&fscache_n_updates_run);
	fscache_stat(&fscache_n_cop_update_object);
	object->cache->ops->update_object(object);
	fscache_stat_d(&fscache_n_cop_update_object);
}

/*
 * Asynchronously update an object.
 */
static const struct fscache_state *fscache_update_object(struct fscache_object *object,
							  int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	fscache_update_aux_data(object);

	_leave("");
	return transit_to(WAIT_FOR_CMD);
}
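
/* A hypothetical sketch of the backend update_object operation invoked by
 * fscache_update_aux_data() above: typically it re-reads the auxiliary data
 * from the cookie and rewrites whatever on-disk metadata carries it.  The
 * my_cache_* names are illustrative assumptions.
 */
#if 0	/* illustrative only, not compiled */
static void my_cache_update_object(struct fscache_object *_object)
{
	struct my_cache_object *object =
		container_of(_object, struct my_cache_object, fscache);

	my_cache_write_coherency_data(object);	/* hypothetical metadata writer */
}
#endif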

/**
 * fscache_object_retrying_stale - Note retrying stale object
 * @object: The object that will be retried
 *
 * Note that an object lookup found an on-disk object that was adjudged to be
 * stale and has been deleted.  The lookup will be retried.
 */
void fscache_object_retrying_stale(struct fscache_object *object)
{
	fscache_stat(&fscache_n_cache_stale_objects);
}
EXPORT_SYMBOL(fscache_object_retrying_stale);

/**
 * fscache_object_mark_killed - Note that an object was killed
 * @object: The object that was culled
 * @why: The reason the object was killed.
 *
 * Note that an object was killed and account it under the appropriate
 * cache-event statistic.  If the object has already been marked as killed by
 * the cache, an error is logged and the call is otherwise ignored.
 */
void fscache_object_mark_killed(struct fscache_object *object,
				enum fscache_why_object_killed why)
{
	if (test_and_set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags)) {
		pr_err("Error: Object already killed by cache [%s]\n",
		       object->cache->identifier);
		return;
	}

	switch (why) {
	case FSCACHE_OBJECT_NO_SPACE:
		fscache_stat(&fscache_n_cache_no_space_reject);
		break;
	case FSCACHE_OBJECT_IS_STALE:
		fscache_stat(&fscache_n_cache_stale_objects);
		break;
	case FSCACHE_OBJECT_WAS_RETIRED:
		fscache_stat(&fscache_n_cache_retired_objects);
		break;
	case FSCACHE_OBJECT_WAS_CULLED:
		fscache_stat(&fscache_n_cache_culled_objects);
		break;
	}
}
EXPORT_SYMBOL(fscache_object_mark_killed);
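
/* A hypothetical sketch of a backend using fscache_object_mark_killed(): when
 * the cache cannot make room for an object, it marks the object killed with
 * the appropriate reason before failing the operation.  The my_cache_* names
 * are illustrative assumptions.
 */
#if 0	/* illustrative only, not compiled */
static int my_cache_reserve_space(struct my_cache_object *object, loff_t size)
{
	if (!my_cache_has_space(object->fscache.cache, size)) {	/* hypothetical check */
		fscache_object_mark_killed(&object->fscache, FSCACHE_OBJECT_NO_SPACE);
		return -ENOBUFS;
	}
	return 0;
}
#endif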

/*
 * The object is dead.  We can get here if an object gets queued by an event
 * that would lead to its death (such as EV_KILL) when the dispatcher is
 * already running (and so can be requeued) but hasn't yet cleared the event
 * mask.
 */
static const struct fscache_state *fscache_object_dead(struct fscache_object *object,
						       int event)
{
	if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD,
			      &object->flags))
		return NO_TRANSIT;

	WARN(true, "FS-Cache object redispatched after death");
	return NO_TRANSIT;
}