/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RAID5_H
#define _RAID5_H

#include <linux/raid/xor.h>
#include <linux/dmaengine.h>

/*
 *
 * Each stripe contains one buffer per device. Each buffer can be in
 * one of a number of states stored in "flags". Changes between
 * these states happen *almost* exclusively under the protection of the
 * STRIPE_ACTIVE flag. Some very specific changes can happen in bi_end_io, and
 * these are not protected by STRIPE_ACTIVE.
 *
 * The flag bits that are used to represent these states are:
 *   R5_UPTODATE and R5_LOCKED
 *
 * State Empty == !UPTODATE, !LOCK
 *        We have no data, and there is no active request
 * State Want == !UPTODATE, LOCK
 *        A read request is being submitted for this block
 * State Dirty == UPTODATE, LOCK
 *        Some new data is in this buffer, and it is being written out
 * State Clean == UPTODATE, !LOCK
 *        We have valid data which is the same as on disc
 *
 * The possible state transitions are:
 *
 *  Empty -> Want   - on read or write to get old data for  parity calc
 *  Empty -> Dirty  - on compute_parity to satisfy write/sync request.
 *  Empty -> Clean  - on compute_block when computing a block for failed drive
 *  Want  -> Empty  - on failed read
 *  Want  -> Clean  - on successful completion of read request
 *  Dirty -> Clean  - on successful completion of write request
 *  Dirty -> Clean  - on failed write
 *  Clean -> Dirty  - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW)
 *
 * The Want->Empty, Want->Clean and Dirty->Clean transitions
 * all happen in b_end_io at interrupt time.
 * Each sets the Uptodate bit before releasing the Lock bit.
 * This leaves one multi-stage transition:
 *    Want->Dirty->Clean
 * This is safe because thinking that a Clean buffer is actually dirty
 * will at worst delay some action, and the stripe will be scheduled
 * for attention after the transition is complete.
 * (A sketch decoding these flag states follows enum r5dev_flags below.)
 *
 * There is one possibility that is not covered by these states. That
 * is if one drive has failed and there is a spare being rebuilt. We
 * can't distinguish between a clean block that has been generated
 * from parity calculations, and a clean block that has been
 * successfully written to the spare (or to parity when resyncing).
 * To distinguish these states we have a stripe bit STRIPE_INSYNC that
 * is set whenever a write is scheduled to the spare, or to the parity
 * disc if there is no spare. A sync request clears this bit, and
 * when we find it set with no buffers locked, we know the sync is
 * complete.
 *
 * Buffers for the md device that arrive via make_request are attached
 * to the appropriate stripe in one of two lists linked on b_reqnext.
 * One list (bh_read) for read requests, one (bh_write) for write.
 * There should never be more than one buffer on the two lists
 * together, but this is not guaranteed, so we allow for more.
 *
 * If a buffer is on the read list when the associated cache buffer is
 * Uptodate, the data is copied into the read buffer and its b_end_io
 * routine is called. This may happen in the end_request routine only
 * if the buffer has just successfully been read. end_request should
 * remove the buffers from the list and then set the Uptodate bit on
 * the buffer. Other threads may do this only if they first check
 * that the Uptodate bit is set. Once they have checked that, they may
 * take buffers off the read queue.
 *
 * When a buffer on the write list is committed for write it is copied
 * into the cache buffer, which is then marked dirty, and moved onto a
 * third list, the written list (bh_written). Once both the parity
 * block and the cached buffer are successfully written, any buffer on
 * a written list can be returned with b_end_io.
 *
 * The write list and read list both act as fifos. The read list,
 * write list and written list are protected by the device_lock.
 * The device_lock is only for list manipulations and will only be
 * held for a very short time. It can be claimed from interrupts.
 *
 *
 * Stripes in the stripe cache can be on one of two lists (or on
 * neither). The "inactive_list" contains stripes which are not
 * currently being used for any request. They can freely be reused
 * for another stripe. The "handle_list" contains stripes that need
 * to be handled in some way. Both of these are fifo queues. Each
 * stripe is also (potentially) linked to a hash bucket in the hash
 * table so that it can be found by sector number. Stripes that are
 * not hashed must be on the inactive_list, and will normally be at
 * the front. All stripes start life this way.
 *
 * The inactive_list, handle_list and hash bucket lists are all protected by the
 * device_lock.
 *  - stripes have a reference counter. If count==0, they are on a list.
 *  - If a stripe might need handling, STRIPE_HANDLE is set.
 *  - When refcount reaches zero, then if STRIPE_HANDLE it is put on
 *    handle_list else inactive_list
 *
 * This, combined with the fact that STRIPE_HANDLE is only ever
 * cleared while a stripe has a non-zero count, means that if the
 * refcount is 0 and STRIPE_HANDLE is set, then it is on the
 * handle_list, and if refcount is 0 and STRIPE_HANDLE is not set, then
 * the stripe is on inactive_list.
 *
 * The possible transitions are:
 *  activate an unhashed/inactive stripe (get_active_stripe())
 *     lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev
 *  activate a hashed, possibly active stripe (get_active_stripe())
 *     lockdev check-hash if(!cnt++)unlink-stripe unlockdev
 *  attach a request to an active stripe (add_stripe_bh())
 *     lockdev attach-buffer unlockdev
 *  handle a stripe (handle_stripe())
 *     setSTRIPE_ACTIVE, clrSTRIPE_HANDLE ...
 *		(lockdev check-buffers unlockdev) ..
 *		change-state ..
 *		record io/ops needed clearSTRIPE_ACTIVE schedule io/ops
 *  release an active stripe (release_stripe())
 *     lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
 *     (a simplified sketch of this release step follows struct r5conf below)
 *
 * The refcount counts each thread that has activated the stripe,
 * plus raid5d if it is handling it, plus one for each active request
 * on a cached buffer, and plus one if the stripe is undergoing stripe
 * operations.
 *
 * The stripe operations are:
 *  -copying data between the stripe cache and user application buffers
 *  -computing blocks to save a disk access, or to recover a missing block
 *  -updating the parity on a write operation (reconstruct write and
 *   read-modify-write)
 *  -checking parity correctness
 *  -running i/o to disk
 * These operations are carried out by raid5_run_ops which uses the async_tx
 * api to (optionally) offload operations to dedicated hardware engines.
 * When requesting an operation handle_stripe sets the pending bit for the
 * operation and increments the count (see the sketch after the STRIPE_OP_*
 * flags below). raid5_run_ops is then run whenever the count is non-zero.
 * There are some critical dependencies between the operations that prevent some
 * from being requested while another is in flight.
 * 1/ Parity check operations destroy the in-cache version of the parity block,
 *    so we prevent parity dependent operations like writes and compute_blocks
 *    from starting while a check is in progress. Some dma engines can perform
 *    the check without damaging the parity block, in these cases the parity
 *    block is re-marked up to date (assuming the check was successful) and is
 *    not re-read from disk.
 * 2/ When a write operation is requested we immediately lock the affected
 *    blocks, and mark them as not up to date. This causes new read requests
 *    to be held off, as well as parity checks and compute block operations.
 * 3/ Once a compute block operation has been requested handle_stripe treats
 *    that block as if it is up to date. raid5_run_ops guarantees that any
 *    operation that is dependent on the compute block result is initiated after
 *    the compute block completes.
 */

/*
 * Operations state - intermediate states that are visible outside of
 * STRIPE_ACTIVE.
 * In general _idle indicates nothing is running, _run indicates a data
 * processing operation is active, and _result means the data processing result
 * is stable and can be acted upon. Simple operations like biofill and
 * compute, which only have an _idle and a _run state, are indicated with
 * sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN).
 */
/**
 * enum check_states - handles syncing / repairing a stripe
 * @check_state_idle - check operations are quiesced
 * @check_state_run - xor parity check operation is running
 * @check_state_run_q - q-parity check operation is running
 * @check_state_run_pq - pq dual-parity check operation is running
 * @check_state_check_result - set outside lock when check result is valid
 * @check_state_compute_run - check failed and we are repairing
 * @check_state_compute_result - set outside lock when compute result is valid
 */
enum check_states {
	check_state_idle = 0,
	check_state_run,		/* xor parity check */
	check_state_run_q,		/* q-parity check */
	check_state_run_pq,		/* pq dual parity check */
	check_state_check_result,
	check_state_compute_run,	/* parity repair */
	check_state_compute_result,
};

/**
 * enum reconstruct_states - handles writing or expanding a stripe
 */
enum reconstruct_states {
	reconstruct_state_idle = 0,
	reconstruct_state_prexor_drain_run,	/* prexor-write */
	reconstruct_state_drain_run,		/* write */
	reconstruct_state_run,			/* expand */
	reconstruct_state_prexor_drain_result,
	reconstruct_state_drain_result,
	reconstruct_state_result,
};

#define DEFAULT_STRIPE_SIZE	4096
struct stripe_head {
	struct hlist_node	hash;
	struct list_head	lru;		/* inactive_list or handle_list */
	struct llist_node	release_list;
	struct r5conf		*raid_conf;
	short			generation;	/* increments with every reshape */
	sector_t		sector;		/* sector of this row */
	short			pd_idx;		/* parity disk index */
	short			qd_idx;		/* 'Q' disk index for raid6 */
	short			ddf_layout;	/* use DDF ordering to calculate Q */
	short			hash_lock_index;
	unsigned long		state;		/* state flags */
	atomic_t		count;		/* nr of active thread/requests */
	int			bm_seq;		/* sequence number for bitmap flushes */
	int			disks;		/* disks in stripe */
	int			overwrite_disks; /* total overwrite disks in stripe,
						  * this is only checked when stripe
						  * has STRIPE_BATCH_READY
						  */
	enum check_states	check_state;
	enum reconstruct_states reconstruct_state;
	spinlock_t		stripe_lock;
	int			cpu;
	struct r5worker_group	*group;

	struct stripe_head	*batch_head;	/* protected by stripe lock */
	spinlock_t		batch_lock;	/* only header's lock is useful */
	struct list_head	batch_list;	/* protected by head's batch lock */

	union {
		struct r5l_io_unit	*log_io;
		struct ppl_io_unit	*ppl_io;
	};

	struct list_head	log_list;
	sector_t		log_start;	/* first meta block on the journal */
	struct list_head	r5c;		/* for r5c_cache->stripe_in_journal */

	struct page		*ppl_page;	/* partial parity of this stripe */
	/**
	 * struct stripe_operations
	 * @target - STRIPE_OP_COMPUTE_BLK target
	 * @target2 - 2nd compute target in the raid6 case
	 * @zero_sum_result - P and Q verification flags
	 */
	struct stripe_operations {
		int		     target, target2;
		enum sum_check_flags zero_sum_result;
	} ops;

#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
	/* These pages will be used by bios in dev[i] */
	struct page	**pages;
	int		nr_pages;	/* page array size */
	int		stripes_per_page;
#endif
	struct r5dev {
		/* rreq and rvec are used for the replacement device when
		 * writing data to both devices.
		 */
		struct bio	req, rreq;
		struct bio_vec	vec, rvec;
		struct page	*page, *orig_page;
		unsigned int	offset;		/* offset of the page */
		struct bio	*toread, *read, *towrite, *written;
		sector_t	sector;		/* sector of this page */
		unsigned long	flags;
		u32		log_checksum;
		unsigned short	write_hint;
	} dev[1];   /* allocated with extra space depending on RAID geometry */
};

/* stripe_head_state - collects and tracks the dynamic state of a stripe_head
 * for handle_stripe.
 */
struct stripe_head_state {
	/* 'syncing' means that we need to read all devices, either
	 * to check/correct parity, or to reconstruct a missing device.
	 * 'replacing' means we are replacing one or more drives and
	 * the source is valid at this point so we don't need to
	 * read all devices, just the replacement targets.
	 */
	int syncing, expanding, expanded, replacing;
	int locked, uptodate, to_read, to_write, failed, written;
	int to_fill, compute, req_compute, non_overwrite;
	int injournal, just_cached;
	int failed_num[2];
	int p_failed, q_failed;
	int dec_preread_active;
	unsigned long ops_request;

	struct md_rdev *blocked_rdev;
	int handle_bad_blocks;
	int log_failed;
	int waiting_extra_page;
};

/* Flags for struct r5dev.flags */
enum r5dev_flags {
	R5_UPTODATE,	/* page contains current data */
	R5_LOCKED,	/* IO has been submitted on "req" */
	R5_DOUBLE_LOCKED,/* Cannot clear R5_LOCKED until 2 writes complete */
	R5_OVERWRITE,	/* towrite covers whole page */
/* and some that are internal to handle_stripe */
	R5_Insync,	/* rdev && rdev->in_sync at start */
	R5_Wantread,	/* want to schedule a read */
	R5_Wantwrite,
	R5_Overlap,	/* There is a pending overlapping request
			 * on this block */
	R5_ReadNoMerge, /* prevent bio from merging in block-layer */
	R5_ReadError,	/* seen a read error here recently */
	R5_ReWrite,	/* have tried to over-write the readerror */

	R5_Expanded,	/* This block now has post-expand data */
	R5_Wantcompute,	/* compute_block in progress, treat as
			 * uptodate
			 */
	R5_Wantfill,	/* dev->toread contains a bio that needs
			 * filling
			 */
	R5_Wantdrain,	/* dev->towrite needs to be drained */
	R5_WantFUA,	/* Write should be FUA */
	R5_SyncIO,	/* The IO is sync */
	R5_WriteError,	/* got a write error - need to record it */
	R5_MadeGood,	/* A bad block has been fixed by writing to it */
	R5_ReadRepl,	/* Will/did read from replacement rather than orig */
	R5_MadeGoodRepl,/* A bad block on the replacement device has been
			 * fixed by writing to it */
	R5_NeedReplace,	/* This device has a replacement which is not
			 * up-to-date at this stripe. */
	R5_WantReplace, /* We need to update the replacement, we have read
			 * data in, and now is a good time to write it out.
			 */
	R5_Discard,	/* Discard the stripe */
	R5_SkipCopy,	/* Don't copy data from bio to stripe cache */
	R5_InJournal,	/* data being written is in the journal device.
			 * if R5_InJournal is set for parity pd_idx, all the
			 * data and parity being written are in the journal
			 * device
			 */
	R5_OrigPageUPTDODATE,	/* with write back cache, we read old data into
				 * dev->orig_page for prexor. When this flag is
				 * set, orig_page contains latest data in the
				 * raid disk.
				 */
};
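
/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * decoding the Empty/Want/Dirty/Clean buffer states described at the top
 * of this file from the R5_UPTODATE and R5_LOCKED bits of r5dev.flags.
 */
static inline const char *r5dev_state_name_sketch(unsigned long flags)
{
	bool uptodate = test_bit(R5_UPTODATE, &flags);
	bool locked = test_bit(R5_LOCKED, &flags);

	if (uptodate)
		return locked ? "Dirty" : "Clean";	/* being written : same as disc */
	return locked ? "Want" : "Empty";		/* read submitted : no data */
}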

/*
 * Stripe state
 */
enum {
	STRIPE_ACTIVE,
	STRIPE_HANDLE,
	STRIPE_SYNC_REQUESTED,
	STRIPE_SYNCING,
	STRIPE_INSYNC,
	STRIPE_REPLACED,
	STRIPE_PREREAD_ACTIVE,
	STRIPE_DELAYED,
	STRIPE_DEGRADED,
	STRIPE_BIT_DELAY,
	STRIPE_EXPANDING,
	STRIPE_EXPAND_SOURCE,
	STRIPE_EXPAND_READY,
	STRIPE_IO_STARTED,	/* do not count towards 'bypass_count' */
	STRIPE_FULL_WRITE,	/* all blocks are set to be overwritten */
	STRIPE_BIOFILL_RUN,
	STRIPE_COMPUTE_RUN,
	STRIPE_ON_UNPLUG_LIST,
	STRIPE_DISCARD,
	STRIPE_ON_RELEASE_LIST,
	STRIPE_BATCH_READY,
	STRIPE_BATCH_ERR,
	STRIPE_BITMAP_PENDING,	/* Being added to bitmap, don't add
				 * to batch yet.
				 */
	STRIPE_LOG_TRAPPED,	/* trapped into log (see raid5-cache.c)
				 * this bit is used in two scenarios:
				 *
				 * 1. write-out phase
				 *  set in first entry of r5l_write_stripe
				 *  clear in second entry of r5l_write_stripe
				 *  used to bypass logic in handle_stripe
				 *
				 * 2. caching phase
				 *  set in r5c_try_caching_write()
				 *  clear when journal write is done
				 *  used to initiate r5c_cache_data()
				 *  also used to bypass logic in handle_stripe
				 */
	STRIPE_R5C_CACHING,	/* the stripe is in caching phase;
				 * see raid5-cache.c for more detail
				 */
	STRIPE_R5C_PARTIAL_STRIPE,	/* in r5c cache (to-be/being handled or
					 * in conf->r5c_partial_stripe_list)
					 */
	STRIPE_R5C_FULL_STRIPE,	/* in r5c cache (to-be/being handled or
				 * in conf->r5c_full_stripe_list)
				 */
	STRIPE_R5C_PREFLUSH,	/* need to flush journal device */
};

#define STRIPE_EXPAND_SYNC_FLAGS \
	((1 << STRIPE_EXPAND_SOURCE) |\
	(1 << STRIPE_EXPAND_READY) |\
	(1 << STRIPE_EXPANDING) |\
	(1 << STRIPE_SYNC_REQUESTED))
/*
 * Operation request flags
 */
enum {
	STRIPE_OP_BIOFILL,
	STRIPE_OP_COMPUTE_BLK,
	STRIPE_OP_PREXOR,
	STRIPE_OP_BIODRAIN,
	STRIPE_OP_RECONSTRUCT,
	STRIPE_OP_CHECK,
	STRIPE_OP_PARTIAL_PARITY,
};
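
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): how
 * handle_stripe() might request a drain+reconstruct pass for raid5_run_ops(),
 * per "When requesting an operation handle_stripe sets the pending bit"
 * in the comment at the top of this file.  The real request logic in
 * raid5.c sets these bits conditionally.
 */
static inline void r5_request_write_ops_sketch(struct stripe_head_state *s)
{
	/* drain dev->towrite into the stripe cache ... */
	set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
	/* ... then recompute parity from the updated blocks */
	set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
}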

/*
 * RAID parity calculation preferences
 */
enum {
	PARITY_DISABLE_RMW = 0,
	PARITY_ENABLE_RMW,
	PARITY_PREFER_RMW,
};

/*
 * Pages requested from set_syndrome_sources()
 */
enum {
	SYNDROME_SRC_ALL,
	SYNDROME_SRC_WANT_DRAIN,
	SYNDROME_SRC_WRITTEN,
};
/*
 * Plugging:
 *
 * To improve write throughput, we need to delay the handling of some
 * stripes until there has been a chance that several write requests
 * for the one stripe have all been collected.
 * In particular, any write request that would require pre-reading
 * is put on a "delayed" queue until there are no stripes currently
 * in a pre-read phase. Further, if the "delayed" queue is empty when
 * a stripe is put on it then we "plug" the queue and do not process it
 * until an unplug call is made (i.e. until unplug_io_fn() is called).
 *
 * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add
 * it to the count of prereading stripes.
 * When write is initiated, or the stripe refcnt == 0 (just in case), we
 * clear the PREREAD_ACTIVE flag and decrement the count.
 * Whenever the 'handle' queue is empty and the device is not plugged, we
 * move any stripes from delayed to handle and clear the DELAYED flag and set
 * PREREAD_ACTIVE.
 * In handle_stripe, if we find pre-reading is necessary, we do it if
 * PREREAD_ACTIVE is set, else we set DELAYED which will send it to the
 * delayed queue.
 * HANDLE gets cleared if handle_stripe leaves nothing locked.
 */

/* Note: disk_info.rdev can be set to NULL asynchronously by raid5_remove_disk.
 * There are three safe ways to access disk_info.rdev.
 * 1/ when holding mddev->reconfig_mutex
 * 2/ when resync/recovery/reshape is known to be happening - i.e. in code that
 *    is called as part of performing resync/recovery/reshape.
 * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer
 *    and if it is non-NULL, increment rdev->nr_pending before dropping the RCU
 *    lock.
 * When .rdev is set to NULL, the nr_pending count is checked again and if
 * it has been incremented, the pointer is put back in .rdev.
 */

struct disk_info {
	struct md_rdev	*rdev, *replacement;
	struct page	*extra_page; /* extra page to use in prexor */
};
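
/*
 * Illustrative sketch of access method 3/ above (hypothetical helper, not
 * part of the driver; assumes md.h has already been included, as raid5.c
 * does, so that struct md_rdev is complete).  The caller must later drop
 * the pinned reference, e.g. with rdev_dec_pending().
 */
static inline struct md_rdev *r5_get_rdev_sketch(struct disk_info *info)
{
	struct md_rdev *rdev;

	rcu_read_lock();
	rdev = rcu_dereference(info->rdev);
	if (rdev)
		atomic_inc(&rdev->nr_pending);	/* pin before dropping RCU */
	rcu_read_unlock();
	return rdev;
}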

/*
 * Stripe cache
 */

#define NR_STRIPES		256

#if PAGE_SIZE == DEFAULT_STRIPE_SIZE
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#endif

#define IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)
#define MAX_STRIPE_BATCH	8
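
/*
 * Illustrative sketch (hypothetical helper): one plausible mapping from a
 * stripe's sector to a hash bucket, consistent with NR_HASH/HASH_MASK
 * above; the driver's real hash function lives in raid5.c.
 */
static inline struct hlist_head *r5_hash_bucket_sketch(struct hlist_head *tbl,
						       sector_t sect,
						       unsigned int shift)
{
	/* one stripe per 2^shift sectors, masked into NR_HASH buckets */
	return &tbl[(sect >> shift) & HASH_MASK];
}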

/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
 * This is because we sometimes take all the spinlocks
 * and creating that much locking depth can cause
 * problems.
 */
#define NR_STRIPE_HASH_LOCKS 8
#define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1)

struct r5worker {
	struct work_struct work;
	struct r5worker_group *group;
	struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
	bool working;
};

struct r5worker_group {
	struct list_head handle_list;
	struct list_head loprio_list;
	struct r5conf *conf;
	struct r5worker *workers;
	int stripes_cnt;
};

/*
 * r5c journal modes of the array: write-back or write-through.
 * write-through mode behaves identically to the existing log-only
 * implementation.
 */
enum r5c_journal_mode {
	R5C_JOURNAL_MODE_WRITE_THROUGH = 0,
	R5C_JOURNAL_MODE_WRITE_BACK = 1,
};

enum r5_cache_state {
	R5_INACTIVE_BLOCKED,	/* release of inactive stripes blocked,
				 * waiting for 25% to be free
				 */
	R5_ALLOC_MORE,		/* It might help to allocate another
				 * stripe.
				 */
	R5_DID_ALLOC,		/* A stripe was allocated, don't allocate
				 * more until at least one has been
				 * released. This avoids flooding
				 * the cache.
				 */
	R5C_LOG_TIGHT,		/* log device space tight, need to
				 * prioritize stripes at last_checkpoint
				 */
	R5C_LOG_CRITICAL,	/* log device is running out of space,
				 * only process stripes that are already
				 * occupying the log
				 */
	R5C_EXTRA_PAGE_IN_USE,	/* a stripe is using disk_info.extra_page
				 * for prexor
				 */
};

#define PENDING_IO_MAX 512
#define PENDING_IO_ONE_FLUSH 128
struct r5pending_data {
	struct list_head sibling;
	sector_t sector; /* stripe sector */
	struct bio_list bios;
};

struct r5conf {
	struct hlist_head	*stripe_hashtbl;
	/* only protect corresponding hash list and inactive_list */
	spinlock_t		hash_locks[NR_STRIPE_HASH_LOCKS];
	struct mddev		*mddev;
	int			chunk_sectors;
	int			level, algorithm, rmw_level;
	int			max_degraded;
	int			raid_disks;
	int			max_nr_stripes;
	int			min_nr_stripes;
#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
	unsigned long	stripe_size;
	unsigned int	stripe_shift;
	unsigned long	stripe_sectors;
#endif

	/* reshape_progress is the leading edge of a 'reshape'.
	 * It has value MaxSector when no reshape is happening.
	 * If delta_disks < 0, it is the last sector we started work on,
	 * else it is the next sector to work on.
	 */
	sector_t		reshape_progress;
	/* reshape_safe is the trailing edge of a reshape. We know that
	 * before (or after) this address, all reshape has completed.
	 */
	sector_t		reshape_safe;
	int			previous_raid_disks;
	int			prev_chunk_sectors;
	int			prev_algo;
	short			generation;	/* increments with every reshape */
	seqcount_spinlock_t	gen_lock;	/* lock against generation changes */
	unsigned long		reshape_checkpoint; /* Time we last updated
						     * metadata */
	long long		min_offset_diff; /* minimum difference between
						  * data_offset and
						  * new_data_offset across all
						  * devices. May be negative,
						  * but is closest to zero.
						  */

	struct list_head	handle_list;	/* stripes needing handling */
	struct list_head	loprio_list;	/* low priority stripes */
	struct list_head	hold_list;	/* preread ready stripes */
	struct list_head	delayed_list;	/* stripes that have plugged requests */
	struct list_head	bitmap_list;	/* stripes delayed awaiting a bitmap update */
	struct bio		*retry_read_aligned;	/* currently retrying aligned bios */
	unsigned int		retry_read_offset;	/* sector offset into retry_read_aligned */
	struct bio		*retry_read_aligned_list; /* aligned bios retry list */
	atomic_t		preread_active_stripes;	/* stripes with scheduled io */
	atomic_t		active_aligned_reads;
	atomic_t		pending_full_writes;	/* full write backlog */
	int			bypass_count;		/* bypassed prereads */
	int			bypass_threshold;	/* preread nice */
	int			skip_copy;	/* Don't copy data from bio to stripe cache */
	struct list_head	*last_hold;	/* detect hold_list promotions */

	atomic_t		reshape_stripes; /* stripes with pending writes for reshape */
	/* unfortunately we need two cache names as we temporarily have
	 * two caches.
	 */
	int			active_name;
	char			cache_name[2][32];
	struct kmem_cache	*slab_cache; /* for allocating stripes */
	struct mutex		cache_size_mutex; /* Protect changes to cache size */

	int			seq_flush, seq_write;
	int			quiesce;

	int			fullsync;  /* set to 1 if a full sync is needed,
					    * (fresh device added).
					    * Cleared when a sync completes.
					    */
	int			recovery_disabled;
	/* per cpu variables */
	struct raid5_percpu {
		struct page	*spare_page; /* Used when checking P/Q in raid6 */
		void		*scribble;  /* space for constructing buffer
					     * lists and performing address
					     * conversions
					     */
		int scribble_obj_size;
	} __percpu *percpu;
	int scribble_disks;
	int scribble_sectors;
	struct hlist_node node;

	/*
	 * Free stripes pool
	 */
	atomic_t		active_stripes;
	struct list_head	inactive_list[NR_STRIPE_HASH_LOCKS];

	atomic_t		r5c_cached_full_stripes;
	struct list_head	r5c_full_stripe_list;
	atomic_t		r5c_cached_partial_stripes;
	struct list_head	r5c_partial_stripe_list;
	atomic_t		r5c_flushing_full_stripes;
	atomic_t		r5c_flushing_partial_stripes;

	atomic_t		empty_inactive_list_nr;
	struct llist_head	released_stripes;
	wait_queue_head_t	wait_for_quiescent;
	wait_queue_head_t	wait_for_stripe;
	wait_queue_head_t	wait_for_overlap;
	unsigned long		cache_state;
	struct shrinker		shrinker;
	int			pool_size; /* number of disks in stripeheads in pool */
	spinlock_t		device_lock;
	struct disk_info	*disks;
	struct bio_set		bio_split;

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct md_thread	*thread;
	struct list_head	temp_inactive_list[NR_STRIPE_HASH_LOCKS];
	struct r5worker_group	*worker_groups;
	int			group_cnt;
	int			worker_cnt_per_group;
	struct r5l_log		*log;
	void			*log_private;

	spinlock_t		pending_bios_lock;
	bool			batch_bio_dispatch;
	struct r5pending_data	*pending_data;
	struct list_head	free_list;
	struct list_head	pending_list;
	int			pending_data_cnt;
	struct r5pending_data	*next_pending_data;
};
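
/*
 * Illustrative sketch (hypothetical helper, not part of the driver) of the
 * release transition described at the top of this file:
 *   lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list
 *			   else add to inactive-list } unlockdev
 * The real release path in raid5.c additionally handles batching, the
 * per-bucket hash locks and the r5c lists.
 */
static inline void r5_release_stripe_sketch(struct r5conf *conf,
					    struct stripe_head *sh)
{
	spin_lock(&conf->device_lock);
	if (atomic_dec_and_test(&sh->count)) {
		if (test_bit(STRIPE_HANDLE, &sh->state))
			list_add_tail(&sh->lru, &conf->handle_list);
		else
			list_add_tail(&sh->lru,
				      &conf->inactive_list[sh->hash_lock_index]);
	}
	spin_unlock(&conf->device_lock);
}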

#if PAGE_SIZE == DEFAULT_STRIPE_SIZE
#define RAID5_STRIPE_SIZE(conf)		STRIPE_SIZE
#define RAID5_STRIPE_SHIFT(conf)	STRIPE_SHIFT
#define RAID5_STRIPE_SECTORS(conf)	STRIPE_SECTORS
#else
#define RAID5_STRIPE_SIZE(conf)		((conf)->stripe_size)
#define RAID5_STRIPE_SHIFT(conf)	((conf)->stripe_shift)
#define RAID5_STRIPE_SECTORS(conf)	((conf)->stripe_sectors)
#endif

/* bios attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bios per stripe+device, and
 * a bio can span several devices.
 * When walking this list for a particular stripe+device, we must never
 * proceed beyond a bio that extends past this device, as the next bio might
 * no longer be valid.
 * This function is used to determine the 'next' bio in the list, given the
 * sector of the current stripe+device.
 */
static inline struct bio *r5_next_bio(struct r5conf *conf, struct bio *bio,
				      sector_t sector)
{
	if (bio_end_sector(bio) < sector + RAID5_STRIPE_SECTORS(conf))
		return bio->bi_next;
	else
		return NULL;
}
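
/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * walking the bios attached to one stripe+device with r5_next_bio().
 * Because r5_next_bio() returns NULL for a bio that extends past this
 * stripe unit, the walk never proceeds beyond such a bio.
 */
static inline int r5_example_count_bios(struct r5conf *conf,
					struct bio *head, sector_t dev_sector)
{
	struct bio *bio;
	int n = 0;

	for (bio = head; bio; bio = r5_next_bio(conf, bio, dev_sector))
		n++;
	return n;
}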

/*
 * Our supported algorithms
 */
#define ALGORITHM_LEFT_ASYMMETRIC	0 /* Rotating Parity N with Data Restart */
#define ALGORITHM_RIGHT_ASYMMETRIC	1 /* Rotating Parity 0 with Data Restart */
#define ALGORITHM_LEFT_SYMMETRIC	2 /* Rotating Parity N with Data Continuation */
#define ALGORITHM_RIGHT_SYMMETRIC	3 /* Rotating Parity 0 with Data Continuation */
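
/*
 * Illustrative sketch (simplified; the driver's raid5_compute_sector()
 * also handles degraded and reshape cases, and uses sector_div() so the
 * 64-bit modulo works on 32-bit hosts): where the parity block of a
 * given stripe lands for the four rotating layouts above.  "Left"
 * rotates parity from the last device downwards, "right" from device 0
 * upwards; symmetric vs. asymmetric changes only where the data blocks
 * start, not where parity sits.
 */
static inline int r5_example_parity_disk(int layout, sector_t stripe,
					 int raid_disks)
{
	switch (layout) {
	case ALGORITHM_LEFT_ASYMMETRIC:
	case ALGORITHM_LEFT_SYMMETRIC:
		return raid_disks - 1 - (int)(stripe % raid_disks);
	case ALGORITHM_RIGHT_ASYMMETRIC:
	case ALGORITHM_RIGHT_SYMMETRIC:
		return (int)(stripe % raid_disks);
	default:
		return -1;
	}
}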

/* Define non-rotating (raid4) algorithms.  These allow
 * conversion of raid4 to raid5.
 */
#define ALGORITHM_PARITY_0	4 /* P or P,Q are initial devices */
#define ALGORITHM_PARITY_N	5 /* P or P,Q are final devices. */

/* DDF RAID6 layouts differ from md/raid6 layouts in two ways.
 * Firstly, the exact positioning of the parity block is slightly
 * different between the 'LEFT_*' modes of md and the "_N_*" modes
 * of DDF.
 * Secondly, the order of data blocks over which the Q syndrome is
 * computed is different.
 * Consequently we have different layouts for DDF/raid6 than md/raid6.
 * These layouts are from the DDFv1.2 spec.
 * Interestingly DDFv1.2-Errata-A does not specify N_CONTINUE but
 * leaves RLQ=3 as 'Vendor Specific'.
 */

#define ALGORITHM_ROTATING_ZERO_RESTART	8 /* DDF PRL=6 RLQ=1 */
#define ALGORITHM_ROTATING_N_RESTART	9 /* DDF PRL=6 RLQ=2 */
#define ALGORITHM_ROTATING_N_CONTINUE	10 /* DDF PRL=6 RLQ=3 */

/* For every RAID5 algorithm we define a RAID6 algorithm
 * with exactly the same layout for data and parity, and
 * with the Q block always on the last device (N-1).
 * This allows trivial conversion from RAID5 to RAID6.
 */
#define ALGORITHM_LEFT_ASYMMETRIC_6	16
#define ALGORITHM_RIGHT_ASYMMETRIC_6	17
#define ALGORITHM_LEFT_SYMMETRIC_6	18
#define ALGORITHM_RIGHT_SYMMETRIC_6	19
#define ALGORITHM_PARITY_0_6		20
#define ALGORITHM_PARITY_N_6		ALGORITHM_PARITY_N
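
/*
 * Illustrative sketch (hypothetical helper, valid only for the RAID5
 * layouts 0-5 above): picking the equivalent "Q on the last device"
 * layout for a RAID5 -> RAID6 takeover.  By construction of the values
 * above, each _6 layout is the RAID5 layout plus 16, except PARITY_N,
 * which keeps its value.
 */
static inline int r5_example_layout_to_raid6(int layout)
{
	return layout == ALGORITHM_PARITY_N ? ALGORITHM_PARITY_N_6
					    : layout + 16;
}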

static inline int algorithm_valid_raid5(int layout)
{
	return (layout >= 0) && (layout <= 5);
}
static inline int algorithm_valid_raid6(int layout)
{
	return (layout >= 0 && layout <= 5) ||
	       (layout >= 8 && layout <= 10) ||
	       (layout >= 16 && layout <= 20);
}

static inline int algorithm_is_DDF(int layout)
{
	return layout >= 8 && layout <= 10;
}

#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
/*
 * Return the byte offset, within its page, of the data for this r5dev.
 * When PAGE_SIZE exceeds the stripe size, several devices' stripe
 * buffers share one page.
 */
static inline int raid5_get_page_offset(struct stripe_head *sh, int disk_idx)
{
	return (disk_idx % sh->stripes_per_page) * RAID5_STRIPE_SIZE(sh->raid_conf);
}

/*
 * Return the page holding the data of this r5dev.
 */
static inline struct page *
raid5_get_dev_page(struct stripe_head *sh, int disk_idx)
{
	return sh->pages[disk_idx / sh->stripes_per_page];
}
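
/*
 * Illustrative sketch (hypothetical helper; assumes <linux/mm.h> is
 * available for page_address()): the two accessors above are meant to
 * be used together, resolving a disk index to the kernel virtual
 * address of its buffer inside the shared page.
 */
static inline void *r5_example_dev_addr(struct stripe_head *sh, int disk_idx)
{
	return page_address(raid5_get_dev_page(sh, disk_idx)) +
	       raid5_get_page_offset(sh, disk_idx);
}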
#endif

extern void md_raid5_kick_device(struct r5conf *conf);
extern int raid5_set_cache_size(struct mddev *mddev, int size);
extern sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous);
extern void raid5_release_stripe(struct stripe_head *sh);
extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
				     int previous, int *dd_idx,
				     struct stripe_head *sh);
extern struct stripe_head *
raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
			int previous, int noblock, int noquiesce);
extern int raid5_calc_degraded(struct r5conf *conf);
extern int r5c_journal_mode_set(struct mddev *mddev, int journal_mode);
#endif /* _RAID5_H */