// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to refcount bugs (e.g. overflow,
 * underflow, reaching zero untested, etc).
 */
#include "lkdtm.h"
#include <linux/refcount.h>

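/*
 * These tests are normally exercised from userspace by writing the test
 * name to the LKDTM trigger, e.g.:
 *
 *	echo REFCOUNT_INC_OVERFLOW > /sys/kernel/debug/provoke-crash/DIRECT
 *
 * and then checking the kernel log for the verdict printed by the check
 * helpers below: "saturated" is the expected, protected outcome, while
 * "unsafely reset to max" and the pr_err() cases mean the bug went
 * undetected.
 */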
static void overflow_check(refcount_t *ref)
{
	switch (refcount_read(ref)) {
	case REFCOUNT_SATURATED:
		pr_info("Overflow detected: saturated\n");
		break;
	case REFCOUNT_MAX:
		pr_warn("Overflow detected: unsafely reset to max\n");
		break;
	default:
		pr_err("Fail: refcount wrapped to %d\n", refcount_read(ref));
	}
}

/*
 * A refcount_inc() above the maximum value of the refcount implementation
 * should at least saturate, and at most also WARN.
 */
void lkdtm_REFCOUNT_INC_OVERFLOW(void)
{
	refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1);

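	/*
	 * Starting one below the ceiling: the dec/inc round trip below stays
	 * in range, while the two extra increments push past REFCOUNT_MAX.
	 */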
	pr_info("attempting good refcount_inc() without overflow\n");
	refcount_dec(&over);
	refcount_inc(&over);

	pr_info("attempting bad refcount_inc() overflow\n");
	refcount_inc(&over);
	refcount_inc(&over);

	overflow_check(&over);
}

/* refcount_add() should behave just like refcount_inc() above. */
void lkdtm_REFCOUNT_ADD_OVERFLOW(void)
{
	refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1);

	pr_info("attempting good refcount_add() without overflow\n");
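	/* Step down by four so the 4-count add lands back at REFCOUNT_MAX - 1. */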
	refcount_dec(&over);
	refcount_dec(&over);
	refcount_dec(&over);
	refcount_dec(&over);
	refcount_add(4, &over);

	pr_info("attempting bad refcount_add() overflow\n");
	refcount_add(4, &over);

	overflow_check(&over);
}

/* refcount_inc_not_zero() should behave just like refcount_inc() above. */
void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void)
{
	refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX);

	pr_info("attempting bad refcount_inc_not_zero() overflow\n");
	if (!refcount_inc_not_zero(&over))
		pr_warn("Weird: refcount_inc_not_zero() reported zero\n");

	overflow_check(&over);
}

/* refcount_add_not_zero() should behave just like refcount_inc() above. */
void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void)
{
	refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX);

	pr_info("attempting bad refcount_add_not_zero() overflow\n");
	if (!refcount_add_not_zero(6, &over))
		pr_warn("Weird: refcount_add_not_zero() reported zero\n");

	overflow_check(&over);
}

static void check_zero(refcount_t *ref)
{
	switch (refcount_read(ref)) {
	case REFCOUNT_SATURATED:
		pr_info("Zero detected: saturated\n");
		break;
	case REFCOUNT_MAX:
		pr_warn("Zero detected: unsafely reset to max\n");
		break;
	case 0:
		pr_warn("Still at zero: refcount_inc/add() must not inc-from-0\n");
		break;
	default:
		pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref));
	}
}

/*
 * When a refcount_dec(), as opposed to a refcount_dec_and_test(), hits
 * zero it should either saturate (when inc-from-zero isn't protected)
 * or stay at zero (when inc-from-zero is protected), and should WARN in
 * both cases.
 */
void lkdtm_REFCOUNT_DEC_ZERO(void)
{
	refcount_t zero = REFCOUNT_INIT(2);

	pr_info("attempting good refcount_dec()\n");
	refcount_dec(&zero);

	pr_info("attempting bad refcount_dec() to zero\n");
	refcount_dec(&zero);

	check_zero(&zero);
}

static void check_negative(refcount_t *ref, int start)
{
	/*
	 * refcount_t refuses to move a refcount at all on an
	 * over-sub, so we have to track our starting position instead of
	 * looking only at zero-pinning.
	 */
	if (refcount_read(ref) == start) {
		pr_warn("Still at %d: refcount_inc/add() must not inc-from-0\n",
			start);
		return;
	}

	switch (refcount_read(ref)) {
	case REFCOUNT_SATURATED:
		pr_info("Negative detected: saturated\n");
		break;
	case REFCOUNT_MAX:
		pr_warn("Negative detected: unsafely reset to max\n");
		break;
	default:
		pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref));
	}
}

/* A refcount_dec() going negative should saturate and may WARN. */
void lkdtm_REFCOUNT_DEC_NEGATIVE(void)
{
	refcount_t neg = REFCOUNT_INIT(0);

	pr_info("attempting bad refcount_dec() below zero\n");
	refcount_dec(&neg);

	check_negative(&neg, 0);
}

/*
 * A refcount_dec_and_test() should act like refcount_dec() above when
 * going negative.
 */
void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void)
{
	refcount_t neg = REFCOUNT_INIT(0);

	pr_info("attempting bad refcount_dec_and_test() below zero\n");
	if (refcount_dec_and_test(&neg))
		pr_warn("Weird: refcount_dec_and_test() reported zero\n");

	check_negative(&neg, 0);
}

/*
 * A refcount_sub_and_test() should act like refcount_dec_and_test()
 * above when going negative.
 */
void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void)
{
	refcount_t neg = REFCOUNT_INIT(3);

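	/* Start above zero so the oversized sub is what crosses below zero. */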
	pr_info("attempting bad refcount_sub_and_test() below zero\n");
	if (refcount_sub_and_test(5, &neg))
		pr_warn("Weird: refcount_sub_and_test() reported zero\n");

	check_negative(&neg, 3);
}

static void check_from_zero(refcount_t *ref)
{
	switch (refcount_read(ref)) {
	case 0:
		pr_info("Zero detected: stayed at zero\n");
		break;
	case REFCOUNT_SATURATED:
		pr_info("Zero detected: saturated\n");
		break;
	case REFCOUNT_MAX:
		pr_warn("Zero detected: unsafely reset to max\n");
		break;
	default:
		pr_info("Fail: zero not detected, incremented to %d\n",
			refcount_read(ref));
	}
}

/*
 * A refcount_inc() from zero should pin to zero or saturate and may WARN.
 */
void lkdtm_REFCOUNT_INC_ZERO(void)
{
	refcount_t zero = REFCOUNT_INIT(0);

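	/*
	 * refcount_inc_not_zero() is the interface meant for paths that can
	 * race with the final put (e.g. object lookup), so from zero it must
	 * refuse the increment rather than resurrect the object.
	 */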
	pr_info("attempting safe refcount_inc_not_zero() from zero\n");
	if (!refcount_inc_not_zero(&zero)) {
		pr_info("Good: zero detected\n");
		if (refcount_read(&zero) == 0)
			pr_info("Correctly stayed at zero\n");
		else
			pr_err("Fail: refcount went past zero!\n");
	} else {
		pr_err("Fail: Zero not detected!?\n");
	}

	pr_info("attempting bad refcount_inc() from zero\n");
	refcount_inc(&zero);

	check_from_zero(&zero);
}

/*
 * A refcount_add() should act like refcount_inc() above when starting
 * at zero.
 */
void lkdtm_REFCOUNT_ADD_ZERO(void)
{
	refcount_t zero = REFCOUNT_INIT(0);

	pr_info("attempting safe refcount_add_not_zero() from zero\n");
	if (!refcount_add_not_zero(3, &zero)) {
		pr_info("Good: zero detected\n");
		if (refcount_read(&zero) == 0)
			pr_info("Correctly stayed at zero\n");
		else
			pr_err("Fail: refcount went past zero\n");
	} else {
		pr_err("Fail: Zero not detected!?\n");
	}

	pr_info("attempting bad refcount_add() from zero\n");
	refcount_add(3, &zero);

	check_from_zero(&zero);
}

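/*
 * Once saturated, the counter is expected to stay pinned at
 * REFCOUNT_SATURATED no matter what further operations are applied;
 * ending up anywhere else means the protection has been defeated.
 */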
static void check_saturated(refcount_t *ref)
{
	switch (refcount_read(ref)) {
	case REFCOUNT_SATURATED:
		pr_info("Saturation detected: still saturated\n");
		break;
	case REFCOUNT_MAX:
		pr_warn("Saturation detected: unsafely reset to max\n");
		break;
	default:
		pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref));
	}
}

/*
 * A refcount_inc() from a saturated value should at most warn about
 * being saturated already.
 */
void lkdtm_REFCOUNT_INC_SATURATED(void)
{
	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);

	pr_info("attempting bad refcount_inc() from saturated\n");
	refcount_inc(&sat);

	check_saturated(&sat);
}

/* Should act like refcount_inc() above from saturated. */
void lkdtm_REFCOUNT_DEC_SATURATED(void)
{
	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);

	pr_info("attempting bad refcount_dec() from saturated\n");
	refcount_dec(&sat);

	check_saturated(&sat);
}

/* Should act like refcount_inc() above from saturated. */
void lkdtm_REFCOUNT_ADD_SATURATED(void)
{
	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);

	pr_info("attempting bad refcount_add() from saturated\n");
	refcount_add(8, &sat);

	check_saturated(&sat);
}

/* Should act like refcount_inc() above from saturated. */
void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void)
{
	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);

	pr_info("attempting bad refcount_inc_not_zero() from saturated\n");
	if (!refcount_inc_not_zero(&sat))
		pr_warn("Weird: refcount_inc_not_zero() reported zero\n");

	check_saturated(&sat);
}

/* Should act like refcount_inc() above from saturated. */
void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void)
{
	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);

	pr_info("attempting bad refcount_add_not_zero() from saturated\n");
	if (!refcount_add_not_zero(7, &sat))
		pr_warn("Weird: refcount_add_not_zero() reported zero\n");

	check_saturated(&sat);
}

/* Should act like refcount_inc() above from saturated. */
void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void)
{
	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);

	pr_info("attempting bad refcount_dec_and_test() from saturated\n");
	if (refcount_dec_and_test(&sat))
		pr_warn("Weird: refcount_dec_and_test() reported zero\n");

	check_saturated(&sat);
}

/* Should act like refcount_inc() above from saturated. */
void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void)
{
	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);

	pr_info("attempting bad refcount_sub_and_test() from saturated\n");
	if (refcount_sub_and_test(8, &sat))
		pr_warn("Weird: refcount_sub_and_test() reported zero\n");

	check_saturated(&sat);
}

/* Used to time the existing atomic_t when it is used for reference counting. */
void lkdtm_ATOMIC_TIMING(void)
{
	unsigned int i;
	atomic_t count = ATOMIC_INIT(1);

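	/*
	 * Walk the counter all the way up and then back down; these loops
	 * are what an external tool (e.g. perf, see the REFCOUNT_TIMING
	 * comment below) is expected to time.
	 */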
	for (i = 0; i < INT_MAX - 1; i++)
		atomic_inc(&count);

	for (i = INT_MAX; i > 0; i--)
		if (atomic_dec_and_test(&count))
			break;

	if (i != 1)
		pr_err("atomic timing: out of sync up/down cycle: %u\n", i - 1);
	else
		pr_info("atomic timing: done\n");
}

/*
 * This can be compared to ATOMIC_TIMING when implementing fast refcount
 * protections. Looking at the number of CPU cycles tells the real story
 * about performance. For example:
 *	cd /sys/kernel/debug/provoke-crash
 *	perf stat -B -- cat <(echo REFCOUNT_TIMING) > DIRECT
 */
void lkdtm_REFCOUNT_TIMING(void)
{
	unsigned int i;
	refcount_t count = REFCOUNT_INIT(1);

	for (i = 0; i < INT_MAX - 1; i++)
		refcount_inc(&count);

	for (i = INT_MAX; i > 0; i--)
		if (refcount_dec_and_test(&count))
			break;

	if (i != 1)
		pr_err("refcount: out of sync up/down cycle: %u\n", i - 1);
	else
		pr_info("refcount timing: done\n");
}