// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SN Platform GRU Driver
 *
 * Dump GRU State
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/uv/uv_hub.h>

#include <linux/nospec.h>

#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"
#include "grulib.h"

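/*
 * Maximum number of 1 ms attempts made to lock a context's CCH before the
 * context is dumped without the lock held (see gru_dump_context() below).
 */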
#define CCH_LOCK_ATTEMPTS	10

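/*
 * Copy one GRU handle (GRU_HANDLE_BYTES) to user space and advance the
 * destination pointer past it.  Returns non-zero if the copy faults.
 */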
static int gru_user_copy_handle(void __user **dp, void *s)
{
	if (copy_to_user(*dp, s, GRU_HANDLE_BYTES))
		return -1;
	*dp += GRU_HANDLE_BYTES;
	return 0;
}

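/*
 * Dump the resources owned by one context.  For each CBR in the context's
 * allocation map, the CB, the corresponding TFH and the corresponding CBE
 * are copied to the user buffer as consecutive handles, followed by the
 * context's data segment (DSR) cache lines when dsrcnt is non-zero.
 */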
static int gru_dump_context_data(void *grubase,
			struct gru_context_configuration_handle *cch,
			void __user *ubuf, int ctxnum, int dsrcnt,
			int flush_cbrs)
{
	void *cb, *cbe, *tfh, *gseg;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	tfh = grubase + GRU_TFH_BASE;

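	/* One CB/TFH/CBE triple per allocated CBR, in allocation-map order */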
	for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
		if (flush_cbrs)
			gru_flush_cache(cb);
		if (gru_user_copy_handle(&ubuf, cb))
			goto fail;
		if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
			goto fail;
		if (gru_user_copy_handle(&ubuf, cbe + i * GRU_HANDLE_STRIDE))
			goto fail;
		cb += GRU_HANDLE_STRIDE;
	}
	if (dsrcnt) {
		/* ubuf is a user pointer; avoid a raw memcpy() to user space */
		if (copy_to_user(ubuf, gseg + GRU_DS_BASE,
				 dsrcnt * GRU_HANDLE_STRIDE))
			goto fail;
	}
	return 0;

fail:
	return -EFAULT;
}

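/*
 * Dump all TLB fault map (TFM) handles for the chiplet.  Returns the number
 * of bytes copied to the user buffer or a negative errno.
 */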
static int gru_dump_tfm(struct gru_state *gru,
		void __user *ubuf, void __user *ubufend)
{
	struct gru_tlb_fault_map *tfm;
	int i;

	if (GRU_NUM_TFM * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
		return -EFBIG;

	for (i = 0; i < GRU_NUM_TFM; i++) {
		tfm = get_tfm(gru->gs_gru_base_vaddr, i);
		if (gru_user_copy_handle(&ubuf, tfm))
			goto fail;
	}
	return GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;

fail:
	return -EFAULT;
}

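/*
 * Dump all TLB global handles (TGH) for the chiplet.  Returns the number
 * of bytes copied to the user buffer or a negative errno.
 */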
static int gru_dump_tgh(struct gru_state *gru,
		void __user *ubuf, void __user *ubufend)
{
	struct gru_tlb_global_handle *tgh;
	int i;

	if (GRU_NUM_TGH * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
		return -EFBIG;

	for (i = 0; i < GRU_NUM_TGH; i++) {
		tgh = get_tgh(gru->gs_gru_base_vaddr, i);
		if (gru_user_copy_handle(&ubuf, tgh))
			goto fail;
	}
	return GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;

fail:
	return -EFAULT;
}

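/*
 * Dump the state of a single context: a gru_dump_context_header, the CCH
 * handle, and (when the CCH was locked or the caller did not require
 * locking) the context's CBR/TFH/CBE and DSR resources via
 * gru_dump_context_data().  The header is written last so that it describes
 * what was actually captured.  Returns the number of bytes written to the
 * user buffer or a negative errno.
 */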
static int gru_dump_context(struct gru_state *gru, int ctxnum,
		void __user *ubuf, void __user *ubufend, char data_opt,
		char lock_cch, char flush_cbrs)
{
	struct gru_dump_context_header hdr;
	struct gru_dump_context_header __user *uhdr = ubuf;
	struct gru_context_configuration_handle *cch, *ubufcch;
	struct gru_thread_state *gts;
	int try, cch_locked, cbrcnt = 0, dsrcnt = 0, bytes = 0, ret = 0;
	void *grubase;

	memset(&hdr, 0, sizeof(hdr));
	grubase = gru->gs_gru_base_vaddr;
	cch = get_cch(grubase, ctxnum);
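	/*
	 * Try briefly to lock the CCH.  If it cannot be locked the context
	 * is dumped anyway; hdr.cch_locked tells user space whether the
	 * snapshot was taken under the lock.
	 */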
	for (try = 0; try < CCH_LOCK_ATTEMPTS; try++) {
		cch_locked = trylock_cch_handle(cch);
		if (cch_locked)
			break;
		msleep(1);
	}

	ubuf += sizeof(hdr);
	ubufcch = ubuf;
	if (gru_user_copy_handle(&ubuf, cch)) {
		if (cch_locked)
			unlock_cch_handle(cch);
		return -EFAULT;
	}
	if (cch_locked)
		ubufcch->delresp = 0;
	bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;

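	/*
	 * Only walk the context's resources when the CCH was locked or the
	 * caller did not insist on locking; otherwise only the header and
	 * the CCH handle are returned.
	 */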
	if (cch_locked || !lock_cch) {
		gts = gru->gs_gts[ctxnum];
		if (gts && gts->ts_vma) {
			hdr.pid = gts->ts_tgid_owner;
			hdr.vaddr = gts->ts_vma->vm_start;
		}
		if (cch->state != CCHSTATE_INACTIVE) {
			cbrcnt = hweight64(cch->cbr_allocation_map) *
						GRU_CBR_AU_SIZE;
			dsrcnt = data_opt ? hweight32(cch->dsr_allocation_map) *
						GRU_DSR_AU_CL : 0;
		}
		bytes += (3 * cbrcnt + dsrcnt) * GRU_CACHE_LINE_BYTES;
		if (bytes > ubufend - ubuf)
			ret = -EFBIG;
		else
			ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
							dsrcnt, flush_cbrs);
	}
	if (cch_locked)
		unlock_cch_handle(cch);
	if (ret)
		return ret;

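	/* Fill in the header last; it reflects what was actually dumped. */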
	hdr.magic = GRU_DUMP_MAGIC;
	hdr.gid = gru->gs_gid;
	hdr.ctxnum = ctxnum;
	hdr.cbrcnt = cbrcnt;
	hdr.dsrcnt = dsrcnt;
	hdr.cch_locked = cch_locked;
	if (copy_to_user(uhdr, &hdr, sizeof(hdr)))
		return -EFAULT;

	return bytes;
}

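/*
 * Dump the state of one GRU chiplet in response to a user request.  The TFM
 * and TGH handles are dumped first, followed by the requested context(s).
 * Returns the number of contexts dumped or a negative errno.
 */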
int gru_dump_chiplet_request(unsigned long arg)
{
	struct gru_state *gru;
	struct gru_dump_chiplet_state_req req;
	void __user *ubuf;
	void __user *ubufend;
	int ctxnum, ret, cnt = 0;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	/* Currently, only dump by gid is implemented */
	if (req.gid >= gru_max_gids)
		return -EINVAL;
	req.gid = array_index_nospec(req.gid, gru_max_gids);

	gru = GID_TO_GRU(req.gid);
	ubuf = req.buf;
	ubufend = req.buf + req.buflen;

	ret = gru_dump_tfm(gru, ubuf, ubufend);
	if (ret < 0)
		goto fail;
	ubuf += ret;

	ret = gru_dump_tgh(gru, ubuf, ubufend);
	if (ret < 0)
		goto fail;
	ubuf += ret;

	for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
		if (req.ctxnum == ctxnum || req.ctxnum < 0) {
			ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
						req.data_opt, req.lock_cch,
						req.flush_cbrs);
			if (ret < 0)
				goto fail;
			ubuf += ret;
			cnt++;
		}
	}

	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
		return -EFAULT;
	return cnt;

fail:
	return ret;
}