/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

#include <linux/string.h>
#include <linux/time.h>
#include <linux/uuid.h>
#include "reiserfs.h"

/* find where objectid map starts */
#define objectid_map(s,rs) (old_format_only (s) ? \
			(__le32 *)((struct reiserfs_super_block_v1 *)(rs) + 1) :\
			(__le32 *)((rs) + 1))
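
/*
 * The map itself lives in the same disk block as the super block, so
 * journaling a map update only ever touches the one SB_BUFFER_WITH_SB
 * buffer prepared and dirtied in the functions below.
 */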

#ifdef CONFIG_REISERFS_CHECK

static void check_objectid_map(struct super_block *s, __le32 *map)
{
	if (le32_to_cpu(map[0]) != 1)
		reiserfs_panic(s, "vs-15010", "map corrupted: %lx",
			       (long unsigned int)le32_to_cpu(map[0]));

	/* FIXME: add something else here */
}

#else
static void check_objectid_map(struct super_block *s, __le32 *map)
{
}
#endif

/*
 * When we allocate objectids we allocate the first unused objectid.
 * Each sequence of objectids in use (the odd sequences) is followed
 * by a sequence of objectids not in use (the even sequences). We
 * only need to record the last objectid in each of these sequences
 * (both the odd and even sequences) in order to fully define the
 * boundaries of the sequences. A consequence of allocating the first
 * objectid not in use is that under most conditions this scheme is
 * extremely compact. The exception is immediately after a sequence
 * of operations which deletes a large number of objects of
 * non-sequential objectids, and even then it will become compact
 * again as soon as more objects are created. Note that many
 * interesting optimizations of layout could result from complicating
 * objectid assignment, but we have deferred making them for now.
 */
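
/*
 * A worked example with made-up numbers: a map of [ 1, 4, 10, 12 ]
 * (cur_size == 4) records that ids 1..3 are in use, 4..9 are free,
 * 10..11 are in use, and everything from 12 up is free.  Allocation
 * hands out 4 and bumps map[1] to 5; once map[1] climbs to 10 it
 * equals map[2], the free run between the first two used runs is
 * empty, and the map collapses to [ 1, 12 ].
 */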

/* get unique object identifier (returns 0 if the map is exhausted) */
__u32 reiserfs_get_unused_objectid(struct reiserfs_transaction_handle *th)
{
	struct super_block *s = th->t_super;
	struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(s);
	__le32 *map = objectid_map(s, rs);
	__u32 unused_objectid;

	BUG_ON(!th->t_trans_id);

	check_objectid_map(s, map);

	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	/* map[1] is the first objectid not yet in use */
	unused_objectid = le32_to_cpu(map[1]);
	if (unused_objectid == U32_MAX) {
		reiserfs_warning(s, "reiserfs-15100", "no more object ids");
		reiserfs_restore_prepared_buffer(s, SB_BUFFER_WITH_SB(s));
		return 0;
	}

	/*
	 * This incrementation allocates the first unused objectid.  That
	 * is to say, map[1] holds the first unused objectid, and by
	 * incrementing it we use it.  See below where we check whether
	 * we eliminated a sequence of unused objectids....
	 */
	map[1] = cpu_to_le32(unused_objectid + 1);

	/*
	 * Now we check to see if we eliminated the last remaining member of
	 * the first even sequence (and can eliminate the sequence by
	 * eliminating its last objectid from oids), and can collapse the
	 * first two odd sequences into one sequence.  If so, then the net
	 * result is to eliminate a pair of objectids from oids.  We do this
	 * by shifting the entire map to the left.
	 */
	if (sb_oid_cursize(rs) > 2 && map[1] == map[2]) {
		memmove(map + 1, map + 3,
			(sb_oid_cursize(rs) - 3) * sizeof(__u32));
		set_sb_oid_cursize(rs, sb_oid_cursize(rs) - 2);
	}

	journal_mark_dirty(th, SB_BUFFER_WITH_SB(s));
	return unused_objectid;
}

/* makes object identifier unused */
void reiserfs_release_objectid(struct reiserfs_transaction_handle *th,
			       __u32 objectid_to_release)
{
	struct super_block *s = th->t_super;
	struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(s);
	__le32 *map = objectid_map(s, rs);
	int i = 0;

	BUG_ON(!th->t_trans_id);
	check_objectid_map(s, map);

	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	journal_mark_dirty(th, SB_BUFFER_WITH_SB(s));

	/*
	 * start at the beginning of the objectid map (i = 0) and go to
	 * the end of it (i = disk_sb->s_oid_cursize).  Linear search is
	 * what we use, though it is possible that binary search would be
	 * more efficient after performing lots of deletions (which is
	 * when the map is large).  We only check even i's.
	 */
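	/*
	 * Three cases follow, illustrated with made-up numbers against
	 * a used run 5..9 (map entries ..., 5, 10, ...): releasing 5
	 * advances the run's start to 6; releasing 9 pulls map[i + 1]
	 * back to 9; releasing 7 splits the run by inserting the pair
	 * (7, 8), giving ..., 5, 7, 8, 10, ... at the cost of two map
	 * slots (or a leaked id if the map is already full).
	 */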
	while (i < sb_oid_cursize(rs)) {
		if (objectid_to_release == le32_to_cpu(map[i])) {
			/* This incrementation unallocates the objectid. */
			le32_add_cpu(&map[i], 1);

			/*
			 * Did we unallocate the last member of an
			 * odd sequence, and can shrink oids?  Comparing
			 * the two little-endian values directly is safe
			 * for an equality test.
			 */
			if (map[i] == map[i + 1]) {
				/* shrink objectid map */
				memmove(map + i, map + i + 2,
					(sb_oid_cursize(rs) - i -
					 2) * sizeof(__u32));
				set_sb_oid_cursize(rs, sb_oid_cursize(rs) - 2);

				RFALSE(sb_oid_cursize(rs) < 2 ||
				       sb_oid_cursize(rs) > sb_oid_maxsize(rs),
				       "vs-15005: objectid map corrupted cur_size == %d (max == %d)",
				       sb_oid_cursize(rs), sb_oid_maxsize(rs));
			}
			return;
		}

		if (objectid_to_release > le32_to_cpu(map[i]) &&
		    objectid_to_release < le32_to_cpu(map[i + 1])) {
			/* size of objectid map is not changed */
			if (objectid_to_release + 1 == le32_to_cpu(map[i + 1])) {
				le32_add_cpu(&map[i + 1], -1);
				return;
			}

			/*
			 * objectid map must be expanded, but
			 * there is no space
			 */
			if (sb_oid_cursize(rs) == sb_oid_maxsize(rs)) {
				PROC_INFO_INC(s, leaked_oid);
				return;
			}

			/* expand the objectid map */
			memmove(map + i + 3, map + i + 1,
				(sb_oid_cursize(rs) - i - 1) * sizeof(__u32));
			map[i + 1] = cpu_to_le32(objectid_to_release);
			map[i + 2] = cpu_to_le32(objectid_to_release + 1);
			set_sb_oid_cursize(rs, sb_oid_cursize(rs) + 2);
			return;
		}
		i += 2;
	}

	reiserfs_error(s, "vs-15011", "tried to free free object id (%lu)",
		       (long unsigned)objectid_to_release);
}

int reiserfs_convert_objectid_map_v1(struct super_block *s)
{
	struct reiserfs_super_block *disk_sb = SB_DISK_SUPER_BLOCK(s);
	int cur_size = sb_oid_cursize(disk_sb);
	/* round down to an even number of 32-bit map entries */
	int new_size = (s->s_blocksize - SB_SIZE) / sizeof(__u32) / 2 * 2;
	int old_max = sb_oid_maxsize(disk_sb);
	struct reiserfs_super_block_v1 *disk_sb_v1;
	__le32 *objectid_map;
	int i;

	disk_sb_v1 =
	    (struct reiserfs_super_block_v1 *)(SB_BUFFER_WITH_SB(s)->b_data);
	objectid_map = (__le32 *) (disk_sb_v1 + 1);
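
	/*
	 * Rough layout sketch: in the old format the map sits directly
	 * after the v1 super block.  The v2 super block is larger, so
	 * the loop below shifts the surviving entries up by
	 * (old_max - new_size) slots; they end where the old map ended
	 * and now start just past the end of the new super block.
	 */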

	if (cur_size > new_size) {
		/*
		 * The map must shrink.  Copy the final boundary entry
		 * down and drop the entries in between; anything they
		 * listed as free at the end of the objectid map is
		 * treated as used from now on.
		 */
		objectid_map[new_size - 1] = objectid_map[cur_size - 1];
		set_sb_oid_cursize(disk_sb, new_size);
	}
	/* move the smaller objectid map past the end of the new super */
	for (i = new_size - 1; i >= 0; i--) {
		objectid_map[i + (old_max - new_size)] = objectid_map[i];
	}

	/* set the max size so we don't overflow later */
	set_sb_oid_maxsize(disk_sb, new_size);

	/* Zero out label and generate random UUID */
	memset(disk_sb->s_label, 0, sizeof(disk_sb->s_label));
	generate_random_uuid(disk_sb->s_uuid);

	/* finally, zero out the unused chunk of the new super */
	memset(disk_sb->s_unused, 0, sizeof(disk_sb->s_unused));
	return 0;
}