/*
 * Copyright (C) 2006-2009 Red Hat, Inc.
 *
 * This file is released under the LGPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/workqueue.h>
#include <linux/connector.h>
#include <linux/device-mapper.h>
#include <linux/dm-log-userspace.h>

#include "dm-log-userspace-transfer.h"

static uint32_t dm_ulog_seq;

/*
 * Netlink/Connector is an unreliable protocol. How long should
 * we wait for a response before assuming it was lost and retrying?
 * (If we do receive a response after this time, it will be discarded
 * and the response to the resent request will be waited for.)
 */
#define DM_ULOG_RETRY_TIMEOUT (15 * HZ)

/*
 * Pre-allocated space for speed
 */
#define DM_ULOG_PREALLOCED_SIZE 512
static struct cn_msg *prealloced_cn_msg;
static struct dm_ulog_request *prealloced_ulog_tfr;
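/*
 * The single preallocated buffer is laid out as a 'struct cn_msg' header
 * immediately followed by the 'struct dm_ulog_request' and its payload,
 * so prealloced_ulog_tfr points just past the cn_msg within the same
 * DM_ULOG_PREALLOCED_SIZE allocation (see dm_ulog_tfr_init()).
 */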

static struct cb_id ulog_cn_id = {
	.idx = CN_IDX_DM,
	.val = CN_VAL_DM_USERSPACE_LOG
};

static DEFINE_MUTEX(dm_ulog_lock);

struct receiving_pkg {
	struct list_head list;
	struct completion complete;

	uint32_t seq;

	int error;
	size_t *data_size;
	char *data;
};

static DEFINE_SPINLOCK(receiving_list_lock);
static struct list_head receiving_list;

static int dm_ulog_sendto_server(struct dm_ulog_request *tfr)
{
	int r;
	struct cn_msg *msg = prealloced_cn_msg;

	memset(msg, 0, sizeof(struct cn_msg));

	msg->id.idx = ulog_cn_id.idx;
	msg->id.val = ulog_cn_id.val;
	msg->ack = 0;
	msg->seq = tfr->seq;
	msg->len = sizeof(struct dm_ulog_request) + tfr->data_size;

	r = cn_netlink_send(msg, 0, 0, gfp_any());

	return r;
}

/*
 * Parameters for this function can be either msg or tfr, but not
 * both. This function fills in the reply for a waiting request.
 * If just msg is given, then the reply is simply an ACK from userspace
 * that the request was received.
 *
 * Returns: 0 on success, -ENOENT on failure
 */
static int fill_pkg(struct cn_msg *msg, struct dm_ulog_request *tfr)
{
	uint32_t rtn_seq = (msg) ? msg->seq : (tfr) ? tfr->seq : 0;
	struct receiving_pkg *pkg;

	/*
	 * The 'receiving_pkg' entries in this list are statically
	 * allocated on the stack in 'dm_consult_userspace'.
	 * Each process that is waiting for a reply from the user
	 * space server will have an entry in this list.
	 *
	 * We are safe to do it this way because the stack space
	 * is unique to each process, but still addressable by
	 * other processes.
	 */
	list_for_each_entry(pkg, &receiving_list, list) {
		if (rtn_seq != pkg->seq)
			continue;

		if (msg) {
			pkg->error = -msg->ack;
			/*
			 * If we are trying again, we will need to know our
			 * storage capacity. Otherwise, along with the
			 * error code, we make explicit that we have no data.
			 */
			if (pkg->error != -EAGAIN)
				*(pkg->data_size) = 0;
		} else if (tfr->data_size > *(pkg->data_size)) {
			DMERR("Insufficient space to receive package [%u] (%u vs %zu)",
			      tfr->request_type, tfr->data_size, *(pkg->data_size));

			*(pkg->data_size) = 0;
			pkg->error = -ENOSPC;
		} else {
			pkg->error = tfr->error;
			memcpy(pkg->data, tfr->data, tfr->data_size);
			*(pkg->data_size) = tfr->data_size;
		}
		complete(&pkg->complete);
		return 0;
	}

	return -ENOENT;
}

/*
 * This is the connector callback that delivers data
 * that was sent from userspace.
 */
static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
	struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);

	if (!capable(CAP_SYS_ADMIN))
		return;

	spin_lock(&receiving_list_lock);
	if (msg->len == 0)
		fill_pkg(msg, NULL);
	else if (msg->len < sizeof(*tfr))
		DMERR("Incomplete message received (expected %u, got %u): [%u]",
		      (unsigned int)sizeof(*tfr), msg->len, msg->seq);
	else
		fill_pkg(NULL, tfr);
	spin_unlock(&receiving_list_lock);
}

/**
 * dm_consult_userspace - send a request to the userspace log server and wait for its reply
 * @uuid: log's universal unique identifier (must be DM_UUID_LEN in size)
 * @luid: log's local unique identifier
 * @request_type: found in include/linux/dm-log-userspace.h
 * @data: data to tx to the server
 * @data_size: size of data in bytes
 * @rdata: place to put return data from server
 * @rdata_size: value-result (amount of space given/amount of space used)
 *
 * rdata_size is undefined on failure.
 *
 * Memory used to communicate with userspace is zeroed
 * before populating to ensure that no unwanted bits leak
 * from kernel space to user space. All userspace-log communications
 * between kernel and user space go through this function.
 *
 * Returns: 0 on success, -EXXX on failure
 **/
int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
			 char *data, size_t data_size,
			 char *rdata, size_t *rdata_size)
{
	int r = 0;
	unsigned long tmo;
	size_t dummy = 0;
	int overhead_size = sizeof(struct dm_ulog_request) + sizeof(struct cn_msg);
	struct dm_ulog_request *tfr = prealloced_ulog_tfr;
	struct receiving_pkg pkg;

	/*
	 * Given the space needed to hold the 'struct cn_msg' and
	 * 'struct dm_ulog_request' - do we have enough payload
	 * space remaining?
	 */
	if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) {
		DMINFO("Size of tfr exceeds preallocated size");
		return -EINVAL;
	}

	if (!rdata_size)
		rdata_size = &dummy;
resend:
	/*
	 * We serialize the sending of requests so we can
	 * use the preallocated space.
	 */
	mutex_lock(&dm_ulog_lock);

	memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg));
	memcpy(tfr->uuid, uuid, DM_UUID_LEN);
	tfr->version = DM_ULOG_REQUEST_VERSION;
	tfr->luid = luid;
	tfr->seq = dm_ulog_seq++;

	/*
	 * Must be valid request type (all other bits set to
	 * zero). This reserves other bits for possible future
	 * use.
	 */
	tfr->request_type = request_type & DM_ULOG_REQUEST_MASK;

	tfr->data_size = data_size;
	if (data && data_size)
		memcpy(tfr->data, data, data_size);

	memset(&pkg, 0, sizeof(pkg));
	init_completion(&pkg.complete);
	pkg.seq = tfr->seq;
	pkg.data_size = rdata_size;
	pkg.data = rdata;
	spin_lock(&receiving_list_lock);
	list_add(&(pkg.list), &receiving_list);
	spin_unlock(&receiving_list_lock);

	r = dm_ulog_sendto_server(tfr);

	mutex_unlock(&dm_ulog_lock);

	if (r) {
		DMERR("Unable to send log request [%u] to userspace: %d",
		      request_type, r);
		spin_lock(&receiving_list_lock);
		list_del_init(&(pkg.list));
		spin_unlock(&receiving_list_lock);

		goto out;
	}

	tmo = wait_for_completion_timeout(&(pkg.complete), DM_ULOG_RETRY_TIMEOUT);
	spin_lock(&receiving_list_lock);
	list_del_init(&(pkg.list));
	spin_unlock(&receiving_list_lock);
	if (!tmo) {
		DMWARN("[%s] Request timed out: [%u/%u] - retrying",
		       (strlen(uuid) > 8) ?
		       (uuid + (strlen(uuid) - 8)) : (uuid),
		       request_type, pkg.seq);
		goto resend;
	}

	r = pkg.error;
	if (r == -EAGAIN)
		goto resend;

out:
	return r;
}
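
/*
 * Illustrative only (not part of the driver): a caller such as the
 * userspace log module would typically issue a request roughly like
 * the sketch below; the local variable names here are hypothetical.
 *
 *	char response[64];
 *	size_t rdata_size = sizeof(response);
 *	int r;
 *
 *	r = dm_consult_userspace(uuid, luid, DM_ULOG_CTR,
 *				 ctr_str, str_size,
 *				 response, &rdata_size);
 *	if (r)
 *		return r;	// send failed or userspace reported an error
 *	// on success, rdata_size holds the number of bytes userspace returned
 */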

int dm_ulog_tfr_init(void)
{
	int r;
	void *prealloced;

	INIT_LIST_HEAD(&receiving_list);

	prealloced = kmalloc(DM_ULOG_PREALLOCED_SIZE, GFP_KERNEL);
	if (!prealloced)
		return -ENOMEM;

	prealloced_cn_msg = prealloced;
	prealloced_ulog_tfr = prealloced + sizeof(struct cn_msg);

	r = cn_add_callback(&ulog_cn_id, "dmlogusr", cn_ulog_callback);
	if (r) {
		kfree(prealloced_cn_msg);
		return r;
	}

	return 0;
}

void dm_ulog_tfr_exit(void)
{
	cn_del_callback(&ulog_cn_id);
	kfree(prealloced_cn_msg);
}