^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Intel Management Engine Interface (Intel MEI) Linux driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/sched/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/wait.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/mei.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include "mei_dev.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include "hbm.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include "client.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * mei_me_cl_init - initialize me client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * @me_cl: me client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) void mei_me_cl_init(struct mei_me_client *me_cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) INIT_LIST_HEAD(&me_cl->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) kref_init(&me_cl->refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * mei_me_cl_get - increases me client refcount
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) * @me_cl: me client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) * Locking: called under "dev->device_lock" lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) * Return: me client or NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) {
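	/*
	 * kref_get_unless_zero() refuses to take a reference once the
	 * count has already dropped to zero, so a client that is in the
	 * middle of being released is never resurrected here.
	 */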
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) return me_cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) * mei_me_cl_release - free me client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) * Locking: called under "dev->device_lock" lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) * @ref: me_client refcount
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) static void mei_me_cl_release(struct kref *ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) struct mei_me_client *me_cl =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) container_of(ref, struct mei_me_client, refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) kfree(me_cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) * mei_me_cl_put - decrease me client refcount and free client if necessary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) * Locking: called under "dev->device_lock" lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) * @me_cl: me client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) void mei_me_cl_put(struct mei_me_client *me_cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) if (me_cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) kref_put(&me_cl->refcnt, mei_me_cl_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) * __mei_me_cl_del - delete me client from the list and decrease
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) * reference counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) * @dev: mei device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) * @me_cl: me client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) * Locking: dev->me_clients_rwsem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) if (!me_cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88)
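	/*
	 * This drops only the reference held by the dev->me_clients list;
	 * a caller that looked the client up (and therefore holds its own
	 * reference) must still call mei_me_cl_put(), as
	 * mei_me_cl_rm_by_uuid() does.
	 */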
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) list_del_init(&me_cl->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) mei_me_cl_put(me_cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) * mei_me_cl_del - delete me client from the list and decrease
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) * reference counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) * @dev: mei device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) * @me_cl: me client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) down_write(&dev->me_clients_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) __mei_me_cl_del(dev, me_cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) up_write(&dev->me_clients_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) * mei_me_cl_add - add me client to the list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) * @dev: mei device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) * @me_cl: me client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) down_write(&dev->me_clients_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) list_add(&me_cl->list, &dev->me_clients);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) up_write(&dev->me_clients_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) * __mei_me_cl_by_uuid - locate me client by uuid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) * increases ref count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) * @dev: mei device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) * @uuid: me client uuid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) * Return: me client or NULL if not found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) * Locking: dev->me_clients_rwsem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) const uuid_le *uuid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) struct mei_me_client *me_cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) const uuid_le *pn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) list_for_each_entry(me_cl, &dev->me_clients, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) pn = &me_cl->props.protocol_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) if (uuid_le_cmp(*uuid, *pn) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) return mei_me_cl_get(me_cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) * mei_me_cl_by_uuid - locate me client by uuid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) * increases ref count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) * @dev: mei device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) * @uuid: me client uuid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) * Return: me client or NULL if not found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) * Locking: dev->me_clients_rwsem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) const uuid_le *uuid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) struct mei_me_client *me_cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) down_read(&dev->me_clients_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) me_cl = __mei_me_cl_by_uuid(dev, uuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) up_read(&dev->me_clients_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) return me_cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) }
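/*
 * Illustrative usage sketch (not part of the driver flow): a lookup by
 * uuid returns a referenced client that the caller must release, e.g.
 *
 *	me_cl = mei_me_cl_by_uuid(dev, &cl_uuid);
 *	if (me_cl) {
 *		... use me_cl->props ...
 *		mei_me_cl_put(me_cl);
 *	}
 *
 * "cl_uuid" above is a placeholder name, not an identifier from this file.
 */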
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) * mei_me_cl_by_id - locate me client by client id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) * increases ref count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) * @client_id: me client id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) * Return: me client or NULL if not found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) * Locking: dev->me_clients_rwsem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) struct mei_me_client *__me_cl, *me_cl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) down_read(&dev->me_clients_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) list_for_each_entry(__me_cl, &dev->me_clients, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) if (__me_cl->client_id == client_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) me_cl = mei_me_cl_get(__me_cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) up_read(&dev->me_clients_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) return me_cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) * increases ref count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) * @uuid: me client uuid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) * @client_id: me client id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) * Return: me client or NULL if not found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) * Locking: dev->me_clients_rwsem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) const uuid_le *uuid, u8 client_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) struct mei_me_client *me_cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) const uuid_le *pn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) list_for_each_entry(me_cl, &dev->me_clients, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) pn = &me_cl->props.protocol_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) if (uuid_le_cmp(*uuid, *pn) == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) me_cl->client_id == client_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) return mei_me_cl_get(me_cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) * mei_me_cl_by_uuid_id - locate me client by client id and uuid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) * increases ref count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) * @uuid: me client uuid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) * @client_id: me client id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) * Return: me client or NULL if not found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) const uuid_le *uuid, u8 client_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) struct mei_me_client *me_cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) down_read(&dev->me_clients_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) up_read(&dev->me_clients_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) return me_cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) * mei_me_cl_rm_by_uuid - remove the me client matching the uuid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) * @uuid: me client uuid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) * Locking: called under "dev->device_lock" lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) struct mei_me_client *me_cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) dev_dbg(dev->dev, "remove %pUl\n", uuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) down_write(&dev->me_clients_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) me_cl = __mei_me_cl_by_uuid(dev, uuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) __mei_me_cl_del(dev, me_cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) mei_me_cl_put(me_cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) up_write(&dev->me_clients_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) * mei_me_cl_rm_by_uuid_id - remove the me client matching the uuid and client id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) * @uuid: me client uuid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) * @id: me client id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) * Locking: called under "dev->device_lock" lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) struct mei_me_client *me_cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) down_write(&dev->me_clients_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) __mei_me_cl_del(dev, me_cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) mei_me_cl_put(me_cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) up_write(&dev->me_clients_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) * mei_me_cl_rm_all - remove all me clients
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) * Locking: called under "dev->device_lock" lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) void mei_me_cl_rm_all(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) struct mei_me_client *me_cl, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) down_write(&dev->me_clients_rwsem);
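	/*
	 * The _safe iterator is required because __mei_me_cl_del()
	 * unlinks entries from dev->me_clients while the list is walked.
	 */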
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) __mei_me_cl_del(dev, me_cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) up_write(&dev->me_clients_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) * mei_io_cb_free - free mei_cl_cb related memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) * @cb: mei callback struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) void mei_io_cb_free(struct mei_cl_cb *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) if (cb == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) list_del(&cb->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) kfree(cb->buf.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) kfree(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) * mei_tx_cb_enqueue - queue tx callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) * Locking: called under "dev->device_lock" lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) * @cb: mei callback struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) * @head: an instance of list to queue on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) struct list_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) list_add_tail(&cb->list, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) cb->cl->tx_cb_queued++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) * mei_tx_cb_dequeue - dequeue tx callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) * Locking: called under "dev->device_lock" lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) * @cb: mei callback struct to dequeue and free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) if (!WARN_ON(cb->cl->tx_cb_queued == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) cb->cl->tx_cb_queued--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) mei_io_cb_free(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) * mei_cl_set_read_by_fp - set the pending_read flag in the vtag struct matching the given fp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) * Locking: called under "dev->device_lock" lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) * @cl: mei client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) * @fp: pointer to file structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) static void mei_cl_set_read_by_fp(const struct mei_cl *cl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) const struct file *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) struct mei_cl_vtag *cl_vtag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) if (cl_vtag->fp == fp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) cl_vtag->pending_read = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) * mei_io_cb_init - allocate and initialize io callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) * @cl: mei client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) * @type: operation type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) * @fp: pointer to file structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) * Return: mei_cl_cb pointer or NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) enum mei_cb_file_ops type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) const struct file *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) struct mei_cl_cb *cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) cb = kzalloc(sizeof(*cb), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) if (!cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) INIT_LIST_HEAD(&cb->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) cb->fp = fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) cb->cl = cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) cb->buf_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) cb->fop_type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) cb->vtag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) return cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) * mei_io_list_flush_cl - removes cbs belonging to the cl.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) * @head: an instance of our list structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) static void mei_io_list_flush_cl(struct list_head *head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) const struct mei_cl *cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) struct mei_cl_cb *cb, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) list_for_each_entry_safe(cb, next, head, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) if (cl == cb->cl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) list_del_init(&cb->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) if (cb->fop_type == MEI_FOP_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) mei_io_cb_free(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) * mei_io_tx_list_free_cl - removes cbs belonging to the cl and frees them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) * @head: an instance of our list structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) * @fp: file pointer (matching cb file object), may be NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) static void mei_io_tx_list_free_cl(struct list_head *head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) const struct mei_cl *cl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) const struct file *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) struct mei_cl_cb *cb, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) list_for_each_entry_safe(cb, next, head, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) if (cl == cb->cl && (!fp || fp == cb->fp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) mei_tx_cb_dequeue(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) * mei_io_list_free_fp - free cbs from the list that match the file pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) * @head: io list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) * @fp: file pointer (matching cb file object), may be NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) struct mei_cl_cb *cb, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) list_for_each_entry_safe(cb, next, head, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) if (!fp || fp == cb->fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) mei_io_cb_free(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) * mei_cl_free_pending - free pending cb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) static void mei_cl_free_pending(struct mei_cl *cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) struct mei_cl_cb *cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) mei_io_cb_free(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) * mei_cl_alloc_cb - a convenient wrapper for allocating an io callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) * @length: size of the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) * @fop_type: operation type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) * @fp: associated file pointer (might be NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) * Return: cb on success and NULL on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) enum mei_cb_file_ops fop_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) const struct file *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) struct mei_cl_cb *cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) cb = mei_io_cb_init(cl, fop_type, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) if (!cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) if (length == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) return cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) if (!cb->buf.data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) mei_io_cb_free(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) cb->buf.size = length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) return cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) }
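/*
 * Illustrative usage sketch (hypothetical caller): allocating a read
 * callback sized to the client's MTU would look roughly like
 *
 *	cb = mei_cl_alloc_cb(cl, mei_cl_mtu(cl), MEI_FOP_READ, fp);
 *	if (!cb)
 *		return -ENOMEM;
 *
 * The buffer is rounded up to a multiple of MEI_SLOT_SIZE by the
 * allocation above.
 */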
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) * and enqueuing a control command cb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) * @length: size of the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) * @fop_type: operation type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) * @fp: associated file pointer (might be NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) * Return: cb on success and NULL on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) * Locking: called under "dev->device_lock" lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) enum mei_cb_file_ops fop_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) const struct file *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) struct mei_cl_cb *cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) /* for RX always allocate at least client's mtu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) if (length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) length = max_t(size_t, length, mei_cl_mtu(cl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) if (!cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532)
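	/*
	 * Queue on the device control write list so the request is sent
	 * later, once host buffer space is available.
	 */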
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) return cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) * mei_cl_read_cb - find this cl's callback in the completed read list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) * for a specific file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) * @fp: file pointer (matching cb file object), may be NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) * Return: cb on success, NULL if cb is not found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) struct mei_cl_cb *cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) struct mei_cl_cb *ret_cb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) spin_lock(&cl->rd_completed_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) list_for_each_entry(cb, &cl->rd_completed, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) if (!fp || fp == cb->fp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) ret_cb = cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) spin_unlock(&cl->rd_completed_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) return ret_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) * mei_cl_flush_queues - flushes queue lists belonging to cl.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) * @fp: file pointer (matching cb file object), may be NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) struct mei_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) if (WARN_ON(!cl || !cl->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) dev = cl->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) cl_dbg(dev, cl, "remove list entry belonging to cl\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) mei_io_tx_list_free_cl(&cl->dev->write_list, cl, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) /* free pending and control cb only in final flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) if (!fp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) mei_cl_free_pending(cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) spin_lock(&cl->rd_completed_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) mei_io_list_free_fp(&cl->rd_completed, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) spin_unlock(&cl->rd_completed_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) * mei_cl_init - initializes cl.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) * @cl: host client to be initialized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) * @dev: mei device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) memset(cl, 0, sizeof(*cl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) init_waitqueue_head(&cl->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) init_waitqueue_head(&cl->rx_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) init_waitqueue_head(&cl->tx_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) init_waitqueue_head(&cl->ev_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) INIT_LIST_HEAD(&cl->vtag_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) spin_lock_init(&cl->rd_completed_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) INIT_LIST_HEAD(&cl->rd_completed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) INIT_LIST_HEAD(&cl->rd_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) INIT_LIST_HEAD(&cl->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) cl->writing_state = MEI_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) cl->state = MEI_FILE_UNINITIALIZED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) cl->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) * mei_cl_allocate - allocates cl structure and sets it up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) * @dev: mei device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) * Return: The allocated host client structure or NULL on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) struct mei_cl *mei_cl_allocate(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) struct mei_cl *cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) cl = kmalloc(sizeof(*cl), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) if (!cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) mei_cl_init(cl, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) return cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) * mei_cl_link - allocate host id in the host map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) * Return: 0 on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) * -EINVAL on incorrect values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) * -EMFILE if open count exceeded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) int mei_cl_link(struct mei_cl *cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) struct mei_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) int id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) if (WARN_ON(!cl || !cl->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) dev = cl->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
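	/*
	 * host_clients_map is a bitmap of in-use host client ids; bit 0
	 * is treated as reserved by the driver (see the "never clear the
	 * 0 bit" note in mei_cl_unlink() below).
	 */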
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) if (id >= MEI_CLIENTS_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) dev_err(dev->dev, "id exceeded %d\n", MEI_CLIENTS_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) return -EMFILE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) dev_err(dev->dev, "open_handle_count exceeded %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) MEI_MAX_OPEN_HANDLE_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) return -EMFILE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) dev->open_handle_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) cl->host_client_id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) list_add_tail(&cl->link, &dev->file_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) set_bit(id, dev->host_clients_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) cl->state = MEI_FILE_INITIALIZING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) cl_dbg(dev, cl, "link cl\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) * mei_cl_unlink - remove host client from the list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) * Return: always 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) int mei_cl_unlink(struct mei_cl *cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) struct mei_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) /* don't shout on error exit path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) if (!cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) if (WARN_ON(!cl->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) dev = cl->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) cl_dbg(dev, cl, "unlink client\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) if (dev->open_handle_count > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) dev->open_handle_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) /* never clear the 0 bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) if (cl->host_client_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) clear_bit(cl->host_client_id, dev->host_clients_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) list_del_init(&cl->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) cl->state = MEI_FILE_UNINITIALIZED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) cl->writing_state = MEI_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) WARN_ON(!list_empty(&cl->rd_completed) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) !list_empty(&cl->rd_pending) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) !list_empty(&cl->link));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720)
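/**
 * mei_host_client_init - mark the device enabled and schedule
 *	a rescan of the clients on the bus
 *
 * @dev: the device structure
 */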
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) void mei_host_client_init(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) mei_set_devstate(dev, MEI_DEV_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) dev->reset_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) schedule_work(&dev->bus_rescan_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) pm_runtime_mark_last_busy(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) dev_dbg(dev->dev, "rpm: autosuspend\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) pm_request_autosuspend(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) * mei_hbuf_acquire - try to acquire host buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) * Return: true if host buffer was acquired
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) bool mei_hbuf_acquire(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) if (mei_pg_state(dev) == MEI_PG_ON ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) mei_pg_in_transition(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) dev_dbg(dev->dev, "device is in pg\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) if (!dev->hbuf_is_ready) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) dev_dbg(dev->dev, "hbuf is not ready\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) dev->hbuf_is_ready = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) * mei_cl_wake_all - wake up readers, writers and event waiters so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) * they can be interrupted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) static void mei_cl_wake_all(struct mei_cl *cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) struct mei_device *dev = cl->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) /* synchronized under device mutex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) if (waitqueue_active(&cl->rx_wait)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) cl_dbg(dev, cl, "Waking up reading client!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) wake_up_interruptible(&cl->rx_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) /* synchronized under device mutex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) if (waitqueue_active(&cl->tx_wait)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) cl_dbg(dev, cl, "Waking up writing client!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) wake_up_interruptible(&cl->tx_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) /* synchronized under device mutex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) if (waitqueue_active(&cl->ev_wait)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) wake_up_interruptible(&cl->ev_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) /* synchronized under device mutex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (waitqueue_active(&cl->wait)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) wake_up(&cl->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) * mei_cl_set_disconnected - set disconnected state and clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) * associated states and resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) static void mei_cl_set_disconnected(struct mei_cl *cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) struct mei_device *dev = cl->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) if (cl->state == MEI_FILE_DISCONNECTED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) cl->state <= MEI_FILE_INITIALIZING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) cl->state = MEI_FILE_DISCONNECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) mei_io_tx_list_free_cl(&dev->write_list, cl, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) mei_cl_wake_all(cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) cl->rx_flow_ctrl_creds = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) cl->tx_flow_ctrl_creds = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) cl->timer_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) if (!cl->me_cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (!WARN_ON(cl->me_cl->connect_count == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) cl->me_cl->connect_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) if (cl->me_cl->connect_count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) cl->me_cl->tx_flow_ctrl_creds = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) mei_me_cl_put(cl->me_cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) cl->me_cl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
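/**
 * mei_cl_set_connecting - take a reference on the me client and move
 *	the host client into the connecting state
 *
 * @cl: host client
 * @me_cl: me client
 *
 * Return: 0 on success, -ENOENT if the me client could not be referenced,
 *	-EBUSY if a fixed address client already has a connection.
 *	The reference taken here is dropped by mei_cl_set_disconnected().
 */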
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (!mei_me_cl_get(me_cl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) /* only one connection is allowed for fixed address clients */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (me_cl->props.fixed_address) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (me_cl->connect_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) mei_me_cl_put(me_cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) cl->me_cl = me_cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) cl->state = MEI_FILE_CONNECTING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) cl->me_cl->connect_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * mei_cl_send_disconnect - send disconnect request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * @cb: callback block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * Return: 0, OK; otherwise, error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) struct mei_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) dev = cl->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) ret = mei_hbm_cl_disconnect_req(dev, cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) cl->status = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) cl->state = MEI_FILE_DISCONNECT_REPLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) list_move_tail(&cb->list, &dev->ctrl_rd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) cl->timer_count = MEI_CONNECT_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) mei_schedule_stall_timer(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) /**
 * mei_cl_irq_disconnect - processes close-related operation from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * interrupt thread context - send disconnect request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * @cl: client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * @cb: callback block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * @cmpl_list: complete list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * Return: 0, OK; otherwise, error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) struct list_head *cmpl_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) struct mei_device *dev = cl->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) u32 msg_slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) int slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) slots = mei_hbuf_empty_slots(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (slots < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) return -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if ((u32)slots < msg_slots)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) ret = mei_cl_send_disconnect(cl, cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) list_move_tail(&cb->list, cmpl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * __mei_cl_disconnect - disconnect host client from the me one
 * internal function; runtime pm has to be acquired already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * Return: 0 on success, <0 on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) static int __mei_cl_disconnect(struct mei_cl *cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) struct mei_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) struct mei_cl_cb *cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) int rets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) dev = cl->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) cl->state = MEI_FILE_DISCONNECTING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) if (!cb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) rets = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (mei_hbuf_acquire(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) rets = mei_cl_send_disconnect(cl, cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) if (rets) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) cl_err(dev, cl, "failed to disconnect.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) mutex_unlock(&dev->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) wait_event_timeout(cl->wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) cl->state == MEI_FILE_DISCONNECT_REPLY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) cl->state == MEI_FILE_DISCONNECTED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) mutex_lock(&dev->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) rets = cl->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) cl->state != MEI_FILE_DISCONNECTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) rets = -ETIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) out:
	/* we also disconnect on error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) mei_cl_set_disconnected(cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (!rets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) mei_io_cb_free(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) return rets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * mei_cl_disconnect - disconnect host client from the me one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) * Locking: called under "dev->device_lock" lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * Return: 0 on success, <0 on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) int mei_cl_disconnect(struct mei_cl *cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) struct mei_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) int rets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (WARN_ON(!cl || !cl->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) dev = cl->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) cl_dbg(dev, cl, "disconnecting");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (!mei_cl_is_connected(cl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (mei_cl_is_fixed_address(cl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) mei_cl_set_disconnected(cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (dev->dev_state == MEI_DEV_POWER_DOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) mei_cl_set_disconnected(cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) rets = pm_runtime_get(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (rets < 0 && rets != -EINPROGRESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) pm_runtime_put_noidle(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) cl_err(dev, cl, "rpm: get failed %d\n", rets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) return rets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) rets = __mei_cl_disconnect(cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) cl_dbg(dev, cl, "rpm: autosuspend\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) pm_runtime_mark_last_busy(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) pm_runtime_put_autosuspend(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) return rets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
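
/*
 * Example (illustrative sketch, not taken from a caller in the driver):
 * mei_cl_disconnect() is invoked with dev->device_lock held, e.g. from a
 * release path; a zero return also covers the not-connected and
 * power-down cases:
 *
 *	mutex_lock(&dev->device_lock);
 *	rets = mei_cl_disconnect(cl);
 *	mutex_unlock(&dev->device_lock);
 */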
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) /**
 * mei_cl_is_other_connecting - checks if another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) * client with the same me client id is connecting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) * @cl: private data of the file object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) *
 * Return: true if another client is connecting, false otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) static bool mei_cl_is_other_connecting(struct mei_cl *cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) struct mei_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) struct mei_cl_cb *cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) dev = cl->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (cb->fop_type == MEI_FOP_CONNECT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * mei_cl_send_connect - send connect request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * @cb: callback block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * Return: 0, OK; otherwise, error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) struct mei_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) dev = cl->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) ret = mei_hbm_cl_connect_req(dev, cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) cl->status = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) cl->state = MEI_FILE_DISCONNECT_REPLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) list_move_tail(&cb->list, &dev->ctrl_rd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) cl->timer_count = MEI_CONNECT_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) mei_schedule_stall_timer(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * mei_cl_irq_connect - send connect request in irq_thread context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * @cb: callback block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) * @cmpl_list: complete list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) * Return: 0, OK; otherwise, error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) struct list_head *cmpl_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) struct mei_device *dev = cl->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) u32 msg_slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) int slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) int rets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (mei_cl_is_other_connecting(cl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) slots = mei_hbuf_empty_slots(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (slots < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) return -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if ((u32)slots < msg_slots)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) rets = mei_cl_send_connect(cl, cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (rets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) list_move_tail(&cb->list, cmpl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) return rets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * mei_cl_connect - connect host client to the me one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) * @me_cl: me client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * @fp: pointer to file structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) * Locking: called under "dev->device_lock" lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) * Return: 0 on success, <0 on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) const struct file *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) struct mei_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) struct mei_cl_cb *cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) int rets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) if (WARN_ON(!cl || !cl->dev || !me_cl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) dev = cl->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) rets = mei_cl_set_connecting(cl, me_cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) if (rets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) goto nortpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) if (mei_cl_is_fixed_address(cl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) cl->state = MEI_FILE_CONNECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) rets = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) goto nortpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) rets = pm_runtime_get(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (rets < 0 && rets != -EINPROGRESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) pm_runtime_put_noidle(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) cl_err(dev, cl, "rpm: get failed %d\n", rets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) goto nortpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (!cb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) rets = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) /* run hbuf acquire last so we don't have to undo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) rets = mei_cl_send_connect(cl, cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) if (rets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) mutex_unlock(&dev->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) wait_event_timeout(cl->wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) (cl->state == MEI_FILE_CONNECTED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) cl->state == MEI_FILE_DISCONNECTED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) cl->state == MEI_FILE_DISCONNECT_REPLY),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) mutex_lock(&dev->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (!mei_cl_is_connected(cl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
			/* ignore disconnect return value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) * in case of failure reset will be invoked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) __mei_cl_disconnect(cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) rets = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) /* timeout or something went really wrong */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (!cl->status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) cl->status = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) rets = cl->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) cl_dbg(dev, cl, "rpm: autosuspend\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) pm_runtime_mark_last_busy(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) pm_runtime_put_autosuspend(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) mei_io_cb_free(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) nortpm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) if (!mei_cl_is_connected(cl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) mei_cl_set_disconnected(cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) return rets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
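
/*
 * Example (illustrative sketch, assuming the me client was looked up
 * beforehand, e.g. with mei_me_cl_by_uuid(), and dev->device_lock is held):
 *
 *	me_cl = mei_me_cl_by_uuid(dev, &uuid);
 *	if (!me_cl)
 *		return -ENOTTY;
 *	rets = mei_cl_connect(cl, me_cl, file);
 *	mei_me_cl_put(me_cl);
 *
 * mei_cl_connect() takes its own reference in mei_cl_set_connecting(),
 * so the lookup reference can be dropped right after the call.
 */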
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) * mei_cl_alloc_linked - allocate and link host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * @dev: the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) *
 * Return: cl on success, ERR_PTR on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) struct mei_cl *cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) cl = mei_cl_allocate(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (!cl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) ret = mei_cl_link(cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) return cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) kfree(cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
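
/*
 * Example (illustrative sketch): the result follows the ERR_PTR
 * convention, so callers check it with IS_ERR()/PTR_ERR():
 *
 *	cl = mei_cl_alloc_linked(dev);
 *	if (IS_ERR(cl))
 *		return PTR_ERR(cl);
 */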
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) *
 * Return: 1 if tx_flow_ctrl_creds > 0, 0 otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) if (WARN_ON(!cl || !cl->me_cl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (cl->tx_flow_ctrl_creds > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (mei_cl_is_fixed_address(cl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (mei_cl_is_single_recv_buf(cl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (cl->me_cl->tx_flow_ctrl_creds > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) * for a client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) * 0 on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) * -EINVAL when ctrl credits are <= 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (WARN_ON(!cl || !cl->me_cl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (mei_cl_is_fixed_address(cl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) if (mei_cl_is_single_recv_buf(cl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) cl->me_cl->tx_flow_ctrl_creds--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) cl->tx_flow_ctrl_creds--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
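
/*
 * Example (illustrative sketch of how the two helpers above pair up in a
 * transmit path): a fragment is sent only when a credit is available and
 * the credit is consumed once the fragment has gone out:
 *
 *	if (mei_cl_tx_flow_ctrl_creds(cl) <= 0)
 *		return 0;	(no credit yet - wait for flow control)
 *	... send the fragment ...
 *	rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
 */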
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) * mei_cl_vtag_alloc - allocate and fill the vtag structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) * @fp: pointer to file structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) * @vtag: vm tag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) * * Pointer to allocated struct - on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) * * ERR_PTR(-ENOMEM) on memory allocation failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) struct mei_cl_vtag *cl_vtag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) cl_vtag = kzalloc(sizeof(*cl_vtag), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (!cl_vtag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) INIT_LIST_HEAD(&cl_vtag->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) cl_vtag->vtag = vtag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) cl_vtag->fp = fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) return cl_vtag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
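
/*
 * Example (illustrative sketch): a vtag entry is allocated per file
 * descriptor and added to the client's vtag map:
 *
 *	cl_vtag = mei_cl_vtag_alloc(fp, vtag);
 *	if (IS_ERR(cl_vtag))
 *		return PTR_ERR(cl_vtag);
 *	list_add_tail(&cl_vtag->list, &cl->vtag_map);
 */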
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) * mei_cl_fp_by_vtag - obtain the file pointer by vtag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) * @vtag: vm tag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) * * A file pointer - on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) * * ERR_PTR(-ENOENT) if vtag is not found in the client vtag list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) struct mei_cl_vtag *vtag_l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) list_for_each_entry(vtag_l, &cl->vtag_map, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) if (vtag_l->vtag == vtag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) return vtag_l->fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) return ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) * mei_cl_reset_read_by_vtag - reset pending_read flag by given vtag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) * @vtag: vm tag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) struct mei_cl_vtag *vtag_l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) list_for_each_entry(vtag_l, &cl->vtag_map, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) if (vtag_l->vtag == vtag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) vtag_l->pending_read = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) * mei_cl_read_vtag_add_fc - add flow control for next pending reader
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) * in the vtag list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) static void mei_cl_read_vtag_add_fc(struct mei_cl *cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) struct mei_cl_vtag *cl_vtag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) if (cl_vtag->pending_read) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) if (mei_cl_enqueue_ctrl_wr_cb(cl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) mei_cl_mtu(cl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) MEI_FOP_READ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) cl_vtag->fp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) cl->rx_flow_ctrl_creds++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) /**
 * mei_cl_vt_support_check - check if client supports vtags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) * * 0 - supported, or not connected at all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) * * -EOPNOTSUPP - vtags are not supported by client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) int mei_cl_vt_support_check(const struct mei_cl *cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) struct mei_device *dev = cl->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (!dev->hbm_f_vt_supported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) if (!cl->me_cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) return cl->me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) }
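
/*
 * Example (illustrative sketch): callers gate vtag handling on this check,
 * e.g. before honoring a connect request that carries a non-zero vtag:
 *
 *	if (vtag && mei_cl_vt_support_check(cl))
 *		return -EOPNOTSUPP;
 */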
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) * mei_cl_add_rd_completed - add read completed callback to list with lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) * and vtag check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) * @cb: callback block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) const struct file *fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (!mei_cl_vt_support_check(cl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) fp = mei_cl_fp_by_vtag(cl, cb->vtag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (IS_ERR(fp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) /* client already disconnected, discarding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) mei_io_cb_free(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) cb->fp = fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) mei_cl_reset_read_by_vtag(cl, cb->vtag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) mei_cl_read_vtag_add_fc(cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) spin_lock(&cl->rd_completed_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) list_add_tail(&cb->list, &cl->rd_completed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) spin_unlock(&cl->rd_completed_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) * mei_cl_del_rd_completed - free read completed callback with lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) * @cb: callback block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) void mei_cl_del_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) spin_lock(&cl->rd_completed_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) mei_io_cb_free(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) spin_unlock(&cl->rd_completed_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) * mei_cl_notify_fop2req - convert fop to proper request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) * @fop: client notification start response command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) * Return: MEI_HBM_NOTIFICATION_START/STOP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) if (fop == MEI_FOP_NOTIFY_START)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) return MEI_HBM_NOTIFICATION_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) return MEI_HBM_NOTIFICATION_STOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) /**
 * mei_cl_notify_req2fop - convert notification request to file operation type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) * @req: hbm notification request type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) * Return: MEI_FOP_NOTIFY_START/STOP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) if (req == MEI_HBM_NOTIFICATION_START)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) return MEI_FOP_NOTIFY_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) return MEI_FOP_NOTIFY_STOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) * mei_cl_irq_notify - send notification request in irq_thread context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) * @cl: client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) * @cb: callback block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) * @cmpl_list: complete list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) *
 * Return: 0 on success and error otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) struct list_head *cmpl_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) struct mei_device *dev = cl->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) u32 msg_slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) int slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) bool request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) slots = mei_hbuf_empty_slots(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) if (slots < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) return -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) if ((u32)slots < msg_slots)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) request = mei_cl_notify_fop2req(cb->fop_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) ret = mei_hbm_cl_notify_req(dev, cl, request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) cl->status = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) list_move_tail(&cb->list, cmpl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) list_move_tail(&cb->list, &dev->ctrl_rd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) * mei_cl_notify_request - send notification stop/start request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) * @fp: associate request with file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) * @request: 1 for start or 0 for stop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) * Locking: called under "dev->device_lock" lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) *
 * Return: 0 on success and error otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) int mei_cl_notify_request(struct mei_cl *cl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) const struct file *fp, u8 request)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) struct mei_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) struct mei_cl_cb *cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) enum mei_cb_file_ops fop_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) int rets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) if (WARN_ON(!cl || !cl->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) dev = cl->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) if (!dev->hbm_f_ev_supported) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) cl_dbg(dev, cl, "notifications not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) if (!mei_cl_is_connected(cl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) rets = pm_runtime_get(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if (rets < 0 && rets != -EINPROGRESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) pm_runtime_put_noidle(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) cl_err(dev, cl, "rpm: get failed %d\n", rets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) return rets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) fop_type = mei_cl_notify_req2fop(request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) if (!cb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) rets = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) if (mei_hbuf_acquire(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) if (mei_hbm_cl_notify_req(dev, cl, request)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) rets = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) list_move_tail(&cb->list, &dev->ctrl_rd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) mutex_unlock(&dev->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) wait_event_timeout(cl->wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) cl->notify_en == request ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) cl->status ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) !mei_cl_is_connected(cl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) mutex_lock(&dev->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) if (cl->notify_en != request && !cl->status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) cl->status = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) rets = cl->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) cl_dbg(dev, cl, "rpm: autosuspend\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) pm_runtime_mark_last_busy(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) pm_runtime_put_autosuspend(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) mei_io_cb_free(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) return rets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) }
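
/*
 * Example (illustrative sketch): enabling notifications for a connected
 * client under dev->device_lock; passing MEI_HBM_NOTIFICATION_STOP
 * disables them again:
 *
 *	rets = mei_cl_notify_request(cl, file, MEI_HBM_NOTIFICATION_START);
 */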
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) * mei_cl_notify - raise notification
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) * Locking: called under "dev->device_lock" lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) void mei_cl_notify(struct mei_cl *cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) struct mei_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) if (!cl || !cl->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) dev = cl->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) if (!cl->notify_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) cl_dbg(dev, cl, "notify event");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) cl->notify_ev = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) if (!mei_cl_bus_notify_event(cl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) wake_up_interruptible(&cl->ev_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) if (cl->ev_async)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) * mei_cl_notify_get - get or wait for notification event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) * @block: this request is blocking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) * @notify_ev: true if notification event was received
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) * Locking: called under "dev->device_lock" lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) *
 * Return: 0 on success and error otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) struct mei_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) int rets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) *notify_ev = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) if (WARN_ON(!cl || !cl->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) dev = cl->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) if (!dev->hbm_f_ev_supported) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) cl_dbg(dev, cl, "notifications not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) if (!mei_cl_is_connected(cl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) if (cl->notify_ev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) if (!block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) mutex_unlock(&dev->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) mutex_lock(&dev->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) if (rets < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) return rets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) *notify_ev = cl->notify_ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) cl->notify_ev = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) }
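
/*
 * Example (illustrative sketch): a blocking wait for a notification event
 * under dev->device_lock; with block == false the call returns -EAGAIN
 * when no event is pending:
 *
 *	bool notify_ev;
 *	int rets;
 *
 *	rets = mei_cl_notify_get(cl, true, &notify_ev);
 *	if (rets)
 *		return rets;
 */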
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) /**
 * mei_cl_read_start - start reading a client message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) * @length: number of bytes to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) * @fp: pointer to file structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) * Return: 0 on success, <0 on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) struct mei_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) struct mei_cl_cb *cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) int rets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) if (WARN_ON(!cl || !cl->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) dev = cl->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) if (!mei_cl_is_connected(cl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (!mei_me_cl_is_active(cl->me_cl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) cl_err(dev, cl, "no such me client\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) if (mei_cl_is_fixed_address(cl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) /* HW currently supports only one pending read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) if (cl->rx_flow_ctrl_creds) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) mei_cl_set_read_by_fp(cl, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) if (!cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) mei_cl_set_read_by_fp(cl, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) rets = pm_runtime_get(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) if (rets < 0 && rets != -EINPROGRESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) pm_runtime_put_noidle(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) cl_err(dev, cl, "rpm: get failed %d\n", rets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) goto nortpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) rets = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) if (mei_hbuf_acquire(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) rets = mei_hbm_cl_flow_control_req(dev, cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) if (rets < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) list_move_tail(&cb->list, &cl->rd_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) cl->rx_flow_ctrl_creds++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) cl_dbg(dev, cl, "rpm: autosuspend\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) pm_runtime_mark_last_busy(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) pm_runtime_put_autosuspend(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) nortpm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) if (rets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) mei_io_cb_free(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) return rets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
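/**
 * mei_ext_hdr_set_vtag - fill the vtag extended header
 *
 * @ext: extended header to fill
 * @vtag: vtag value
 *
 * Return: the length of the extended header in slots
 */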
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) static inline u8 mei_ext_hdr_set_vtag(struct mei_ext_hdr *ext, u8 vtag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) ext->type = MEI_EXT_HDR_VTAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) ext->ext_payload[0] = vtag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) ext->length = mei_data2slots(sizeof(*ext));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) return ext->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) * mei_msg_hdr_init - allocate and initialize mei message header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) * @cb: message callback structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) * Return: a pointer to an initialized header or an ERR_PTR on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) size_t hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) struct mei_ext_meta_hdr *meta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) struct mei_ext_hdr *ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) struct mei_msg_hdr *mei_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) bool is_ext, is_vtag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) if (!cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) /* Extended header for vtag is attached only on the first fragment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) is_vtag = (cb->vtag && cb->buf_idx == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) is_ext = is_vtag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) /* Compute extended header size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) hdr_len = sizeof(*mei_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) if (!is_ext)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) goto setup_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) hdr_len += sizeof(*meta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (is_vtag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) hdr_len += sizeof(*ext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) setup_hdr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) mei_hdr = kzalloc(hdr_len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) if (!mei_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) mei_hdr->me_addr = mei_cl_me_id(cb->cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) mei_hdr->internal = cb->internal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) mei_hdr->extended = is_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) if (!is_ext)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) meta = (struct mei_ext_meta_hdr *)mei_hdr->extension;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) if (is_vtag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) meta->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) meta->size += mei_ext_hdr_set_vtag(meta->hdrs, cb->vtag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) mei_hdr->length = hdr_len - sizeof(*mei_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) return mei_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) }
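
/*
 * Illustrative layout of the buffer returned by mei_msg_hdr_init() for
 * the first fragment of a message carrying a vtag (fields as set by the
 * code above, not literal byte counts):
 *
 *	struct mei_msg_hdr	extended = 1, length = meta + vtag ext
 *	struct mei_ext_meta_hdr	count = 1, size = vtag ext size in slots
 *	struct mei_ext_hdr	type = MEI_EXT_HDR_VTAG, ext_payload[0] = vtag
 *
 * mei_hdr->length covers only the extension area at this point; the
 * payload length is added by the callers below.
 */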
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) * mei_cl_irq_write - write a message to device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) * from the interrupt thread context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) * @cl: client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) * @cb: callback block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) * @cmpl_list: complete list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) * Return: 0 on success, otherwise an error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) struct list_head *cmpl_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) struct mei_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) struct mei_msg_data *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) struct mei_msg_hdr *mei_hdr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) size_t hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) size_t hbuf_len, dr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) size_t buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) size_t data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) int hbuf_slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) u32 dr_slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) u32 dma_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) int rets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) bool first_chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) const void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) if (WARN_ON(!cl || !cl->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) dev = cl->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) buf = &cb->buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) first_chunk = cb->buf_idx == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) if (rets < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) if (rets == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) cl_dbg(dev, cl, "No flow control credits: not sending.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) buf_len = buf->size - cb->buf_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) data = buf->data + cb->buf_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) hbuf_slots = mei_hbuf_empty_slots(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) if (hbuf_slots < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) rets = -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) dr_slots = mei_dma_ring_empty_slots(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) dr_len = mei_slots2data(dr_slots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) mei_hdr = mei_msg_hdr_init(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) if (IS_ERR(mei_hdr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) rets = PTR_ERR(mei_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) mei_hdr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) cl_dbg(dev, cl, "Extended Header %d vtag = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) mei_hdr->extended, cb->vtag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) hdr_len = sizeof(*mei_hdr) + mei_hdr->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) * Split the message only if we can write the whole host buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) * otherwise wait for the next time the host buffer is empty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) */
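/*
 * Worked example with assumed sizes (illustrative, not hardware values):
 * with a 16-byte header area and a 512-byte host buffer, a 400-byte
 * payload fits (16 + 400 <= 512) and is sent as one complete message.
 * A 1000-byte payload either goes through the DMA ring, in which case
 * only the 4-byte dma_len travels in the host buffer, or, when the
 * buffer is completely empty, is split and the first 512 - 16 = 496
 * bytes are written now with msg_complete left unset.
 */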
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) if (hdr_len + buf_len <= hbuf_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) data_len = buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) mei_hdr->msg_complete = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) } else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) mei_hdr->dma_ring = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) if (buf_len > dr_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) buf_len = dr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) mei_hdr->msg_complete = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) data_len = sizeof(dma_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) dma_len = buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) data = &dma_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) } else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) buf_len = hbuf_len - hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) data_len = buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) kfree(mei_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) mei_hdr->length += data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) if (mei_hdr->dma_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) mei_dma_ring_write(dev, buf->data + cb->buf_idx, buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) if (rets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) cl->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) cl->writing_state = MEI_WRITING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) cb->buf_idx += buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) if (first_chunk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) rets = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) if (mei_hdr->msg_complete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) list_move_tail(&cb->list, &dev->write_waiting_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) kfree(mei_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) kfree(mei_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) cl->status = rets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) list_move_tail(&cb->list, cmpl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) return rets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) * mei_cl_write - submit a write cb to mei device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) * assumes device_lock is locked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) * @cl: host client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) * @cb: write callback with filled data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) * Return: number of bytes sent on success, <0 on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) struct mei_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) struct mei_msg_data *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) struct mei_msg_hdr *mei_hdr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) size_t hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) size_t hbuf_len, dr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) size_t buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) size_t data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) int hbuf_slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) u32 dr_slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) u32 dma_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) ssize_t rets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) bool blocking;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) const void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) if (WARN_ON(!cl || !cl->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) if (WARN_ON(!cb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) dev = cl->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) buf = &cb->buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) buf_len = buf->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) cl_dbg(dev, cl, "buf_len=%zd\n", buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) blocking = cb->blocking;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) data = buf->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) rets = pm_runtime_get(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) if (rets < 0 && rets != -EINPROGRESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) pm_runtime_put_noidle(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) cl_err(dev, cl, "rpm: get failed %zd\n", rets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) cb->buf_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) cl->writing_state = MEI_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) rets = mei_cl_tx_flow_ctrl_creds(cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) if (rets < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) mei_hdr = mei_msg_hdr_init(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) if (IS_ERR(mei_hdr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) rets = PTR_ERR(mei_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) mei_hdr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) cl_dbg(dev, cl, "Extended Header %d vtag = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) mei_hdr->extended, cb->vtag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) hdr_len = sizeof(*mei_hdr) + mei_hdr->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) if (rets == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) cl_dbg(dev, cl, "No flow control credits: not sending.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) rets = buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) if (!mei_hbuf_acquire(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) rets = buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) hbuf_slots = mei_hbuf_empty_slots(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) if (hbuf_slots < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) rets = -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) dr_slots = mei_dma_ring_empty_slots(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) dr_len = mei_slots2data(dr_slots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)
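/*
 * Same split policy as in mei_cl_irq_write() above, except that here the
 * first chunk is written even when the host buffer is not completely
 * empty; any remainder is queued on the write list and sent later from
 * the interrupt path.
 */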
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) if (hdr_len + buf_len <= hbuf_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) data_len = buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) mei_hdr->msg_complete = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) } else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) mei_hdr->dma_ring = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) if (buf_len > dr_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) buf_len = dr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) mei_hdr->msg_complete = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) data_len = sizeof(dma_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) dma_len = buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) data = &dma_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) buf_len = hbuf_len - hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) data_len = buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) mei_hdr->length += data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) if (mei_hdr->dma_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) mei_dma_ring_write(dev, buf->data, buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) if (rets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) if (rets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) cl->writing_state = MEI_WRITING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) cb->buf_idx = buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) /* restore return value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) buf_len = buf->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) if (mei_hdr->msg_complete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) mei_tx_cb_enqueue(cb, &dev->write_waiting_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) mei_tx_cb_enqueue(cb, &dev->write_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) cb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) mutex_unlock(&dev->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) rets = wait_event_interruptible(cl->tx_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) cl->writing_state == MEI_WRITE_COMPLETE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) (!mei_cl_is_connected(cl)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) mutex_lock(&dev->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) /* wait_event_interruptible returns -ERESTARTSYS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) if (rets) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) if (signal_pending(current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) rets = -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) if (cl->writing_state != MEI_WRITE_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) rets = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) rets = buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) cl_dbg(dev, cl, "rpm: autosuspend\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) pm_runtime_mark_last_busy(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) pm_runtime_put_autosuspend(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) mei_io_cb_free(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) kfree(mei_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) return rets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) }
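
/*
 * Minimal usage sketch (illustrative only, error handling elided),
 * mirroring how the character device write path drives mei_cl_write();
 * mei_cl_alloc_cb() is assumed to be the callback allocator used
 * elsewhere in this driver:
 *
 *	mutex_lock(&dev->device_lock);
 *	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, fp);
 *	if (cb) {
 *		memcpy(cb->buf.data, data, length);
 *		rets = mei_cl_write(cl, cb);
 *	}
 *	mutex_unlock(&dev->device_lock);
 *
 * Ownership of the callback passes to mei_cl_write() in all cases: it is
 * either queued for completion or freed internally, so the caller must
 * not free it afterwards.
 */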
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) * mei_cl_complete - processes completed operation for a client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) * @cl: private data of the file object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) * @cb: callback block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) struct mei_device *dev = cl->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) switch (cb->fop_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) case MEI_FOP_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) mei_tx_cb_dequeue(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) cl->writing_state = MEI_WRITE_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) if (waitqueue_active(&cl->tx_wait)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) wake_up_interruptible(&cl->tx_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) pm_runtime_mark_last_busy(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) pm_request_autosuspend(dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) case MEI_FOP_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) mei_cl_add_rd_completed(cl, cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) if (!mei_cl_is_fixed_address(cl) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) !WARN_ON(!cl->rx_flow_ctrl_creds))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) cl->rx_flow_ctrl_creds--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) if (!mei_cl_bus_rx_event(cl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) wake_up_interruptible(&cl->rx_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) case MEI_FOP_CONNECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) case MEI_FOP_DISCONNECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) case MEI_FOP_NOTIFY_STOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) case MEI_FOP_NOTIFY_START:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) if (waitqueue_active(&cl->wait))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) wake_up(&cl->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) case MEI_FOP_DISCONNECT_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) mei_io_cb_free(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) mei_cl_set_disconnected(cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) BUG_ON(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) * mei_cl_all_disconnect - disconnect forcefully all connected clients
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) * @dev: mei device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) void mei_cl_all_disconnect(struct mei_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) struct mei_cl *cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) list_for_each_entry(cl, &dev->file_list, link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) mei_cl_set_disconnected(cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) }