/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/* Crude resource management */
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/ratelimit.h>
#include "iw_cxgb4.h"

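/*
 * Seed the QID table with the hardware QP ID range.  Only IDs aligned to
 * the db/gts page grouping (rdev->qpmask + 1) are made available here; the
 * remaining IDs in each group are handed out from the per-ucontext caches
 * in c4iw_get_cqid()/c4iw_get_qpid() below.
 */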
static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
{
	u32 i;

	if (c4iw_id_table_alloc(&rdev->resource.qid_table,
				rdev->lldi.vr->qp.start,
				rdev->lldi.vr->qp.size,
				rdev->lldi.vr->qp.size, 0))
		return -ENOMEM;

	for (i = rdev->lldi.vr->qp.start;
	     i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++)
		if (!(i & rdev->qpmask))
			c4iw_id_free(&rdev->resource.qid_table, i);
	return 0;
}

/* nr_* must be power of 2 */
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt,
		       u32 nr_pdid, u32 nr_srqt)
{
	int err = 0;

	err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
				  C4IW_ID_TABLE_F_RANDOM);
	if (err)
		goto tpt_err;
	err = c4iw_init_qid_table(rdev);
	if (err)
		goto qid_err;
	err = c4iw_id_table_alloc(&rdev->resource.pdid_table, 0,
				  nr_pdid, 1, 0);
	if (err)
		goto pdid_err;
	if (!nr_srqt)
		err = c4iw_id_table_alloc(&rdev->resource.srq_table, 0,
					  1, 1, 0);
	else
		err = c4iw_id_table_alloc(&rdev->resource.srq_table, 0,
					  nr_srqt, 0, 0);
	if (err)
		goto srq_err;
	return 0;
srq_err:
	c4iw_id_table_free(&rdev->resource.pdid_table);
pdid_err:
	c4iw_id_table_free(&rdev->resource.qid_table);
qid_err:
	c4iw_id_table_free(&rdev->resource.tpt_table);
tpt_err:
	return -ENOMEM;
}

/*
 * returns 0 if no resource available
 */
u32 c4iw_get_resource(struct c4iw_id_table *id_table)
{
	u32 entry;

	entry = c4iw_id_alloc(id_table);
	if (entry == (u32)(-1))
		return 0;
	return entry;
}

void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
{
	pr_debug("entry 0x%x\n", entry);
	c4iw_id_free(id_table, entry);
}

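/*
 * Hand out a CQ ID.  First try the ucontext's cache of previously
 * allocated IDs; if it is empty, pull a new aligned ID group from the
 * global table and cache the remaining IDs of that group on both the
 * cqid and qpid lists, since they all map to the same db/gts page.
 * Returns 0 if no ID is available.
 */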
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
	u32 qid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->cqids)) {
		entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
				   entry);
		list_del(&entry->entry);
		qid = entry->qid;
		kfree(entry);
	} else {
		qid = c4iw_get_resource(&rdev->resource.qid_table);
		if (!qid)
			goto out;
		mutex_lock(&rdev->stats.lock);
		rdev->stats.qid.cur += rdev->qpmask + 1;
		mutex_unlock(&rdev->stats.lock);
		for (i = qid + 1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->cqids);
		}

		/*
		 * now put the same ids on the qp list since they all
		 * map to the same db/gts page.
		 */
		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			goto out;
		entry->qid = qid;
		list_add_tail(&entry->entry, &uctx->qpids);
		for (i = qid + 1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	pr_debug("qid 0x%x\n", qid);
	mutex_lock(&rdev->stats.lock);
	if (rdev->stats.qid.cur > rdev->stats.qid.max)
		rdev->stats.qid.max = rdev->stats.qid.cur;
	mutex_unlock(&rdev->stats.lock);
	return qid;
}

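/*
 * Return a CQ ID to the ucontext's cache.  The ID goes back on the cqid
 * list rather than the global table, so it can be reused by this context
 * without another hardware-range allocation.  If the bookkeeping entry
 * cannot be allocated, the ID is simply dropped.
 */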
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return;
	pr_debug("qid 0x%x\n", qid);
	entry->qid = qid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->cqids);
	mutex_unlock(&uctx->lock);
}

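/*
 * Hand out a QP ID.  Mirrors c4iw_get_cqid(): serve from the ucontext's
 * qpid cache when possible, otherwise allocate a fresh aligned group from
 * the global table, update the usage stats, and cache the rest of the
 * group on both the qpid and cqid lists.  Returns 0 if no ID is available.
 */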
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
	u32 qid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->qpids)) {
		entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
				   entry);
		list_del(&entry->entry);
		qid = entry->qid;
		kfree(entry);
	} else {
		qid = c4iw_get_resource(&rdev->resource.qid_table);
		if (!qid) {
			mutex_lock(&rdev->stats.lock);
			rdev->stats.qid.fail++;
			mutex_unlock(&rdev->stats.lock);
			goto out;
		}
		mutex_lock(&rdev->stats.lock);
		rdev->stats.qid.cur += rdev->qpmask + 1;
		mutex_unlock(&rdev->stats.lock);
		for (i = qid + 1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}

		/*
		 * now put the same ids on the cq list since they all
		 * map to the same db/gts page.
		 */
		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			goto out;
		entry->qid = qid;
		list_add_tail(&entry->entry, &uctx->cqids);
		for (i = qid + 1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->cqids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	pr_debug("qid 0x%x\n", qid);
	mutex_lock(&rdev->stats.lock);
	if (rdev->stats.qid.cur > rdev->stats.qid.max)
		rdev->stats.qid.max = rdev->stats.qid.cur;
	mutex_unlock(&rdev->stats.lock);
	return qid;
}

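/*
 * Return a QP ID to the ucontext's qpid cache, analogous to
 * c4iw_put_cqid().
 */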
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return;
	pr_debug("qid 0x%x\n", qid);
	entry->qid = qid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->qpids);
	mutex_unlock(&uctx->lock);
}

void c4iw_destroy_resource(struct c4iw_resource *rscp)
{
	c4iw_id_table_free(&rscp->tpt_table);
	c4iw_id_table_free(&rscp->qid_table);
	c4iw_id_table_free(&rscp->pdid_table);
}

/*
 * PBL Memory Manager.  Uses Linux generic allocator.
 */

#define MIN_PBL_SHIFT 8			/* 256B == min PBL size (32 entries) */

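/*
 * Allocate @size bytes from the adapter's PBL region and return the
 * address within that region, or 0 on failure.  Usage statistics are
 * tracked under stats.lock, and the pool's kref pins it until every
 * allocation has been freed.
 */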
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);

	pr_debug("addr 0x%x size %d\n", (u32)addr, size);
	mutex_lock(&rdev->stats.lock);
	if (addr) {
		rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
		if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
			rdev->stats.pbl.max = rdev->stats.pbl.cur;
		kref_get(&rdev->pbl_kref);
	} else
		rdev->stats.pbl.fail++;
	mutex_unlock(&rdev->stats.lock);
	return (u32)addr;
}

static void destroy_pblpool(struct kref *kref)
{
	struct c4iw_rdev *rdev;

	rdev = container_of(kref, struct c4iw_rdev, pbl_kref);
	gen_pool_destroy(rdev->pbl_pool);
	complete(&rdev->pbl_compl);
}

void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	pr_debug("addr 0x%x size %d\n", addr, size);
	mutex_lock(&rdev->stats.lock);
	rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
	mutex_unlock(&rdev->stats.lock);
	gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
	kref_put(&rdev->pbl_kref, destroy_pblpool);
}

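/*
 * Create the PBL gen_pool and seed it with the adapter's PBL region.
 * gen_pool_add() may fail for a large chunk, in which case the chunk
 * size is halved and the add retried; once the chunk shrinks to 1024
 * minimum-sized blocks or less, the remainder is abandoned with a
 * warning.
 */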
int c4iw_pblpool_create(struct c4iw_rdev *rdev)
{
	unsigned pbl_start, pbl_chunk, pbl_top;

	rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
	if (!rdev->pbl_pool)
		return -ENOMEM;

	pbl_start = rdev->lldi.vr->pbl.start;
	pbl_chunk = rdev->lldi.vr->pbl.size;
	pbl_top = pbl_start + pbl_chunk;

	while (pbl_start < pbl_top) {
		pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
		if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
			pr_debug("failed to add PBL chunk (%x/%x)\n",
				 pbl_start, pbl_chunk);
			if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
				pr_warn("Failed to add all PBL chunks (%x/%x)\n",
					pbl_start, pbl_top - pbl_start);
				return 0;
			}
			pbl_chunk >>= 1;
		} else {
			pr_debug("added PBL chunk (%x/%x)\n",
				 pbl_start, pbl_chunk);
			pbl_start += pbl_chunk;
		}
	}

	return 0;
}

void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
{
	kref_put(&rdev->pbl_kref, destroy_pblpool);
}

/*
 * RQT Memory Manager.  Uses Linux generic allocator.
 */

#define MIN_RQT_SHIFT 10		/* 1KB == min RQT size (16 entries) */

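/*
 * Allocate RQT memory.  @size is given in RQT entries; each entry is
 * 64 bytes (per the 1KB/16-entry note above), hence the << 6 when
 * converting to bytes for the gen_pool.  Returns the address within the
 * RQT region or 0, logging a rate-limited warning when the pool is
 * exhausted.
 */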
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);

	pr_debug("addr 0x%x size %d\n", (u32)addr, size << 6);
	if (!addr)
		pr_warn_ratelimited("%s: Out of RQT memory\n",
				    pci_name(rdev->lldi.pdev));
	mutex_lock(&rdev->stats.lock);
	if (addr) {
		rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
		if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
			rdev->stats.rqt.max = rdev->stats.rqt.cur;
		kref_get(&rdev->rqt_kref);
	} else
		rdev->stats.rqt.fail++;
	mutex_unlock(&rdev->stats.lock);
	return (u32)addr;
}

static void destroy_rqtpool(struct kref *kref)
{
	struct c4iw_rdev *rdev;

	rdev = container_of(kref, struct c4iw_rdev, rqt_kref);
	gen_pool_destroy(rdev->rqt_pool);
	complete(&rdev->rqt_compl);
}

void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	pr_debug("addr 0x%x size %d\n", addr, size << 6);
	mutex_lock(&rdev->stats.lock);
	rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
	mutex_unlock(&rdev->stats.lock);
	gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
	kref_put(&rdev->rqt_kref, destroy_rqtpool);
}

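/*
 * Create the RQT gen_pool and seed it with the adapter's RQ region,
 * using the same halve-and-retry scheme as c4iw_pblpool_create().
 */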
int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
{
	unsigned rqt_start, rqt_chunk, rqt_top;
	int skip = 0;

	rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
	if (!rdev->rqt_pool)
		return -ENOMEM;

	/*
	 * If SRQs are supported, then never use the first RQE from
	 * the RQT region. This is because HW uses RQT index 0 as NULL.
	 */
	if (rdev->lldi.vr->srq.size)
		skip = T4_RQT_ENTRY_SIZE;

	rqt_start = rdev->lldi.vr->rq.start + skip;
	rqt_chunk = rdev->lldi.vr->rq.size - skip;
	rqt_top = rqt_start + rqt_chunk;

	while (rqt_start < rqt_top) {
		rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
		if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
			pr_debug("failed to add RQT chunk (%x/%x)\n",
				 rqt_start, rqt_chunk);
			if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
				pr_warn("Failed to add all RQT chunks (%x/%x)\n",
					rqt_start, rqt_top - rqt_start);
				return 0;
			}
			rqt_chunk >>= 1;
		} else {
			pr_debug("added RQT chunk (%x/%x)\n",
				 rqt_start, rqt_chunk);
			rqt_start += rqt_chunk;
		}
	}
	return 0;
}

void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
{
	kref_put(&rdev->rqt_kref, destroy_rqtpool);
}

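/*
 * Allocate an SRQ table index and update the SRQ usage statistics.
 * Returns -ENOMEM when the table is exhausted.
 */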
int c4iw_alloc_srq_idx(struct c4iw_rdev *rdev)
{
	int idx;

	idx = c4iw_id_alloc(&rdev->resource.srq_table);
	mutex_lock(&rdev->stats.lock);
	if (idx == -1) {
		rdev->stats.srqt.fail++;
		mutex_unlock(&rdev->stats.lock);
		return -ENOMEM;
	}
	rdev->stats.srqt.cur++;
	if (rdev->stats.srqt.cur > rdev->stats.srqt.max)
		rdev->stats.srqt.max = rdev->stats.srqt.cur;
	mutex_unlock(&rdev->stats.lock);
	return idx;
}

void c4iw_free_srq_idx(struct c4iw_rdev *rdev, int idx)
{
	c4iw_id_free(&rdev->resource.srq_table, idx);
	mutex_lock(&rdev->stats.lock);
	rdev->stats.srqt.cur--;
	mutex_unlock(&rdev->stats.lock);
}

/*
 * On-Chip QP Memory.
 */
#define MIN_OCQP_SHIFT 12		/* 4KB == min ocqp size */

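/*
 * Allocate @size bytes of on-chip QP memory.  Returns the address within
 * the OCQP region or 0 on failure; unlike the PBL/RQT pools, no kref pins
 * this pool and no failure counter is kept.
 */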
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);

	pr_debug("addr 0x%x size %d\n", (u32)addr, size);
	if (addr) {
		mutex_lock(&rdev->stats.lock);
		rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT);
		if (rdev->stats.ocqp.cur > rdev->stats.ocqp.max)
			rdev->stats.ocqp.max = rdev->stats.ocqp.cur;
		mutex_unlock(&rdev->stats.lock);
	}
	return (u32)addr;
}

void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	pr_debug("addr 0x%x size %d\n", addr, size);
	mutex_lock(&rdev->stats.lock);
	rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT);
	mutex_unlock(&rdev->stats.lock);
	gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size);
}

int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
{
	unsigned start, chunk, top;

	rdev->ocqp_pool = gen_pool_create(MIN_OCQP_SHIFT, -1);
	if (!rdev->ocqp_pool)
		return -ENOMEM;

	start = rdev->lldi.vr->ocq.start;
	chunk = rdev->lldi.vr->ocq.size;
	top = start + chunk;

	while (start < top) {
		chunk = min(top - start + 1, chunk);
		if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) {
			pr_debug("failed to add OCQP chunk (%x/%x)\n",
				 start, chunk);
			if (chunk <= 1024 << MIN_OCQP_SHIFT) {
				pr_warn("Failed to add all OCQP chunks (%x/%x)\n",
					start, top - start);
				return 0;
			}
			chunk >>= 1;
		} else {
			pr_debug("added OCQP chunk (%x/%x)\n",
				 start, chunk);
			start += chunk;
		}
	}
	return 0;
}

void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev)
{
	gen_pool_destroy(rdev->ocqp_pool);
}