// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>
#include <linux/io.h>
#include <linux/build_bug.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include "ipa.h"
#include "ipa_version.h"
#include "ipa_endpoint.h"
#include "ipa_table.h"
#include "ipa_reg.h"
#include "ipa_mem.h"
#include "ipa_cmd.h"
#include "gsi.h"
#include "gsi_trans.h"

/**
 * DOC: IPA Filter and Route Tables
 *
 * The IPA has tables defined in its local shared memory that define filter
 * and routing rules.  Each entry in these tables contains a 64-bit DMA
 * address that refers to DRAM (system memory) containing a rule definition.
 * A rule consists of a contiguous block of 32-bit values terminated with
 * 32 zero bits.  A special "zero entry" rule consisting of 64 zero bits
 * represents "no filtering" or "no routing," and is the reset value for
 * filter or route table rules.  Separate tables (both filter and route)
 * are used for IPv4 and IPv6.  Additionally, there can be hashed filter or
 * route tables, which are used when a hash of message metadata matches.
 * Hashed operation is not supported by all IPA hardware.
 *
 * Each filter rule is associated with an AP or modem TX endpoint, though
 * not all TX endpoints support filtering.  The first 64-bit entry in a
 * filter table is a bitmap indicating which endpoints have entries in
 * the table.  The low-order bit (bit 0) in this bitmap represents a
 * special global filter, which applies to all traffic.  This is not
 * used in the current code.  Bit 1, if set, indicates that there is an
 * entry (i.e. a DMA address referring to a rule) for endpoint 0 in the
 * table.  Bit 2, if set, indicates there is an entry for endpoint 1,
 * and so on.  Space is set aside in IPA local memory to hold as many
 * filter table entries as might be required, but typically they are not
 * all used.
 *
 * The AP initializes all entries in a filter table to refer to a "zero"
 * entry.  Once initialized the modem and AP update the entries for
 * endpoints they "own" directly.  Currently the AP does not use the
 * IPA filtering functionality.
 *
 *                        IPA Filter Table
 *                 ----------------------
 * endpoint bitmap | 0x0000000000000048 | Bits 3 and 6 set (endpoints 2 and 5)
 *                 |--------------------|
 * 1st endpoint    | 0x000123456789abc0 | DMA address for modem endpoint 2 rule
 *                 |--------------------|
 * 2nd endpoint    | 0x000123456789abf0 | DMA address for AP endpoint 5 rule
 *                 |--------------------|
 * (unused)        |                    | (Unused space in filter table)
 *                 |--------------------|
 *                          . . .
 *                 |--------------------|
 * (unused)        |                    | (Unused space in filter table)
 *                 ----------------------
 *
 * The set of available route rules is divided about equally between the AP
 * and modem.  The AP initializes all entries in a route table to refer to
 * a "zero entry".  Once initialized, the modem and AP are responsible for
 * updating their own entries.  All entries in a route table are usable,
 * though the AP currently does not use the IPA routing functionality.
 *
 *                      IPA Route Table
 *                 ----------------------
 * 1st modem route | 0x0001234500001100 | DMA address for first route rule
 *                 |--------------------|
 * 2nd modem route | 0x0001234500001140 | DMA address for second route rule
 *                 |--------------------|
 *                          . . .
 *                 |--------------------|
 * Last modem route| 0x0001234500002280 | DMA address for Nth route rule
 *                 |--------------------|
 * 1st AP route    | 0x0001234500001100 | DMA address for route rule (N+1)
 *                 |--------------------|
 * 2nd AP route    | 0x0001234500001140 | DMA address for next route rule
 *                 |--------------------|
 *                          . . .
 *                 |--------------------|
 * Last AP route   | 0x0001234500002280 | DMA address for last route rule
 *                 ----------------------
 */

/* IPA hardware constrains filter and route table alignment */
#define IPA_TABLE_ALIGN			128	/* Minimum table alignment */

/* Assignment of route table entries to the modem and AP */
#define IPA_ROUTE_MODEM_MIN		0
#define IPA_ROUTE_MODEM_COUNT		8

#define IPA_ROUTE_AP_MIN		IPA_ROUTE_MODEM_COUNT
#define IPA_ROUTE_AP_COUNT \
		(IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT)
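
/* Purely illustrative: if IPA_ROUTE_COUNT_MAX were 15 (its actual value
 * is defined elsewhere in the driver), route table entries 0 through 7
 * would belong to the modem and entries 8 through 14 to the AP.
 */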

/* Filter or route rules consist of a set of 32-bit values followed by a
 * 32-bit all-zero rule list terminator.  The "zero rule" is simply an
 * all-zero rule followed by the list terminator.
 */
#define IPA_ZERO_RULE_SIZE		(2 * sizeof(__le32))

#ifdef IPA_VALIDATE

/* Check things that can be validated at build time. */
static void ipa_table_validate_build(void)
{
        /* IPA hardware accesses memory 128 bytes at a time.  Addresses
         * referred to by entries in filter and route tables must be
         * aligned on 128-byte boundaries.  The only rule address ever
         * used is that of the "zero rule", and it's aligned at the base
         * of a coherent DMA allocation.
         */
        BUILD_BUG_ON(ARCH_DMA_MINALIGN % IPA_TABLE_ALIGN);

        /* Filter and route tables contain DMA addresses that refer to
         * filter or route rules.  We use a fixed constant to represent
         * the size of either type of table entry.  Code in ipa_table_init()
         * uses a pointer to __le64 to initialize table entries.
         */
        BUILD_BUG_ON(IPA_TABLE_ENTRY_SIZE != sizeof(dma_addr_t));
        BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(__le64));

        /* A "zero rule" is used to represent no filtering or no routing.
         * It is a 64-bit block of zeroed memory.  Code in ipa_table_init()
         * assumes that it can be written using a pointer to __le64.
         */
        BUILD_BUG_ON(IPA_ZERO_RULE_SIZE != sizeof(__le64));

        /* Impose a practical limit on the number of routes */
        BUILD_BUG_ON(IPA_ROUTE_COUNT_MAX > 32);
        /* The modem must be allotted at least one route table entry */
        BUILD_BUG_ON(!IPA_ROUTE_MODEM_COUNT);
        /* But it can't have more than what is available */
        BUILD_BUG_ON(IPA_ROUTE_MODEM_COUNT > IPA_ROUTE_COUNT_MAX);
}

static bool
ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed)
{
        struct device *dev = &ipa->pdev->dev;
        const struct ipa_mem *mem;
        u32 size;

        if (route) {
                if (ipv6)
                        mem = hashed ? &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]
                                     : &ipa->mem[IPA_MEM_V6_ROUTE];
                else
                        mem = hashed ? &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]
                                     : &ipa->mem[IPA_MEM_V4_ROUTE];
                size = IPA_ROUTE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE;
        } else {
                if (ipv6)
                        mem = hashed ? &ipa->mem[IPA_MEM_V6_FILTER_HASHED]
                                     : &ipa->mem[IPA_MEM_V6_FILTER];
                else
                        mem = hashed ? &ipa->mem[IPA_MEM_V4_FILTER_HASHED]
                                     : &ipa->mem[IPA_MEM_V4_FILTER];
                size = (1 + IPA_FILTER_COUNT_MAX) * IPA_TABLE_ENTRY_SIZE;
        }

        if (!ipa_cmd_table_valid(ipa, mem, route, ipv6, hashed))
                return false;

        /* mem->size >= size is sufficient, but we'll demand more */
        if (mem->size == size)
                return true;

        /* Hashed table regions can be zero size if hashing is not supported */
        if (hashed && !mem->size)
                return true;

        dev_err(dev, "IPv%c %s%s table region size 0x%02x, expected 0x%02x\n",
                ipv6 ? '6' : '4', hashed ? "hashed " : "",
                route ? "route" : "filter", mem->size, size);

        return false;
}

/* Verify the filter and route table memory regions are the expected size */
bool ipa_table_valid(struct ipa *ipa)
{
        bool valid = true;

        valid = valid && ipa_table_valid_one(ipa, false, false, false);
        valid = valid && ipa_table_valid_one(ipa, false, false, true);
        valid = valid && ipa_table_valid_one(ipa, false, true, false);
        valid = valid && ipa_table_valid_one(ipa, false, true, true);
        valid = valid && ipa_table_valid_one(ipa, true, false, false);
        valid = valid && ipa_table_valid_one(ipa, true, false, true);
        valid = valid && ipa_table_valid_one(ipa, true, true, false);
        valid = valid && ipa_table_valid_one(ipa, true, true, true);

        return valid;
}

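/* Check that the filter endpoint bitmap provided by configuration data
 * names at least one endpoint and no more than the driver can support.
 */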
bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_map)
{
        struct device *dev = &ipa->pdev->dev;
        u32 count;

        if (!filter_map) {
                dev_err(dev, "at least one filtering endpoint is required\n");

                return false;
        }

        count = hweight32(filter_map);
        if (count > IPA_FILTER_COUNT_MAX) {
                dev_err(dev, "too many filtering endpoints (%u, max %u)\n",
                        count, IPA_FILTER_COUNT_MAX);

                return false;
        }

        return true;
}

#else /* !IPA_VALIDATE */
static void ipa_table_validate_build(void)
{
}

#endif /* !IPA_VALIDATE */

/* Zero entry count means no table, so just return a 0 address */
static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
{
        u32 skip;

        if (!count)
                return 0;

        /* assert(count <= max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX)); */

        /* Skip over the zero rule and possibly the filter mask */
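        /* (ipa_table_init() places the zero rule in the first 64-bit slot
         * of the DMA buffer and the filter bitmap in the second; a filter
         * table image therefore starts at the bitmap slot, while a route
         * table image starts just past it.)
         */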
        skip = filter_mask ? 1 : 2;

        return ipa->table_addr + skip * sizeof(*ipa->table_virt);
}

static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
                                u16 first, u16 count, const struct ipa_mem *mem)
{
        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
        dma_addr_t addr;
        u32 offset;
        u16 size;

        /* Nothing to do if the table memory region is empty */
        if (!mem->size)
                return;

        if (filter)
                first++;	/* skip over bitmap */

        offset = mem->offset + first * IPA_TABLE_ENTRY_SIZE;
        size = count * IPA_TABLE_ENTRY_SIZE;
        addr = ipa_table_addr(ipa, false, count);

        ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, true);
}

/* Reset entries in a single filter table belonging to either the AP or
 * modem to refer to the zero entry.  The memory region supplied will be
 * one of the IPv4 or IPv6, hashed or non-hashed, filter tables.
 */
static int
ipa_filter_reset_table(struct ipa *ipa, const struct ipa_mem *mem, bool modem)
{
        u32 ep_mask = ipa->filter_map;
        u32 count = hweight32(ep_mask);
        struct gsi_trans *trans;
        enum gsi_ee_id ee_id;

        if (!mem->size)
                return 0;

        trans = ipa_cmd_trans_alloc(ipa, count);
        if (!trans) {
                dev_err(&ipa->pdev->dev,
                        "no transaction for %s filter reset\n",
                        modem ? "modem" : "AP");
                return -EBUSY;
        }

        ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP;
        while (ep_mask) {
                u32 endpoint_id = __ffs(ep_mask);
                struct ipa_endpoint *endpoint;

                ep_mask ^= BIT(endpoint_id);

                endpoint = &ipa->endpoint[endpoint_id];
                if (endpoint->ee_id != ee_id)
                        continue;

                ipa_table_reset_add(trans, true, endpoint_id, 1, mem);
        }

        gsi_trans_commit_wait(trans);

        return 0;
}

/* Theoretically, each filter table could have more filter slots to
 * update than the maximum number of commands in a transaction.  So
 * we do each table separately.
 */
static int ipa_filter_reset(struct ipa *ipa, bool modem)
{
        int ret;

        ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V4_FILTER], modem);
        if (ret)
                return ret;

        ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V4_FILTER_HASHED],
                                     modem);
        if (ret)
                return ret;

        ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V6_FILTER], modem);
        if (ret)
                return ret;
        ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V6_FILTER_HASHED],
                                     modem);

        return ret;
}

/* The AP routes and modem routes are each contiguous within the
 * table.  We can update each table with a single command, and we
 * won't exceed the per-transaction command limit.
 */
static int ipa_route_reset(struct ipa *ipa, bool modem)
{
        struct gsi_trans *trans;
        u16 first;
        u16 count;

        trans = ipa_cmd_trans_alloc(ipa, 4);
        if (!trans) {
                dev_err(&ipa->pdev->dev,
                        "no transaction for %s route reset\n",
                        modem ? "modem" : "AP");
                return -EBUSY;
        }

        if (modem) {
                first = IPA_ROUTE_MODEM_MIN;
                count = IPA_ROUTE_MODEM_COUNT;
        } else {
                first = IPA_ROUTE_AP_MIN;
                count = IPA_ROUTE_AP_COUNT;
        }

        ipa_table_reset_add(trans, false, first, count,
                            &ipa->mem[IPA_MEM_V4_ROUTE]);
        ipa_table_reset_add(trans, false, first, count,
                            &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]);

        ipa_table_reset_add(trans, false, first, count,
                            &ipa->mem[IPA_MEM_V6_ROUTE]);
        ipa_table_reset_add(trans, false, first, count,
                            &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]);

        gsi_trans_commit_wait(trans);

        return 0;
}

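/* Reset all filter and route table entries "owned" by either the modem
 * or the AP so that they refer to the zero rule again.
 */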
void ipa_table_reset(struct ipa *ipa, bool modem)
{
        struct device *dev = &ipa->pdev->dev;
        const char *ee_name;
        int ret;

        ee_name = modem ? "modem" : "AP";

        /* Report errors, but reset filter and route tables */
        ret = ipa_filter_reset(ipa, modem);
        if (ret)
                dev_err(dev, "error %d resetting filter table for %s\n",
                        ret, ee_name);

        ret = ipa_route_reset(ipa, modem);
        if (ret)
                dev_err(dev, "error %d resetting route table for %s\n",
                        ret, ee_name);
}

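/* Flush the hashed filter and route table caches in the IPA hardware */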
int ipa_table_hash_flush(struct ipa *ipa)
{
        u32 offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
        struct gsi_trans *trans;
        u32 val;

        /* IPA version 4.2 does not support hashed tables */
        if (ipa->version == IPA_VERSION_4_2)
                return 0;

        trans = ipa_cmd_trans_alloc(ipa, 1);
        if (!trans) {
                dev_err(&ipa->pdev->dev, "no transaction for hash flush\n");
                return -EBUSY;
        }

        val = IPV4_FILTER_HASH_FLUSH | IPV6_FILTER_HASH_FLUSH;
        val |= IPV6_ROUTER_HASH_FLUSH | IPV4_ROUTER_HASH_FLUSH;

        ipa_cmd_register_write_add(trans, offset, val, val, false);

        gsi_trans_commit_wait(trans);

        return 0;
}

static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
                               enum ipa_cmd_opcode opcode,
                               const struct ipa_mem *mem,
                               const struct ipa_mem *hash_mem)
{
        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
        dma_addr_t hash_addr;
        dma_addr_t addr;
        u16 hash_count;
        u16 hash_size;
        u16 count;
        u16 size;

        /* The number of filtering endpoints determines the number of
         * entries in the filter table.  The hashed and non-hashed filter
         * tables will have the same number of entries.  The size of the
         * route table region determines the number of entries it has.
         */
        if (filter) {
                /* Include one extra "slot" to hold the filter map itself */
                count = 1 + hweight32(ipa->filter_map);
                hash_count = hash_mem->size ? count : 0;
        } else {
                count = mem->size / IPA_TABLE_ENTRY_SIZE;
                hash_count = hash_mem->size / IPA_TABLE_ENTRY_SIZE;
        }
        size = count * IPA_TABLE_ENTRY_SIZE;
        hash_size = hash_count * IPA_TABLE_ENTRY_SIZE;

        addr = ipa_table_addr(ipa, filter, count);
        hash_addr = ipa_table_addr(ipa, filter, hash_count);

        ipa_cmd_table_init_add(trans, opcode, size, mem->offset, addr,
                               hash_size, hash_mem->offset, hash_addr);
}

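/* Initialize the IPv4 and IPv6 filter and route tables (hashed and
 * non-hashed) in IPA local memory using a single command transaction.
 */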
int ipa_table_setup(struct ipa *ipa)
{
        struct gsi_trans *trans;

        trans = ipa_cmd_trans_alloc(ipa, 4);
        if (!trans) {
                dev_err(&ipa->pdev->dev, "no transaction for table setup\n");
                return -EBUSY;
        }

        ipa_table_init_add(trans, false, IPA_CMD_IP_V4_ROUTING_INIT,
                           &ipa->mem[IPA_MEM_V4_ROUTE],
                           &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]);

        ipa_table_init_add(trans, false, IPA_CMD_IP_V6_ROUTING_INIT,
                           &ipa->mem[IPA_MEM_V6_ROUTE],
                           &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]);

        ipa_table_init_add(trans, true, IPA_CMD_IP_V4_FILTER_INIT,
                           &ipa->mem[IPA_MEM_V4_FILTER],
                           &ipa->mem[IPA_MEM_V4_FILTER_HASHED]);

        ipa_table_init_add(trans, true, IPA_CMD_IP_V6_FILTER_INIT,
                           &ipa->mem[IPA_MEM_V6_FILTER],
                           &ipa->mem[IPA_MEM_V6_FILTER_HASHED]);

        gsi_trans_commit_wait(trans);

        return 0;
}

void ipa_table_teardown(struct ipa *ipa)
{
        /* Nothing to do */	/* XXX Maybe reset the tables? */
}

/**
 * ipa_filter_tuple_zero() - Zero an endpoint's hashed filter tuple
 * @endpoint: Endpoint whose filter hash tuple should be zeroed
 *
 * Endpoint must be for the AP (not modem) and support filtering.  Updates
 * the filter hash values without changing route ones.
 */
static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
{
        u32 endpoint_id = endpoint->endpoint_id;
        u32 offset;
        u32 val;

        offset = IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(endpoint_id);

        val = ioread32(endpoint->ipa->reg_virt + offset);

        /* Zero all filter-related fields, preserving the rest */
        u32p_replace_bits(&val, 0, IPA_REG_ENDP_FILTER_HASH_MSK_ALL);

        iowrite32(val, endpoint->ipa->reg_virt + offset);
}

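/* Zero the hashed-filter tuple for each filtering endpoint owned by the
 * modem or the AP, as selected by the modem flag.
 */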
static void ipa_filter_config(struct ipa *ipa, bool modem)
{
        enum gsi_ee_id ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP;
        u32 ep_mask = ipa->filter_map;

        /* IPA version 4.2 has no hashed filter tables */
        if (ipa->version == IPA_VERSION_4_2)
                return;

        while (ep_mask) {
                u32 endpoint_id = __ffs(ep_mask);
                struct ipa_endpoint *endpoint;

                ep_mask ^= BIT(endpoint_id);

                endpoint = &ipa->endpoint[endpoint_id];
                if (endpoint->ee_id == ee_id)
                        ipa_filter_tuple_zero(endpoint);
        }
}

static void ipa_filter_deconfig(struct ipa *ipa, bool modem)
{
        /* Nothing to do */
}

static bool ipa_route_id_modem(u32 route_id)
{
        return route_id >= IPA_ROUTE_MODEM_MIN &&
               route_id <= IPA_ROUTE_MODEM_MIN + IPA_ROUTE_MODEM_COUNT - 1;
}

/**
 * ipa_route_tuple_zero() - Zero a hashed route table entry tuple
 * @ipa: IPA pointer
 * @route_id: Route table entry whose hash tuple should be zeroed
 *
 * Updates the route hash values without changing filter ones.
 */
static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id)
{
        u32 offset = IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(route_id);
        u32 val;

        val = ioread32(ipa->reg_virt + offset);

        /* Zero all route-related fields, preserving the rest */
        u32p_replace_bits(&val, 0, IPA_REG_ENDP_ROUTER_HASH_MSK_ALL);

        iowrite32(val, ipa->reg_virt + offset);
}

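/* Zero the hash tuple for every route table entry owned by either the
 * modem or the AP, as selected by the modem flag.
 */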
static void ipa_route_config(struct ipa *ipa, bool modem)
{
        u32 route_id;

        /* IPA version 4.2 has no hashed route tables */
        if (ipa->version == IPA_VERSION_4_2)
                return;

        for (route_id = 0; route_id < IPA_ROUTE_COUNT_MAX; route_id++)
                if (ipa_route_id_modem(route_id) == modem)
                        ipa_route_tuple_zero(ipa, route_id);
}

static void ipa_route_deconfig(struct ipa *ipa, bool modem)
{
        /* Nothing to do */
}

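/* Zero the hash tuples for all AP- and modem-owned filter endpoints and
 * route table entries.
 */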
void ipa_table_config(struct ipa *ipa)
{
        ipa_filter_config(ipa, false);
        ipa_filter_config(ipa, true);
        ipa_route_config(ipa, false);
        ipa_route_config(ipa, true);
}

void ipa_table_deconfig(struct ipa *ipa)
{
        ipa_route_deconfig(ipa, true);
        ipa_route_deconfig(ipa, false);
        ipa_filter_deconfig(ipa, true);
        ipa_filter_deconfig(ipa, false);
}

/*
 * Initialize a coherent DMA allocation containing initialized filter and
 * route table data.  This is used when initializing or resetting the IPA
 * filter or route table.
 *
 * The first entry in a filter table contains a bitmap indicating which
 * endpoints contain entries in the table.  In addition to that first entry,
 * there are at most IPA_FILTER_COUNT_MAX entries that follow.  Filter table
 * entries are 64 bits wide, and (other than the bitmap) contain the DMA
 * address of a filter rule.  A "zero rule" indicates no filtering, and
 * consists of 64 bits of zeroes.  When a filter table is initialized (or
 * reset) its entries are made to refer to the zero rule.
 *
 * Each entry in a route table is the DMA address of a routing rule.  For
 * routing there is also a 64-bit "zero rule" that means no routing, and
 * when a route table is initialized or reset, its entries are made to refer
 * to the zero rule.  The zero rule is shared for route and filter tables.
 *
 * Note that the IPA hardware requires a filter or route rule address to be
 * aligned on a 128 byte boundary.  The coherent DMA buffer we allocate here
 * has a minimum alignment, and we place the zero rule at the base of that
 * allocated space.  In ipa_table_init() we verify the minimum DMA allocation
 * meets our requirement.
 *
 *          +-------------------+
 *      --> |     zero rule     |
 *     /    |-------------------|
 *     |    |    filter mask    |
 *     |\   |-------------------|
 *     | ---- zero rule address | \
 *     |\   |-------------------|  |
 *     | ---- zero rule address |  | IPA_FILTER_COUNT_MAX
 *     |    |-------------------|   > or IPA_ROUTE_COUNT_MAX,
 *     |          ...           |  | whichever is greater
 *     \    |-------------------|  |
 *      ---- zero rule address  | /
 *          +-------------------+
 */
int ipa_table_init(struct ipa *ipa)
{
        u32 count = max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX);
        struct device *dev = &ipa->pdev->dev;
        dma_addr_t addr;
        __le64 le_addr;
        __le64 *virt;
        size_t size;

        ipa_table_validate_build();

        size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE;
        virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
        if (!virt)
                return -ENOMEM;

        ipa->table_virt = virt;
        ipa->table_addr = addr;

        /* First slot is the zero rule */
        *virt++ = 0;

        /* Next is the filter table bitmap.  The "soft" bitmap value
         * must be converted to the hardware representation by shifting
         * it left one position.  (Bit 0 represents global filtering,
         * which is possible but not used.)
         */
        *virt++ = cpu_to_le64((u64)ipa->filter_map << 1);

        /* All the rest contain the DMA address of the zero rule */
        le_addr = cpu_to_le64(addr);
        while (count--)
                *virt++ = le_addr;

        return 0;
}

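/* Inverse of ipa_table_init(); frees the coherent DMA memory holding the
 * zero rule, the filter bitmap, and the pre-initialized table entries.
 */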
void ipa_table_exit(struct ipa *ipa)
{
        u32 count = max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX);
        struct device *dev = &ipa->pdev->dev;
        size_t size;

        size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE;

        dma_free_coherent(dev, size, ipa->table_virt, ipa->table_addr);
        ipa->table_addr = 0;
        ipa->table_virt = NULL;
}