// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * Module Name:
 *  dpcsup.c
 *
 * Abstract: All DPC processing routines for the cyclone board occur here.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>

#include "aacraid.h"

/**
 *	aac_response_normal	-	Handle command replies
 *	@q: Queue to read from
 *
 *	This DPC routine is run when the adapter interrupts us to let us
 *	know there is a response on our normal priority queue. We pull off
 *	all the QEs there are and wake up all the waiters before exiting.
 *	The queue spinlock is held while the queue is being manipulated.
 */

unsigned int aac_response_normal(struct aac_queue * q)
{
	struct aac_dev * dev = q->dev;
	struct aac_entry *entry;
	struct hw_fib * hwfib;
	struct fib * fib;
	int consumed = 0;
	unsigned long flags, mflags;

	spin_lock_irqsave(q->lock, flags);
	/*
	 *	Keep pulling response QEs off the response queue and waking
	 *	up the waiters until there are no more QEs. We then return
	 *	to the system. If no response was requested we just
	 *	deallocate the Fib here and continue.
	 */
	while (aac_consumer_get(dev, q, &entry))
	{
		int fast;
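		/*
		 * The queue entry's addr field encodes both the completion
		 * reference and a flag: bit 0 marks a fast (status-only)
		 * response, and index >> 2 selects the fib in dev->fibs.
		 */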
		u32 index = le32_to_cpu(entry->addr);
		fast = index & 0x01;
		fib = &dev->fibs[index >> 2];
		hwfib = fib->hw_fib_va;

		aac_consumer_free(dev, q, HostNormRespQueue);
		/*
		 *	Remove this fib from the Outstanding I/O queue.
		 *	But only if it has not already been timed out.
		 *
		 *	If the fib has been timed out already, then just
		 *	continue. The caller has already been notified that
		 *	the fib timed out.
		 */
		atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);

		if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			spin_unlock_irqrestore(q->lock, flags);
			aac_fib_complete(fib);
			aac_fib_free(fib);
			spin_lock_irqsave(q->lock, flags);
			continue;
		}
		spin_unlock_irqrestore(q->lock, flags);

		if (fast) {
			/*
			 *	Doctor the fib: a fast response returns no
			 *	FIB data, so fill in an ST_OK status and
			 *	mark the FIB as processed by the adapter.
			 */
			*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
			hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
			fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
		}

		FIB_COUNTER_INCREMENT(aac_config.FibRecved);

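		/*
		 * NuFileSystem replies may set bits in the upper half of
		 * the status word; they are not treated as errors here, so
		 * normalize the status to ST_OK for the waiter.
		 */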
		if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
		{
			__le32 *pstatus = (__le32 *)hwfib->data;
			if (*pstatus & cpu_to_le32(0xffff0000))
				*pstatus = cpu_to_le32(ST_OK);
		}
		if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
		{
			if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected)) {
				FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
			} else {
				FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
			}
			/*
			 *	NOTE:  we cannot touch the fib after this
			 *	call, because it may have been deallocated.
			 */
			fib->callback(fib->callback_data, fib);
		} else {
			unsigned long flagv;
			spin_lock_irqsave(&fib->event_lock, flagv);
			if (!fib->done) {
				fib->done = 1;
				complete(&fib->event_wait);
			}
			spin_unlock_irqrestore(&fib->event_lock, flagv);

			spin_lock_irqsave(&dev->manage_lock, mflags);
			dev->management_fib_count--;
			spin_unlock_irqrestore(&dev->manage_lock, mflags);

			FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
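			/*
			 * done == 2 indicates the waiter has already given
			 * up on this fib, so nobody else will release it;
			 * complete and free it here.
			 */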
			if (fib->done == 2) {
				spin_lock_irqsave(&fib->event_lock, flagv);
				fib->done = 0;
				spin_unlock_irqrestore(&fib->event_lock, flagv);
				aac_fib_complete(fib);
				aac_fib_free(fib);
			}
		}
		consumed++;
		spin_lock_irqsave(q->lock, flags);
	}

	if (consumed > aac_config.peak_fibs)
		aac_config.peak_fibs = consumed;
	if (consumed == 0)
		aac_config.zero_fibs++;

	spin_unlock_irqrestore(q->lock, flags);
	return 0;
}


/**
 *	aac_command_normal	-	handle commands
 *	@q: queue to process
 *
 *	This DPC routine is queued when the adapter interrupts us to
 *	let us know there is a command on our normal priority queue. We
 *	pull off all the QEs there are and wake up all the waiters before
 *	exiting. The queue spinlock is held while the queue is being
 *	manipulated.
 */

unsigned int aac_command_normal(struct aac_queue *q)
{
	struct aac_dev * dev = q->dev;
	struct aac_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(q->lock, flags);

	/*
	 *	Keep pulling command QEs off the command queue and waking
	 *	up the waiters until there are no more QEs. We then return
	 *	to the system.
	 */
	while (aac_consumer_get(dev, q, &entry))
	{
		struct fib fibctx;
		struct hw_fib * hw_fib;
		u32 index;
		struct fib *fib = &fibctx;

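		/*
		 * The queue entry holds a byte offset into the adapter's
		 * AIF area (aif_base_va); dividing by the hardware FIB size
		 * converts it into an index into that array.
		 */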
		index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
		hw_fib = &dev->aif_base_va[index];

		/*
		 *	Allocate a FIB at all costs. For non-queued work we
		 *	can just use the on-stack fib, and we fall back to it
		 *	if the allocation for the AIF thread fails. We need a
		 *	fib object in order to manage the linked lists.
		 */
		if (dev->aif_thread)
			if ((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL)
				fib = &fibctx;

		memset(fib, 0, sizeof(struct fib));
		INIT_LIST_HEAD(&fib->fiblink);
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;

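		/*
		 * If the AIF thread is running and we got a real fib, hand
		 * the command over to the thread; otherwise answer the
		 * adapter immediately with ST_OK.
		 */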
		if (dev->aif_thread && fib != &fibctx) {
			list_add_tail(&fib->fiblink, &q->cmdq);
			aac_consumer_free(dev, q, HostNormCmdQueue);
			wake_up_interruptible(&q->cmdready);
		} else {
			aac_consumer_free(dev, q, HostNormCmdQueue);
			spin_unlock_irqrestore(q->lock, flags);
			/*
			 *	Set the status of this FIB
			 */
			*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
			aac_fib_adapter_complete(fib, sizeof(u32));
			spin_lock_irqsave(q->lock, flags);
		}
	}
	spin_unlock_irqrestore(q->lock, flags);
	return 0;
}

/*
 *	aac_aif_callback
 *	@context: the context set in the fib - here it is the AIF fib itself
 *	@fibptr: pointer to the fib
 *
 *	Handles the AIFs - new method (SRC)
 */

static void aac_aif_callback(void *context, struct fib * fibptr)
{
	struct fib *fibctx;
	struct aac_dev *dev;
	struct aac_aifcmd *cmd;

	fibctx = (struct fib *)context;
	BUG_ON(fibptr == NULL);
	dev = fibptr->dev;

	if ((fibptr->hw_fib_va->header.XferState &
	    cpu_to_le32(NoMoreAifDataAvailable)) ||
	    dev->sa_firmware) {
		aac_fib_complete(fibptr);
		aac_fib_free(fibptr);
		return;
	}

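	/*
	 * Hand the received AIF to the common handler so it is queued to
	 * the AIF thread, then re-arm: reinitialize this fib and post a
	 * fresh AifRequest so an AIF request stays outstanding.
	 */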
	aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va);

	aac_fib_init(fibctx);
	cmd = (struct aac_aifcmd *) fib_data(fibctx);
	cmd->command = cpu_to_le32(AifReqEvent);

	aac_fib_send(AifRequest,
		     fibctx,
		     sizeof(struct hw_fib) - sizeof(struct aac_fibhdr),
		     FsaNormal,
		     0, 1,
		     (fib_callback)aac_aif_callback, fibctx);
}


/*
 *	aac_intr_normal	-	Handle command replies
 *	@dev: Device
 *	@index: completion reference
 *	@isAif: non-zero for AIF events (1: queue to the AIF thread,
 *		2: new-style SRC AIF)
 *	@isFastResponse: non-zero for fast (status-only) responses
 *	@aif_fib: hardware FIB carrying the AIF data, if any
 *
 *	This DPC routine is run when the adapter interrupts us to let us
 *	know there is a response on our normal priority queue. We pull off
 *	all the QEs there are and wake up all the waiters before exiting.
 */
unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
			     int isFastResponse, struct hw_fib *aif_fib)
{
	unsigned long mflags;
	dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
	if (isAif == 1) {	/* AIF - common */
		struct hw_fib * hw_fib;
		struct fib * fib;
		struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
		unsigned long flags;

		/*
		 *	Allocate a FIB and a hardware FIB to hold the AIF.
		 *	The fib is queued to the AIF thread, so we cannot
		 *	fall back to the stack here; if the thread is not
		 *	running or an allocation fails, the event is not
		 *	queued.
		 */
		if ((!dev->aif_thread)
		 || (!(fib = kzalloc(sizeof(struct fib), GFP_ATOMIC))))
			return 1;
		if (!(hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC))) {
			kfree(fib);
			return 1;
		}
		if (dev->sa_firmware) {
			fib->hbacmd_size = index;	/* store event type */
		} else if (aif_fib != NULL) {
			memcpy(hw_fib, aif_fib, sizeof(struct hw_fib));
		} else {
			memcpy(hw_fib, (struct hw_fib *)
				(((uintptr_t)(dev->regs.sa)) + index),
				sizeof(struct hw_fib));
		}
		INIT_LIST_HEAD(&fib->fiblink);
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;

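		/*
		 * Queue the AIF on the host command queue and wake the AIF
		 * thread to process it.
		 */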
		spin_lock_irqsave(q->lock, flags);
		list_add_tail(&fib->fiblink, &q->cmdq);
		wake_up_interruptible(&q->cmdready);
		spin_unlock_irqrestore(q->lock, flags);
		return 1;
	} else if (isAif == 2) {	/* AIF - new (SRC) */
		struct fib *fibctx;
		struct aac_aifcmd *cmd;

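		/*
		 * New-style (SRC) AIF delivery: post an AifRequest fib to
		 * the adapter; aac_aif_callback handles each delivered
		 * event and re-posts the request.
		 */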
		fibctx = aac_fib_alloc(dev);
		if (!fibctx)
			return 1;
		aac_fib_init(fibctx);

		cmd = (struct aac_aifcmd *) fib_data(fibctx);
		cmd->command = cpu_to_le32(AifReqEvent);

		return aac_fib_send(AifRequest,
				    fibctx,
				    sizeof(struct hw_fib) - sizeof(struct aac_fibhdr),
				    FsaNormal,
				    0, 1,
				    (fib_callback)aac_aif_callback, fibctx);
	} else {
		struct fib *fib = &dev->fibs[index];
		int start_callback = 0;

		/*
		 *	Remove this fib from the Outstanding I/O queue.
		 *	But only if it has not already been timed out.
		 *
		 *	If the fib has been timed out already, then just
		 *	continue. The caller has already been notified that
		 *	the fib timed out.
		 */
		atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);

		if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			aac_fib_complete(fib);
			aac_fib_free(fib);
			return 0;
		}

		FIB_COUNTER_INCREMENT(aac_config.FibRecved);

		if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
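			/*
			 * Native HBA fib: there is no hw_fib header to
			 * doctor; the response is consumed either by the
			 * callback or by the waiter woken below.
			 */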
			if (isFastResponse)
				fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;

			if (fib->callback) {
				start_callback = 1;
			} else {
				unsigned long flagv;
				int completed = 0;

				dprintk((KERN_INFO "event_wait up\n"));
				spin_lock_irqsave(&fib->event_lock, flagv);
				if (fib->done == 2) {
					fib->done = 1;
					completed = 1;
				} else {
					fib->done = 1;
					complete(&fib->event_wait);
				}
				spin_unlock_irqrestore(&fib->event_lock, flagv);

				spin_lock_irqsave(&dev->manage_lock, mflags);
				dev->management_fib_count--;
				spin_unlock_irqrestore(&dev->manage_lock,
					mflags);

				FIB_COUNTER_INCREMENT(aac_config.NativeRecved);
				if (completed)
					aac_fib_complete(fib);
			}
		} else {
			struct hw_fib *hwfib = fib->hw_fib_va;

			if (isFastResponse) {
				/*
				 * Doctor the fib: a fast response returns no
				 * FIB data, so fill in ST_OK and mark the fib
				 * as processed by the adapter.
				 */
				*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
				hwfib->header.XferState |=
					cpu_to_le32(AdapterProcessed);
				fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
			}

			if (hwfib->header.Command ==
				cpu_to_le16(NuFileSystem)) {
				__le32 *pstatus = (__le32 *)hwfib->data;

				if (*pstatus & cpu_to_le32(0xffff0000))
					*pstatus = cpu_to_le32(ST_OK);
			}
			if (hwfib->header.XferState &
				cpu_to_le32(NoResponseExpected | Async)) {
				if (hwfib->header.XferState & cpu_to_le32(
					NoResponseExpected)) {
					FIB_COUNTER_INCREMENT(
						aac_config.NoResponseRecved);
				} else {
					FIB_COUNTER_INCREMENT(
						aac_config.AsyncRecved);
				}
				start_callback = 1;
			} else {
				unsigned long flagv;
				int completed = 0;

				dprintk((KERN_INFO "event_wait up\n"));
				spin_lock_irqsave(&fib->event_lock, flagv);
				if (fib->done == 2) {
					fib->done = 1;
					completed = 1;
				} else {
					fib->done = 1;
					complete(&fib->event_wait);
				}
				spin_unlock_irqrestore(&fib->event_lock, flagv);

				spin_lock_irqsave(&dev->manage_lock, mflags);
				dev->management_fib_count--;
				spin_unlock_irqrestore(&dev->manage_lock,
					mflags);

				FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
				if (completed)
					aac_fib_complete(fib);
			}
		}

		if (start_callback) {
			/*
			 *	NOTE:  we cannot touch the fib after this
			 *	call, because it may have been deallocated.
			 */
			if (likely(fib->callback && fib->callback_data)) {
				fib->callback(fib->callback_data, fib);
			} else {
				aac_fib_complete(fib);
				aac_fib_free(fib);
			}
		}
		return 0;
	}
}