// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2009 by Martin Fuzzey
 */

/* this file is part of imx21-hcd.c */

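/*
 * CONFIG_DYNAMIC_DEBUG forces DEBUG on below, so the debugfs support in
 * this file is always built on dynamic-debug kernels; otherwise it is
 * only built when the driver is compiled with DEBUG defined.
 */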
#ifdef CONFIG_DYNAMIC_DEBUG
#define DEBUG
#endif

#ifndef DEBUG

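/*
 * Without DEBUG every debug hook collapses to an empty inline, so
 * imx21-hcd.c can call these functions unconditionally at no cost.
 */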
static inline void create_debug_files(struct imx21 *imx21) { }
static inline void remove_debug_files(struct imx21 *imx21) { }
static inline void debug_urb_submitted(struct imx21 *imx21, struct urb *urb) {}
static inline void debug_urb_completed(struct imx21 *imx21, struct urb *urb,
	int status) {}
static inline void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb) {}
static inline void debug_urb_queued_for_etd(struct imx21 *imx21,
	struct urb *urb) {}
static inline void debug_urb_queued_for_dmem(struct imx21 *imx21,
	struct urb *urb) {}
static inline void debug_etd_allocated(struct imx21 *imx21) {}
static inline void debug_etd_freed(struct imx21 *imx21) {}
static inline void debug_dmem_allocated(struct imx21 *imx21, int size) {}
static inline void debug_dmem_freed(struct imx21 *imx21, int size) {}
static inline void debug_isoc_submitted(struct imx21 *imx21,
	int frame, struct td *td) {}
static inline void debug_isoc_completed(struct imx21 *imx21,
	int frame, struct td *td, int cc, int len) {}

#else

#include <linux/debugfs.h>
#include <linux/seq_file.h>

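/*
 * Human-readable names for the direction, speed and format fields of
 * ETD dword 0, indexed by the raw register field values.
 */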
static const char *dir_labels[] = {
	"TD 0",
	"OUT",
	"IN",
	"TD 1"
};

static const char *speed_labels[] = {
	"Full",
	"Low"
};

static const char *format_labels[] = {
	"Control",
	"ISO",
	"Bulk",
	"Interrupt"
};

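/*
 * URB accounting is kept in two buckets: one for isochronous URBs and
 * one for everything else (control, bulk and interrupt).
 */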
static inline struct debug_stats *stats_for_urb(struct imx21 *imx21,
	struct urb *urb)
{
	return usb_pipeisoc(urb->pipe) ?
		&imx21->isoc_stats : &imx21->nonisoc_stats;
}

static void debug_urb_submitted(struct imx21 *imx21, struct urb *urb)
{
	stats_for_urb(imx21, urb)->submitted++;
}

static void debug_urb_completed(struct imx21 *imx21, struct urb *urb, int st)
{
	if (st)
		stats_for_urb(imx21, urb)->completed_failed++;
	else
		stats_for_urb(imx21, urb)->completed_ok++;
}

static void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb)
{
	stats_for_urb(imx21, urb)->unlinked++;
}

static void debug_urb_queued_for_etd(struct imx21 *imx21, struct urb *urb)
{
	stats_for_urb(imx21, urb)->queue_etd++;
}

static void debug_urb_queued_for_dmem(struct imx21 *imx21, struct urb *urb)
{
	stats_for_urb(imx21, urb)->queue_dmem++;
}

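/*
 * ETD and DMEM usage is tracked as a current value plus a high-water
 * mark, so the "status" debugfs file can report peak utilisation.
 */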
static inline void debug_etd_allocated(struct imx21 *imx21)
{
	imx21->etd_usage.maximum = max(
			++(imx21->etd_usage.value),
			imx21->etd_usage.maximum);
}

static inline void debug_etd_freed(struct imx21 *imx21)
{
	imx21->etd_usage.value--;
}

static inline void debug_dmem_allocated(struct imx21 *imx21, int size)
{
	imx21->dmem_usage.value += size;
	imx21->dmem_usage.maximum = max(
			imx21->dmem_usage.value,
			imx21->dmem_usage.maximum);
}

static inline void debug_dmem_freed(struct imx21 *imx21, int size)
{
	imx21->dmem_usage.value -= size;
}

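/*
 * Isochronous TDs are traced in a small ring buffer; when a traced TD
 * completes with a non-zero completion code its entry is copied into a
 * separate "failed" ring so failures are retained longer for inspection
 * via the "isoc" debugfs file.
 */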
static void debug_isoc_submitted(struct imx21 *imx21,
	int frame, struct td *td)
{
	struct debug_isoc_trace *trace = &imx21->isoc_trace[
					imx21->isoc_trace_index++];

	imx21->isoc_trace_index %= ARRAY_SIZE(imx21->isoc_trace);
	trace->schedule_frame = td->frame;
	trace->submit_frame = frame;
	trace->request_len = td->len;
	trace->td = td;
}

static inline void debug_isoc_completed(struct imx21 *imx21,
	int frame, struct td *td, int cc, int len)
{
	struct debug_isoc_trace *trace, *trace_failed;
	int i;
	int found = 0;

	trace = imx21->isoc_trace;
	for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++) {
		if (trace->td == td) {
			trace->done_frame = frame;
			trace->done_len = len;
			trace->cc = cc;
			trace->td = NULL;
			found = 1;
			break;
		}
	}

	if (found && cc) {
		trace_failed = &imx21->isoc_trace_failed[
					imx21->isoc_trace_index_failed++];

		imx21->isoc_trace_index_failed %= ARRAY_SIZE(
						imx21->isoc_trace_failed);
		*trace_failed = *trace;
	}
}


static char *format_ep(struct usb_host_endpoint *ep, char *buf, int bufsize)
{
	if (ep)
		snprintf(buf, bufsize, "ep_%02x (type:%02X kaddr:%p)",
			ep->desc.bEndpointAddress,
			usb_endpoint_type(&ep->desc),
			ep);
	else
		snprintf(buf, bufsize, "none");
	return buf;
}

static char *format_etd_dword0(u32 value, char *buf, int bufsize)
{
	snprintf(buf, bufsize,
		"addr=%d ep=%d dir=%s speed=%s format=%s halted=%d",
		value & 0x7F,
		(value >> DW0_ENDPNT) & 0x0F,
		dir_labels[(value >> DW0_DIRECT) & 0x03],
		speed_labels[(value >> DW0_SPEED) & 0x01],
		format_labels[(value >> DW0_FORMAT) & 0x03],
		(value >> DW0_HALTED) & 0x01);
	return buf;
}

static int debug_status_show(struct seq_file *s, void *v)
{
	struct imx21 *imx21 = s->private;
	int etds_allocated = 0;
	int etds_sw_busy = 0;
	int etds_hw_busy = 0;
	int dmem_blocks = 0;
	int queued_for_etd = 0;
	int queued_for_dmem = 0;
	unsigned int dmem_bytes = 0;
	int i;
	struct etd_priv *etd;
	u32 etd_enable_mask;
	unsigned long flags;
	struct imx21_dmem_area *dmem;
	struct ep_priv *ep_priv;

	spin_lock_irqsave(&imx21->lock, flags);

	etd_enable_mask = readl(imx21->regs + USBH_ETDENSET);
	for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) {
		if (etd->alloc)
			etds_allocated++;
		if (etd->urb)
			etds_sw_busy++;
		if (etd_enable_mask & (1 << i))
			etds_hw_busy++;
	}

	list_for_each_entry(dmem, &imx21->dmem_list, list) {
		dmem_bytes += dmem->size;
		dmem_blocks++;
	}

	list_for_each_entry(ep_priv, &imx21->queue_for_etd, queue)
		queued_for_etd++;

	list_for_each_entry(etd, &imx21->queue_for_dmem, queue)
		queued_for_dmem++;

	spin_unlock_irqrestore(&imx21->lock, flags);

	seq_printf(s,
		"Frame: %d\n"
		"ETDs allocated: %d/%d (max=%d)\n"
		"ETDs in use sw: %d\n"
		"ETDs in use hw: %d\n"
		"DMEM allocated: %d/%d (max=%d)\n"
		"DMEM blocks: %d\n"
		"Queued waiting for ETD: %d\n"
		"Queued waiting for DMEM: %d\n",
		readl(imx21->regs + USBH_FRMNUB) & 0xFFFF,
		etds_allocated, USB_NUM_ETD, imx21->etd_usage.maximum,
		etds_sw_busy,
		etds_hw_busy,
		dmem_bytes, DMEM_SIZE, imx21->dmem_usage.maximum,
		dmem_blocks,
		queued_for_etd,
		queued_for_dmem);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_status);

static int debug_dmem_show(struct seq_file *s, void *v)
{
	struct imx21 *imx21 = s->private;
	struct imx21_dmem_area *dmem;
	unsigned long flags;
	char ep_text[40];

	spin_lock_irqsave(&imx21->lock, flags);

	list_for_each_entry(dmem, &imx21->dmem_list, list)
		seq_printf(s,
			"%04X: size=0x%X ep=%s\n",
			dmem->offset, dmem->size,
			format_ep(dmem->ep, ep_text, sizeof(ep_text)));

	spin_unlock_irqrestore(&imx21->lock, flags);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_dmem);

static int debug_etd_show(struct seq_file *s, void *v)
{
	struct imx21 *imx21 = s->private;
	struct etd_priv *etd;
	char buf[60];
	u32 dword;
	int i, j;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

	for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) {
		int state = -1;
		struct urb_priv *urb_priv;

		if (etd->urb) {
			urb_priv = etd->urb->hcpriv;
			if (urb_priv)
				state = urb_priv->state;
		}

		seq_printf(s,
			"etd_num: %d\n"
			"ep: %s\n"
			"alloc: %d\n"
			"len: %d\n"
			"busy sw: %d\n"
			"busy hw: %d\n"
			"urb state: %d\n"
			"current urb: %p\n",

			i,
			format_ep(etd->ep, buf, sizeof(buf)),
			etd->alloc,
			etd->len,
			etd->urb != NULL,
			(readl(imx21->regs + USBH_ETDENSET) & (1 << i)) > 0,
			state,
			etd->urb);

		for (j = 0; j < 4; j++) {
			dword = etd_readl(imx21, i, j);
			switch (j) {
			case 0:
				format_etd_dword0(dword, buf, sizeof(buf));
				break;
			case 2:
				snprintf(buf, sizeof(buf),
					"cc=0X%02X", dword >> DW2_COMPCODE);
				break;
			default:
				*buf = 0;
				break;
			}
			seq_printf(s,
				"dword %d: submitted=%08X cur=%08X [%s]\n",
				j,
				etd->submitted_dwords[j],
				dword,
				buf);
		}
		seq_printf(s, "\n");
	}

	spin_unlock_irqrestore(&imx21->lock, flags);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_etd);

static void debug_statistics_show_one(struct seq_file *s,
	const char *name, struct debug_stats *stats)
{
	seq_printf(s, "%s:\n"
		"submitted URBs: %lu\n"
		"completed OK: %lu\n"
		"completed failed: %lu\n"
		"unlinked: %lu\n"
		"queued for ETD: %lu\n"
		"queued for DMEM: %lu\n\n",
		name,
		stats->submitted,
		stats->completed_ok,
		stats->completed_failed,
		stats->unlinked,
		stats->queue_etd,
		stats->queue_dmem);
}

static int debug_statistics_show(struct seq_file *s, void *v)
{
	struct imx21 *imx21 = s->private;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

	debug_statistics_show_one(s, "nonisoc", &imx21->nonisoc_stats);
	debug_statistics_show_one(s, "isoc", &imx21->isoc_stats);
	seq_printf(s, "unblock kludge triggers: %lu\n", imx21->debug_unblocks);
	spin_unlock_irqrestore(&imx21->lock, flags);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_statistics);

static void debug_isoc_show_one(struct seq_file *s,
	const char *name, int index, struct debug_isoc_trace *trace)
{
	seq_printf(s, "%s %d:\n"
		"cc=0X%02X\n"
		"scheduled frame %d (%d)\n"
		"submitted frame %d (%d)\n"
		"completed frame %d (%d)\n"
		"requested length=%d\n"
		"completed length=%d\n\n",
		name, index,
		trace->cc,
		trace->schedule_frame, trace->schedule_frame & 0xFFFF,
		trace->submit_frame, trace->submit_frame & 0xFFFF,
		trace->done_frame, trace->done_frame & 0xFFFF,
		trace->request_len,
		trace->done_len);
}

static int debug_isoc_show(struct seq_file *s, void *v)
{
	struct imx21 *imx21 = s->private;
	struct debug_isoc_trace *trace;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&imx21->lock, flags);

	trace = imx21->isoc_trace_failed;
	for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace_failed); i++, trace++)
		debug_isoc_show_one(s, "isoc failed", i, trace);

	trace = imx21->isoc_trace;
	for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++)
		debug_isoc_show_one(s, "isoc", i, trace);

	spin_unlock_irqrestore(&imx21->lock, flags);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_isoc);

static void create_debug_files(struct imx21 *imx21)
{
	struct dentry *root;

	root = debugfs_create_dir(dev_name(imx21->dev), usb_debug_root);
	imx21->debug_root = root;

	debugfs_create_file("status", S_IRUGO, root, imx21, &debug_status_fops);
	debugfs_create_file("dmem", S_IRUGO, root, imx21, &debug_dmem_fops);
	debugfs_create_file("etd", S_IRUGO, root, imx21, &debug_etd_fops);
	debugfs_create_file("statistics", S_IRUGO, root, imx21,
		&debug_statistics_fops);
	debugfs_create_file("isoc", S_IRUGO, root, imx21, &debug_isoc_fops);
}
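
/*
 * Illustrative only: with debugfs mounted in the usual place the files
 * created above typically appear under
 * /sys/kernel/debug/usb/<device name>/, e.g.
 *
 *   cat /sys/kernel/debug/usb/<device name>/status
 *   cat /sys/kernel/debug/usb/<device name>/etd
 *
 * The exact mount point and device name depend on the system
 * configuration.
 */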

static void remove_debug_files(struct imx21 *imx21)
{
	debugfs_remove_recursive(imx21->debug_root);
}

#endif