// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
 */

#include "pvrusb2-io.h"
#include "pvrusb2-debug.h"
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/mutex.h>

static const char *pvr2_buffer_state_decode(enum pvr2_buffer_state);

#define BUFFER_SIG 0x47653271

// #define SANITY_CHECK_BUFFERS


#ifdef SANITY_CHECK_BUFFERS
#define BUFFER_CHECK(bp) do { \
	if ((bp)->signature != BUFFER_SIG) { \
		pvr2_trace(PVR2_TRACE_ERROR_LEGS, \
			   "Buffer %p is bad at %s:%d", \
			   (bp), __FILE__, __LINE__); \
		pvr2_buffer_describe(bp, "BadSig"); \
		BUG(); \
	} \
} while (0)
#else
#define BUFFER_CHECK(bp) do {} while (0)
#endif

struct pvr2_stream {
	/* Buffers queued for reading */
	struct list_head queued_list;
	unsigned int q_count;
	unsigned int q_bcount;
	/* Buffers with retrieved data */
	struct list_head ready_list;
	unsigned int r_count;
	unsigned int r_bcount;
	/* Buffers available for use */
	struct list_head idle_list;
	unsigned int i_count;
	unsigned int i_bcount;
	/* Pointers to all buffers */
	struct pvr2_buffer **buffers;
	/* Array size of buffers */
	unsigned int buffer_slot_count;
	/* Total buffers actually in circulation */
	unsigned int buffer_total_count;
	/* Desired number of buffers to be in circulation */
	unsigned int buffer_target_count;
	/* Executed when the ready list becomes non-empty */
	pvr2_stream_callback callback_func;
	void *callback_data;
	/* Context for transfer endpoint */
	struct usb_device *dev;
	int endpoint;
	/* Overhead for mutex enforcement */
	spinlock_t list_lock;
	struct mutex mutex;
	/* Tracking state for tolerating errors */
	unsigned int fail_count;
	unsigned int fail_tolerance;

	unsigned int buffers_processed;
	unsigned int buffers_failed;
	unsigned int bytes_processed;
};

struct pvr2_buffer {
	int id;
	int signature;
	enum pvr2_buffer_state state;
	void *ptr;               /* Pointer to storage area */
	unsigned int max_count;  /* Size of storage area */
	unsigned int used_count; /* Amount of valid data in storage area */
	int status;              /* Transfer result status */
	struct pvr2_stream *stream;
	struct list_head list_overhead;
	struct urb *purb;
};
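
/*
 * Buffer life cycle, as implemented by the pvr2_buffer_set_*() helpers
 * below: a buffer starts out in the "none" state, becomes "idle" once it
 * joins the pool, "queued" while its URB is with the USB core, and "ready"
 * when a transfer has completed; consumers either return it to "idle" or
 * queue it again once they are done with the data.
 *
 * Locking: list_lock (a spinlock, taken with interrupts disabled) guards
 * the three lists and their counters, since state changes can originate in
 * URB completion context; the mutex serializes pool resizing, stream setup
 * and buffer queueing.
 */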

static const char *pvr2_buffer_state_decode(enum pvr2_buffer_state st)
{
	switch (st) {
	case pvr2_buffer_state_none: return "none";
	case pvr2_buffer_state_idle: return "idle";
	case pvr2_buffer_state_queued: return "queued";
	case pvr2_buffer_state_ready: return "ready";
	}
	return "unknown";
}

#ifdef SANITY_CHECK_BUFFERS
static void pvr2_buffer_describe(struct pvr2_buffer *bp, const char *msg)
{
	pvr2_trace(PVR2_TRACE_INFO,
		   "buffer%s%s %p state=%s id=%d status=%d stream=%p purb=%p sig=0x%x",
		   (msg ? " " : ""),
		   (msg ? msg : ""),
		   bp,
		   (bp ? pvr2_buffer_state_decode(bp->state) : "(invalid)"),
		   (bp ? bp->id : 0),
		   (bp ? bp->status : 0),
		   (bp ? bp->stream : NULL),
		   (bp ? bp->purb : NULL),
		   (bp ? bp->signature : 0));
}
#endif /* SANITY_CHECK_BUFFERS */

static void pvr2_buffer_remove(struct pvr2_buffer *bp)
{
	unsigned int *cnt;
	unsigned int *bcnt;
	unsigned int ccnt;
	struct pvr2_stream *sp = bp->stream;
	switch (bp->state) {
	case pvr2_buffer_state_idle:
		cnt = &sp->i_count;
		bcnt = &sp->i_bcount;
		ccnt = bp->max_count;
		break;
	case pvr2_buffer_state_queued:
		cnt = &sp->q_count;
		bcnt = &sp->q_bcount;
		ccnt = bp->max_count;
		break;
	case pvr2_buffer_state_ready:
		cnt = &sp->r_count;
		bcnt = &sp->r_bcount;
		ccnt = bp->used_count;
		break;
	default:
		return;
	}
	list_del_init(&bp->list_overhead);
	(*cnt)--;
	(*bcnt) -= ccnt;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferPool %8s dec cap=%07d cnt=%02d",
		   pvr2_buffer_state_decode(bp->state), *bcnt, *cnt);
	bp->state = pvr2_buffer_state_none;
}

static void pvr2_buffer_set_none(struct pvr2_buffer *bp)
{
	unsigned long irq_flags;
	struct pvr2_stream *sp;
	BUFFER_CHECK(bp);
	sp = bp->stream;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
		   bp,
		   pvr2_buffer_state_decode(bp->state),
		   pvr2_buffer_state_decode(pvr2_buffer_state_none));
	spin_lock_irqsave(&sp->list_lock, irq_flags);
	pvr2_buffer_remove(bp);
	spin_unlock_irqrestore(&sp->list_lock, irq_flags);
}

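/* Move a buffer onto the ready list.  The return value is non-zero if the
   ready list was empty beforehand, i.e. if this buffer is the one that
   makes data newly available to a consumer. */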
static int pvr2_buffer_set_ready(struct pvr2_buffer *bp)
{
	int fl;
	unsigned long irq_flags;
	struct pvr2_stream *sp;
	BUFFER_CHECK(bp);
	sp = bp->stream;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
		   bp,
		   pvr2_buffer_state_decode(bp->state),
		   pvr2_buffer_state_decode(pvr2_buffer_state_ready));
	spin_lock_irqsave(&sp->list_lock, irq_flags);
	fl = (sp->r_count == 0);
	pvr2_buffer_remove(bp);
	list_add_tail(&bp->list_overhead, &sp->ready_list);
	bp->state = pvr2_buffer_state_ready;
	(sp->r_count)++;
	sp->r_bcount += bp->used_count;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferPool %8s inc cap=%07d cnt=%02d",
		   pvr2_buffer_state_decode(bp->state),
		   sp->r_bcount, sp->r_count);
	spin_unlock_irqrestore(&sp->list_lock, irq_flags);
	return fl;
}

static void pvr2_buffer_set_idle(struct pvr2_buffer *bp)
{
	unsigned long irq_flags;
	struct pvr2_stream *sp;
	BUFFER_CHECK(bp);
	sp = bp->stream;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
		   bp,
		   pvr2_buffer_state_decode(bp->state),
		   pvr2_buffer_state_decode(pvr2_buffer_state_idle));
	spin_lock_irqsave(&sp->list_lock, irq_flags);
	pvr2_buffer_remove(bp);
	list_add_tail(&bp->list_overhead, &sp->idle_list);
	bp->state = pvr2_buffer_state_idle;
	(sp->i_count)++;
	sp->i_bcount += bp->max_count;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferPool %8s inc cap=%07d cnt=%02d",
		   pvr2_buffer_state_decode(bp->state),
		   sp->i_bcount, sp->i_count);
	spin_unlock_irqrestore(&sp->list_lock, irq_flags);
}

static void pvr2_buffer_set_queued(struct pvr2_buffer *bp)
{
	unsigned long irq_flags;
	struct pvr2_stream *sp;
	BUFFER_CHECK(bp);
	sp = bp->stream;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
		   bp,
		   pvr2_buffer_state_decode(bp->state),
		   pvr2_buffer_state_decode(pvr2_buffer_state_queued));
	spin_lock_irqsave(&sp->list_lock, irq_flags);
	pvr2_buffer_remove(bp);
	list_add_tail(&bp->list_overhead, &sp->queued_list);
	bp->state = pvr2_buffer_state_queued;
	(sp->q_count)++;
	sp->q_bcount += bp->max_count;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferPool %8s inc cap=%07d cnt=%02d",
		   pvr2_buffer_state_decode(bp->state),
		   sp->q_bcount, sp->q_count);
	spin_unlock_irqrestore(&sp->list_lock, irq_flags);
}

static void pvr2_buffer_wipe(struct pvr2_buffer *bp)
{
	if (bp->state == pvr2_buffer_state_queued) {
		usb_kill_urb(bp->purb);
	}
}

static int pvr2_buffer_init(struct pvr2_buffer *bp,
			    struct pvr2_stream *sp,
			    unsigned int id)
{
	memset(bp, 0, sizeof(*bp));
	bp->signature = BUFFER_SIG;
	bp->id = id;
	pvr2_trace(PVR2_TRACE_BUF_POOL,
		   "/*---TRACE_FLOW---*/ bufferInit %p stream=%p", bp, sp);
	bp->stream = sp;
	bp->state = pvr2_buffer_state_none;
	INIT_LIST_HEAD(&bp->list_overhead);
	bp->purb = usb_alloc_urb(0, GFP_KERNEL);
	if (!bp->purb) return -ENOMEM;
#ifdef SANITY_CHECK_BUFFERS
	pvr2_buffer_describe(bp, "create");
#endif
	return 0;
}

static void pvr2_buffer_done(struct pvr2_buffer *bp)
{
#ifdef SANITY_CHECK_BUFFERS
	pvr2_buffer_describe(bp, "delete");
#endif
	pvr2_buffer_wipe(bp);
	pvr2_buffer_set_none(bp);
	bp->signature = 0;
	bp->stream = NULL;
	usb_free_urb(bp->purb);
	pvr2_trace(PVR2_TRACE_BUF_POOL, "/*---TRACE_FLOW---*/ bufferDone %p",
		   bp);
}

static int pvr2_stream_buffer_count(struct pvr2_stream *sp, unsigned int cnt)
{
	int ret;
	unsigned int scnt;

	/* Allocate buffers pointer array in multiples of 32 entries */
	if (cnt == sp->buffer_total_count) return 0;

	pvr2_trace(PVR2_TRACE_BUF_POOL,
		   "/*---TRACE_FLOW---*/ poolResize stream=%p cur=%d adj=%+d",
		   sp,
		   sp->buffer_total_count,
		   cnt - sp->buffer_total_count);

	scnt = cnt & ~0x1f;
	if (cnt > scnt) scnt += 0x20;
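	/* e.g. a request for 33 buffers rounds the slot count up to 64,
	   while a request for exactly 32 leaves it at 32 */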

	if (cnt > sp->buffer_total_count) {
		if (scnt > sp->buffer_slot_count) {
			struct pvr2_buffer **nb;

			nb = kmalloc_array(scnt, sizeof(*nb), GFP_KERNEL);
			if (!nb) return -ENOMEM;
			if (sp->buffer_slot_count) {
				memcpy(nb, sp->buffers,
				       sp->buffer_slot_count * sizeof(*nb));
				kfree(sp->buffers);
			}
			sp->buffers = nb;
			sp->buffer_slot_count = scnt;
		}
		while (sp->buffer_total_count < cnt) {
			struct pvr2_buffer *bp;
			bp = kmalloc(sizeof(*bp), GFP_KERNEL);
			if (!bp) return -ENOMEM;
			ret = pvr2_buffer_init(bp, sp, sp->buffer_total_count);
			if (ret) {
				kfree(bp);
				return -ENOMEM;
			}
			sp->buffers[sp->buffer_total_count] = bp;
			(sp->buffer_total_count)++;
			pvr2_buffer_set_idle(bp);
		}
	} else {
		while (sp->buffer_total_count > cnt) {
			struct pvr2_buffer *bp;
			bp = sp->buffers[sp->buffer_total_count - 1];
			/* Paranoia */
			sp->buffers[sp->buffer_total_count - 1] = NULL;
			(sp->buffer_total_count)--;
			pvr2_buffer_done(bp);
			kfree(bp);
		}
		if (scnt < sp->buffer_slot_count) {
			struct pvr2_buffer **nb = NULL;
			if (scnt) {
				nb = kmemdup(sp->buffers, scnt * sizeof(*nb),
					     GFP_KERNEL);
				if (!nb) return -ENOMEM;
			}
			kfree(sp->buffers);
			sp->buffers = nb;
			sp->buffer_slot_count = scnt;
		}
	}
	return 0;
}

static int pvr2_stream_achieve_buffer_count(struct pvr2_stream *sp)
{
	struct pvr2_buffer *bp;
	unsigned int cnt;

	if (sp->buffer_total_count == sp->buffer_target_count) return 0;

	pvr2_trace(PVR2_TRACE_BUF_POOL,
		   "/*---TRACE_FLOW---*/ poolCheck stream=%p cur=%d tgt=%d",
		   sp, sp->buffer_total_count, sp->buffer_target_count);

	if (sp->buffer_total_count < sp->buffer_target_count) {
		return pvr2_stream_buffer_count(sp, sp->buffer_target_count);
	}

	cnt = 0;
	while ((sp->buffer_total_count - cnt) > sp->buffer_target_count) {
		bp = sp->buffers[sp->buffer_total_count - (cnt + 1)];
		if (bp->state != pvr2_buffer_state_idle) break;
		cnt++;
	}
	if (cnt) {
		pvr2_stream_buffer_count(sp, sp->buffer_total_count - cnt);
	}

	return 0;
}

static void pvr2_stream_internal_flush(struct pvr2_stream *sp)
{
	struct list_head *lp;
	struct pvr2_buffer *bp1;
	while ((lp = sp->queued_list.next) != &sp->queued_list) {
		bp1 = list_entry(lp, struct pvr2_buffer, list_overhead);
		pvr2_buffer_wipe(bp1);
		/* At this point, we should be guaranteed that no
		   completion callback may happen on this buffer.  But it's
		   possible that it might have completed after we noticed
		   it but before we wiped it.  So double check its status
		   here first. */
		if (bp1->state != pvr2_buffer_state_queued) continue;
		pvr2_buffer_set_idle(bp1);
	}
	if (sp->buffer_total_count != sp->buffer_target_count) {
		pvr2_stream_achieve_buffer_count(sp);
	}
}

static void pvr2_stream_init(struct pvr2_stream *sp)
{
	spin_lock_init(&sp->list_lock);
	mutex_init(&sp->mutex);
	INIT_LIST_HEAD(&sp->queued_list);
	INIT_LIST_HEAD(&sp->ready_list);
	INIT_LIST_HEAD(&sp->idle_list);
}

static void pvr2_stream_done(struct pvr2_stream *sp)
{
	mutex_lock(&sp->mutex); do {
		pvr2_stream_internal_flush(sp);
		pvr2_stream_buffer_count(sp, 0);
	} while (0); mutex_unlock(&sp->mutex);
}

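/* URB completion handler.  This may run in atomic (interrupt) context, so
   only the list_lock spinlock is taken here, never the stream mutex.  The
   transfer is accounted, the buffer is marked ready, and the stream's
   notification callback, if any, is invoked. */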
static void buffer_complete(struct urb *urb)
{
	struct pvr2_buffer *bp = urb->context;
	struct pvr2_stream *sp;
	unsigned long irq_flags;
	BUFFER_CHECK(bp);
	sp = bp->stream;
	bp->used_count = 0;
	bp->status = 0;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferComplete %p stat=%d cnt=%d",
		   bp, urb->status, urb->actual_length);
	spin_lock_irqsave(&sp->list_lock, irq_flags);
	if ((!(urb->status)) ||
	    (urb->status == -ENOENT) ||
	    (urb->status == -ECONNRESET) ||
	    (urb->status == -ESHUTDOWN)) {
		(sp->buffers_processed)++;
		sp->bytes_processed += urb->actual_length;
		bp->used_count = urb->actual_length;
		if (sp->fail_count) {
			pvr2_trace(PVR2_TRACE_TOLERANCE,
				   "stream %p transfer ok - fail count reset",
				   sp);
			sp->fail_count = 0;
		}
	} else if (sp->fail_count < sp->fail_tolerance) {
		// We can tolerate this error, because we're below the
		// threshold...
		(sp->fail_count)++;
		(sp->buffers_failed)++;
		pvr2_trace(PVR2_TRACE_TOLERANCE,
			   "stream %p ignoring error %d - fail count increased to %u",
			   sp, urb->status, sp->fail_count);
	} else {
		(sp->buffers_failed)++;
		bp->status = urb->status;
	}
	spin_unlock_irqrestore(&sp->list_lock, irq_flags);
	pvr2_buffer_set_ready(bp);
	if (sp->callback_func) {
		sp->callback_func(sp->callback_data);
	}
}

struct pvr2_stream *pvr2_stream_create(void)
{
	struct pvr2_stream *sp;
	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
	if (!sp) return sp;
	pvr2_trace(PVR2_TRACE_INIT, "pvr2_stream_create: sp=%p", sp);
	pvr2_stream_init(sp);
	return sp;
}

void pvr2_stream_destroy(struct pvr2_stream *sp)
{
	if (!sp) return;
	pvr2_trace(PVR2_TRACE_INIT, "pvr2_stream_destroy: sp=%p", sp);
	pvr2_stream_done(sp);
	kfree(sp);
}

void pvr2_stream_setup(struct pvr2_stream *sp,
		       struct usb_device *dev,
		       int endpoint,
		       unsigned int tolerance)
{
	mutex_lock(&sp->mutex); do {
		pvr2_stream_internal_flush(sp);
		sp->dev = dev;
		sp->endpoint = endpoint;
		sp->fail_tolerance = tolerance;
	} while (0); mutex_unlock(&sp->mutex);
}

void pvr2_stream_set_callback(struct pvr2_stream *sp,
			      pvr2_stream_callback func,
			      void *data)
{
	unsigned long irq_flags;
	mutex_lock(&sp->mutex);
	do {
		spin_lock_irqsave(&sp->list_lock, irq_flags);
		sp->callback_data = data;
		sp->callback_func = func;
		spin_unlock_irqrestore(&sp->list_lock, irq_flags);
	} while (0);
	mutex_unlock(&sp->mutex);
}

void pvr2_stream_get_stats(struct pvr2_stream *sp,
			   struct pvr2_stream_stats *stats,
			   int zero_counts)
{
	unsigned long irq_flags;
	spin_lock_irqsave(&sp->list_lock, irq_flags);
	if (stats) {
		stats->buffers_in_queue = sp->q_count;
		stats->buffers_in_idle = sp->i_count;
		stats->buffers_in_ready = sp->r_count;
		stats->buffers_processed = sp->buffers_processed;
		stats->buffers_failed = sp->buffers_failed;
		stats->bytes_processed = sp->bytes_processed;
	}
	if (zero_counts) {
		sp->buffers_processed = 0;
		sp->buffers_failed = 0;
		sp->bytes_processed = 0;
	}
	spin_unlock_irqrestore(&sp->list_lock, irq_flags);
}

/* Query / set the nominal buffer count */
int pvr2_stream_get_buffer_count(struct pvr2_stream *sp)
{
	return sp->buffer_target_count;
}

int pvr2_stream_set_buffer_count(struct pvr2_stream *sp, unsigned int cnt)
{
	int ret;
	if (sp->buffer_target_count == cnt) return 0;
	mutex_lock(&sp->mutex);
	do {
		sp->buffer_target_count = cnt;
		ret = pvr2_stream_achieve_buffer_count(sp);
	} while (0);
	mutex_unlock(&sp->mutex);
	return ret;
}

struct pvr2_buffer *pvr2_stream_get_idle_buffer(struct pvr2_stream *sp)
{
	struct list_head *lp = sp->idle_list.next;
	if (lp == &sp->idle_list) return NULL;
	return list_entry(lp, struct pvr2_buffer, list_overhead);
}

struct pvr2_buffer *pvr2_stream_get_ready_buffer(struct pvr2_stream *sp)
{
	struct list_head *lp = sp->ready_list.next;
	if (lp == &sp->ready_list) return NULL;
	return list_entry(lp, struct pvr2_buffer, list_overhead);
}

struct pvr2_buffer *pvr2_stream_get_buffer(struct pvr2_stream *sp, int id)
{
	if (id < 0) return NULL;
	if (id >= sp->buffer_total_count) return NULL;
	return sp->buffers[id];
}

int pvr2_stream_get_ready_count(struct pvr2_stream *sp)
{
	return sp->r_count;
}

void pvr2_stream_kill(struct pvr2_stream *sp)
{
	struct pvr2_buffer *bp;
	mutex_lock(&sp->mutex);
	do {
		pvr2_stream_internal_flush(sp);
		while ((bp = pvr2_stream_get_ready_buffer(sp)) != NULL) {
			pvr2_buffer_set_idle(bp);
		}
		if (sp->buffer_total_count != sp->buffer_target_count) {
			pvr2_stream_achieve_buffer_count(sp);
		}
	} while (0);
	mutex_unlock(&sp->mutex);
}

int pvr2_buffer_queue(struct pvr2_buffer *bp)
{
#undef SEED_BUFFER
#ifdef SEED_BUFFER
	unsigned int idx;
	unsigned int val;
#endif
	int ret = 0;
	struct pvr2_stream *sp;
	if (!bp) return -EINVAL;
	sp = bp->stream;
	mutex_lock(&sp->mutex);
	do {
		pvr2_buffer_wipe(bp);
		if (!sp->dev) {
			ret = -EIO;
			break;
		}
		pvr2_buffer_set_queued(bp);
#ifdef SEED_BUFFER
		for (idx = 0; idx < (bp->max_count) / 4; idx++) {
			val = bp->id << 24;
			val |= idx;
			((unsigned int *)(bp->ptr))[idx] = val;
		}
#endif
		bp->status = -EINPROGRESS;
		usb_fill_bulk_urb(bp->purb,      // struct urb *urb
				  sp->dev,       // struct usb_device *dev
				  // endpoint (below)
				  usb_rcvbulkpipe(sp->dev, sp->endpoint),
				  bp->ptr,       // void *transfer_buffer
				  bp->max_count, // int buffer_length
				  buffer_complete,
				  bp);
		ret = usb_submit_urb(bp->purb, GFP_KERNEL);
		if (ret) {
			/* Submission failed; return the buffer to the idle
			   pool so it can be reused, and report the error
			   to the caller. */
			pvr2_buffer_set_idle(bp);
		}
	} while (0);
	mutex_unlock(&sp->mutex);
	return ret;
}

int pvr2_buffer_set_buffer(struct pvr2_buffer *bp, void *ptr, unsigned int cnt)
{
	int ret = 0;
	unsigned long irq_flags;
	struct pvr2_stream *sp;
	if (!bp) return -EINVAL;
	sp = bp->stream;
	mutex_lock(&sp->mutex);
	do {
		spin_lock_irqsave(&sp->list_lock, irq_flags);
		if (bp->state != pvr2_buffer_state_idle) {
			ret = -EPERM;
		} else {
			bp->ptr = ptr;
			bp->stream->i_bcount -= bp->max_count;
			bp->max_count = cnt;
			bp->stream->i_bcount += bp->max_count;
			pvr2_trace(PVR2_TRACE_BUF_FLOW,
				   "/*---TRACE_FLOW---*/ bufferPool %8s cap cap=%07d cnt=%02d",
				   pvr2_buffer_state_decode(
					   pvr2_buffer_state_idle),
				   bp->stream->i_bcount, bp->stream->i_count);
		}
		spin_unlock_irqrestore(&sp->list_lock, irq_flags);
	} while (0);
	mutex_unlock(&sp->mutex);
	return ret;
}

unsigned int pvr2_buffer_get_count(struct pvr2_buffer *bp)
{
	return bp->used_count;
}

int pvr2_buffer_get_status(struct pvr2_buffer *bp)
{
	return bp->status;
}

int pvr2_buffer_get_id(struct pvr2_buffer *bp)
{
	return bp->id;
}
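
/*
 * Typical usage, sketched from the interfaces above.  The real consumer
 * lives elsewhere in the pvrusb2 driver, so treat this as an illustration
 * only; my_notify(), consume(), mem and size are placeholder names.
 *
 *	struct pvr2_stream *sp = pvr2_stream_create();
 *	pvr2_stream_setup(sp, usbdev, endpoint, tolerance);
 *	pvr2_stream_set_callback(sp, my_notify, my_data);
 *	pvr2_stream_set_buffer_count(sp, n);
 *	// Attach storage to each idle buffer and queue it:
 *	while ((bp = pvr2_stream_get_idle_buffer(sp)) != NULL) {
 *		pvr2_buffer_set_buffer(bp, mem, size);
 *		pvr2_buffer_queue(bp);
 *	}
 *	// my_notify() runs from URB completion (atomic context), so it
 *	// should only wake a worker; from process context, drain the
 *	// ready list and re-queue each buffer:
 *	while ((bp = pvr2_stream_get_ready_buffer(sp)) != NULL) {
 *		consume(bp, pvr2_buffer_get_count(bp),
 *			pvr2_buffer_get_status(bp));
 *		pvr2_buffer_queue(bp);
 *	}
 *	pvr2_stream_destroy(sp);
 */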