/*
 * 8253/8254 interval timer emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2006 Intel Corporation
 * Copyright (c) 2007 Keir Fraser, XenSource Inc
 * Copyright (c) 2008 Intel Corporation
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Authors:
 *   Sheng Yang <sheng.yang@intel.com>
 *   Based on QEMU and Xen.
 */

#define pr_fmt(fmt) "pit: " fmt

#include <linux/kvm_host.h>
#include <linux/slab.h>

#include "ioapic.h"
#include "irq.h"
#include "i8254.h"
#include "x86.h"

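/*
 * On 32-bit hosts the C '%' operator on u64 operands would expand to a
 * libgcc helper the kernel does not provide, so build the modulo out of
 * div64_u64() there; 64-bit hosts can use the native operator.
 */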
#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

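/*
 * 8254 byte-access sequencing: LSB only, MSB only, or LSB followed by
 * MSB.  The same values are kept in rw_mode/read_state/write_state and,
 * for a latched count, in count_latched to record which bytes are still
 * owed to the guest.
 */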
#define RW_STATE_LSB 1
#define RW_STATE_MSB 2
#define RW_STATE_WORD0 3
#define RW_STATE_WORD1 4

static void pit_set_gate(struct kvm_pit *pit, int channel, u32 val)
{
	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];

	switch (c->mode) {
	default:
	case 0:
	case 4:
		/* XXX: just disable/enable counting */
		break;
	case 1:
	case 2:
	case 3:
	case 5:
		/* Restart counting on rising edge. */
		if (c->gate < val)
			c->count_load_time = ktime_get();
		break;
	}

	c->gate = val;
}

static int pit_get_gate(struct kvm_pit *pit, int channel)
{
	return pit->pit_state.channels[channel].gate;
}

static s64 __kpit_elapsed(struct kvm_pit *pit)
{
	s64 elapsed;
	ktime_t remaining;
	struct kvm_kpit_state *ps = &pit->pit_state;

	if (!ps->period)
		return 0;

	/*
	 * The Counter does not stop when it reaches zero. In
	 * Modes 0, 1, 4, and 5 the Counter ``wraps around'' to
	 * the highest count, either FFFF hex for binary counting
	 * or 9999 for BCD counting, and continues counting.
	 * Modes 2 and 3 are periodic; the Counter reloads
	 * itself with the initial count and continues counting
	 * from there.
	 */
	remaining = hrtimer_get_remaining(&ps->timer);
	elapsed = ps->period - ktime_to_ns(remaining);

	return elapsed;
}

static s64 kpit_elapsed(struct kvm_pit *pit, struct kvm_kpit_channel_state *c,
			int channel)
{
	if (channel == 0)
		return __kpit_elapsed(pit);

	return ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
}

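/*
 * Convert elapsed time into PIT ticks: d = elapsed_ns * KVM_PIT_FREQ /
 * NSEC_PER_SEC.  The 8254 input clock runs at about 1.193182 MHz, so one
 * tick is roughly 838 ns and 1 ms of elapsed time is roughly 1193 ticks.
 */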
static int pit_get_count(struct kvm_pit *pit, int channel)
{
	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
	s64 d, t;
	int counter;

	t = kpit_elapsed(pit, c, channel);
	d = mul_u64_u32_div(t, KVM_PIT_FREQ, NSEC_PER_SEC);

	switch (c->mode) {
	case 0:
	case 1:
	case 4:
	case 5:
		counter = (c->count - d) & 0xffff;
		break;
	case 3:
		/* XXX: may be incorrect for odd counts */
		counter = c->count - (mod_64((2 * d), c->count));
		break;
	default:
		counter = c->count - mod_64(d, c->count);
		break;
	}
	return counter;
}

static int pit_get_out(struct kvm_pit *pit, int channel)
{
	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
	s64 d, t;
	int out;

	t = kpit_elapsed(pit, c, channel);
	d = mul_u64_u32_div(t, KVM_PIT_FREQ, NSEC_PER_SEC);

	switch (c->mode) {
	default:
	case 0:
		out = (d >= c->count);
		break;
	case 1:
		out = (d < c->count);
		break;
	case 2:
		out = ((mod_64(d, c->count) == 0) && (d != 0));
		break;
	case 3:
		out = (mod_64(d, c->count) < ((c->count + 1) >> 1));
		break;
	case 4:
	case 5:
		out = (d == c->count);
		break;
	}

	return out;
}

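/*
 * A latch command freezes the current count until the guest reads it
 * back; relatching while a previous latch is unread is a no-op.  The
 * channel's rw_mode is saved in count_latched so the read side knows
 * how many bytes to deliver.
 */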
static void pit_latch_count(struct kvm_pit *pit, int channel)
{
	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];

	if (!c->count_latched) {
		c->latched_count = pit_get_count(pit, channel);
		c->count_latched = c->rw_mode;
	}
}

static void pit_latch_status(struct kvm_pit *pit, int channel)
{
	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];

	if (!c->status_latched) {
		/* TODO: Return NULL COUNT (bit 6). */
		c->status = ((pit_get_out(pit, channel) << 7) |
			     (c->rw_mode << 4) |
			     (c->mode << 1) |
			     c->bcd);
		c->status_latched = 1;
	}
}

static inline struct kvm_pit *pit_state_to_pit(struct kvm_kpit_state *ps)
{
	return container_of(ps, struct kvm_pit, pit_state);
}

static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
						 irq_ack_notifier);
	struct kvm_pit *pit = pit_state_to_pit(ps);

	atomic_set(&ps->irq_ack, 1);
	/* irq_ack should be set before pending is read. Order accesses with
	 * inc(pending) in pit_timer_fn and xchg(irq_ack, 0) in pit_do_work.
	 */
	smp_mb();
	if (atomic_dec_if_positive(&ps->pending) > 0)
		kthread_queue_work(pit->worker, &pit->expired);
}

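/*
 * Called when the BSP is migrated to another physical CPU: cancel and
 * restart the hrtimer so it is re-armed on the CPU the vCPU now runs on.
 */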
void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
	struct hrtimer *timer;

	if (!kvm_vcpu_is_bsp(vcpu) || !pit)
		return;

	timer = &pit->pit_state.timer;
	mutex_lock(&pit->pit_state.lock);
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
	mutex_unlock(&pit->pit_state.lock);
}

static void destroy_pit_timer(struct kvm_pit *pit)
{
	hrtimer_cancel(&pit->pit_state.timer);
	kthread_flush_work(&pit->expired);
}

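/*
 * Injects one PIT tick.  In reinject mode a tick is only delivered once
 * the previous interrupt has been acknowledged; missed ticks accumulate
 * in ps->pending and are replayed from the irq ack notifier.
 */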
static void pit_do_work(struct kthread_work *work)
{
	struct kvm_pit *pit = container_of(work, struct kvm_pit, expired);
	struct kvm *kvm = pit->kvm;
	struct kvm_vcpu *vcpu;
	int i;
	struct kvm_kpit_state *ps = &pit->pit_state;

	if (atomic_read(&ps->reinject) && !atomic_xchg(&ps->irq_ack, 0))
		return;

	kvm_set_irq(kvm, pit->irq_source_id, 0, 1, false);
	kvm_set_irq(kvm, pit->irq_source_id, 0, 0, false);

	/*
	 * Provides NMI watchdog support via Virtual Wire mode.
	 * The route is: PIT -> LVT0 in NMI mode.
	 *
	 * Note: Our Virtual Wire implementation does not follow
	 * the MP specification. We propagate a PIT interrupt to all
	 * VCPUs and only when LVT0 is in NMI mode. The interrupt can
	 * also be simultaneously delivered through PIC and IOAPIC.
	 */
	if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvm_apic_nmi_wd_deliver(vcpu);
}

static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
{
	struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer);
	struct kvm_pit *pt = pit_state_to_pit(ps);

	if (atomic_read(&ps->reinject))
		atomic_inc(&ps->pending);

	kthread_queue_work(pt->worker, &pt->expired);

	if (ps->is_periodic) {
		hrtimer_add_expires_ns(&ps->timer, ps->period);
		return HRTIMER_RESTART;
	} else
		return HRTIMER_NORESTART;
}

static inline void kvm_pit_reset_reinject(struct kvm_pit *pit)
{
	atomic_set(&pit->pit_state.pending, 0);
	atomic_set(&pit->pit_state.irq_ack, 1);
}

void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject)
{
	struct kvm_kpit_state *ps = &pit->pit_state;
	struct kvm *kvm = pit->kvm;

	if (atomic_read(&ps->reinject) == reinject)
		return;

	/*
	 * AMD SVM AVIC accelerates EOI write and does not trap.
	 * This causes in-kernel PIT re-inject mode to fail
	 * since it checks ps->irq_ack before kvm_set_irq()
	 * and relies on the ack notifier to timely queue
	 * the pt->worker work item and reinject the missed tick.
	 * So, deactivate APICv when PIT is in reinject mode.
	 */
	if (reinject) {
		kvm_request_apicv_update(kvm, false,
					 APICV_INHIBIT_REASON_PIT_REINJ);
		/* The initial state is preserved while ps->reinject == 0. */
		kvm_pit_reset_reinject(pit);
		kvm_register_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
		kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
	} else {
		kvm_request_apicv_update(kvm, true,
					 APICV_INHIBIT_REASON_PIT_REINJ);
		kvm_unregister_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
		kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
	}

	atomic_set(&ps->reinject, reinject);
}

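/*
 * Arm the channel 0 hrtimer for 'val' PIT ticks, i.e. interval = val *
 * NSEC_PER_SEC / KVM_PIT_FREQ.  For example, val = 11932 yields an
 * interval of about 10 ms, the classic 100 Hz guest tick.
 */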
static void create_pit_timer(struct kvm_pit *pit, u32 val, int is_period)
{
	struct kvm_kpit_state *ps = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	s64 interval;

	if (!ioapic_in_kernel(kvm) ||
	    ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)
		return;

	interval = mul_u64_u32_div(val, NSEC_PER_SEC, KVM_PIT_FREQ);

	pr_debug("create pit timer, interval is %llu nsec\n", interval);

	/* TODO: the new value should only take effect after the counter is retriggered. */
	hrtimer_cancel(&ps->timer);
	kthread_flush_work(&pit->expired);
	ps->period = interval;
	ps->is_periodic = is_period;

	kvm_pit_reset_reinject(pit);

	/*
	 * Do not allow the guest to program periodic timers with small
	 * interval, since the hrtimers are not throttled by the host
	 * scheduler.
	 */
	if (ps->is_periodic) {
		s64 min_period = min_timer_period_us * 1000LL;

		if (ps->period < min_period) {
			pr_info_ratelimited(
			    "kvm: requested %lld ns "
			    "i8254 timer period limited to %lld ns\n",
			    ps->period, min_period);
			ps->period = min_period;
		}
	}

	hrtimer_start(&ps->timer, ktime_add_ns(ktime_get(), interval),
		      HRTIMER_MODE_ABS);
}

static void pit_load_count(struct kvm_pit *pit, int channel, u32 val)
{
	struct kvm_kpit_state *ps = &pit->pit_state;

	pr_debug("load_count val is %u, channel is %d\n", val, channel);

	/*
	 * The largest possible initial count is 0; this is equivalent
	 * to 2^16 for binary counting and 10^4 for BCD counting.
	 */
	if (val == 0)
		val = 0x10000;

	ps->channels[channel].count = val;

	if (channel != 0) {
		ps->channels[channel].count_load_time = ktime_get();
		return;
	}

	/*
	 * Two kinds of timer: modes 0, 1 and 4 arm a one-shot timer,
	 * modes 2 and 3 a periodic one; any other mode deletes the timer.
	 */
	switch (ps->channels[0].mode) {
	case 0:
	case 1:
	/* FIXME: enhance mode 4 precision */
	case 4:
		create_pit_timer(pit, val, 0);
		break;
	case 2:
	case 3:
		create_pit_timer(pit, val, 1);
		break;
	default:
		destroy_pit_timer(pit);
	}
}

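/*
 * With hpet_legacy_start set, HPET legacy replacement mode has taken
 * over the PIT interrupt: record the count but force the channel mode
 * to 0xff around the load so no hrtimer is (re)armed.
 */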
void kvm_pit_load_count(struct kvm_pit *pit, int channel, u32 val,
			int hpet_legacy_start)
{
	u8 saved_mode;

	WARN_ON_ONCE(!mutex_is_locked(&pit->pit_state.lock));

	if (hpet_legacy_start) {
		/* save existing mode for later reenablement */
		WARN_ON(channel != 0);
		saved_mode = pit->pit_state.channels[0].mode;
		pit->pit_state.channels[0].mode = 0xff; /* disable timer */
		pit_load_count(pit, channel, val);
		pit->pit_state.channels[0].mode = saved_mode;
	} else {
		pit_load_count(pit, channel, val);
	}
}

static inline struct kvm_pit *dev_to_pit(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_pit, dev);
}

static inline struct kvm_pit *speaker_to_pit(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_pit, speaker_dev);
}

static inline int pit_in_range(gpa_t addr)
{
	return ((addr >= KVM_PIT_BASE_ADDRESS) &&
		(addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
}

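/*
 * Port 0x43 control word: bits 7-6 select the counter (3 = read-back),
 * bits 5-4 the access mode (0 latches the count), bits 3-1 the counting
 * mode, bit 0 binary/BCD.  A read-back command instead uses bits 3-1 as
 * a counter bitmask, with bit 5 clear latching counts and bit 4 clear
 * latching status.  E.g. writing 0x34 selects counter 0, lobyte/hibyte
 * access, mode 2, binary counting.
 */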
static int pit_ioport_write(struct kvm_vcpu *vcpu,
			    struct kvm_io_device *this,
			    gpa_t addr, int len, const void *data)
{
	struct kvm_pit *pit = dev_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	int channel, access;
	struct kvm_kpit_channel_state *s;
	u32 val = *(u32 *) data;
	if (!pit_in_range(addr))
		return -EOPNOTSUPP;

	val &= 0xff;
	addr &= KVM_PIT_CHANNEL_MASK;

	mutex_lock(&pit_state->lock);

	if (val != 0)
		pr_debug("write addr is 0x%x, len is %d, val is 0x%x\n",
			 (unsigned int)addr, len, val);

	if (addr == 3) {
		channel = val >> 6;
		if (channel == 3) {
			/* Read-Back Command. */
			for (channel = 0; channel < 3; channel++) {
				if (val & (2 << channel)) {
					if (!(val & 0x20))
						pit_latch_count(pit, channel);
					if (!(val & 0x10))
						pit_latch_status(pit, channel);
				}
			}
		} else {
			/* Select Counter <channel>. */
			s = &pit_state->channels[channel];
			access = (val >> 4) & KVM_PIT_CHANNEL_MASK;
			if (access == 0) {
				pit_latch_count(pit, channel);
			} else {
				s->rw_mode = access;
				s->read_state = access;
				s->write_state = access;
				s->mode = (val >> 1) & 7;
				if (s->mode > 5)
					s->mode -= 4;
				s->bcd = val & 1;
			}
		}
	} else {
		/* Write Count. */
		s = &pit_state->channels[addr];
		switch (s->write_state) {
		default:
		case RW_STATE_LSB:
			pit_load_count(pit, addr, val);
			break;
		case RW_STATE_MSB:
			pit_load_count(pit, addr, val << 8);
			break;
		case RW_STATE_WORD0:
			s->write_latch = val;
			s->write_state = RW_STATE_WORD1;
			break;
		case RW_STATE_WORD1:
			pit_load_count(pit, addr, s->write_latch | (val << 8));
			s->write_state = RW_STATE_WORD0;
			break;
		}
	}

	mutex_unlock(&pit_state->lock);
	return 0;
}

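/*
 * Reads are served in priority order: a latched status byte first, then
 * any latched count bytes, and only then the live counter according to
 * the channel's read_state sequencing.
 */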
static int pit_ioport_read(struct kvm_vcpu *vcpu,
			   struct kvm_io_device *this,
			   gpa_t addr, int len, void *data)
{
	struct kvm_pit *pit = dev_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	int ret, count;
	struct kvm_kpit_channel_state *s;
	if (!pit_in_range(addr))
		return -EOPNOTSUPP;

	addr &= KVM_PIT_CHANNEL_MASK;
	if (addr == 3)
		return 0;

	s = &pit_state->channels[addr];

	mutex_lock(&pit_state->lock);

	if (s->status_latched) {
		s->status_latched = 0;
		ret = s->status;
	} else if (s->count_latched) {
		switch (s->count_latched) {
		default:
		case RW_STATE_LSB:
			ret = s->latched_count & 0xff;
			s->count_latched = 0;
			break;
		case RW_STATE_MSB:
			ret = s->latched_count >> 8;
			s->count_latched = 0;
			break;
		case RW_STATE_WORD0:
			ret = s->latched_count & 0xff;
			s->count_latched = RW_STATE_MSB;
			break;
		}
	} else {
		switch (s->read_state) {
		default:
		case RW_STATE_LSB:
			count = pit_get_count(pit, addr);
			ret = count & 0xff;
			break;
		case RW_STATE_MSB:
			count = pit_get_count(pit, addr);
			ret = (count >> 8) & 0xff;
			break;
		case RW_STATE_WORD0:
			count = pit_get_count(pit, addr);
			ret = count & 0xff;
			s->read_state = RW_STATE_WORD1;
			break;
		case RW_STATE_WORD1:
			count = pit_get_count(pit, addr);
			ret = (count >> 8) & 0xff;
			s->read_state = RW_STATE_WORD0;
			break;
		}
	}

	if (len > sizeof(ret))
		len = sizeof(ret);
	memcpy(data, (char *)&ret, len);

	mutex_unlock(&pit_state->lock);
	return 0;
}

static int speaker_ioport_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *this,
				gpa_t addr, int len, const void *data)
{
	struct kvm_pit *pit = speaker_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	u32 val = *(u32 *) data;
	if (addr != KVM_SPEAKER_BASE_ADDRESS)
		return -EOPNOTSUPP;

	mutex_lock(&pit_state->lock);
	pit_state->speaker_data_on = (val >> 1) & 1;
	pit_set_gate(pit, 2, val & 1);
	mutex_unlock(&pit_state->lock);
	return 0;
}

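/*
 * Port 0x61 read-back: bit 0 is the channel 2 gate, bit 1 the speaker
 * data enable, bit 4 the (approximated) DRAM refresh clock and bit 5
 * the OUT pin of channel 2.
 */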
static int speaker_ioport_read(struct kvm_vcpu *vcpu,
			       struct kvm_io_device *this,
			       gpa_t addr, int len, void *data)
{
	struct kvm_pit *pit = speaker_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	unsigned int refresh_clock;
	int ret;
	if (addr != KVM_SPEAKER_BASE_ADDRESS)
		return -EOPNOTSUPP;

	/* Refresh clock toggles at about 15us. We approximate as 2^14ns. */
	refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;

	mutex_lock(&pit_state->lock);
	ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(pit, 2) |
	       (pit_get_out(pit, 2) << 5) | (refresh_clock << 4));
	if (len > sizeof(ret))
		len = sizeof(ret);
	memcpy(data, (char *)&ret, len);
	mutex_unlock(&pit_state->lock);
	return 0;
}

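/*
 * Power-on state: every channel gets an undefined mode (0xff) and a full
 * 0x10000 count.  The gates of channels 0 and 1 are tied high; channel
 * 2's gate is driven through port 0x61 and starts low.
 */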
static void kvm_pit_reset(struct kvm_pit *pit)
{
	int i;
	struct kvm_kpit_channel_state *c;

	pit->pit_state.flags = 0;
	for (i = 0; i < 3; i++) {
		c = &pit->pit_state.channels[i];
		c->mode = 0xff;
		c->gate = (i != 2);
		pit_load_count(pit, i, 0);
	}

	kvm_pit_reset_reinject(pit);
}

static void pit_mask_notifier(struct kvm_irq_mask_notifier *kimn, bool mask)
{
	struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);

	if (!mask)
		kvm_pit_reset_reinject(pit);
}

static const struct kvm_io_device_ops pit_dev_ops = {
	.read = pit_ioport_read,
	.write = pit_ioport_write,
};

static const struct kvm_io_device_ops speaker_dev_ops = {
	.read = speaker_ioport_read,
	.write = speaker_ioport_write,
};

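/*
 * Instantiate the in-kernel PIT (reached from the KVM_CREATE_PIT2 ioctl
 * path): set up the worker that injects ticks, register the I/O devices
 * and start out in reinject mode.
 */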
struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
{
	struct kvm_pit *pit;
	struct kvm_kpit_state *pit_state;
	struct pid *pid;
	pid_t pid_nr;
	int ret;

	pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL_ACCOUNT);
	if (!pit)
		return NULL;

	pit->irq_source_id = kvm_request_irq_source_id(kvm);
	if (pit->irq_source_id < 0)
		goto fail_request;

	mutex_init(&pit->pit_state.lock);

	pid = get_pid(task_tgid(current));
	pid_nr = pid_vnr(pid);
	put_pid(pid);

	pit->worker = kthread_create_worker(0, "kvm-pit/%d", pid_nr);
	if (IS_ERR(pit->worker))
		goto fail_kthread;

	kthread_init_work(&pit->expired, pit_do_work);

	pit->kvm = kvm;

	pit_state = &pit->pit_state;
	hrtimer_init(&pit_state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	pit_state->timer.function = pit_timer_fn;

	pit_state->irq_ack_notifier.gsi = 0;
	pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq;
	pit->mask_notifier.func = pit_mask_notifier;

	kvm_pit_reset(pit);

	kvm_pit_set_reinject(pit, true);

	mutex_lock(&kvm->slots_lock);
	kvm_iodevice_init(&pit->dev, &pit_dev_ops);
	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, KVM_PIT_BASE_ADDRESS,
				      KVM_PIT_MEM_LENGTH, &pit->dev);
	if (ret < 0)
		goto fail_register_pit;

	if (flags & KVM_PIT_SPEAKER_DUMMY) {
		kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops);
		ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS,
					      KVM_SPEAKER_BASE_ADDRESS, 4,
					      &pit->speaker_dev);
		if (ret < 0)
			goto fail_register_speaker;
	}
	mutex_unlock(&kvm->slots_lock);

	return pit;

fail_register_speaker:
	kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);
fail_register_pit:
	mutex_unlock(&kvm->slots_lock);
	kvm_pit_set_reinject(pit, false);
	kthread_destroy_worker(pit->worker);
fail_kthread:
	kvm_free_irq_source_id(kvm, pit->irq_source_id);
fail_request:
	kfree(pit);
	return NULL;
}

void kvm_free_pit(struct kvm *kvm)
{
	struct kvm_pit *pit = kvm->arch.vpit;

	if (pit) {
		mutex_lock(&kvm->slots_lock);
		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);
		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->speaker_dev);
		mutex_unlock(&kvm->slots_lock);
		kvm_pit_set_reinject(pit, false);
		hrtimer_cancel(&pit->pit_state.timer);
		kthread_destroy_worker(pit->worker);
		kvm_free_irq_source_id(kvm, pit->irq_source_id);
		kfree(pit);
	}
}