// SPDX-License-Identifier: GPL-2.0
/*
 * HMM stands for Heterogeneous Memory Management; it is a helper layer inside
 * the Linux kernel that helps device drivers mirror a process address space
 * on the device. This allows the device to use the same address space as the
 * CPU, which makes communication and data exchange a lot easier.
 *
 * This framework's sole purpose is to exercise various code paths inside
 * the kernel to make sure that HMM performs as expected and to flush out any
 * bugs.
 */

#include "../kselftest_harness.h"

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <strings.h>
#include <time.h>
#include <pthread.h>
#include <hugetlbfs.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/wait.h>	/* waitpid() and WIFEXITED() are used below */

/*
 * This is a private UAPI to the kernel test module so it isn't exported
 * in the usual include/uapi/... directory.
 */
#include "../../../../lib/test_hmm_uapi.h"

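/*
 * Per-test buffer state: ptr is the CPU mapping under test, mirror is an
 * ordinary malloc() buffer standing in for device memory, and cpages/faults
 * are reported back by the dmirror driver after each command.
 */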
struct hmm_buffer {
	void *ptr;
	void *mirror;
	unsigned long size;
	int fd;
	uint64_t cpages;
	uint64_t faults;
};

#define TWOMEG (1 << 21)
#define HMM_BUFFER_SIZE (1024 << 12)
#define HMM_PATH_MAX 64
#define NTIMES 10

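/* Round x up to a multiple of a; a must be a power of two. */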
#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

FIXTURE(hmm)
{
	int fd;
	unsigned int page_size;
	unsigned int page_shift;
};

FIXTURE(hmm2)
{
	int fd0;
	int fd1;
	unsigned int page_size;
	unsigned int page_shift;
};

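/*
 * Open one of the /dev/hmm_dmirror<unit> character devices provided by the
 * test_hmm kernel module (CONFIG_TEST_HMM).
 */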
static int hmm_open(int unit)
{
	char pathname[HMM_PATH_MAX];
	int fd;

	snprintf(pathname, sizeof(pathname), "/dev/hmm_dmirror%d", unit);
	fd = open(pathname, O_RDWR, 0);
	if (fd < 0)
		fprintf(stderr, "could not open hmm dmirror driver (%s)\n",
			pathname);
	return fd;
}

FIXTURE_SETUP(hmm)
{
	self->page_size = sysconf(_SC_PAGE_SIZE);
	self->page_shift = ffs(self->page_size) - 1;

	self->fd = hmm_open(0);
	ASSERT_GE(self->fd, 0);
}

FIXTURE_SETUP(hmm2)
{
	self->page_size = sysconf(_SC_PAGE_SIZE);
	self->page_shift = ffs(self->page_size) - 1;

	self->fd0 = hmm_open(0);
	ASSERT_GE(self->fd0, 0);
	self->fd1 = hmm_open(1);
	ASSERT_GE(self->fd1, 0);
}

FIXTURE_TEARDOWN(hmm)
{
	int ret = close(self->fd);

	ASSERT_EQ(ret, 0);
	self->fd = -1;
}

FIXTURE_TEARDOWN(hmm2)
{
	int ret = close(self->fd0);

	ASSERT_EQ(ret, 0);
	self->fd0 = -1;

	ret = close(self->fd1);
	ASSERT_EQ(ret, 0);
	self->fd1 = -1;
}

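/*
 * Issue one dmirror ioctl: describe the buffer (address, mirror pointer,
 * page count) to the driver, retry if interrupted by a signal, and copy
 * back how many pages were touched and how many faults were taken.
 */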
static int hmm_dmirror_cmd(int fd,
			   unsigned long request,
			   struct hmm_buffer *buffer,
			   unsigned long npages)
{
	struct hmm_dmirror_cmd cmd;
	int ret;

	/* Describe the buffer and mirror to the driver. */
	cmd.addr = (__u64)buffer->ptr;
	cmd.ptr = (__u64)buffer->mirror;
	cmd.npages = npages;

	for (;;) {
		ret = ioctl(fd, request, &cmd);
		if (ret == 0)
			break;
		if (errno == EINTR)
			continue;
		return -errno;
	}
	buffer->cpages = cmd.cpages;
	buffer->faults = cmd.faults;

	return 0;
}

static void hmm_buffer_free(struct hmm_buffer *buffer)
{
	if (buffer == NULL)
		return;

	if (buffer->ptr)
		munmap(buffer->ptr, buffer->size);
	free(buffer->mirror);
	free(buffer);
}

/*
 * Create a temporary file that will be deleted on close.
 */
static int hmm_create_file(unsigned long size)
{
	char path[HMM_PATH_MAX];
	int fd;

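	/*
	 * O_TMPFILE creates an unnamed file in the given directory, so
	 * nothing is left behind even if the test crashes.
	 */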
	strcpy(path, "/tmp");
	fd = open(path, O_TMPFILE | O_EXCL | O_RDWR, 0600);
	if (fd >= 0) {
		int r;

		do {
			r = ftruncate(fd, size);
		} while (r == -1 && errno == EINTR);
		if (!r)
			return fd;
		close(fd);
	}
	return -1;
}

/*
 * Return a random unsigned number.
 */
static unsigned int hmm_random(void)
{
	static int fd = -1;
	unsigned int r;

	if (fd < 0) {
		fd = open("/dev/urandom", O_RDONLY);
		if (fd < 0) {
			fprintf(stderr, "%s:%d failed to open /dev/urandom\n",
				__FILE__, __LINE__);
			return ~0U;
		}
	}
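	/* Ignore short reads; any leftover bits just make r less random. */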
	read(fd, &r, sizeof(r));
	return r;
}

static void hmm_nanosleep(unsigned int n)
{
	struct timespec t;

	t.tv_sec = 0;
	t.tv_nsec = n;
	nanosleep(&t, NULL);
}

/*
 * Simple NULL test of device open/close.
 */
TEST_F(hmm, open_close)
{
}

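/*
 * Most tests below follow the same pattern: map a test buffer, allocate a
 * same-sized mirror buffer standing in for device memory, drive the dmirror
 * driver with hmm_dmirror_cmd(), and then verify both sides.
 */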
/*
 * Read private anonymous memory.
 */
TEST_F(hmm, anon_read)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int val;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/*
	 * Initialize buffer in system memory but leave the first two pages
	 * zero (pte_none and pfn_zero).
	 */
	i = 2 * self->page_size / sizeof(*ptr);
	for (ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Set buffer permission to read-only. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Populate the CPU page table with a special zero page. */
	val = *(int *)(buffer->ptr + self->page_size);
	ASSERT_EQ(val, 0);

	/* Simulate a device reading system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device read. */
	ptr = buffer->mirror;
	for (i = 0; i < 2 * self->page_size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], 0);
	for (; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Read private anonymous memory which has been protected with
 * mprotect() PROT_NONE.
 */
TEST_F(hmm, anon_read_prot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize mirror buffer so we can verify it isn't written. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	/* Protect buffer from reading. */
	ret = mprotect(buffer->ptr, size, PROT_NONE);
	ASSERT_EQ(ret, 0);

	/* Simulate a device reading system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, -EFAULT);

	/* Allow CPU to read the buffer so we can check it. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Check that the device did not read anything: the mirror is unchanged. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	hmm_buffer_free(buffer);
}

/*
 * Write private anonymous memory.
 */
TEST_F(hmm, anon_write)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Write private anonymous memory which has been protected with
 * mprotect() PROT_READ.
 */
TEST_F(hmm, anon_write_prot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/*
	 * Simulate a device reading a zero page of memory so the device
	 * ends up with a read-only mapping of the buffer.
	 */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);
	ASSERT_EQ(buffer->faults, 1);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, -EPERM);

	/* Check that the device wrote nothing: the buffer still reads as zero. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], 0);

	/* Now allow writing and see that the zero page is replaced. */
	ret = mprotect(buffer->ptr, size, PROT_WRITE | PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Check that a device writing an anonymous private mapping
 * will copy-on-write if a child process inherits the mapping.
 */
TEST_F(hmm, anon_write_child)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	pid_t pid;
	int child_fd;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer->ptr so we can tell if it is written. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	pid = fork();
	if (pid == -1)
		ASSERT_EQ(pid, 0);
	if (pid != 0) {
		waitpid(pid, &ret, 0);
		ASSERT_EQ(WIFEXITED(ret), 1);

		/* Check that the parent's buffer did not change. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i);
		return;
	}

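	/* Everything from here on runs in the child process. */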
	/* Check that we see the parent's values. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	/* The child process needs its own mirror to its own mm. */
	child_fd = hmm_open(0);
	ASSERT_GE(child_fd, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	close(child_fd);
	exit(0);
}

/*
 * Check that a device writing an anonymous shared mapping
 * will not copy-on-write if a child process inherits the mapping.
 */
TEST_F(hmm, anon_write_child_shared)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	pid_t pid;
	int child_fd;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer->ptr so we can tell if it is written. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	pid = fork();
	if (pid == -1)
		ASSERT_EQ(pid, 0);
	if (pid != 0) {
		waitpid(pid, &ret, 0);
		ASSERT_EQ(WIFEXITED(ret), 1);

		/* Check that the parent's buffer did change. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], -i);
		return;
	}

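	/* Everything from here on runs in the child process. */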
	/* Check that we see the parent's values. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	/* The child process needs its own mirror to its own mm. */
	child_fd = hmm_open(0);
	ASSERT_GE(child_fd, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	close(child_fd);
	exit(0);
}

/*
 * Write private anonymous huge page.
 */
TEST_F(hmm, anon_write_huge)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	void *old_ptr;
	void *map;
	int *ptr;
	int ret;

	size = 2 * TWOMEG;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

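	/*
	 * The mmap() above allocated twice the needed size so a naturally
	 * aligned 2MB window can be carved out of it for MADV_HUGEPAGE.
	 */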
	size = TWOMEG;
	npages = size >> self->page_shift;
	map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
	ret = madvise(map, size, MADV_HUGEPAGE);
	ASSERT_EQ(ret, 0);
	old_ptr = buffer->ptr;
	buffer->ptr = map;

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	buffer->ptr = old_ptr;
	hmm_buffer_free(buffer);
}

/*
 * Write huge TLBFS page.
 */
TEST_F(hmm, anon_write_hugetlbfs)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	long pagesizes[4];
	int n, idx;

	/* Skip test if we can't allocate a hugetlbfs page. */

	n = gethugepagesizes(pagesizes, 4);
	if (n <= 0)
		SKIP(return, "Huge page size could not be determined");
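	/* Pick the smallest huge page size the system supports. */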
	for (idx = 0; --n > 0; ) {
		if (pagesizes[n] < pagesizes[idx])
			idx = n;
	}
	size = ALIGN(TWOMEG, pagesizes[idx]);
	npages = size >> self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->ptr = get_hugepage_region(size, GHR_STRICT);
	if (buffer->ptr == NULL) {
		free(buffer);
		SKIP(return, "Huge page could not be allocated");
	}

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	free_hugepage_region(buffer->ptr);
	buffer->ptr = NULL;
	hmm_buffer_free(buffer);
}

/*
 * Read mmap'ed file memory.
 */
TEST_F(hmm, file_read)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int fd;
	ssize_t len;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	fd = hmm_create_file(size);
	ASSERT_GE(fd, 0);

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = fd;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Write initial contents of the file. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;
	len = pwrite(fd, buffer->mirror, size, 0);
	ASSERT_EQ(len, size);
	memset(buffer->mirror, 0, size);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ,
			   MAP_SHARED,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Simulate a device reading system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Write mmap'ed file memory.
 */
TEST_F(hmm, file_write)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int fd;
	ssize_t len;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	fd = hmm_create_file(size);
	ASSERT_GE(fd, 0);

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = fd;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Check that the device also wrote the file. */
	len = pread(fd, buffer->mirror, size, 0);
	ASSERT_EQ(len, size);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Migrate anonymous memory to device private memory.
 */
TEST_F(hmm, migrate)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to device. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * Migrate anonymous memory to device private memory and fault some of it back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) * to system memory, then try migrating the resulting mix of system and device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * private memory to the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) TEST_F(hmm, migrate_fault)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) struct hmm_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) unsigned long npages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) unsigned long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) int *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) ASSERT_NE(npages, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) size = npages << self->page_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) buffer = malloc(sizeof(*buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) ASSERT_NE(buffer, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) buffer->fd = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) buffer->size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) buffer->mirror = malloc(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) ASSERT_NE(buffer->mirror, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) buffer->ptr = mmap(NULL, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) PROT_READ | PROT_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) MAP_PRIVATE | MAP_ANONYMOUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) buffer->fd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) ASSERT_NE(buffer->ptr, MAP_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) /* Initialize buffer in system memory. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) ptr[i] = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) /* Migrate memory to device. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) ASSERT_EQ(ret, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) ASSERT_EQ(buffer->cpages, npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) /* Check what the device read. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) ASSERT_EQ(ptr[i], i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) /* Fault half the pages back to system memory and check them. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) for (i = 0, ptr = buffer->ptr; i < size / (2 * sizeof(*ptr)); ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) ASSERT_EQ(ptr[i], i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) /* Migrate memory to the device again. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) ASSERT_EQ(ret, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) ASSERT_EQ(buffer->cpages, npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) /* Check what the device read. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) ASSERT_EQ(ptr[i], i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) hmm_buffer_free(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
/*
 * Try to migrate anonymous shared memory to device private memory.
 * This is expected to fail, since shared anonymous memory cannot be
 * migrated to device private memory.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) TEST_F(hmm, migrate_shared)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) struct hmm_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) unsigned long npages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) unsigned long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) ASSERT_NE(npages, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) size = npages << self->page_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) buffer = malloc(sizeof(*buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) ASSERT_NE(buffer, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) buffer->fd = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) buffer->size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) buffer->mirror = malloc(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) ASSERT_NE(buffer->mirror, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) buffer->ptr = mmap(NULL, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) PROT_READ | PROT_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) MAP_SHARED | MAP_ANONYMOUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) buffer->fd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) ASSERT_NE(buffer->ptr, MAP_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
	/* Attempting to migrate a shared mapping to the device must fail. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) ASSERT_EQ(ret, -ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) hmm_buffer_free(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * Try to migrate various memory types to device private memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) TEST_F(hmm2, migrate_mixed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) struct hmm_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) unsigned long npages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) unsigned long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) int *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) unsigned char *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) npages = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) size = npages << self->page_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) buffer = malloc(sizeof(*buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) ASSERT_NE(buffer, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) buffer->fd = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) buffer->size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) buffer->mirror = malloc(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) ASSERT_NE(buffer->mirror, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) /* Reserve a range of addresses. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) buffer->ptr = mmap(NULL, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) PROT_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) MAP_PRIVATE | MAP_ANONYMOUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) buffer->fd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) ASSERT_NE(buffer->ptr, MAP_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) p = buffer->ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) /* Migrating a protected area should be an error. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) ASSERT_EQ(ret, -EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) /* Punch a hole after the first page address. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) ret = munmap(buffer->ptr + self->page_size, self->page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) ASSERT_EQ(ret, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) /* We expect an error if the vma doesn't cover the range. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) ASSERT_EQ(ret, -EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) /* Page 2 will be a read-only zero page. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) PROT_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) ASSERT_EQ(ret, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) ptr = (int *)(buffer->ptr + 2 * self->page_size);
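	/* Reading the zero page returns zero, so val must equal 3. */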
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) val = *ptr + 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) ASSERT_EQ(val, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
	/* Page 3 will be written to and then made read-only. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) PROT_READ | PROT_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) ASSERT_EQ(ret, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) ptr = (int *)(buffer->ptr + 3 * self->page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) *ptr = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) PROT_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) ASSERT_EQ(ret, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
	/* Pages 4 and 5 will be read-write. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) ret = mprotect(buffer->ptr + 4 * self->page_size, 2 * self->page_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) PROT_READ | PROT_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) ASSERT_EQ(ret, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) ptr = (int *)(buffer->ptr + 4 * self->page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) *ptr = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) ptr = (int *)(buffer->ptr + 5 * self->page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) *ptr = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) /* Now try to migrate pages 2-5 to device 1. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) buffer->ptr = p + 2 * self->page_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) ASSERT_EQ(ret, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) ASSERT_EQ(buffer->cpages, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) /* Page 5 won't be migrated to device 0 because it's on device 1. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) buffer->ptr = p + 5 * self->page_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_MIGRATE, buffer, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) ASSERT_EQ(ret, -ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) buffer->ptr = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) hmm_buffer_free(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) * Migrate anonymous memory to device private memory and fault it back to system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) * memory multiple times.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) TEST_F(hmm, migrate_multiple)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) struct hmm_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) unsigned long npages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) unsigned long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) unsigned long c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) int *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) ASSERT_NE(npages, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) size = npages << self->page_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) for (c = 0; c < NTIMES; c++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) buffer = malloc(sizeof(*buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) ASSERT_NE(buffer, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) buffer->fd = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) buffer->size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) buffer->mirror = malloc(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) ASSERT_NE(buffer->mirror, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) buffer->ptr = mmap(NULL, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) PROT_READ | PROT_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) MAP_PRIVATE | MAP_ANONYMOUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) buffer->fd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) ASSERT_NE(buffer->ptr, MAP_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) /* Initialize buffer in system memory. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) ptr[i] = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) /* Migrate memory to device. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) ASSERT_EQ(ret, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) ASSERT_EQ(buffer->cpages, npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) /* Check what the device read. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) ASSERT_EQ(ptr[i], i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) /* Fault pages back to system memory and check them. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) ASSERT_EQ(ptr[i], i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) hmm_buffer_free(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) * Read anonymous memory multiple times.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) TEST_F(hmm, anon_read_multiple)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) struct hmm_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) unsigned long npages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) unsigned long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) unsigned long c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) int *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) ASSERT_NE(npages, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) size = npages << self->page_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) for (c = 0; c < NTIMES; c++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) buffer = malloc(sizeof(*buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) ASSERT_NE(buffer, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) buffer->fd = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) buffer->size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) buffer->mirror = malloc(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) ASSERT_NE(buffer->mirror, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) buffer->ptr = mmap(NULL, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) PROT_READ | PROT_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) MAP_PRIVATE | MAP_ANONYMOUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) buffer->fd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) ASSERT_NE(buffer->ptr, MAP_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) /* Initialize buffer in system memory. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) ptr[i] = i + c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) /* Simulate a device reading system memory. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) ASSERT_EQ(ret, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) ASSERT_EQ(buffer->cpages, npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) ASSERT_EQ(buffer->faults, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) /* Check what the device read. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) ASSERT_EQ(ptr[i], i + c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) hmm_buffer_free(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) void *unmap_buffer(void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) struct hmm_buffer *buffer = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
	/*
	 * Delay for a bit and then unmap the upper half of the buffer
	 * while it is being read.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) hmm_nanosleep(hmm_random() % 32000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) munmap(buffer->ptr + buffer->size / 2, buffer->size / 2);
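	/* Clear ptr so a later hmm_buffer_free() leaves the mapping alone. */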
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) buffer->ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * Try reading anonymous memory while it is being unmapped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) TEST_F(hmm, anon_teardown)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) unsigned long npages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) unsigned long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) unsigned long c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) void *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) ASSERT_NE(npages, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) size = npages << self->page_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) for (c = 0; c < NTIMES; ++c) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) pthread_t thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) struct hmm_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) int *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) buffer = malloc(sizeof(*buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) ASSERT_NE(buffer, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) buffer->fd = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) buffer->size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) buffer->mirror = malloc(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) ASSERT_NE(buffer->mirror, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) buffer->ptr = mmap(NULL, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) PROT_READ | PROT_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) MAP_PRIVATE | MAP_ANONYMOUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) buffer->fd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) ASSERT_NE(buffer->ptr, MAP_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) /* Initialize buffer in system memory. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) ptr[i] = i + c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) rc = pthread_create(&thread, NULL, unmap_buffer, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) ASSERT_EQ(rc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) /* Simulate a device reading system memory. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) rc = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) npages);
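		/*
		 * The racing unmap may make the read fail; only check the
		 * data when the read succeeded.
		 */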
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) if (rc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) ASSERT_EQ(buffer->cpages, npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) ASSERT_EQ(buffer->faults, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) /* Check what the device read. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) for (i = 0, ptr = buffer->mirror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) i < size / sizeof(*ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) ASSERT_EQ(ptr[i], i + c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) pthread_join(thread, &ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) hmm_buffer_free(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
/*
 * Test memory snapshot of a private mapping of the test device file (a
 * mixed map), without faulting in pages accessed by the device.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) TEST_F(hmm, mixedmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) struct hmm_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) unsigned long npages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) unsigned long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) unsigned char *m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) npages = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) size = npages << self->page_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) buffer = malloc(sizeof(*buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) ASSERT_NE(buffer, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) buffer->fd = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) buffer->size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) buffer->mirror = malloc(npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) ASSERT_NE(buffer->mirror, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
	/* Map the test device file privately to get a mixed map. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) buffer->ptr = mmap(NULL, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) PROT_READ | PROT_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) MAP_PRIVATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) self->fd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) ASSERT_NE(buffer->ptr, MAP_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) /* Simulate a device snapshotting CPU pagetables. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) ASSERT_EQ(ret, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) ASSERT_EQ(buffer->cpages, npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) /* Check what the device saw. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) m = buffer->mirror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) ASSERT_EQ(m[0], HMM_DMIRROR_PROT_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) hmm_buffer_free(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) * Test memory snapshot without faulting in pages accessed by the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) TEST_F(hmm2, snapshot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) struct hmm_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) unsigned long npages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) unsigned long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) int *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) unsigned char *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) unsigned char *m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) npages = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) size = npages << self->page_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) buffer = malloc(sizeof(*buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) ASSERT_NE(buffer, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) buffer->fd = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) buffer->size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) buffer->mirror = malloc(npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) ASSERT_NE(buffer->mirror, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) /* Reserve a range of addresses. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) buffer->ptr = mmap(NULL, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) PROT_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) MAP_PRIVATE | MAP_ANONYMOUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) buffer->fd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) ASSERT_NE(buffer->ptr, MAP_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) p = buffer->ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) /* Punch a hole after the first page address. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) ret = munmap(buffer->ptr + self->page_size, self->page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) ASSERT_EQ(ret, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
	/* Page 2 will be a read-only zero page. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) PROT_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) ASSERT_EQ(ret, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) ptr = (int *)(buffer->ptr + 2 * self->page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) val = *ptr + 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) ASSERT_EQ(val, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
	/* Page 3 will be written to and then made read-only. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) PROT_READ | PROT_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) ASSERT_EQ(ret, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) ptr = (int *)(buffer->ptr + 3 * self->page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) *ptr = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) PROT_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) ASSERT_EQ(ret, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
	/* Pages 4-6 will be read-write. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) ret = mprotect(buffer->ptr + 4 * self->page_size, 3 * self->page_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) PROT_READ | PROT_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) ASSERT_EQ(ret, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) ptr = (int *)(buffer->ptr + 4 * self->page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) *ptr = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) /* Page 5 will be migrated to device 0. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) buffer->ptr = p + 5 * self->page_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_MIGRATE, buffer, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) ASSERT_EQ(ret, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) ASSERT_EQ(buffer->cpages, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) /* Page 6 will be migrated to device 1. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) buffer->ptr = p + 6 * self->page_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) ASSERT_EQ(ret, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) ASSERT_EQ(buffer->cpages, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) /* Simulate a device snapshotting CPU pagetables. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) buffer->ptr = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_SNAPSHOT, buffer, npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) ASSERT_EQ(ret, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) ASSERT_EQ(buffer->cpages, npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) /* Check what the device saw. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) m = buffer->mirror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) ASSERT_EQ(m[0], HMM_DMIRROR_PROT_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) ASSERT_EQ(m[1], HMM_DMIRROR_PROT_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) ASSERT_EQ(m[2], HMM_DMIRROR_PROT_ZERO | HMM_DMIRROR_PROT_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) ASSERT_EQ(m[3], HMM_DMIRROR_PROT_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) ASSERT_EQ(m[4], HMM_DMIRROR_PROT_WRITE);
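	/*
	 * Page 5 is device private on device 0 (local), while page 6 is
	 * device private on device 1, which device 0 reports as none.
	 */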
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) HMM_DMIRROR_PROT_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) hmm_buffer_free(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) * Test the hmm_range_fault() HMM_PFN_PMD flag for large pages that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) * should be mapped by a large page table entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) TEST_F(hmm, compound)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) struct hmm_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) unsigned long npages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) unsigned long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) int *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) unsigned char *m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) long pagesizes[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) int n, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) /* Skip test if we can't allocate a hugetlbfs page. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) n = gethugepagesizes(pagesizes, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (n <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) return;
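	/* Pick the smallest reported huge page size. */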
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) for (idx = 0; --n > 0; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) if (pagesizes[n] < pagesizes[idx])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) idx = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) size = ALIGN(TWOMEG, pagesizes[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) npages = size >> self->page_shift;
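	/* npages counts base pages; the snapshot stores one byte per page. */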
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) buffer = malloc(sizeof(*buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) ASSERT_NE(buffer, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) buffer->ptr = get_hugepage_region(size, GHR_STRICT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) if (buffer->ptr == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) free(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) buffer->size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) buffer->mirror = malloc(npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) ASSERT_NE(buffer->mirror, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) /* Initialize the pages the device will snapshot in buffer->ptr. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) ptr[i] = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) /* Simulate a device snapshotting CPU pagetables. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) ASSERT_EQ(ret, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) ASSERT_EQ(buffer->cpages, npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) /* Check what the device saw. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) m = buffer->mirror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) for (i = 0; i < npages; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) ASSERT_EQ(m[i], HMM_DMIRROR_PROT_WRITE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) HMM_DMIRROR_PROT_PMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) /* Make the region read-only. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) ret = mprotect(buffer->ptr, size, PROT_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) ASSERT_EQ(ret, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) /* Simulate a device snapshotting CPU pagetables. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) ASSERT_EQ(ret, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) ASSERT_EQ(buffer->cpages, npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) /* Check what the device saw. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) m = buffer->mirror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) for (i = 0; i < npages; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) ASSERT_EQ(m[i], HMM_DMIRROR_PROT_READ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) HMM_DMIRROR_PROT_PMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) free_hugepage_region(buffer->ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) buffer->ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) hmm_buffer_free(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) * Test two devices reading the same memory (double mapped).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) TEST_F(hmm2, double_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) struct hmm_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) unsigned long npages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) unsigned long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) int *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) npages = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) size = npages << self->page_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) buffer = malloc(sizeof(*buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) ASSERT_NE(buffer, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) buffer->fd = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) buffer->size = size;
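	/* HMM_DMIRROR_READ copies size bytes into the mirror. */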
	buffer->mirror = malloc(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) ASSERT_NE(buffer->mirror, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
	/* Map some private anonymous memory. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) buffer->ptr = mmap(NULL, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) PROT_READ | PROT_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) MAP_PRIVATE | MAP_ANONYMOUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) buffer->fd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) ASSERT_NE(buffer->ptr, MAP_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) /* Initialize buffer in system memory. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) ptr[i] = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) /* Make region read-only. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) ret = mprotect(buffer->ptr, size, PROT_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) ASSERT_EQ(ret, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) /* Simulate device 0 reading system memory. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) ASSERT_EQ(ret, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) ASSERT_EQ(buffer->cpages, npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) ASSERT_EQ(buffer->faults, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) /* Check what the device read. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) ASSERT_EQ(ptr[i], i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) /* Simulate device 1 reading system memory. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_READ, buffer, npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) ASSERT_EQ(ret, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) ASSERT_EQ(buffer->cpages, npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) ASSERT_EQ(buffer->faults, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) /* Check what the device read. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) ASSERT_EQ(ptr[i], i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) /* Punch a hole after the first page address. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) ret = munmap(buffer->ptr + self->page_size, self->page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) ASSERT_EQ(ret, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) hmm_buffer_free(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) TEST_HARNESS_MAIN