// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-2018 Intel Corporation. All rights reserved. */
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include "dax-private.h"
#include "bus.h"

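/*
 * Validate that a VMA is suitable for device-dax: the backing
 * dax_device must still be alive, the mapping must be shared (private
 * COW mappings are rejected), the VMA bounds must honor the device
 * alignment, and the VMA must be DAX capable.
 */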
static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
                const char *func)
{
        struct device *dev = &dev_dax->dev;
        unsigned long mask;

        if (!dax_alive(dev_dax->dax_dev))
                return -ENXIO;

        /* prevent private mappings from being established */
        if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
                dev_info_ratelimited(dev,
                                "%s: %s: fail, attempted private mapping\n",
                                current->comm, func);
                return -EINVAL;
        }

        mask = dev_dax->align - 1;
        if (vma->vm_start & mask || vma->vm_end & mask) {
                dev_info_ratelimited(dev,
                                "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
                                current->comm, func, vma->vm_start, vma->vm_end,
                                mask);
                return -EINVAL;
        }

        if (!vma_is_dax(vma)) {
                dev_info_ratelimited(dev,
                                "%s: %s: fail, vma is not DAX capable\n",
                                current->comm, func);
                return -EINVAL;
        }

        return 0;
}

/* see "strong" declaration in tools/testing/nvdimm/dax-dev.c */
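/*
 * Translate a linear device page offset to a physical address by
 * walking the (possibly discontiguous) ranges backing the device. The
 * @size check guards against translations that would run past the end
 * of a range; returns -1 if @pgoff is not backed by any range.
 */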
__weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
                unsigned long size)
{
        int i;

        for (i = 0; i < dev_dax->nr_range; i++) {
                struct dev_dax_range *dax_range = &dev_dax->ranges[i];
                struct range *range = &dax_range->range;
                unsigned long long pgoff_end;
                phys_addr_t phys;

                pgoff_end = dax_range->pgoff + PHYS_PFN(range_len(range)) - 1;
                if (pgoff < dax_range->pgoff || pgoff > pgoff_end)
                        continue;
                phys = PFN_PHYS(pgoff - dax_range->pgoff) + range->start;
                if (phys + size - 1 <= range->end)
                        return phys;
                break;
        }
        return -1;
}

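/*
 * PTE (PAGE_SIZE) fault: only valid when the device alignment is
 * exactly PAGE_SIZE; a larger alignment must be serviced at a larger
 * fault granularity.
 */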
static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
                                struct vm_fault *vmf, pfn_t *pfn)
{
        struct device *dev = &dev_dax->dev;
        phys_addr_t phys;
        unsigned int fault_size = PAGE_SIZE;

        if (check_vma(dev_dax, vmf->vma, __func__))
                return VM_FAULT_SIGBUS;

        if (dev_dax->align > PAGE_SIZE) {
                dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
                        dev_dax->align, fault_size);
                return VM_FAULT_SIGBUS;
        }

        if (fault_size != dev_dax->align)
                return VM_FAULT_SIGBUS;

        phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
        if (phys == -1) {
                dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff);
                return VM_FAULT_SIGBUS;
        }

        *pfn = phys_to_pfn_t(phys, PFN_DEV | PFN_MAP);

        return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
}

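/*
 * PMD (huge page) fault: an alignment smaller than PMD_SIZE falls back
 * to PTE faults; an alignment larger than PMD_SIZE, or a PMD extent
 * that spills outside the VMA, fails with SIGBUS.
 */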
static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
                                struct vm_fault *vmf, pfn_t *pfn)
{
        unsigned long pmd_addr = vmf->address & PMD_MASK;
        struct device *dev = &dev_dax->dev;
        phys_addr_t phys;
        pgoff_t pgoff;
        unsigned int fault_size = PMD_SIZE;

        if (check_vma(dev_dax, vmf->vma, __func__))
                return VM_FAULT_SIGBUS;

        if (dev_dax->align > PMD_SIZE) {
                dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
                        dev_dax->align, fault_size);
                return VM_FAULT_SIGBUS;
        }

        if (fault_size < dev_dax->align)
                return VM_FAULT_SIGBUS;
        else if (fault_size > dev_dax->align)
                return VM_FAULT_FALLBACK;

        /* if we are outside of the VMA */
        if (pmd_addr < vmf->vma->vm_start ||
                        (pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
                return VM_FAULT_SIGBUS;

        pgoff = linear_page_index(vmf->vma, pmd_addr);
        phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE);
        if (phys == -1) {
                dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
                return VM_FAULT_SIGBUS;
        }

        *pfn = phys_to_pfn_t(phys, PFN_DEV | PFN_MAP);

        return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}

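/*
 * PUD faults mirror the PMD path at PUD_SIZE granularity; they are
 * only possible on architectures with transparent huge page support at
 * the PUD level, otherwise fall back to smaller fault sizes.
 */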
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
                                struct vm_fault *vmf, pfn_t *pfn)
{
        unsigned long pud_addr = vmf->address & PUD_MASK;
        struct device *dev = &dev_dax->dev;
        phys_addr_t phys;
        pgoff_t pgoff;
        unsigned int fault_size = PUD_SIZE;

        if (check_vma(dev_dax, vmf->vma, __func__))
                return VM_FAULT_SIGBUS;

        if (dev_dax->align > PUD_SIZE) {
                dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
                        dev_dax->align, fault_size);
                return VM_FAULT_SIGBUS;
        }

        if (fault_size < dev_dax->align)
                return VM_FAULT_SIGBUS;
        else if (fault_size > dev_dax->align)
                return VM_FAULT_FALLBACK;

        /* if we are outside of the VMA */
        if (pud_addr < vmf->vma->vm_start ||
                        (pud_addr + PUD_SIZE) > vmf->vma->vm_end)
                return VM_FAULT_SIGBUS;

        pgoff = linear_page_index(vmf->vma, pud_addr);
        phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);
        if (phys == -1) {
                dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
                return VM_FAULT_SIGBUS;
        }

        *pfn = phys_to_pfn_t(phys, PFN_DEV | PFN_MAP);

        return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}
#else
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
                                struct vm_fault *vmf, pfn_t *pfn)
{
        return VM_FAULT_FALLBACK;
}
#endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

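/*
 * Common fault entry point: dispatch on the requested page-entry size,
 * then, on success, record page->mapping and page->index for each
 * mapped page so the pages can be reverse-mapped back to this file
 * (e.g. for memory failure handling).
 */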
static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
                enum page_entry_size pe_size)
{
        struct file *filp = vmf->vma->vm_file;
        unsigned long fault_size;
        vm_fault_t rc = VM_FAULT_SIGBUS;
        int id;
        pfn_t pfn;
        struct dev_dax *dev_dax = filp->private_data;

        dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm,
                        (vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
                        vmf->vma->vm_start, vmf->vma->vm_end, pe_size);

        id = dax_read_lock();
        switch (pe_size) {
        case PE_SIZE_PTE:
                fault_size = PAGE_SIZE;
                rc = __dev_dax_pte_fault(dev_dax, vmf, &pfn);
                break;
        case PE_SIZE_PMD:
                fault_size = PMD_SIZE;
                rc = __dev_dax_pmd_fault(dev_dax, vmf, &pfn);
                break;
        case PE_SIZE_PUD:
                fault_size = PUD_SIZE;
                rc = __dev_dax_pud_fault(dev_dax, vmf, &pfn);
                break;
        default:
                rc = VM_FAULT_SIGBUS;
        }

        if (rc == VM_FAULT_NOPAGE) {
                unsigned long i;
                pgoff_t pgoff;

                /*
                 * In the device-dax case the only possibility for a
                 * VM_FAULT_NOPAGE result is when device-dax capacity is
                 * mapped. No need to consider the zero page, or racing
                 * conflicting mappings.
                 */
                pgoff = linear_page_index(vmf->vma, vmf->address
                                & ~(fault_size - 1));
                for (i = 0; i < fault_size / PAGE_SIZE; i++) {
                        struct page *page;

                        page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
                        if (page->mapping)
                                continue;
                        page->mapping = filp->f_mapping;
                        page->index = pgoff + i;
                }
        }
        dax_read_unlock(id);

        return rc;
}

static vm_fault_t dev_dax_fault(struct vm_fault *vmf)
{
        return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr)
{
        struct file *filp = vma->vm_file;
        struct dev_dax *dev_dax = filp->private_data;

        if (!IS_ALIGNED(addr, dev_dax->align))
                return -EINVAL;
        return 0;
}

static unsigned long dev_dax_pagesize(struct vm_area_struct *vma)
{
        struct file *filp = vma->vm_file;
        struct dev_dax *dev_dax = filp->private_data;

        return dev_dax->align;
}

static const struct vm_operations_struct dax_vm_ops = {
        .fault = dev_dax_fault,
        .huge_fault = dev_dax_huge_fault,
        .split = dev_dax_split,
        .pagesize = dev_dax_pagesize,
};

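/*
 * Wire a shared mapping of device-dax capacity up to dax_vm_ops. A
 * minimal userspace sketch (the device path and mapping length below
 * are illustrative assumptions, not fixed by this driver):
 *
 *	int fd = open("/dev/dax0.0", O_RDWR);
 *	void *p = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
 *
 * MAP_SYNC is honored because dax_fops sets mmap_supported_flags;
 * MAP_PRIVATE mappings are rejected by check_vma().
 */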
static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct dev_dax *dev_dax = filp->private_data;
        int rc, id;

        dev_dbg(&dev_dax->dev, "trace\n");

        /*
         * We lock to check dax_dev liveness and will re-check at
         * fault time.
         */
        id = dax_read_lock();
        rc = check_vma(dev_dax, vma, __func__);
        dax_read_unlock(id);
        if (rc)
                return rc;

        vma->vm_ops = &dax_vm_ops;
        vma->vm_flags |= VM_HUGEPAGE;
        return 0;
}

/*
 * Return an unmapped area aligned to the dax region's specified
 * alignment: over-allocate by one alignment unit, then bump the result
 * so the returned address is congruent to the file offset modulo the
 * alignment, as the huge fault handlers require.
 */
static unsigned long dax_get_unmapped_area(struct file *filp,
                unsigned long addr, unsigned long len, unsigned long pgoff,
                unsigned long flags)
{
        unsigned long off, off_end, off_align, len_align, addr_align, align;
        struct dev_dax *dev_dax = filp ? filp->private_data : NULL;

        if (!dev_dax || addr)
                goto out;

        align = dev_dax->align;
        off = pgoff << PAGE_SHIFT;
        off_end = off + len;
        off_align = round_up(off, align);

        if ((off_end <= off_align) || ((off_end - off_align) < align))
                goto out;

        len_align = len + align;
        if ((off + len_align) < off)
                goto out;

        addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
                        pgoff, flags);
        if (!IS_ERR_VALUE(addr_align)) {
                addr_align += (off - addr_align) & (align - 1);
                return addr_align;
        }
out:
        return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}

static const struct address_space_operations dev_dax_aops = {
        .set_page_dirty = noop_set_page_dirty,
        .invalidatepage = noop_invalidatepage,
};

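/*
 * On open, alias the chardev inode's mapping to the dax_device inode
 * so all openers share one address_space, and mark the inode S_DAX so
 * the resulting mappings are recognized as DAX (see vma_is_dax()).
 */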
static int dax_open(struct inode *inode, struct file *filp)
{
        struct dax_device *dax_dev = inode_dax(inode);
        struct inode *__dax_inode = dax_inode(dax_dev);
        struct dev_dax *dev_dax = dax_get_private(dax_dev);

        dev_dbg(&dev_dax->dev, "trace\n");
        inode->i_mapping = __dax_inode->i_mapping;
        inode->i_mapping->host = __dax_inode;
        inode->i_mapping->a_ops = &dev_dax_aops;
        filp->f_mapping = inode->i_mapping;
        filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
        filp->f_sb_err = file_sample_sb_err(filp);
        filp->private_data = dev_dax;
        inode->i_flags = S_DAX;

        return 0;
}

static int dax_release(struct inode *inode, struct file *filp)
{
        struct dev_dax *dev_dax = filp->private_data;

        dev_dbg(&dev_dax->dev, "trace\n");
        return 0;
}

static const struct file_operations dax_fops = {
        .llseek = noop_llseek,
        .owner = THIS_MODULE,
        .open = dax_open,
        .release = dax_release,
        .get_unmapped_area = dax_get_unmapped_area,
        .mmap = dax_mmap,
        .mmap_supported_flags = MAP_SYNC,
};

static void dev_dax_cdev_del(void *cdev)
{
        cdev_del(cdev);
}

static void dev_dax_kill(void *dev_dax)
{
        kill_dev_dax(dev_dax);
}

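/*
 * Bind device-dax capacity: reserve each backing range, arrange for
 * struct pages via devm_memremap_pages(), then publish the character
 * device. All unwind is devm-managed; dev_dax_kill() invalidates any
 * in-flight mappings when the device is unbound.
 */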
int dev_dax_probe(struct dev_dax *dev_dax)
{
        struct dax_device *dax_dev = dev_dax->dax_dev;
        struct device *dev = &dev_dax->dev;
        struct dev_pagemap *pgmap;
        struct inode *inode;
        struct cdev *cdev;
        void *addr;
        int rc, i;

        pgmap = dev_dax->pgmap;
        if (dev_WARN_ONCE(dev, pgmap && dev_dax->nr_range > 1,
                        "static pgmap / multi-range device conflict\n"))
                return -EINVAL;

        if (!pgmap) {
                pgmap = devm_kzalloc(dev, sizeof(*pgmap) + sizeof(struct range)
                                * (dev_dax->nr_range - 1), GFP_KERNEL);
                if (!pgmap)
                        return -ENOMEM;
                pgmap->nr_range = dev_dax->nr_range;
        }

        for (i = 0; i < dev_dax->nr_range; i++) {
                struct range *range = &dev_dax->ranges[i].range;

                if (!devm_request_mem_region(dev, range->start,
                                        range_len(range), dev_name(dev))) {
                        dev_warn(dev, "mapping%d: %#llx-%#llx could not reserve range\n",
                                        i, range->start, range->end);
                        return -EBUSY;
                }
                /* don't update the range for static pgmap */
                if (!dev_dax->pgmap)
                        pgmap->ranges[i] = *range;
        }

        pgmap->type = MEMORY_DEVICE_GENERIC;
        addr = devm_memremap_pages(dev, pgmap);
        if (IS_ERR(addr))
                return PTR_ERR(addr);

        inode = dax_inode(dax_dev);
        cdev = inode->i_cdev;
        cdev_init(cdev, &dax_fops);
        if (dev->class) {
                /* for the CONFIG_DEV_DAX_PMEM_COMPAT case */
                cdev->owner = dev->parent->driver->owner;
        } else {
                cdev->owner = dev->driver->owner;
        }
        cdev_set_parent(cdev, &dev->kobj);
        rc = cdev_add(cdev, dev->devt, 1);
        if (rc)
                return rc;

        rc = devm_add_action_or_reset(dev, dev_dax_cdev_del, cdev);
        if (rc)
                return rc;

        run_dax(dax_dev);
        return devm_add_action_or_reset(dev, dev_dax_kill, dev_dax);
}
EXPORT_SYMBOL_GPL(dev_dax_probe);

static int dev_dax_remove(struct dev_dax *dev_dax)
{
        /* all probe actions are unwound by devm */
        return 0;
}

static struct dax_device_driver device_dax_driver = {
        .probe = dev_dax_probe,
        .remove = dev_dax_remove,
        .match_always = 1,
};

static int __init dax_init(void)
{
        return dax_driver_register(&device_dax_driver);
}

static void __exit dax_exit(void)
{
        dax_driver_unregister(&device_dax_driver);
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
module_init(dax_init);
module_exit(dax_exit);
MODULE_ALIAS_DAX_DEVICE(0);