/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright(c) 2016 Intel Corporation. All rights reserved.
 */
#ifndef __DAX_PRIVATE_H__
#define __DAX_PRIVATE_H__

#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/idr.h>

/* private routines between core files */
struct dax_device;
struct dax_device *inode_dax(struct inode *inode);
struct inode *dax_inode(struct dax_device *dax_dev);
int dax_bus_init(void);
void dax_bus_exit(void);

/**
 * struct dax_region - mapping infrastructure for dax devices
 * @id: kernel-wide unique region id for a memory range
 * @target_node: effective numa node if this memory range is onlined
 * @kref: to pin while other agents have a need to do lookups
 * @dev: parent device backing this region
 * @align: allocation and mapping alignment for child dax devices
 * @ida: instance id allocator
 * @res: resource tree to track instance allocations
 * @seed: allow userspace to find the first unbound seed device
 * @youngest: allow userspace to find the most recently created device
 */
struct dax_region {
	int id;
	int target_node;
	struct kref kref;
	struct device *dev;
	unsigned int align;
	struct ida ida;
	struct resource res;
	struct device *seed;
	struct device *youngest;
};

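/**
 * struct dax_mapping - device representation of a dev_dax range
 * @dev: device core for the mapping instance
 * @range_id: index of the backing entry in the parent dev_dax @ranges array
 * @id: id allocated from the parent dev_dax mapping @ida
 */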
struct dax_mapping {
	struct device dev;
	int range_id;
	int id;
};

Dave Jiangefebc712017-04-07 15:33:36 -070049/**
Dan Williams89ec9f22018-10-29 15:52:42 -070050 * struct dev_dax - instance data for a subdivision of a dax region, and
51 * data while the device is activated in the driver.
Dave Jiangefebc712017-04-07 15:33:36 -070052 * @region - parent region
Dan Williams73616362017-05-04 23:38:43 -070053 * @dax_dev - core dax functionality
Dan Williams8fc5c732018-11-09 12:43:07 -080054 * @target_node: effective numa node if dev_dax memory range is onlined
Dan Williams0f3da142020-10-13 16:50:13 -070055 * @id: ida allocated id
Dan Williams0b07ce82020-10-13 16:50:45 -070056 * @ida: mapping id allocator
Dan Williams73616362017-05-04 23:38:43 -070057 * @dev - device core
Dan Williams89ec9f22018-10-29 15:52:42 -070058 * @pgmap - pgmap for memmap setup / lifetime (driver owned)
Dan Williams60e93dc2020-10-13 16:50:39 -070059 * @nr_range: size of @ranges
60 * @ranges: resource-span + pgoff tuples for the instance
Dave Jiangefebc712017-04-07 15:33:36 -070061 */
struct dev_dax {
	struct dax_region *region;
	struct dax_device *dax_dev;
	unsigned int align;
	int target_node;
	int id;
	struct ida ida;
	struct device dev;
	struct dev_pagemap *pgmap;
	int nr_range;
	struct dev_dax_range {
		unsigned long pgoff;
		struct range range;
		struct dax_mapping *mapping;
	} *ranges;
};

static inline struct dev_dax *to_dev_dax(struct device *dev)
{
	return container_of(dev, struct dev_dax, dev);
}

static inline struct dax_mapping *to_dax_mapping(struct device *dev)
{
	return container_of(dev, struct dax_mapping, dev);
}

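/*
 * Translate an instance-relative @pgoff to a physical address by walking the
 * instance's @ranges; returns -1 when a span of @size bytes at @pgoff does
 * not fit inside any allocated range. A typical caller is a fault handler
 * passing vmf->pgoff and the fault's page size, e.g.
 * dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE).
 */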
phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff, unsigned long size);

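/*
 * Validate a requested device-dax mapping alignment against what the
 * architecture's huge page support can actually map; without
 * CONFIG_TRANSPARENT_HUGEPAGE only PAGE_SIZE alignment is accepted.
 */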
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline bool dax_align_valid(unsigned long align)
{
	if (align == PUD_SIZE && IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
		return true;
	if (align == PMD_SIZE && has_transparent_hugepage())
		return true;
	if (align == PAGE_SIZE)
		return true;
	return false;
}
#else
static inline bool dax_align_valid(unsigned long align)
{
	return align == PAGE_SIZE;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* __DAX_PRIVATE_H__ */