// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Intel Corporation.
 * Lei Chuanhua <Chuanhua.lei@intel.com>
 */

#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>

#define RCU_RST_STAT	0x0024
#define RCU_RST_REQ	0x0048

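/*
 * A reset line is packed into the 32-bit id handed around by the reset
 * core: bits 31:16 hold the request register offset, bits 15:8 the request
 * bit position and, on legacy SoCs only, bits 7:0 the status bit position.
 */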
#define REG_OFFSET_MASK		GENMASK(31, 16)
#define BIT_OFFSET_MASK		GENMASK(15, 8)
#define STAT_BIT_OFFSET_MASK	GENMASK(7, 0)

#define to_reset_data(x)	container_of(x, struct intel_reset_data, rcdev)

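/* Per-SoC match data: register layout variant and number of DT reset cells */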
struct intel_reset_soc {
	bool legacy;
	u32 reset_cell_count;
};

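/*
 * Driver state: both the reset controller and the restart notifier resolve
 * back to this structure; reboot_id holds the packed "intel,global-reset"
 * line used by the restart handler.
 */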
struct intel_reset_data {
	struct reset_controller_dev rcdev;
	struct notifier_block restart_nb;
	const struct intel_reset_soc *soc_data;
	struct regmap *regmap;
	struct device *dev;
	u32 reboot_id;
};

static const struct regmap_config intel_rcu_regmap_config = {
	.name = "intel-reset",
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.fast_io = true,
};

/*
 * The reset status register sits 4 bytes after its reset request
 * register (X), i.e. at X + 4. The one exception is the legacy
 * RCU_RST_REQ register, whose status is reported in RCU_RST_STAT.
 */
static u32 id_to_reg_and_bit_offsets(struct intel_reset_data *data,
				     unsigned long id, u32 *rst_req,
				     u32 *req_bit, u32 *stat_bit)
{
	*rst_req = FIELD_GET(REG_OFFSET_MASK, id);
	*req_bit = FIELD_GET(BIT_OFFSET_MASK, id);

	if (data->soc_data->legacy)
		*stat_bit = FIELD_GET(STAT_BIT_OFFSET_MASK, id);
	else
		*stat_bit = *req_bit;

	if (data->soc_data->legacy && *rst_req == RCU_RST_REQ)
		return RCU_RST_STAT;
	else
		return *rst_req + 0x4;
}

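/*
 * Set (assert) or clear (deassert) the request bit of a reset line, then
 * poll its status bit until it reflects the new state, sampling every 20us
 * with a 200us timeout.
 */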
static int intel_set_clr_bits(struct intel_reset_data *data, unsigned long id,
			      bool set)
{
	u32 rst_req, req_bit, rst_stat, stat_bit, val;
	int ret;

	rst_stat = id_to_reg_and_bit_offsets(data, id, &rst_req,
					     &req_bit, &stat_bit);

	val = set ? BIT(req_bit) : 0;
	ret = regmap_update_bits(data->regmap, rst_req, BIT(req_bit), val);
	if (ret)
		return ret;

	return regmap_read_poll_timeout(data->regmap, rst_stat, val,
					set == !!(val & BIT(stat_bit)), 20,
					200);
}

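/* reset_control_ops callbacks, each operating on the packed id from xlate */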
static int intel_assert_device(struct reset_controller_dev *rcdev,
			       unsigned long id)
{
	struct intel_reset_data *data = to_reset_data(rcdev);
	int ret;

	ret = intel_set_clr_bits(data, id, true);
	if (ret)
		dev_err(data->dev, "Reset assert failed %d\n", ret);

	return ret;
}

static int intel_deassert_device(struct reset_controller_dev *rcdev,
				 unsigned long id)
{
	struct intel_reset_data *data = to_reset_data(rcdev);
	int ret;

	ret = intel_set_clr_bits(data, id, false);
	if (ret)
		dev_err(data->dev, "Reset deassert failed %d\n", ret);

	return ret;
}

static int intel_reset_status(struct reset_controller_dev *rcdev,
			      unsigned long id)
{
	struct intel_reset_data *data = to_reset_data(rcdev);
	u32 rst_req, req_bit, rst_stat, stat_bit, val;
	int ret;

	rst_stat = id_to_reg_and_bit_offsets(data, id, &rst_req,
					     &req_bit, &stat_bit);
	ret = regmap_read(data->regmap, rst_stat, &val);
	if (ret)
		return ret;

	return !!(val & BIT(stat_bit));
}

static const struct reset_control_ops intel_reset_ops = {
	.assert = intel_assert_device,
	.deassert = intel_deassert_device,
	.status = intel_reset_status,
};

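/*
 * Translate DT reset cells into a packed id: cell 0 is the request register
 * offset, cell 1 the request bit position and, on legacy SoCs, cell 2 the
 * status bit position. Bit positions must fit within a 32-bit register.
 */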
static int intel_reset_xlate(struct reset_controller_dev *rcdev,
			     const struct of_phandle_args *spec)
{
	struct intel_reset_data *data = to_reset_data(rcdev);
	u32 id;

	if (spec->args[1] > 31)
		return -EINVAL;

	id = FIELD_PREP(REG_OFFSET_MASK, spec->args[0]);
	id |= FIELD_PREP(BIT_OFFSET_MASK, spec->args[1]);

	if (data->soc_data->legacy) {
		if (spec->args[2] > 31)
			return -EINVAL;

		id |= FIELD_PREP(STAT_BIT_OFFSET_MASK, spec->args[2]);
	}

	return id;
}

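/* Assert the global reset line described by "intel,global-reset" on reboot */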
static int intel_reset_restart_handler(struct notifier_block *nb,
				       unsigned long action, void *data)
{
	struct intel_reset_data *reset_data;

	reset_data = container_of(nb, struct intel_reset_data, restart_nb);
	intel_assert_device(&reset_data->rcdev, reset_data->reboot_id);

	return NOTIFY_DONE;
}

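/*
 * Map the RCU registers behind an MMIO regmap, register the reset
 * controller and hook a restart handler (at the default priority of 128)
 * that asserts the global reset line read from "intel,global-reset".
 */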
static int intel_reset_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct intel_reset_data *data;
	void __iomem *base;
	u32 rb_id[3];
	int ret;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->soc_data = of_device_get_match_data(dev);
	if (!data->soc_data)
		return -ENODEV;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	data->regmap = devm_regmap_init_mmio(dev, base,
					     &intel_rcu_regmap_config);
	if (IS_ERR(data->regmap)) {
		dev_err(dev, "regmap initialization failed\n");
		return PTR_ERR(data->regmap);
	}

	ret = device_property_read_u32_array(dev, "intel,global-reset", rb_id,
					     data->soc_data->reset_cell_count);
	if (ret) {
		dev_err(dev, "Failed to get global reset offset!\n");
		return ret;
	}

	data->dev = dev;
	data->rcdev.of_node = np;
	data->rcdev.owner = dev->driver->owner;
	data->rcdev.ops = &intel_reset_ops;
	data->rcdev.of_xlate = intel_reset_xlate;
	data->rcdev.of_reset_n_cells = data->soc_data->reset_cell_count;
	ret = devm_reset_controller_register(&pdev->dev, &data->rcdev);
	if (ret)
		return ret;

	data->reboot_id = FIELD_PREP(REG_OFFSET_MASK, rb_id[0]);
	data->reboot_id |= FIELD_PREP(BIT_OFFSET_MASK, rb_id[1]);

	if (data->soc_data->legacy)
		data->reboot_id |= FIELD_PREP(STAT_BIT_OFFSET_MASK, rb_id[2]);

	data->restart_nb.notifier_call = intel_reset_restart_handler;
	data->restart_nb.priority = 128;
	register_restart_handler(&data->restart_nb);

	return 0;
}

static const struct intel_reset_soc xrx200_data = {
	.legacy = true,
	.reset_cell_count = 3,
};

static const struct intel_reset_soc lgm_data = {
	.legacy = false,
	.reset_cell_count = 2,
};

static const struct of_device_id intel_reset_match[] = {
	{ .compatible = "intel,rcu-lgm", .data = &lgm_data },
	{ .compatible = "intel,rcu-xrx200", .data = &xrx200_data },
	{}
};

static struct platform_driver intel_reset_driver = {
	.probe = intel_reset_probe,
	.driver = {
		.name = "intel-reset",
		.of_match_table = intel_reset_match,
	},
};

static int __init intel_reset_init(void)
{
	return platform_driver_register(&intel_reset_driver);
}

/*
 * The RCU is a system core block in the Always On domain; its clocks and
 * resources are set up during system core initialization. Most platform
 * and architecture specific devices also need it to perform resets during
 * their own initialization, so register the driver at postcore_initcall
 * level.
 */
postcore_initcall(intel_reset_init);