/*
 * SuperH On-Chip RTC Support
 *
 * Copyright (C) 2006 - 2009 Paul Mundt
 * Copyright (C) 2006 Jamie Lenehan
 * Copyright (C) 2008 Angelo Castello
 *
 * Based on the old arch/sh/kernel/cpu/rtc.c by:
 *
 *  Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 *  Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/clk.h>
#include <linux/slab.h>
#ifdef CONFIG_SUPERH
#include <asm/rtc.h>
#else
/* Default values for RZ/A RTC */
#define rtc_reg_size		sizeof(u16)
#define RTC_BIT_INVERTED	0	/* no chip bugs */
#define RTC_CAP_4_DIGIT_YEAR	(1 << 0)
#define RTC_DEF_CAPABILITIES	RTC_CAP_4_DIGIT_YEAR
#endif

#define DRV_NAME	"sh-rtc"

#define RTC_REG(r)	((r) * rtc_reg_size)

#define R64CNT		RTC_REG(0)

#define RSECCNT		RTC_REG(1)	/* RTC sec */
#define RMINCNT		RTC_REG(2)	/* RTC min */
#define RHRCNT		RTC_REG(3)	/* RTC hour */
#define RWKCNT		RTC_REG(4)	/* RTC week */
#define RDAYCNT		RTC_REG(5)	/* RTC day */
#define RMONCNT		RTC_REG(6)	/* RTC month */
#define RYRCNT		RTC_REG(7)	/* RTC year */
#define RSECAR		RTC_REG(8)	/* ALARM sec */
#define RMINAR		RTC_REG(9)	/* ALARM min */
#define RHRAR		RTC_REG(10)	/* ALARM hour */
#define RWKAR		RTC_REG(11)	/* ALARM week */
#define RDAYAR		RTC_REG(12)	/* ALARM day */
#define RMONAR		RTC_REG(13)	/* ALARM month */
#define RCR1		RTC_REG(14)	/* Control */
#define RCR2		RTC_REG(15)	/* Control */

/*
 * Note on RYRAR and RCR3: Up until this point most of the register
 * definitions are consistent across all of the available parts. However,
 * the placement of the optional RYRAR and RCR3 (the RYRAR control
 * register used to control RYRCNT/RYRAR compare) varies considerably
 * across various parts, occasionally being mapped in to a completely
 * unrelated address space. For proper RYRAR support a separate resource
 * would have to be handed off, but as this is purely optional in
 * practice, we simply opt not to support it, thereby keeping the code
 * quite a bit more simplified.
 */

/* ALARM Bits - or with BCD encoded value */
#define AR_ENB		0x80	/* Enable for alarm cmp */

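/*
 * The PF_* values below are driver-internal bookkeeping flags kept in
 * rtc->periodic_freq alongside the hardware divisor selection; they all
 * sit above bit 7, so the byte-wide writes to RCR2 never transfer them
 * to the hardware.
 */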
/* Period Bits */
#define PF_HP		0x100	/* Enable Half Period to support 8,32,128Hz */
#define PF_COUNT	0x200	/* Half periodic counter */
#define PF_OXS		0x400	/* Periodic One x Second */
#define PF_KOU		0x800	/* Kernel or User periodic request 1=kernel */
#define PF_MASK		0xf00

/* RCR1 Bits */
#define RCR1_CF		0x80	/* Carry Flag */
#define RCR1_CIE	0x10	/* Carry Interrupt Enable */
#define RCR1_AIE	0x08	/* Alarm Interrupt Enable */
#define RCR1_AF		0x01	/* Alarm Flag */

/* RCR2 Bits */
#define RCR2_PEF	0x80	/* PEriodic interrupt Flag */
#define RCR2_PESMASK	0x70	/* Periodic interrupt Set */
#define RCR2_RTCEN	0x08	/* ENable RTC */
#define RCR2_ADJ	0x04	/* ADJustment (30-second) */
#define RCR2_RESET	0x02	/* Reset bit */
#define RCR2_START	0x01	/* Start bit */

struct sh_rtc {
	void __iomem *regbase;
	unsigned long regsize;
	struct resource *res;
	int alarm_irq;
	int periodic_irq;
	int carry_irq;
	struct clk *clk;
	struct rtc_device *rtc_dev;
	spinlock_t lock;
	unsigned long capabilities;	/* See asm/rtc.h for cap bits */
	unsigned short periodic_freq;
};

static int __sh_rtc_interrupt(struct sh_rtc *rtc)
{
	unsigned int tmp, pending;

	tmp = readb(rtc->regbase + RCR1);
	pending = tmp & RCR1_CF;
	tmp &= ~RCR1_CF;
	writeb(tmp, rtc->regbase + RCR1);

	/* Users have requested One x Second IRQ */
	if (pending && rtc->periodic_freq & PF_OXS)
		rtc_update_irq(rtc->rtc_dev, 1, RTC_UF | RTC_IRQF);

	return pending;
}

static int __sh_rtc_alarm(struct sh_rtc *rtc)
{
	unsigned int tmp, pending;

	tmp = readb(rtc->regbase + RCR1);
	pending = tmp & RCR1_AF;
	tmp &= ~(RCR1_AF | RCR1_AIE);
	writeb(tmp, rtc->regbase + RCR1);

	if (pending)
		rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF);

	return pending;
}

static int __sh_rtc_periodic(struct sh_rtc *rtc)
{
	struct rtc_device *rtc_dev = rtc->rtc_dev;
	struct rtc_task *irq_task;
	unsigned int tmp, pending;

	tmp = readb(rtc->regbase + RCR2);
	pending = tmp & RCR2_PEF;
	tmp &= ~RCR2_PEF;
	writeb(tmp, rtc->regbase + RCR2);

	if (!pending)
		return 0;

	/* With half period enabled, skip one interrupt and deliver the next */
	if ((rtc->periodic_freq & PF_HP) && (rtc->periodic_freq & PF_COUNT))
		rtc->periodic_freq &= ~PF_COUNT;
	else {
		if (rtc->periodic_freq & PF_HP)
			rtc->periodic_freq |= PF_COUNT;
		if (rtc->periodic_freq & PF_KOU) {
			spin_lock(&rtc_dev->irq_task_lock);
			irq_task = rtc_dev->irq_task;
			if (irq_task)
				irq_task->func(irq_task->private_data);
			spin_unlock(&rtc_dev->irq_task_lock);
		} else
			rtc_update_irq(rtc->rtc_dev, 1, RTC_PF | RTC_IRQF);
	}

	return pending;
}

static irqreturn_t sh_rtc_interrupt(int irq, void *dev_id)
{
	struct sh_rtc *rtc = dev_id;
	int ret;

	spin_lock(&rtc->lock);
	ret = __sh_rtc_interrupt(rtc);
	spin_unlock(&rtc->lock);

	return IRQ_RETVAL(ret);
}

static irqreturn_t sh_rtc_alarm(int irq, void *dev_id)
{
	struct sh_rtc *rtc = dev_id;
	int ret;

	spin_lock(&rtc->lock);
	ret = __sh_rtc_alarm(rtc);
	spin_unlock(&rtc->lock);

	return IRQ_RETVAL(ret);
}

static irqreturn_t sh_rtc_periodic(int irq, void *dev_id)
{
	struct sh_rtc *rtc = dev_id;
	int ret;

	spin_lock(&rtc->lock);
	ret = __sh_rtc_periodic(rtc);
	spin_unlock(&rtc->lock);

	return IRQ_RETVAL(ret);
}

static irqreturn_t sh_rtc_shared(int irq, void *dev_id)
{
	struct sh_rtc *rtc = dev_id;
	int ret;

	spin_lock(&rtc->lock);
	ret = __sh_rtc_interrupt(rtc);
	ret |= __sh_rtc_alarm(rtc);
	ret |= __sh_rtc_periodic(rtc);
	spin_unlock(&rtc->lock);

	return IRQ_RETVAL(ret);
}

static int sh_rtc_irq_set_state(struct device *dev, int enable)
{
	struct sh_rtc *rtc = dev_get_drvdata(dev);
	unsigned int tmp;

	spin_lock_irq(&rtc->lock);

	tmp = readb(rtc->regbase + RCR2);

	if (enable) {
		rtc->periodic_freq |= PF_KOU;
		tmp &= ~RCR2_PEF;			/* Clear PEF bit */
		tmp |= (rtc->periodic_freq & ~PF_HP);	/* Set PES2-0 */
	} else {
		rtc->periodic_freq &= ~PF_KOU;
		tmp &= ~(RCR2_PESMASK | RCR2_PEF);
	}

	writeb(tmp, rtc->regbase + RCR2);

	spin_unlock_irq(&rtc->lock);

	return 0;
}

static int sh_rtc_irq_set_freq(struct device *dev, int freq)
{
	struct sh_rtc *rtc = dev_get_drvdata(dev);
	int tmp, ret = 0;

	spin_lock_irq(&rtc->lock);
	tmp = rtc->periodic_freq & PF_MASK;

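	/*
	 * The hardware divisor (RCR2 PES2-0) only provides 1, 2, 4, 16, 64
	 * and 256 Hz directly. The 8, 32 and 128 Hz rates are synthesised
	 * by programming the next faster rate and setting PF_HP, which
	 * makes __sh_rtc_periodic() report only every other interrupt.
	 */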
	switch (freq) {
	case 0:
		rtc->periodic_freq = 0x00;
		break;
	case 1:
		rtc->periodic_freq = 0x60;
		break;
	case 2:
		rtc->periodic_freq = 0x50;
		break;
	case 4:
		rtc->periodic_freq = 0x40;
		break;
	case 8:
		rtc->periodic_freq = 0x30 | PF_HP;
		break;
	case 16:
		rtc->periodic_freq = 0x30;
		break;
	case 32:
		rtc->periodic_freq = 0x20 | PF_HP;
		break;
	case 64:
		rtc->periodic_freq = 0x20;
		break;
	case 128:
		rtc->periodic_freq = 0x10 | PF_HP;
		break;
	case 256:
		rtc->periodic_freq = 0x10;
		break;
	default:
		ret = -ENOTSUPP;
	}

	if (ret == 0)
		rtc->periodic_freq |= tmp;

	spin_unlock_irq(&rtc->lock);
	return ret;
}

static inline void sh_rtc_setaie(struct device *dev, unsigned int enable)
{
	struct sh_rtc *rtc = dev_get_drvdata(dev);
	unsigned int tmp;

	spin_lock_irq(&rtc->lock);

	tmp = readb(rtc->regbase + RCR1);

	if (enable)
		tmp |= RCR1_AIE;
	else
		tmp &= ~RCR1_AIE;

	writeb(tmp, rtc->regbase + RCR1);

	spin_unlock_irq(&rtc->lock);
}

static int sh_rtc_proc(struct device *dev, struct seq_file *seq)
{
	struct sh_rtc *rtc = dev_get_drvdata(dev);
	unsigned int tmp;

	tmp = readb(rtc->regbase + RCR1);
	seq_printf(seq, "carry_IRQ\t: %s\n", (tmp & RCR1_CIE) ? "yes" : "no");

	tmp = readb(rtc->regbase + RCR2);
	seq_printf(seq, "periodic_IRQ\t: %s\n",
		   (tmp & RCR2_PESMASK) ? "yes" : "no");

	return 0;
}

static inline void sh_rtc_setcie(struct device *dev, unsigned int enable)
{
	struct sh_rtc *rtc = dev_get_drvdata(dev);
	unsigned int tmp;

	spin_lock_irq(&rtc->lock);

	tmp = readb(rtc->regbase + RCR1);

	if (!enable)
		tmp &= ~RCR1_CIE;
	else
		tmp |= RCR1_CIE;

	writeb(tmp, rtc->regbase + RCR1);

	spin_unlock_irq(&rtc->lock);
}

static int sh_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
	sh_rtc_setaie(dev, enabled);
	return 0;
}

static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	struct sh_rtc *rtc = dev_get_drvdata(dev);
	unsigned int sec128, sec2, yr, yr100, cf_bit;

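	/*
	 * Read the calendar registers until a consistent snapshot is
	 * obtained: R64CNT is sampled before and after the block of reads,
	 * and the whole read is retried if a carry occurred in between or
	 * if the erratum bit named by RTC_BIT_INVERTED differs between the
	 * two samples (it can read back inverted on affected parts).
	 */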
	do {
		unsigned int tmp;

		spin_lock_irq(&rtc->lock);

		tmp = readb(rtc->regbase + RCR1);
		tmp &= ~RCR1_CF; /* Clear CF-bit */
		tmp |= RCR1_CIE;
		writeb(tmp, rtc->regbase + RCR1);

		sec128 = readb(rtc->regbase + R64CNT);

		tm->tm_sec	= bcd2bin(readb(rtc->regbase + RSECCNT));
		tm->tm_min	= bcd2bin(readb(rtc->regbase + RMINCNT));
		tm->tm_hour	= bcd2bin(readb(rtc->regbase + RHRCNT));
		tm->tm_wday	= bcd2bin(readb(rtc->regbase + RWKCNT));
		tm->tm_mday	= bcd2bin(readb(rtc->regbase + RDAYCNT));
		tm->tm_mon	= bcd2bin(readb(rtc->regbase + RMONCNT)) - 1;

		if (rtc->capabilities & RTC_CAP_4_DIGIT_YEAR) {
			yr = readw(rtc->regbase + RYRCNT);
			yr100 = bcd2bin(yr >> 8);
			yr &= 0xff;
		} else {
			yr = readb(rtc->regbase + RYRCNT);
			yr100 = bcd2bin((yr == 0x99) ? 0x19 : 0x20);
		}

		tm->tm_year = (yr100 * 100 + bcd2bin(yr)) - 1900;

		sec2 = readb(rtc->regbase + R64CNT);
		cf_bit = readb(rtc->regbase + RCR1) & RCR1_CF;

		spin_unlock_irq(&rtc->lock);
	} while (cf_bit != 0 || ((sec128 ^ sec2) & RTC_BIT_INVERTED) != 0);

#if RTC_BIT_INVERTED != 0
	if ((sec128 & RTC_BIT_INVERTED))
		tm->tm_sec--;
#endif

	/* only keep the carry interrupt enabled if UIE is on */
	if (!(rtc->periodic_freq & PF_OXS))
		sh_rtc_setcie(dev, 0);

	dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
		"mday=%d, mon=%d, year=%d, wday=%d\n",
		__func__,
		tm->tm_sec, tm->tm_min, tm->tm_hour,
		tm->tm_mday, tm->tm_mon + 1, tm->tm_year, tm->tm_wday);

	return 0;
}

static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	struct sh_rtc *rtc = dev_get_drvdata(dev);
	unsigned int tmp;
	int year;

	spin_lock_irq(&rtc->lock);

	/* Reset pre-scaler & stop RTC */
	tmp = readb(rtc->regbase + RCR2);
	tmp |= RCR2_RESET;
	tmp &= ~RCR2_START;
	writeb(tmp, rtc->regbase + RCR2);

	writeb(bin2bcd(tm->tm_sec), rtc->regbase + RSECCNT);
	writeb(bin2bcd(tm->tm_min), rtc->regbase + RMINCNT);
	writeb(bin2bcd(tm->tm_hour), rtc->regbase + RHRCNT);
	writeb(bin2bcd(tm->tm_wday), rtc->regbase + RWKCNT);
	writeb(bin2bcd(tm->tm_mday), rtc->regbase + RDAYCNT);
	writeb(bin2bcd(tm->tm_mon + 1), rtc->regbase + RMONCNT);

	if (rtc->capabilities & RTC_CAP_4_DIGIT_YEAR) {
		year = (bin2bcd((tm->tm_year + 1900) / 100) << 8) |
			bin2bcd(tm->tm_year % 100);
		writew(year, rtc->regbase + RYRCNT);
	} else {
		year = tm->tm_year % 100;
		writeb(bin2bcd(year), rtc->regbase + RYRCNT);
	}

	/* Start RTC */
	tmp = readb(rtc->regbase + RCR2);
	tmp &= ~RCR2_RESET;
	tmp |= RCR2_RTCEN | RCR2_START;
	writeb(tmp, rtc->regbase + RCR2);

	spin_unlock_irq(&rtc->lock);

	return 0;
}

static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off)
{
	unsigned int byte;
	int value = 0xff;	/* return 0xff for ignored values */

	byte = readb(rtc->regbase + reg_off);
	if (byte & AR_ENB) {
		byte &= ~AR_ENB;	/* strip the enable bit */
		value = bcd2bin(byte);
	}

	return value;
}

static int sh_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
{
	struct sh_rtc *rtc = dev_get_drvdata(dev);
	struct rtc_time *tm = &wkalrm->time;

	spin_lock_irq(&rtc->lock);

	tm->tm_sec	= sh_rtc_read_alarm_value(rtc, RSECAR);
	tm->tm_min	= sh_rtc_read_alarm_value(rtc, RMINAR);
	tm->tm_hour	= sh_rtc_read_alarm_value(rtc, RHRAR);
	tm->tm_wday	= sh_rtc_read_alarm_value(rtc, RWKAR);
	tm->tm_mday	= sh_rtc_read_alarm_value(rtc, RDAYAR);
	tm->tm_mon	= sh_rtc_read_alarm_value(rtc, RMONAR);
	if (tm->tm_mon > 0)
		tm->tm_mon -= 1; /* RTC is 1-12, tm_mon is 0-11 */

	wkalrm->enabled = (readb(rtc->regbase + RCR1) & RCR1_AIE) ? 1 : 0;

	spin_unlock_irq(&rtc->lock);

	return 0;
}

static inline void sh_rtc_write_alarm_value(struct sh_rtc *rtc,
					    int value, int reg_off)
{
	/* < 0 for a value that is ignored */
	if (value < 0)
		writeb(0, rtc->regbase + reg_off);
	else
		writeb(bin2bcd(value) | AR_ENB, rtc->regbase + reg_off);
}

static int sh_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
{
	struct sh_rtc *rtc = dev_get_drvdata(dev);
	unsigned int rcr1;
	struct rtc_time *tm = &wkalrm->time;
	int mon;

	spin_lock_irq(&rtc->lock);

	/* disable alarm interrupt and clear the alarm flag */
	rcr1 = readb(rtc->regbase + RCR1);
	rcr1 &= ~(RCR1_AF | RCR1_AIE);
	writeb(rcr1, rtc->regbase + RCR1);

	/* set alarm time */
	sh_rtc_write_alarm_value(rtc, tm->tm_sec, RSECAR);
	sh_rtc_write_alarm_value(rtc, tm->tm_min, RMINAR);
	sh_rtc_write_alarm_value(rtc, tm->tm_hour, RHRAR);
	sh_rtc_write_alarm_value(rtc, tm->tm_wday, RWKAR);
	sh_rtc_write_alarm_value(rtc, tm->tm_mday, RDAYAR);
	mon = tm->tm_mon;
	if (mon >= 0)
		mon += 1;
	sh_rtc_write_alarm_value(rtc, mon, RMONAR);

	if (wkalrm->enabled) {
		rcr1 |= RCR1_AIE;
		writeb(rcr1, rtc->regbase + RCR1);
	}

	spin_unlock_irq(&rtc->lock);

	return 0;
}

static const struct rtc_class_ops sh_rtc_ops = {
	.read_time	= sh_rtc_read_time,
	.set_time	= sh_rtc_set_time,
	.read_alarm	= sh_rtc_read_alarm,
	.set_alarm	= sh_rtc_set_alarm,
	.proc		= sh_rtc_proc,
	.alarm_irq_enable = sh_rtc_alarm_irq_enable,
};
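
/*
 * These ops are invoked through the RTC class core; from userspace they
 * are typically exercised via the RTC_RD_TIME/RTC_SET_TIME and
 * RTC_WKALM_SET/RTC_WKALM_RD ioctls on /dev/rtcN (e.g. by hwclock).
 */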

static int __init sh_rtc_probe(struct platform_device *pdev)
{
	struct sh_rtc *rtc;
	struct resource *res;
	struct rtc_time r;
	char clk_name[6];
	int clk_id, ret;

	rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
	if (unlikely(!rtc))
		return -ENOMEM;

	spin_lock_init(&rtc->lock);

	/* get periodic/carry/alarm irqs */
	ret = platform_get_irq(pdev, 0);
	if (unlikely(ret <= 0)) {
		dev_err(&pdev->dev, "No IRQ resource\n");
		return -ENOENT;
	}

	rtc->periodic_irq = ret;
	rtc->carry_irq = platform_get_irq(pdev, 1);
	rtc->alarm_irq = platform_get_irq(pdev, 2);
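
	/*
	 * The carry (index 1) and alarm (index 2) IRQs are optional: parts
	 * with a single combined RTC vector return <= 0 for them here, in
	 * which case the shared handler is registered on the first IRQ
	 * further below instead of the three dedicated handlers.
	 */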

	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (!res)
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "No IO resource\n");
		return -ENOENT;
	}

	rtc->regsize = resource_size(res);

	rtc->res = devm_request_mem_region(&pdev->dev, res->start,
					rtc->regsize, pdev->name);
	if (unlikely(!rtc->res))
		return -EBUSY;

	rtc->regbase = devm_ioremap_nocache(&pdev->dev, rtc->res->start,
					rtc->regsize);
	if (unlikely(!rtc->regbase))
		return -EINVAL;

	if (!pdev->dev.of_node) {
		clk_id = pdev->id;
		/* With a single device, the clock id is still "rtc0" */
		if (clk_id < 0)
			clk_id = 0;

		snprintf(clk_name, sizeof(clk_name), "rtc%d", clk_id);
	} else
		snprintf(clk_name, sizeof(clk_name), "fck");

	rtc->clk = devm_clk_get(&pdev->dev, clk_name);
	if (IS_ERR(rtc->clk)) {
		/*
		 * No error handling for rtc->clk intentionally, not all
		 * platforms will have a unique clock for the RTC, and
		 * the clk API can handle the struct clk pointer being
		 * NULL.
		 */
		rtc->clk = NULL;
	}

	clk_enable(rtc->clk);

	rtc->capabilities = RTC_DEF_CAPABILITIES;

#ifdef CONFIG_SUPERH
	if (dev_get_platdata(&pdev->dev)) {
		struct sh_rtc_platform_info *pinfo =
			dev_get_platdata(&pdev->dev);

		/*
		 * Some CPUs have special capabilities in addition to the
		 * default set. Add those in here.
		 */
		rtc->capabilities |= pinfo->capabilities;
	}
#endif

	if (rtc->carry_irq <= 0) {
		/* register shared periodic/carry/alarm irq */
		ret = devm_request_irq(&pdev->dev, rtc->periodic_irq,
				sh_rtc_shared, 0, "sh-rtc", rtc);
		if (unlikely(ret)) {
			dev_err(&pdev->dev,
				"request IRQ failed with %d, IRQ %d\n", ret,
				rtc->periodic_irq);
			goto err_unmap;
		}
	} else {
		/* register periodic/carry/alarm irqs */
		ret = devm_request_irq(&pdev->dev, rtc->periodic_irq,
				sh_rtc_periodic, 0, "sh-rtc period", rtc);
		if (unlikely(ret)) {
			dev_err(&pdev->dev,
				"request period IRQ failed with %d, IRQ %d\n",
				ret, rtc->periodic_irq);
			goto err_unmap;
		}

		ret = devm_request_irq(&pdev->dev, rtc->carry_irq,
				sh_rtc_interrupt, 0, "sh-rtc carry", rtc);
		if (unlikely(ret)) {
			dev_err(&pdev->dev,
				"request carry IRQ failed with %d, IRQ %d\n",
				ret, rtc->carry_irq);
			goto err_unmap;
		}

		ret = devm_request_irq(&pdev->dev, rtc->alarm_irq,
				sh_rtc_alarm, 0, "sh-rtc alarm", rtc);
		if (unlikely(ret)) {
			dev_err(&pdev->dev,
				"request alarm IRQ failed with %d, IRQ %d\n",
				ret, rtc->alarm_irq);
			goto err_unmap;
		}
	}

	platform_set_drvdata(pdev, rtc);

	/* everything disabled by default */
	sh_rtc_irq_set_freq(&pdev->dev, 0);
	sh_rtc_irq_set_state(&pdev->dev, 0);
	sh_rtc_setaie(&pdev->dev, 0);
	sh_rtc_setcie(&pdev->dev, 0);

	rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, "sh",
					&sh_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc->rtc_dev)) {
		ret = PTR_ERR(rtc->rtc_dev);
		goto err_unmap;
	}

	rtc->rtc_dev->max_user_freq = 256;

	/* reset rtc to epoch 0 if time is invalid */
	if (rtc_read_time(rtc->rtc_dev, &r) < 0) {
		rtc_time_to_tm(0, &r);
		rtc_set_time(rtc->rtc_dev, &r);
	}

	device_init_wakeup(&pdev->dev, 1);
	return 0;

err_unmap:
	clk_disable(rtc->clk);

	return ret;
}

static int __exit sh_rtc_remove(struct platform_device *pdev)
{
	struct sh_rtc *rtc = platform_get_drvdata(pdev);

	sh_rtc_irq_set_state(&pdev->dev, 0);

	sh_rtc_setaie(&pdev->dev, 0);
	sh_rtc_setcie(&pdev->dev, 0);

	clk_disable(rtc->clk);

	return 0;
}

static void sh_rtc_set_irq_wake(struct device *dev, int enabled)
{
	struct sh_rtc *rtc = dev_get_drvdata(dev);

	irq_set_irq_wake(rtc->periodic_irq, enabled);

	if (rtc->carry_irq > 0) {
		irq_set_irq_wake(rtc->carry_irq, enabled);
		irq_set_irq_wake(rtc->alarm_irq, enabled);
	}
}

static int __maybe_unused sh_rtc_suspend(struct device *dev)
{
	if (device_may_wakeup(dev))
		sh_rtc_set_irq_wake(dev, 1);

	return 0;
}

static int __maybe_unused sh_rtc_resume(struct device *dev)
{
	if (device_may_wakeup(dev))
		sh_rtc_set_irq_wake(dev, 0);

	return 0;
}

static SIMPLE_DEV_PM_OPS(sh_rtc_pm_ops, sh_rtc_suspend, sh_rtc_resume);

static const struct of_device_id sh_rtc_of_match[] = {
	{ .compatible = "renesas,sh-rtc", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sh_rtc_of_match);
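
/*
 * A rough sketch of a device tree node that would match the table above;
 * the unit address, register size, interrupt specifiers and clock phandle
 * are purely illustrative and must come from the SoC documentation:
 *
 *	rtc: rtc@fcff1000 {
 *		compatible = "renesas,sh-rtc";
 *		reg = <0xfcff1000 0x2e>;
 *		interrupts = <...>, <...>, <...>;
 *		clocks = <&rtc_clk>;
 *		clock-names = "fck";
 *	};
 */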

static struct platform_driver sh_rtc_platform_driver = {
	.driver		= {
		.name		= DRV_NAME,
		.pm		= &sh_rtc_pm_ops,
		.of_match_table = sh_rtc_of_match,
	},
	.remove		= __exit_p(sh_rtc_remove),
};

module_platform_driver_probe(sh_rtc_platform_driver, sh_rtc_probe);

MODULE_DESCRIPTION("SuperH on-chip RTC driver");
MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, "
	      "Jamie Lenehan <lenehan@twibble.org>, "
	      "Angelo Castello <angelo.castello@st.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);