blob: 9aa855a96c4ae2e1149baafa56f1d0d6e1c8717e [file] [log] [blame]
Thomas Gleixner3e456102019-06-01 10:08:50 +02001// SPDX-License-Identifier: GPL-2.0-only
Matthew Wilcox64ac24e2008-03-07 21:55:58 -05002/*
3 * Copyright (c) 2008 Intel Corporation
4 * Author: Matthew Wilcox <willy@linux.intel.com>
5 *
Matthew Wilcox714493c2008-04-11 15:23:52 -04006 * This file implements counting semaphores.
7 * A counting semaphore may be acquired 'n' times before sleeping.
8 * See mutex.c for single-acquisition sleeping locks which enforce
9 * rules which allow code to be debugged more easily.
10 */
11
12/*
13 * Some notes on the implementation:
14 *
15 * The spinlock controls access to the other members of the semaphore.
16 * down_trylock() and up() can be called from interrupt context, so we
17 * have to disable interrupts when taking the lock. It turns out various
18 * parts of the kernel expect to be able to use down() on a semaphore in
19 * interrupt context when they know it will succeed, so we have to use
20 * irqsave variants for down(), down_interruptible() and down_killable()
21 * too.
22 *
23 * The ->count variable represents how many more tasks can acquire this
24 * semaphore. If it's zero, there may be tasks waiting on the wait_list.
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050025 */
26
27#include <linux/compiler.h>
28#include <linux/kernel.h>
Paul Gortmaker9984de12011-05-23 14:51:41 -040029#include <linux/export.h>
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050030#include <linux/sched.h>
Ingo Molnarb17b0152017-02-08 18:51:35 +010031#include <linux/sched/debug.h>
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050032#include <linux/semaphore.h>
33#include <linux/spinlock.h>
Ingo Molnar74f4e362008-05-12 21:21:15 +020034#include <linux/ftrace.h>
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050035
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050036static noinline void __down(struct semaphore *sem);
37static noinline int __down_interruptible(struct semaphore *sem);
Matthew Wilcoxf06d9682008-03-14 13:19:33 -040038static noinline int __down_killable(struct semaphore *sem);
Mark Rustad31542762014-09-03 03:17:24 -070039static noinline int __down_timeout(struct semaphore *sem, long timeout);
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050040static noinline void __up(struct semaphore *sem);
41
Matthew Wilcox714493c2008-04-11 15:23:52 -040042/**
43 * down - acquire the semaphore
44 * @sem: the semaphore to be acquired
45 *
46 * Acquires the semaphore. If no more tasks are allowed to acquire the
47 * semaphore, calling this function will put the task to sleep until the
48 * semaphore is released.
49 *
50 * Use of this function is deprecated, please use down_interruptible() or
51 * down_killable() instead.
52 */
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050053void down(struct semaphore *sem)
54{
55 unsigned long flags;
56
Thomas Gleixner8292c9e12010-02-24 09:50:22 +010057 raw_spin_lock_irqsave(&sem->lock, flags);
Linus Torvalds00b41ec2008-05-10 20:43:22 -070058 if (likely(sem->count > 0))
59 sem->count--;
60 else
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050061 __down(sem);
Thomas Gleixner8292c9e12010-02-24 09:50:22 +010062 raw_spin_unlock_irqrestore(&sem->lock, flags);
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050063}
64EXPORT_SYMBOL(down);
65
Matthew Wilcox714493c2008-04-11 15:23:52 -040066/**
67 * down_interruptible - acquire the semaphore unless interrupted
68 * @sem: the semaphore to be acquired
69 *
70 * Attempts to acquire the semaphore. If no more tasks are allowed to
71 * acquire the semaphore, calling this function will put the task to sleep.
72 * If the sleep is interrupted by a signal, this function will return -EINTR.
73 * If the semaphore is successfully acquired, this function returns 0.
74 */
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050075int down_interruptible(struct semaphore *sem)
76{
77 unsigned long flags;
78 int result = 0;
79
Thomas Gleixner8292c9e12010-02-24 09:50:22 +010080 raw_spin_lock_irqsave(&sem->lock, flags);
Linus Torvalds00b41ec2008-05-10 20:43:22 -070081 if (likely(sem->count > 0))
Ingo Molnarbf726ea2008-05-08 11:53:48 +020082 sem->count--;
Linus Torvalds00b41ec2008-05-10 20:43:22 -070083 else
84 result = __down_interruptible(sem);
Thomas Gleixner8292c9e12010-02-24 09:50:22 +010085 raw_spin_unlock_irqrestore(&sem->lock, flags);
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050086
87 return result;
88}
89EXPORT_SYMBOL(down_interruptible);
90
Matthew Wilcox714493c2008-04-11 15:23:52 -040091/**
92 * down_killable - acquire the semaphore unless killed
93 * @sem: the semaphore to be acquired
94 *
95 * Attempts to acquire the semaphore. If no more tasks are allowed to
96 * acquire the semaphore, calling this function will put the task to sleep.
97 * If the sleep is interrupted by a fatal signal, this function will return
98 * -EINTR. If the semaphore is successfully acquired, this function returns
99 * 0.
100 */
Matthew Wilcoxf06d9682008-03-14 13:19:33 -0400101int down_killable(struct semaphore *sem)
102{
103 unsigned long flags;
104 int result = 0;
105
Thomas Gleixner8292c9e12010-02-24 09:50:22 +0100106 raw_spin_lock_irqsave(&sem->lock, flags);
Linus Torvalds00b41ec2008-05-10 20:43:22 -0700107 if (likely(sem->count > 0))
Ingo Molnarbf726ea2008-05-08 11:53:48 +0200108 sem->count--;
Linus Torvalds00b41ec2008-05-10 20:43:22 -0700109 else
110 result = __down_killable(sem);
Thomas Gleixner8292c9e12010-02-24 09:50:22 +0100111 raw_spin_unlock_irqrestore(&sem->lock, flags);
Matthew Wilcoxf06d9682008-03-14 13:19:33 -0400112
113 return result;
114}
115EXPORT_SYMBOL(down_killable);
116
/**
 * down_trylock - try to acquire the semaphore, without waiting
 * @sem: the semaphore to be acquired
 *
 * Try to acquire the semaphore atomically.  Returns 0 if the semaphore has
 * been acquired successfully or 1 if it cannot be acquired.
 *
 * NOTE: This return value is inverted from both spin_trylock and
 * mutex_trylock!  Be careful about this when converting code.
 *
 * Unlike mutex_trylock, this function can be used from interrupt context,
 * and the semaphore can be released by any task or interrupt.
 */
int down_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int count;

	raw_spin_lock_irqsave(&sem->lock, flags);
	/* Tentatively take a slot: count goes negative iff none is free. */
	count = sem->count - 1;
	if (likely(count >= 0))
		sem->count = count;	/* success: commit the decrement */
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	/* Inverted convention: 0 == acquired, 1 == would have to wait. */
	return (count < 0);
}
EXPORT_SYMBOL(down_trylock);
144
Matthew Wilcox714493c2008-04-11 15:23:52 -0400145/**
146 * down_timeout - acquire the semaphore within a specified time
147 * @sem: the semaphore to be acquired
Mark Rustad31542762014-09-03 03:17:24 -0700148 * @timeout: how long to wait before failing
Matthew Wilcox714493c2008-04-11 15:23:52 -0400149 *
150 * Attempts to acquire the semaphore. If no more tasks are allowed to
151 * acquire the semaphore, calling this function will put the task to sleep.
152 * If the semaphore is not released within the specified number of jiffies,
153 * this function returns -ETIME. It returns 0 if the semaphore was acquired.
154 */
Mark Rustad31542762014-09-03 03:17:24 -0700155int down_timeout(struct semaphore *sem, long timeout)
Matthew Wilcoxf1241c82008-03-14 13:43:13 -0400156{
157 unsigned long flags;
158 int result = 0;
159
Thomas Gleixner8292c9e12010-02-24 09:50:22 +0100160 raw_spin_lock_irqsave(&sem->lock, flags);
Linus Torvalds00b41ec2008-05-10 20:43:22 -0700161 if (likely(sem->count > 0))
Ingo Molnarbf726ea2008-05-08 11:53:48 +0200162 sem->count--;
Linus Torvalds00b41ec2008-05-10 20:43:22 -0700163 else
Mark Rustad31542762014-09-03 03:17:24 -0700164 result = __down_timeout(sem, timeout);
Thomas Gleixner8292c9e12010-02-24 09:50:22 +0100165 raw_spin_unlock_irqrestore(&sem->lock, flags);
Matthew Wilcoxf1241c82008-03-14 13:43:13 -0400166
167 return result;
168}
169EXPORT_SYMBOL(down_timeout);
170
Matthew Wilcox714493c2008-04-11 15:23:52 -0400171/**
172 * up - release the semaphore
173 * @sem: the semaphore to release
174 *
175 * Release the semaphore. Unlike mutexes, up() may be called from any
176 * context and even by tasks which have never called down().
177 */
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500178void up(struct semaphore *sem)
179{
180 unsigned long flags;
181
Thomas Gleixner8292c9e12010-02-24 09:50:22 +0100182 raw_spin_lock_irqsave(&sem->lock, flags);
Linus Torvalds00b41ec2008-05-10 20:43:22 -0700183 if (likely(list_empty(&sem->wait_list)))
184 sem->count++;
185 else
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500186 __up(sem);
Thomas Gleixner8292c9e12010-02-24 09:50:22 +0100187 raw_spin_unlock_irqrestore(&sem->lock, flags);
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500188}
189EXPORT_SYMBOL(up);
190
191/* Functions for the contended case */
192
/*
 * A semaphore_waiter lives on the stack of a task sleeping in
 * __down_common() and is linked onto sem->wait_list for the duration of
 * the sleep.  All fields are protected by sem->lock.
 */
struct semaphore_waiter {
	struct list_head list;		/* entry on sem->wait_list */
	struct task_struct *task;	/* the sleeping task, woken by __up() */
	bool up;			/* set true by __up() when the semaphore
					 * has been handed to this waiter */
};
198
/*
 * Slow path shared by all down*() variants: queue an on-stack waiter and
 * sleep until __up() hands us the semaphore, a signal arrives, or the
 * timeout expires.  Called and returns with sem->lock held (irqs disabled).
 *
 * Because this function is inlined, the 'state' parameter will be
 * constant, and thus optimised away by the compiler.  Likewise the
 * 'timeout' parameter for the cases without timeouts.
 */
static inline int __sched __down_common(struct semaphore *sem, long state,
								long timeout)
{
	struct semaphore_waiter waiter;

	/* Queue at the tail for FIFO fairness; safe to publish before the
	 * other fields are set because sem->lock is held throughout. */
	list_add_tail(&waiter.list, &sem->wait_list);
	waiter.task = current;
	waiter.up = false;

	for (;;) {
		if (signal_pending_state(state, current))
			goto interrupted;
		if (unlikely(timeout <= 0))
			goto timed_out;
		/* Must set state before dropping the lock so a concurrent
		 * wake_up_process() is not lost. */
		__set_current_state(state);
		raw_spin_unlock_irq(&sem->lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->lock);
		/* __up() dequeued us and granted the semaphore. */
		if (waiter.up)
			return 0;
	}

 timed_out:
	list_del(&waiter.list);
	return -ETIME;

 interrupted:
	list_del(&waiter.list);
	return -EINTR;
}
234
/* Contended down(): sleep uninterruptibly, forever if need be. */
static noinline void __sched __down(struct semaphore *sem)
{
	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
239
/* Contended down_interruptible(): any signal aborts the wait with -EINTR. */
static noinline int __sched __down_interruptible(struct semaphore *sem)
{
	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
244
/* Contended down_killable(): only fatal signals abort the wait (-EINTR). */
static noinline int __sched __down_killable(struct semaphore *sem)
{
	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}
249
/* Contended down_timeout(): uninterruptible, but bounded by @timeout jiffies
 * (-ETIME on expiry). */
static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
{
	return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
}
254
/*
 * Contended up(): hand the semaphore directly to the longest-waiting task
 * instead of bumping ->count, so no other task can steal the slot before
 * the waiter runs.  Called with sem->lock held.
 */
static noinline void __sched __up(struct semaphore *sem)
{
	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
						struct semaphore_waiter, list);
	list_del(&waiter->list);
	/* Mark the grant before waking; the waiter checks ->up under sem->lock. */
	waiter->up = true;
	wake_up_process(waiter->task);
}