// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This file implements counting semaphores.
 * A counting semaphore may be acquired 'n' times before sleeping.
 * See mutex.c for single-acquisition sleeping locks which enforce
 * rules which allow code to be debugged more easily.
 */

/*
 * Some notes on the implementation:
 *
 * The spinlock controls access to the other members of the semaphore.
 * down_trylock() and up() can be called from interrupt context, so we
 * have to disable interrupts when taking the lock.  It turns out various
 * parts of the kernel expect to be able to use down() on a semaphore in
 * interrupt context when they know it will succeed, so we have to use
 * irqsave variants for down(), down_interruptible() and down_killable()
 * too.
 *
 * The ->count variable represents how many more tasks can acquire this
 * semaphore.  If it's zero, there may be tasks waiting on the wait_list.
 */
26
27#include <linux/compiler.h>
28#include <linux/kernel.h>
Paul Gortmaker9984de12011-05-23 14:51:41 -040029#include <linux/export.h>
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050030#include <linux/sched.h>
Ingo Molnarb17b0152017-02-08 18:51:35 +010031#include <linux/sched/debug.h>
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050032#include <linux/semaphore.h>
33#include <linux/spinlock.h>
Ingo Molnar74f4e362008-05-12 21:21:15 +020034#include <linux/ftrace.h>
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050035
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050036static noinline void __down(struct semaphore *sem);
37static noinline int __down_interruptible(struct semaphore *sem);
Matthew Wilcoxf06d9682008-03-14 13:19:33 -040038static noinline int __down_killable(struct semaphore *sem);
Mark Rustad31542762014-09-03 03:17:24 -070039static noinline int __down_timeout(struct semaphore *sem, long timeout);
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050040static noinline void __up(struct semaphore *sem);
41
Matthew Wilcox714493c2008-04-11 15:23:52 -040042/**
43 * down - acquire the semaphore
44 * @sem: the semaphore to be acquired
45 *
46 * Acquires the semaphore. If no more tasks are allowed to acquire the
47 * semaphore, calling this function will put the task to sleep until the
48 * semaphore is released.
49 *
50 * Use of this function is deprecated, please use down_interruptible() or
51 * down_killable() instead.
52 */
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050053void down(struct semaphore *sem)
54{
55 unsigned long flags;
56
Xiaoming Ni99409b92021-08-09 10:12:15 +080057 might_sleep();
Thomas Gleixner8292c9e12010-02-24 09:50:22 +010058 raw_spin_lock_irqsave(&sem->lock, flags);
Linus Torvalds00b41ec2008-05-10 20:43:22 -070059 if (likely(sem->count > 0))
60 sem->count--;
61 else
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050062 __down(sem);
Thomas Gleixner8292c9e12010-02-24 09:50:22 +010063 raw_spin_unlock_irqrestore(&sem->lock, flags);
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050064}
65EXPORT_SYMBOL(down);
66
Matthew Wilcox714493c2008-04-11 15:23:52 -040067/**
68 * down_interruptible - acquire the semaphore unless interrupted
69 * @sem: the semaphore to be acquired
70 *
71 * Attempts to acquire the semaphore. If no more tasks are allowed to
72 * acquire the semaphore, calling this function will put the task to sleep.
73 * If the sleep is interrupted by a signal, this function will return -EINTR.
74 * If the semaphore is successfully acquired, this function returns 0.
75 */
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050076int down_interruptible(struct semaphore *sem)
77{
78 unsigned long flags;
79 int result = 0;
80
Xiaoming Ni99409b92021-08-09 10:12:15 +080081 might_sleep();
Thomas Gleixner8292c9e12010-02-24 09:50:22 +010082 raw_spin_lock_irqsave(&sem->lock, flags);
Linus Torvalds00b41ec2008-05-10 20:43:22 -070083 if (likely(sem->count > 0))
Ingo Molnarbf726ea2008-05-08 11:53:48 +020084 sem->count--;
Linus Torvalds00b41ec2008-05-10 20:43:22 -070085 else
86 result = __down_interruptible(sem);
Thomas Gleixner8292c9e12010-02-24 09:50:22 +010087 raw_spin_unlock_irqrestore(&sem->lock, flags);
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050088
89 return result;
90}
91EXPORT_SYMBOL(down_interruptible);
92
Matthew Wilcox714493c2008-04-11 15:23:52 -040093/**
94 * down_killable - acquire the semaphore unless killed
95 * @sem: the semaphore to be acquired
96 *
97 * Attempts to acquire the semaphore. If no more tasks are allowed to
98 * acquire the semaphore, calling this function will put the task to sleep.
99 * If the sleep is interrupted by a fatal signal, this function will return
100 * -EINTR. If the semaphore is successfully acquired, this function returns
101 * 0.
102 */
Matthew Wilcoxf06d9682008-03-14 13:19:33 -0400103int down_killable(struct semaphore *sem)
104{
105 unsigned long flags;
106 int result = 0;
107
Xiaoming Ni99409b92021-08-09 10:12:15 +0800108 might_sleep();
Thomas Gleixner8292c9e12010-02-24 09:50:22 +0100109 raw_spin_lock_irqsave(&sem->lock, flags);
Linus Torvalds00b41ec2008-05-10 20:43:22 -0700110 if (likely(sem->count > 0))
Ingo Molnarbf726ea2008-05-08 11:53:48 +0200111 sem->count--;
Linus Torvalds00b41ec2008-05-10 20:43:22 -0700112 else
113 result = __down_killable(sem);
Thomas Gleixner8292c9e12010-02-24 09:50:22 +0100114 raw_spin_unlock_irqrestore(&sem->lock, flags);
Matthew Wilcoxf06d9682008-03-14 13:19:33 -0400115
116 return result;
117}
118EXPORT_SYMBOL(down_killable);
119
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500120/**
121 * down_trylock - try to acquire the semaphore, without waiting
122 * @sem: the semaphore to be acquired
123 *
Lucia Rosculetef7232f02012-03-03 16:18:47 +0200124 * Try to acquire the semaphore atomically. Returns 0 if the semaphore has
Randy Dunlapc034f482021-02-25 17:21:10 -0800125 * been acquired successfully or 1 if it cannot be acquired.
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500126 *
127 * NOTE: This return value is inverted from both spin_trylock and
128 * mutex_trylock! Be careful about this when converting code.
129 *
130 * Unlike mutex_trylock, this function can be used from interrupt context,
131 * and the semaphore can be released by any task or interrupt.
132 */
133int down_trylock(struct semaphore *sem)
134{
135 unsigned long flags;
136 int count;
137
Thomas Gleixner8292c9e12010-02-24 09:50:22 +0100138 raw_spin_lock_irqsave(&sem->lock, flags);
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500139 count = sem->count - 1;
140 if (likely(count >= 0))
141 sem->count = count;
Thomas Gleixner8292c9e12010-02-24 09:50:22 +0100142 raw_spin_unlock_irqrestore(&sem->lock, flags);
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500143
144 return (count < 0);
145}
146EXPORT_SYMBOL(down_trylock);
147
Matthew Wilcox714493c2008-04-11 15:23:52 -0400148/**
149 * down_timeout - acquire the semaphore within a specified time
150 * @sem: the semaphore to be acquired
Mark Rustad31542762014-09-03 03:17:24 -0700151 * @timeout: how long to wait before failing
Matthew Wilcox714493c2008-04-11 15:23:52 -0400152 *
153 * Attempts to acquire the semaphore. If no more tasks are allowed to
154 * acquire the semaphore, calling this function will put the task to sleep.
155 * If the semaphore is not released within the specified number of jiffies,
156 * this function returns -ETIME. It returns 0 if the semaphore was acquired.
157 */
Mark Rustad31542762014-09-03 03:17:24 -0700158int down_timeout(struct semaphore *sem, long timeout)
Matthew Wilcoxf1241c82008-03-14 13:43:13 -0400159{
160 unsigned long flags;
161 int result = 0;
162
Xiaoming Ni99409b92021-08-09 10:12:15 +0800163 might_sleep();
Thomas Gleixner8292c9e12010-02-24 09:50:22 +0100164 raw_spin_lock_irqsave(&sem->lock, flags);
Linus Torvalds00b41ec2008-05-10 20:43:22 -0700165 if (likely(sem->count > 0))
Ingo Molnarbf726ea2008-05-08 11:53:48 +0200166 sem->count--;
Linus Torvalds00b41ec2008-05-10 20:43:22 -0700167 else
Mark Rustad31542762014-09-03 03:17:24 -0700168 result = __down_timeout(sem, timeout);
Thomas Gleixner8292c9e12010-02-24 09:50:22 +0100169 raw_spin_unlock_irqrestore(&sem->lock, flags);
Matthew Wilcoxf1241c82008-03-14 13:43:13 -0400170
171 return result;
172}
173EXPORT_SYMBOL(down_timeout);
174
Matthew Wilcox714493c2008-04-11 15:23:52 -0400175/**
176 * up - release the semaphore
177 * @sem: the semaphore to release
178 *
179 * Release the semaphore. Unlike mutexes, up() may be called from any
180 * context and even by tasks which have never called down().
181 */
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500182void up(struct semaphore *sem)
183{
184 unsigned long flags;
185
Thomas Gleixner8292c9e12010-02-24 09:50:22 +0100186 raw_spin_lock_irqsave(&sem->lock, flags);
Linus Torvalds00b41ec2008-05-10 20:43:22 -0700187 if (likely(list_empty(&sem->wait_list)))
188 sem->count++;
189 else
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500190 __up(sem);
Thomas Gleixner8292c9e12010-02-24 09:50:22 +0100191 raw_spin_unlock_irqrestore(&sem->lock, flags);
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500192}
193EXPORT_SYMBOL(up);
194
/* Functions for the contended case */

/*
 * A waiter lives on the stack of the sleeping task for the duration of
 * __down_common() and is linked into sem->wait_list while it waits.
 */
struct semaphore_waiter {
	struct list_head list;		/* entry in sem->wait_list */
	struct task_struct *task;	/* task to wake in __up() */
	bool up;			/* set by __up() when the semaphore
					 * was handed directly to this waiter */
};
202
/*
 * Because this function is inlined, the 'state' parameter will be
 * constant, and thus optimised away by the compiler. Likewise the
 * 'timeout' parameter for the cases without timeouts.
 *
 * Called with sem->lock held; returns with it held.  The waiter is
 * allocated on this task's stack, which is safe because it is unlinked
 * from sem->wait_list on every exit path: by __up() on success, or by
 * the list_del() calls below on timeout/signal.
 *
 * Returns 0 on success, -ETIME on timeout, -EINTR when interrupted by
 * a signal permitted by 'state'.
 */
static inline int __sched __down_common(struct semaphore *sem, long state,
								long timeout)
{
	struct semaphore_waiter waiter;

	list_add_tail(&waiter.list, &sem->wait_list);
	waiter.task = current;
	waiter.up = false;

	for (;;) {
		/* signal_pending_state() honours 'state' (e.g. KILLABLE). */
		if (signal_pending_state(state, current))
			goto interrupted;
		if (unlikely(timeout <= 0))
			goto timed_out;
		__set_current_state(state);
		/* Drop the lock while sleeping; up() may run meanwhile. */
		raw_spin_unlock_irq(&sem->lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->lock);
		/*
		 * __up() already removed us from the wait list before
		 * setting waiter.up, so no list_del() is needed here.
		 */
		if (waiter.up)
			return 0;
	}

 timed_out:
	list_del(&waiter.list);
	return -ETIME;

 interrupted:
	list_del(&waiter.list);
	return -EINTR;
}
238
239static noinline void __sched __down(struct semaphore *sem)
240{
Matthew Wilcoxf1241c82008-03-14 13:43:13 -0400241 __down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500242}
243
/* Contended down_interruptible(): any signal aborts the wait. */
static noinline int __sched __down_interruptible(struct semaphore *sem)
{
	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
248
/* Contended down_killable(): only fatal signals abort the wait. */
static noinline int __sched __down_killable(struct semaphore *sem)
{
	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}
253
/* Contended down_timeout(): uninterruptible sleep bounded by 'timeout'. */
static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
{
	return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
}
258
/*
 * Hand the semaphore directly to the longest-waiting task.  Called with
 * sem->lock held and a non-empty wait_list.  The count is deliberately
 * not incremented: ownership passes straight to the waiter, which
 * returns 0 from __down_common() once it observes waiter->up.  The
 * waiter is unlinked here, so the woken task must not list_del() again.
 */
static noinline void __sched __up(struct semaphore *sem)
{
	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
						struct semaphore_waiter, list);
	list_del(&waiter->list);
	waiter->up = true;
	wake_up_process(waiter->task);
}