/*
 * Copyright (c) 2008 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * Distributed under the terms of the GNU GPL, version 2
 *
 * This file implements counting semaphores.
 * A counting semaphore may be acquired 'n' times before sleeping.
 * See mutex.c for single-acquisition sleeping locks which enforce
 * rules which allow code to be debugged more easily.
 */
12
/*
 * Some notes on the implementation:
 *
 * The spinlock controls access to the other members of the semaphore.
 * down_trylock() and up() can be called from interrupt context, so we
 * have to disable interrupts when taking the lock.  It turns out various
 * parts of the kernel expect to be able to use down() on a semaphore in
 * interrupt context when they know it will succeed, so we have to use
 * irqsave variants for down(), down_interruptible() and down_killable()
 * too.
 *
 * The ->count variable represents how many more tasks can acquire this
 * semaphore.  If it's zero, there may be tasks waiting on the wait_list.
 */
27
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/ftrace.h>
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050037static noinline void __down(struct semaphore *sem);
38static noinline int __down_interruptible(struct semaphore *sem);
Matthew Wilcoxf06d9682008-03-14 13:19:33 -040039static noinline int __down_killable(struct semaphore *sem);
Mark Rustad31542762014-09-03 03:17:24 -070040static noinline int __down_timeout(struct semaphore *sem, long timeout);
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050041static noinline void __up(struct semaphore *sem);
42
Matthew Wilcox714493c2008-04-11 15:23:52 -040043/**
44 * down - acquire the semaphore
45 * @sem: the semaphore to be acquired
46 *
47 * Acquires the semaphore. If no more tasks are allowed to acquire the
48 * semaphore, calling this function will put the task to sleep until the
49 * semaphore is released.
50 *
51 * Use of this function is deprecated, please use down_interruptible() or
52 * down_killable() instead.
53 */
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050054void down(struct semaphore *sem)
55{
56 unsigned long flags;
57
Thomas Gleixner8292c9e12010-02-24 09:50:22 +010058 raw_spin_lock_irqsave(&sem->lock, flags);
Linus Torvalds00b41ec2008-05-10 20:43:22 -070059 if (likely(sem->count > 0))
60 sem->count--;
61 else
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050062 __down(sem);
Thomas Gleixner8292c9e12010-02-24 09:50:22 +010063 raw_spin_unlock_irqrestore(&sem->lock, flags);
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050064}
65EXPORT_SYMBOL(down);
66
Matthew Wilcox714493c2008-04-11 15:23:52 -040067/**
68 * down_interruptible - acquire the semaphore unless interrupted
69 * @sem: the semaphore to be acquired
70 *
71 * Attempts to acquire the semaphore. If no more tasks are allowed to
72 * acquire the semaphore, calling this function will put the task to sleep.
73 * If the sleep is interrupted by a signal, this function will return -EINTR.
74 * If the semaphore is successfully acquired, this function returns 0.
75 */
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050076int down_interruptible(struct semaphore *sem)
77{
78 unsigned long flags;
79 int result = 0;
80
Thomas Gleixner8292c9e12010-02-24 09:50:22 +010081 raw_spin_lock_irqsave(&sem->lock, flags);
Linus Torvalds00b41ec2008-05-10 20:43:22 -070082 if (likely(sem->count > 0))
Ingo Molnarbf726ea2008-05-08 11:53:48 +020083 sem->count--;
Linus Torvalds00b41ec2008-05-10 20:43:22 -070084 else
85 result = __down_interruptible(sem);
Thomas Gleixner8292c9e12010-02-24 09:50:22 +010086 raw_spin_unlock_irqrestore(&sem->lock, flags);
Matthew Wilcox64ac24e2008-03-07 21:55:58 -050087
88 return result;
89}
90EXPORT_SYMBOL(down_interruptible);
91
Matthew Wilcox714493c2008-04-11 15:23:52 -040092/**
93 * down_killable - acquire the semaphore unless killed
94 * @sem: the semaphore to be acquired
95 *
96 * Attempts to acquire the semaphore. If no more tasks are allowed to
97 * acquire the semaphore, calling this function will put the task to sleep.
98 * If the sleep is interrupted by a fatal signal, this function will return
99 * -EINTR. If the semaphore is successfully acquired, this function returns
100 * 0.
101 */
Matthew Wilcoxf06d9682008-03-14 13:19:33 -0400102int down_killable(struct semaphore *sem)
103{
104 unsigned long flags;
105 int result = 0;
106
Thomas Gleixner8292c9e12010-02-24 09:50:22 +0100107 raw_spin_lock_irqsave(&sem->lock, flags);
Linus Torvalds00b41ec2008-05-10 20:43:22 -0700108 if (likely(sem->count > 0))
Ingo Molnarbf726ea2008-05-08 11:53:48 +0200109 sem->count--;
Linus Torvalds00b41ec2008-05-10 20:43:22 -0700110 else
111 result = __down_killable(sem);
Thomas Gleixner8292c9e12010-02-24 09:50:22 +0100112 raw_spin_unlock_irqrestore(&sem->lock, flags);
Matthew Wilcoxf06d9682008-03-14 13:19:33 -0400113
114 return result;
115}
116EXPORT_SYMBOL(down_killable);
117
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500118/**
119 * down_trylock - try to acquire the semaphore, without waiting
120 * @sem: the semaphore to be acquired
121 *
Lucia Rosculetef7232f02012-03-03 16:18:47 +0200122 * Try to acquire the semaphore atomically. Returns 0 if the semaphore has
Matthew Wilcox714493c2008-04-11 15:23:52 -0400123 * been acquired successfully or 1 if it it cannot be acquired.
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500124 *
125 * NOTE: This return value is inverted from both spin_trylock and
126 * mutex_trylock! Be careful about this when converting code.
127 *
128 * Unlike mutex_trylock, this function can be used from interrupt context,
129 * and the semaphore can be released by any task or interrupt.
130 */
131int down_trylock(struct semaphore *sem)
132{
133 unsigned long flags;
134 int count;
135
Thomas Gleixner8292c9e12010-02-24 09:50:22 +0100136 raw_spin_lock_irqsave(&sem->lock, flags);
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500137 count = sem->count - 1;
138 if (likely(count >= 0))
139 sem->count = count;
Thomas Gleixner8292c9e12010-02-24 09:50:22 +0100140 raw_spin_unlock_irqrestore(&sem->lock, flags);
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500141
142 return (count < 0);
143}
144EXPORT_SYMBOL(down_trylock);
145
Matthew Wilcox714493c2008-04-11 15:23:52 -0400146/**
147 * down_timeout - acquire the semaphore within a specified time
148 * @sem: the semaphore to be acquired
Mark Rustad31542762014-09-03 03:17:24 -0700149 * @timeout: how long to wait before failing
Matthew Wilcox714493c2008-04-11 15:23:52 -0400150 *
151 * Attempts to acquire the semaphore. If no more tasks are allowed to
152 * acquire the semaphore, calling this function will put the task to sleep.
153 * If the semaphore is not released within the specified number of jiffies,
154 * this function returns -ETIME. It returns 0 if the semaphore was acquired.
155 */
Mark Rustad31542762014-09-03 03:17:24 -0700156int down_timeout(struct semaphore *sem, long timeout)
Matthew Wilcoxf1241c82008-03-14 13:43:13 -0400157{
158 unsigned long flags;
159 int result = 0;
160
Thomas Gleixner8292c9e12010-02-24 09:50:22 +0100161 raw_spin_lock_irqsave(&sem->lock, flags);
Linus Torvalds00b41ec2008-05-10 20:43:22 -0700162 if (likely(sem->count > 0))
Ingo Molnarbf726ea2008-05-08 11:53:48 +0200163 sem->count--;
Linus Torvalds00b41ec2008-05-10 20:43:22 -0700164 else
Mark Rustad31542762014-09-03 03:17:24 -0700165 result = __down_timeout(sem, timeout);
Thomas Gleixner8292c9e12010-02-24 09:50:22 +0100166 raw_spin_unlock_irqrestore(&sem->lock, flags);
Matthew Wilcoxf1241c82008-03-14 13:43:13 -0400167
168 return result;
169}
170EXPORT_SYMBOL(down_timeout);
171
Matthew Wilcox714493c2008-04-11 15:23:52 -0400172/**
173 * up - release the semaphore
174 * @sem: the semaphore to release
175 *
176 * Release the semaphore. Unlike mutexes, up() may be called from any
177 * context and even by tasks which have never called down().
178 */
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500179void up(struct semaphore *sem)
180{
181 unsigned long flags;
182
Thomas Gleixner8292c9e12010-02-24 09:50:22 +0100183 raw_spin_lock_irqsave(&sem->lock, flags);
Linus Torvalds00b41ec2008-05-10 20:43:22 -0700184 if (likely(list_empty(&sem->wait_list)))
185 sem->count++;
186 else
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500187 __up(sem);
Thomas Gleixner8292c9e12010-02-24 09:50:22 +0100188 raw_spin_unlock_irqrestore(&sem->lock, flags);
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500189}
190EXPORT_SYMBOL(up);
191
192/* Functions for the contended case */
193
194struct semaphore_waiter {
195 struct list_head list;
196 struct task_struct *task;
liguang06a6ea32013-04-30 15:28:33 -0700197 bool up;
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500198};
199
200/*
Matthew Wilcoxf1241c82008-03-14 13:43:13 -0400201 * Because this function is inlined, the 'state' parameter will be
202 * constant, and thus optimised away by the compiler. Likewise the
203 * 'timeout' parameter for the cases without timeouts.
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500204 */
Matthew Wilcoxf1241c82008-03-14 13:43:13 -0400205static inline int __sched __down_common(struct semaphore *sem, long state,
206 long timeout)
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500207{
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500208 struct semaphore_waiter waiter;
209
Ingo Molnarbf726ea2008-05-08 11:53:48 +0200210 list_add_tail(&waiter.list, &sem->wait_list);
Davidlohr Buesod269a8b2017-01-03 13:43:13 -0800211 waiter.task = current;
liguang06a6ea32013-04-30 15:28:33 -0700212 waiter.up = false;
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500213
214 for (;;) {
Davidlohr Buesod269a8b2017-01-03 13:43:13 -0800215 if (signal_pending_state(state, current))
Linus Torvalds00b41ec2008-05-10 20:43:22 -0700216 goto interrupted;
liguangc74f66c2013-04-30 15:28:32 -0700217 if (unlikely(timeout <= 0))
Linus Torvalds00b41ec2008-05-10 20:43:22 -0700218 goto timed_out;
Davidlohr Bueso642fa442017-01-03 13:43:14 -0800219 __set_current_state(state);
Thomas Gleixner8292c9e12010-02-24 09:50:22 +0100220 raw_spin_unlock_irq(&sem->lock);
Matthew Wilcoxf1241c82008-03-14 13:43:13 -0400221 timeout = schedule_timeout(timeout);
Thomas Gleixner8292c9e12010-02-24 09:50:22 +0100222 raw_spin_lock_irq(&sem->lock);
Linus Torvalds00b41ec2008-05-10 20:43:22 -0700223 if (waiter.up)
224 return 0;
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500225 }
226
Linus Torvalds00b41ec2008-05-10 20:43:22 -0700227 timed_out:
Matthew Wilcoxf1241c82008-03-14 13:43:13 -0400228 list_del(&waiter.list);
Linus Torvalds00b41ec2008-05-10 20:43:22 -0700229 return -ETIME;
230
231 interrupted:
232 list_del(&waiter.list);
233 return -EINTR;
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500234}
235
236static noinline void __sched __down(struct semaphore *sem)
237{
Matthew Wilcoxf1241c82008-03-14 13:43:13 -0400238 __down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500239}
240
241static noinline int __sched __down_interruptible(struct semaphore *sem)
242{
Matthew Wilcoxf1241c82008-03-14 13:43:13 -0400243 return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500244}
245
Matthew Wilcoxf06d9682008-03-14 13:19:33 -0400246static noinline int __sched __down_killable(struct semaphore *sem)
247{
Matthew Wilcoxf1241c82008-03-14 13:43:13 -0400248 return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
249}
250
Mark Rustad31542762014-09-03 03:17:24 -0700251static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
Matthew Wilcoxf1241c82008-03-14 13:43:13 -0400252{
Mark Rustad31542762014-09-03 03:17:24 -0700253 return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
Matthew Wilcoxf06d9682008-03-14 13:19:33 -0400254}
255
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500256static noinline void __sched __up(struct semaphore *sem)
257{
Matthew Wilcoxb17170b2008-03-14 14:35:22 -0400258 struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
259 struct semaphore_waiter, list);
Linus Torvalds00b41ec2008-05-10 20:43:22 -0700260 list_del(&waiter->list);
liguang06a6ea32013-04-30 15:28:33 -0700261 waiter->up = true;
Matthew Wilcoxb17170b2008-03-14 14:35:22 -0400262 wake_up_process(waiter->task);
Matthew Wilcox64ac24e2008-03-07 21:55:58 -0500263}