Generic semaphore implementation
Semaphores are no longer performance-critical, so a generic C implementation is
better for maintainability, debuggability and extensibility.

Thanks to Peter Zijlstra for fixing the lockdep warning.
Thanks to Harvey Harrison for pointing out that the unlikely() was unnecessary.

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Parent: e48b3deee4
Commit: 64ac24e738
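The generic implementation that replaces the per-architecture files below
protects a plain count with the semaphore's own spinlock and keeps waiters on
a list. A minimal sketch, modeled on the kernel/semaphore.c added by this
commit (illustrative only; exact names and locking primitives vary between
kernel versions):

        struct semaphore {
                spinlock_t              lock;
                unsigned int            count;
                struct list_head        wait_list;
        };

        void down(struct semaphore *sem)
        {
                unsigned long flags;

                spin_lock_irqsave(&sem->lock, flags);
                if (likely(sem->count > 0))
                        sem->count--;           /* fast path: a unit is free */
                else
                        __down(sem);            /* slow path: queue and sleep */
                spin_unlock_irqrestore(&sem->lock, flags);
        }

        void up(struct semaphore *sem)
        {
                unsigned long flags;

                spin_lock_irqsave(&sem->lock, flags);
                if (likely(list_empty(&sem->wait_list)))
                        sem->count++;           /* nobody waiting: just bump count */
                else
                        __up(sem);              /* hand the unit to the first waiter */
                spin_unlock_irqrestore(&sem->lock, flags);
        }

Because every operation runs under one per-semaphore spinlock, none of the
per-architecture atomic tricks removed below are needed.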
@@ -7,7 +7,7 @@ EXTRA_AFLAGS := $(KBUILD_CFLAGS)
 EXTRA_CFLAGS := -Werror -Wno-sign-compare
 
 obj-y    := entry.o traps.o process.o init_task.o osf_sys.o irq.o \
-            irq_alpha.o signal.o setup.o ptrace.o time.o semaphore.o \
+            irq_alpha.o signal.o setup.o ptrace.o time.o \
             alpha_ksyms.o systbls.o err_common.o io.o
 
 obj-$(CONFIG_VGA_HOSE) += console.o
@@ -77,15 +77,6 @@ EXPORT_SYMBOL(__do_clear_user);
 EXPORT_SYMBOL(__strncpy_from_user);
 EXPORT_SYMBOL(__strnlen_user);
 
-/* Semaphore helper functions. */
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__up_wakeup);
-EXPORT_SYMBOL(down);
-EXPORT_SYMBOL(down_interruptible);
-EXPORT_SYMBOL(down_trylock);
-EXPORT_SYMBOL(up);
-
 /*
  * SMP-specific symbols.
  */
@@ -1,224 +0,0 @@
-/*
- * Alpha semaphore implementation.
- *
- * (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1999, 2000 Richard Henderson
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-
-/*
- * This is basically the PPC semaphore scheme ported to use
- * the Alpha ll/sc sequences, so see the PPC code for
- * credits.
- */
-
-/*
- * Atomically update sem->count.
- * This does the equivalent of the following:
- *
- *      old_count = sem->count;
- *      tmp = MAX(old_count, 0) + incr;
- *      sem->count = tmp;
- *      return old_count;
- */
-static inline int __sem_update_count(struct semaphore *sem, int incr)
-{
-        long old_count, tmp = 0;
-
-        __asm__ __volatile__(
-        "1:     ldl_l   %0,%2\n"
-        "       cmovgt  %0,%0,%1\n"
-        "       addl    %1,%3,%1\n"
-        "       stl_c   %1,%2\n"
-        "       beq     %1,2f\n"
-        "       mb\n"
-        ".subsection 2\n"
-        "2:     br      1b\n"
-        ".previous"
-        : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
-        : "Ir" (incr), "1" (tmp), "m" (sem->count));
-
-        return old_count;
-}
-
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- */
-
-void __sched
-__down_failed(struct semaphore *sem)
-{
-        struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-        printk("%s(%d): down failed(%p)\n",
-               tsk->comm, task_pid_nr(tsk), sem);
-#endif
-
-        tsk->state = TASK_UNINTERRUPTIBLE;
-        wmb();
-        add_wait_queue_exclusive(&sem->wait, &wait);
-
-        /*
-         * Try to get the semaphore.  If the count is > 0, then we've
-         * got the semaphore; we decrement count and exit the loop.
-         * If the count is 0 or negative, we set it to -1, indicating
-         * that we are asleep, and then sleep.
-         */
-        while (__sem_update_count(sem, -1) <= 0) {
-                schedule();
-                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-        }
-        remove_wait_queue(&sem->wait, &wait);
-        tsk->state = TASK_RUNNING;
-
-        /*
-         * If there are any more sleepers, wake one of them up so
-         * that it can either get the semaphore, or set count to -1
-         * indicating that there are still processes sleeping.
-         */
-        wake_up(&sem->wait);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-        printk("%s(%d): down acquired(%p)\n",
-               tsk->comm, task_pid_nr(tsk), sem);
-#endif
-}
-
-int __sched
-__down_failed_interruptible(struct semaphore *sem)
-{
-        struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
-        long ret = 0;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-        printk("%s(%d): down failed(%p)\n",
-               tsk->comm, task_pid_nr(tsk), sem);
-#endif
-
-        tsk->state = TASK_INTERRUPTIBLE;
-        wmb();
-        add_wait_queue_exclusive(&sem->wait, &wait);
-
-        while (__sem_update_count(sem, -1) <= 0) {
-                if (signal_pending(current)) {
-                        /*
-                         * A signal is pending - give up trying.
-                         * Set sem->count to 0 if it is negative,
-                         * since we are no longer sleeping.
-                         */
-                        __sem_update_count(sem, 0);
-                        ret = -EINTR;
-                        break;
-                }
-                schedule();
-                set_task_state(tsk, TASK_INTERRUPTIBLE);
-        }
-
-        remove_wait_queue(&sem->wait, &wait);
-        tsk->state = TASK_RUNNING;
-        wake_up(&sem->wait);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-        printk("%s(%d): down %s(%p)\n",
-               current->comm, task_pid_nr(current),
-               (ret < 0 ? "interrupted" : "acquired"), sem);
-#endif
-        return ret;
-}
-
-void
-__up_wakeup(struct semaphore *sem)
-{
-        /*
-         * Note that we incremented count in up() before we came here,
-         * but that was ineffective since the result was <= 0, and
-         * any negative value of count is equivalent to 0.
-         * This ends up setting count to 1, unless count is now > 0
-         * (i.e. because some other cpu has called up() in the meantime),
-         * in which case we just increment count.
-         */
-        __sem_update_count(sem, 1);
-        wake_up(&sem->wait);
-}
-
-void __sched
-down(struct semaphore *sem)
-{
-#ifdef WAITQUEUE_DEBUG
-        CHECK_MAGIC(sem->__magic);
-#endif
-#ifdef CONFIG_DEBUG_SEMAPHORE
-        printk("%s(%d): down(%p) <count=%d> from %p\n",
-               current->comm, task_pid_nr(current), sem,
-               atomic_read(&sem->count), __builtin_return_address(0));
-#endif
-        __down(sem);
-}
-
-int __sched
-down_interruptible(struct semaphore *sem)
-{
-#ifdef WAITQUEUE_DEBUG
-        CHECK_MAGIC(sem->__magic);
-#endif
-#ifdef CONFIG_DEBUG_SEMAPHORE
-        printk("%s(%d): down(%p) <count=%d> from %p\n",
-               current->comm, task_pid_nr(current), sem,
-               atomic_read(&sem->count), __builtin_return_address(0));
-#endif
-        return __down_interruptible(sem);
-}
-
-int
-down_trylock(struct semaphore *sem)
-{
-        int ret;
-
-#ifdef WAITQUEUE_DEBUG
-        CHECK_MAGIC(sem->__magic);
-#endif
-
-        ret = __down_trylock(sem);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-        printk("%s(%d): down_trylock %s from %p\n",
-               current->comm, task_pid_nr(current),
-               ret ? "failed" : "acquired",
-               __builtin_return_address(0));
-#endif
-
-        return ret;
-}
-
-void
-up(struct semaphore *sem)
-{
-#ifdef WAITQUEUE_DEBUG
-        CHECK_MAGIC(sem->__magic);
-#endif
-#ifdef CONFIG_DEBUG_SEMAPHORE
-        printk("%s(%d): up(%p) <count=%d> from %p\n",
-               current->comm, task_pid_nr(current), sem,
-               atomic_read(&sem->count), __builtin_return_address(0));
-#endif
-        __up(sem);
-}
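The ldl_l/stl_c sequence deleted above is a bounded atomic update:
tmp = max(old, 0) + incr, retried until the conditional store succeeds. As a
hedged illustration (not kernel code; the helper name is hypothetical), the
same operation written as a portable compare-and-swap loop with GCC's
__atomic builtins:

        /* Returns the old count, like __sem_update_count() above. */
        static inline int sem_update_count(int *count, int incr)
        {
                int old = __atomic_load_n(count, __ATOMIC_RELAXED);
                int new;

                do {
                        new = (old > 0 ? old : 0) + incr;
                } while (!__atomic_compare_exchange_n(count, &old, new, 1,
                                                      __ATOMIC_ACQUIRE,
                                                      __ATOMIC_RELAXED));
                return old;
        }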
@@ -7,7 +7,7 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
 # Object file lists.
 
 obj-y           := compat.o entry-armv.o entry-common.o irq.o \
-                   process.o ptrace.o semaphore.o setup.o signal.o \
+                   process.o ptrace.o setup.o signal.o \
                    sys_arm.o stacktrace.o time.o traps.o
 
 obj-$(CONFIG_ISA_DMA_API)       += dma.o
@@ -1,221 +0,0 @@
-/*
- *  ARM semaphore implementation, taken from
- *
- *  i386 semaphore implementation.
- *
- *  (C) Copyright 1999 Linus Torvalds
- *
- *  Modified for ARM by Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is
- * protected by the semaphore spinlock.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
-        wake_up(&sem->wait);
-}
-
-static DEFINE_SPINLOCK(semaphore_lock);
-
-void __sched __down(struct semaphore * sem)
-{
-        struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
-        tsk->state = TASK_UNINTERRUPTIBLE;
-        add_wait_queue_exclusive(&sem->wait, &wait);
-
-        spin_lock_irq(&semaphore_lock);
-        sem->sleepers++;
-        for (;;) {
-                int sleepers = sem->sleepers;
-
-                /*
-                 * Add "everybody else" into it. They aren't
-                 * playing, because we own the spinlock.
-                 */
-                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-                        sem->sleepers = 0;
-                        break;
-                }
-                sem->sleepers = 1;      /* us - see -1 above */
-                spin_unlock_irq(&semaphore_lock);
-
-                schedule();
-                tsk->state = TASK_UNINTERRUPTIBLE;
-                spin_lock_irq(&semaphore_lock);
-        }
-        spin_unlock_irq(&semaphore_lock);
-        remove_wait_queue(&sem->wait, &wait);
-        tsk->state = TASK_RUNNING;
-        wake_up(&sem->wait);
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-        int retval = 0;
-        struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
-        tsk->state = TASK_INTERRUPTIBLE;
-        add_wait_queue_exclusive(&sem->wait, &wait);
-
-        spin_lock_irq(&semaphore_lock);
-        sem->sleepers ++;
-        for (;;) {
-                int sleepers = sem->sleepers;
-
-                /*
-                 * With signals pending, this turns into
-                 * the trylock failure case - we won't be
-                 * sleeping, and we* can't get the lock as
-                 * it has contention. Just correct the count
-                 * and exit.
-                 */
-                if (signal_pending(current)) {
-                        retval = -EINTR;
-                        sem->sleepers = 0;
-                        atomic_add(sleepers, &sem->count);
-                        break;
-                }
-
-                /*
-                 * Add "everybody else" into it. They aren't
-                 * playing, because we own the spinlock. The
-                 * "-1" is because we're still hoping to get
-                 * the lock.
-                 */
-                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-                        sem->sleepers = 0;
-                        break;
-                }
-                sem->sleepers = 1;      /* us - see -1 above */
-                spin_unlock_irq(&semaphore_lock);
-
-                schedule();
-                tsk->state = TASK_INTERRUPTIBLE;
-                spin_lock_irq(&semaphore_lock);
-        }
-        spin_unlock_irq(&semaphore_lock);
-        tsk->state = TASK_RUNNING;
-        remove_wait_queue(&sem->wait, &wait);
-        wake_up(&sem->wait);
-        return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- *
- * We could have done the trylock with a
- * single "cmpxchg" without failure cases,
- * but then it wouldn't work on a 386.
- */
-int __down_trylock(struct semaphore * sem)
-{
-        int sleepers;
-        unsigned long flags;
-
-        spin_lock_irqsave(&semaphore_lock, flags);
-        sleepers = sem->sleepers + 1;
-        sem->sleepers = 0;
-
-        /*
-         * Add "everybody else" and us into it. They aren't
-         * playing, because we own the spinlock.
-         */
-        if (!atomic_add_negative(sleepers, &sem->count))
-                wake_up(&sem->wait);
-
-        spin_unlock_irqrestore(&semaphore_lock, flags);
-        return 1;
-}
-
-/*
- * The semaphore operations have a special calling sequence that
- * allow us to do a simpler in-line version of them. These routines
- * need to convert that sequence back into the C sequence when
- * there is contention on the semaphore.
- *
- * ip contains the semaphore pointer on entry. Save the C-clobbered
- * registers (r0 to r3 and lr), but not ip, as we use it as a return
- * value in some cases..
- * To remain AAPCS compliant (64-bit stack align) we save r4 as well.
- */
-asm("   .section .sched.text,\"ax\",%progbits  \n\
-        .align  5                              \n\
-        .globl  __down_failed                  \n\
-__down_failed:                                 \n\
-        stmfd   sp!, {r0 - r4, lr}             \n\
-        mov     r0, ip                         \n\
-        bl      __down                         \n\
-        ldmfd   sp!, {r0 - r4, pc}             \n\
-                                               \n\
-        .align  5                              \n\
-        .globl  __down_interruptible_failed    \n\
-__down_interruptible_failed:                   \n\
-        stmfd   sp!, {r0 - r4, lr}             \n\
-        mov     r0, ip                         \n\
-        bl      __down_interruptible           \n\
-        mov     ip, r0                         \n\
-        ldmfd   sp!, {r0 - r4, pc}             \n\
-                                               \n\
-        .align  5                              \n\
-        .globl  __down_trylock_failed          \n\
-__down_trylock_failed:                         \n\
-        stmfd   sp!, {r0 - r4, lr}             \n\
-        mov     r0, ip                         \n\
-        bl      __down_trylock                 \n\
-        mov     ip, r0                         \n\
-        ldmfd   sp!, {r0 - r4, pc}             \n\
-                                               \n\
-        .align  5                              \n\
-        .globl  __up_wakeup                    \n\
-__up_wakeup:                                   \n\
-        stmfd   sp!, {r0 - r4, lr}             \n\
-        mov     r0, ip                         \n\
-        bl      __up                           \n\
-        ldmfd   sp!, {r0 - r4, pc}             \n\
-        ");
-
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_interruptible_failed);
-EXPORT_SYMBOL(__down_trylock_failed);
-EXPORT_SYMBOL(__up_wakeup);
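The sleeper bookkeeping above is the subtle part of this i386-derived scheme:
while holding semaphore_lock, each waiter folds the decrements of "everybody
else" back into count, keeping only its own -1. A hedged userspace sketch of
that single test, using C11 atomics instead of the kernel's atomic_t (the
function name is hypothetical):

        #include <stdatomic.h>
        #include <stdbool.h>

        /* Mirrors !atomic_add_negative(sleepers - 1, &count): adding
         * (sleepers - 1) cancels the other sleepers' decrements, so a
         * non-negative result means a unit was free and we own it. */
        static bool sleeper_try_acquire(atomic_int *count, int *sleepers)
        {
                int result = atomic_fetch_add(count, *sleepers - 1)
                             + (*sleepers - 1);

                if (result >= 0) {
                        *sleepers = 0;  /* acquired; bookkeeping is clean */
                        return true;
                }
                *sleepers = 1;          /* we stay as the lone tracked sleeper */
                return false;
        }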
|
@ -6,7 +6,7 @@ extra-y := head.o vmlinux.lds
|
||||||
|
|
||||||
obj-$(CONFIG_SUBARCH_AVR32B) += entry-avr32b.o
|
obj-$(CONFIG_SUBARCH_AVR32B) += entry-avr32b.o
|
||||||
obj-y += syscall_table.o syscall-stubs.o irq.o
|
obj-y += syscall_table.o syscall-stubs.o irq.o
|
||||||
obj-y += setup.o traps.o semaphore.o ocd.o ptrace.o
|
obj-y += setup.o traps.o ocd.o ptrace.o
|
||||||
obj-y += signal.o sys_avr32.o process.o time.o
|
obj-y += signal.o sys_avr32.o process.o time.o
|
||||||
obj-y += init_task.o switch_to.o cpu.o
|
obj-y += init_task.o switch_to.o cpu.o
|
||||||
obj-$(CONFIG_MODULES) += module.o avr32_ksyms.o
|
obj-$(CONFIG_MODULES) += module.o avr32_ksyms.o
|
||||||
|
|
|
@@ -1,148 +0,0 @@
-/*
- * AVR32 sempahore implementation.
- *
- * Copyright (C) 2004-2006 Atmel Corporation
- *
- * Based on linux/arch/i386/kernel/semaphore.c
- *  Copyright (C) 1999 Linus Torvalds
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-
-#include <asm/semaphore.h>
-#include <asm/atomic.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
-        wake_up(&sem->wait);
-}
-EXPORT_SYMBOL(__up);
-
-void __sched __down(struct semaphore *sem)
-{
-        struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
-        unsigned long flags;
-
-        tsk->state = TASK_UNINTERRUPTIBLE;
-        spin_lock_irqsave(&sem->wait.lock, flags);
-        add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-        sem->sleepers++;
-        for (;;) {
-                int sleepers = sem->sleepers;
-
-                /*
-                 * Add "everybody else" into it. They aren't
-                 * playing, because we own the spinlock in
-                 * the wait_queue_head.
-                 */
-                if (atomic_add_return(sleepers - 1, &sem->count) >= 0) {
-                        sem->sleepers = 0;
-                        break;
-                }
-                sem->sleepers = 1;      /* us - see -1 above */
-                spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-                schedule();
-
-                spin_lock_irqsave(&sem->wait.lock, flags);
-                tsk->state = TASK_UNINTERRUPTIBLE;
-        }
-        remove_wait_queue_locked(&sem->wait, &wait);
-        wake_up_locked(&sem->wait);
-        spin_unlock_irqrestore(&sem->wait.lock, flags);
-        tsk->state = TASK_RUNNING;
-}
-EXPORT_SYMBOL(__down);
-
-int __sched __down_interruptible(struct semaphore *sem)
-{
-        int retval = 0;
-        struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
-        unsigned long flags;
-
-        tsk->state = TASK_INTERRUPTIBLE;
-        spin_lock_irqsave(&sem->wait.lock, flags);
-        add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-        sem->sleepers++;
-        for (;;) {
-                int sleepers = sem->sleepers;
-
-                /*
-                 * With signals pending, this turns into the trylock
-                 * failure case - we won't be sleeping, and we can't
-                 * get the lock as it has contention. Just correct the
-                 * count and exit.
-                 */
-                if (signal_pending(current)) {
-                        retval = -EINTR;
-                        sem->sleepers = 0;
-                        atomic_add(sleepers, &sem->count);
-                        break;
-                }
-
-                /*
-                 * Add "everybody else" into it. They aren't
-                 * playing, because we own the spinlock in
-                 * the wait_queue_head.
-                 */
-                if (atomic_add_return(sleepers - 1, &sem->count) >= 0) {
-                        sem->sleepers = 0;
-                        break;
-                }
-                sem->sleepers = 1;      /* us - see -1 above */
-                spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-                schedule();
-
-                spin_lock_irqsave(&sem->wait.lock, flags);
-                tsk->state = TASK_INTERRUPTIBLE;
-        }
-        remove_wait_queue_locked(&sem->wait, &wait);
-        wake_up_locked(&sem->wait);
-        spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-        tsk->state = TASK_RUNNING;
-        return retval;
-}
-EXPORT_SYMBOL(__down_interruptible);
@@ -31,10 +31,6 @@ config ZONE_DMA
 	bool
 	default y
 
-config SEMAPHORE_SLEEPERS
-	bool
-	default y
-
 config GENERIC_FIND_NEXT_BIT
 	bool
 	default y
@@ -42,11 +42,6 @@ EXPORT_SYMBOL(ip_fast_csum);
 
 EXPORT_SYMBOL(kernel_thread);
 
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__down_interruptible);
-
 EXPORT_SYMBOL(is_in_rom);
 EXPORT_SYMBOL(bfin_return_from_exception);
 
@@ -5,8 +5,7 @@
 
 extra-y := vmlinux.lds
 
-obj-y   := process.o traps.o irq.o ptrace.o setup.o \
-	   time.o sys_cris.o semaphore.o
+obj-y   := process.o traps.o irq.o ptrace.o setup.o time.o sys_cris.o
 
 obj-$(CONFIG_MODULES)    += crisksyms.o
 obj-$(CONFIG_MODULES)    += module.o
@@ -9,7 +9,6 @@
 #include <linux/string.h>
 #include <linux/tty.h>
 
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/checksum.h>
@@ -49,12 +48,6 @@ EXPORT_SYMBOL(__negdi2);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 
-/* Semaphore functions */
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
-
 /* Userspace access functions */
 EXPORT_SYMBOL(__copy_user_zeroing);
 EXPORT_SYMBOL(__copy_user);
@@ -1,129 +0,0 @@
-/*
- * Generic semaphore code. Buyer beware. Do your own
- * specific changes in <asm/semaphore-helper.h>
- */
-
-#include <linux/sched.h>
-#include <asm/semaphore-helper.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to sleep, while the "waking" variable is
- * incremented when the "up()" code goes to wake up waiting
- * processes.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * waking_non_zero() (from asm/semaphore.h) must execute
- * atomically.
- *
- * When __up() is called, the count was negative before
- * incrementing it, and we need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to
- * wake up and exit.  ALL waiting processes actually wake up but
- * only the one that gets to the "waking" field first will gate
- * through and acquire the semaphore.  The others will go back
- * to sleep.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-void __up(struct semaphore *sem)
-{
-        wake_one_more(sem);
-        wake_up(&sem->wait);
-}
-
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from __down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from __down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- *
- */
-
-#define DOWN_VAR                                \
-        struct task_struct *tsk = current;      \
-        wait_queue_t wait;                      \
-        init_waitqueue_entry(&wait, tsk);
-
-#define DOWN_HEAD(task_state)                                           \
-                                                                        \
-                                                                        \
-        tsk->state = (task_state);                                      \
-        add_wait_queue(&sem->wait, &wait);                              \
-                                                                        \
-        /*                                                              \
-         * Ok, we're set up.  sem->count is known to be less than zero \
-         * so we must wait.                                             \
-         *                                                              \
-         * We can let go the lock for purposes of waiting.              \
-         * We re-acquire it after awaking so as to protect              \
-         * all semaphore operations.                                    \
-         *                                                              \
-         * If "up()" is called before we call waking_non_zero() then    \
-         * we will catch it right away.  If it is called later then     \
-         * we will have to go through a wakeup cycle to catch it.       \
-         *                                                              \
-         * Multiple waiters contend for the semaphore lock to see       \
-         * who gets to gate through and who has to wait some more.      \
-         */                                                             \
-        for (;;) {
-
-#define DOWN_TAIL(task_state)                   \
-                tsk->state = (task_state);      \
-        }                                       \
-        tsk->state = TASK_RUNNING;              \
-        remove_wait_queue(&sem->wait, &wait);
-
-void __sched __down(struct semaphore * sem)
-{
-        DOWN_VAR
-        DOWN_HEAD(TASK_UNINTERRUPTIBLE)
-        if (waking_non_zero(sem))
-                break;
-        schedule();
-        DOWN_TAIL(TASK_UNINTERRUPTIBLE)
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-        int ret = 0;
-        DOWN_VAR
-        DOWN_HEAD(TASK_INTERRUPTIBLE)
-
-        ret = waking_non_zero_interruptible(sem, tsk);
-        if (ret)
-        {
-                if (ret == 1)
-                        /* ret != 0 only if we get interrupted -arca */
-                        ret = 0;
-                break;
-        }
-        schedule();
-        DOWN_TAIL(TASK_INTERRUPTIBLE)
-        return ret;
-}
-
-int __down_trylock(struct semaphore * sem)
-{
-        return waking_non_zero_trylock(sem);
-}
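The "waking" handshake used by this helper-based variant lives in each port's
<asm/semaphore-helper.h>. A hedged sketch of what waking_non_zero() typically
did (a spinlock-protected variant; the real per-architecture helpers differed,
some using atomic operations instead):

        /* Consume one wakeup token if available: returns 1 if we gate
         * through and own the semaphore, 0 if we must sleep again. */
        static inline int waking_non_zero(struct semaphore *sem)
        {
                unsigned long flags;
                int ret = 0;

                spin_lock_irqsave(&semaphore_wake_lock, flags);
                if (sem->waking > 0) {
                        sem->waking--;
                        ret = 1;
                }
                spin_unlock_irqrestore(&semaphore_wake_lock, flags);
                return ret;
        }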
@@ -9,7 +9,7 @@ extra-y:= head.o init_task.o vmlinux.lds
 
 obj-y := $(heads-y) entry.o entry-table.o break.o switch_to.o kernel_thread.o \
          kernel_execve.o process.o traps.o ptrace.o signal.o dma.o \
-         sys_frv.o time.o semaphore.o setup.o frv_ksyms.o \
+         sys_frv.o time.o setup.o frv_ksyms.o \
          debug-stub.o irq.o sleep.o uaccess.o
 
 obj-$(CONFIG_GDBSTUB)           += gdb-stub.o gdb-io.o
@@ -12,7 +12,6 @@
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/hardirq.h>
 #include <asm/cacheflush.h>
@@ -1,155 +0,0 @@
-/* semaphore.c: FR-V semaphores
- *
- * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- * - Derived from lib/rwsem-spinlock.c
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/sched.h>
-#include <linux/module.h>
-#include <asm/semaphore.h>
-
-struct sem_waiter {
-        struct list_head        list;
-        struct task_struct      *task;
-};
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-void semtrace(struct semaphore *sem, const char *str)
-{
-        if (sem->debug)
-                printk("[%d] %s({%d,%d})\n",
-                       current->pid,
-                       str,
-                       sem->counter,
-                       list_empty(&sem->wait_list) ? 0 : 1);
-}
-#else
-#define semtrace(SEM,STR) do { } while(0)
-#endif
-
-/*
- * wait for a token to be granted from a semaphore
- * - entered with lock held and interrupts disabled
- */
-void __down(struct semaphore *sem, unsigned long flags)
-{
-        struct task_struct *tsk = current;
-        struct sem_waiter waiter;
-
-        semtrace(sem, "Entering __down");
-
-        /* set up my own style of waitqueue */
-        waiter.task = tsk;
-        get_task_struct(tsk);
-
-        list_add_tail(&waiter.list, &sem->wait_list);
-
-        /* we don't need to touch the semaphore struct anymore */
-        spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-        /* wait to be given the semaphore */
-        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-
-        for (;;) {
-                if (list_empty(&waiter.list))
-                        break;
-                schedule();
-                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-        }
-
-        tsk->state = TASK_RUNNING;
-        semtrace(sem, "Leaving __down");
-}
-
-EXPORT_SYMBOL(__down);
-
-/*
- * interruptibly wait for a token to be granted from a semaphore
- * - entered with lock held and interrupts disabled
- */
-int __down_interruptible(struct semaphore *sem, unsigned long flags)
-{
-        struct task_struct *tsk = current;
-        struct sem_waiter waiter;
-        int ret;
-
-        semtrace(sem,"Entering __down_interruptible");
-
-        /* set up my own style of waitqueue */
-        waiter.task = tsk;
-        get_task_struct(tsk);
-
-        list_add_tail(&waiter.list, &sem->wait_list);
-
-        /* we don't need to touch the semaphore struct anymore */
-        set_task_state(tsk, TASK_INTERRUPTIBLE);
-
-        spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-        /* wait to be given the semaphore */
-        ret = 0;
-        for (;;) {
-                if (list_empty(&waiter.list))
-                        break;
-                if (unlikely(signal_pending(current)))
-                        goto interrupted;
-                schedule();
-                set_task_state(tsk, TASK_INTERRUPTIBLE);
-        }
-
- out:
-        tsk->state = TASK_RUNNING;
-        semtrace(sem, "Leaving __down_interruptible");
-        return ret;
-
- interrupted:
-        spin_lock_irqsave(&sem->wait_lock, flags);
-
-        if (!list_empty(&waiter.list)) {
-                list_del(&waiter.list);
-                ret = -EINTR;
-        }
-
-        spin_unlock_irqrestore(&sem->wait_lock, flags);
-        if (ret == -EINTR)
-                put_task_struct(current);
-        goto out;
-}
-
-EXPORT_SYMBOL(__down_interruptible);
-
-/*
- * release a single token back to a semaphore
- * - entered with lock held and interrupts disabled
- */
-void __up(struct semaphore *sem)
-{
-        struct task_struct *tsk;
-        struct sem_waiter *waiter;
-
-        semtrace(sem,"Entering __up");
-
-        /* grant the token to the process at the front of the queue */
-        waiter = list_entry(sem->wait_list.next, struct sem_waiter, list);
-
-        /* We must be careful not to touch 'waiter' after we set ->task = NULL.
-         * It is allocated on the waiter's stack and may become invalid at
-         * any time after that point (due to a wakeup from another source).
-         */
-        list_del_init(&waiter->list);
-        tsk = waiter->task;
-        mb();
-        waiter->task = NULL;
-        wake_up_process(tsk);
-        put_task_struct(tsk);
-
-        semtrace(sem,"Leaving __up");
-}
-
-EXPORT_SYMBOL(__up);
@@ -5,7 +5,7 @@
 extra-y := vmlinux.lds
 
 obj-y := process.o traps.o ptrace.o irq.o \
-	 sys_h8300.o time.o semaphore.o signal.o \
+	 sys_h8300.o time.o signal.o \
	 setup.o gpio.o init_task.o syscalls.o \
	 entry.o
 
@@ -12,7 +12,6 @@
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/current.h>
 #include <asm/gpio.h>
@@ -1,132 +0,0 @@
-/*
- * Generic semaphore code. Buyer beware. Do your own
- * specific changes in <asm/semaphore-helper.h>
- */
-
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <asm/semaphore-helper.h>
-
-#ifndef CONFIG_RMW_INSNS
-spinlock_t semaphore_wake_lock;
-#endif
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to sleep, while the "waking" variable is
- * incremented when the "up()" code goes to wake up waiting
- * processes.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * waking_non_zero() (from asm/semaphore.h) must execute
- * atomically.
- *
- * When __up() is called, the count was negative before
- * incrementing it, and we need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to
- * wake up and exit.  ALL waiting processes actually wake up but
- * only the one that gets to the "waking" field first will gate
- * through and acquire the semaphore.  The others will go back
- * to sleep.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-void __up(struct semaphore *sem)
-{
-        wake_one_more(sem);
-        wake_up(&sem->wait);
-}
-
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from __down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from __down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- *
- */
-
-
-#define DOWN_HEAD(task_state)                                           \
-                                                                        \
-                                                                        \
-        current->state = (task_state);                                  \
-        add_wait_queue(&sem->wait, &wait);                              \
-                                                                        \
-        /*                                                              \
-         * Ok, we're set up.  sem->count is known to be less than zero \
-         * so we must wait.                                             \
-         *                                                              \
-         * We can let go the lock for purposes of waiting.              \
-         * We re-acquire it after awaking so as to protect              \
-         * all semaphore operations.                                    \
-         *                                                              \
-         * If "up()" is called before we call waking_non_zero() then    \
-         * we will catch it right away.  If it is called later then     \
-         * we will have to go through a wakeup cycle to catch it.       \
-         *                                                              \
-         * Multiple waiters contend for the semaphore lock to see       \
-         * who gets to gate through and who has to wait some more.      \
-         */                                                             \
-        for (;;) {
-
-#define DOWN_TAIL(task_state)                   \
-                current->state = (task_state);  \
-        }                                       \
-        current->state = TASK_RUNNING;          \
-        remove_wait_queue(&sem->wait, &wait);
-
-void __sched __down(struct semaphore * sem)
-{
-        DECLARE_WAITQUEUE(wait, current);
-
-        DOWN_HEAD(TASK_UNINTERRUPTIBLE)
-        if (waking_non_zero(sem))
-                break;
-        schedule();
-        DOWN_TAIL(TASK_UNINTERRUPTIBLE)
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-        DECLARE_WAITQUEUE(wait, current);
-        int ret = 0;
-
-        DOWN_HEAD(TASK_INTERRUPTIBLE)
-
-        ret = waking_non_zero_interruptible(sem, current);
-        if (ret)
-        {
-                if (ret == 1)
-                        /* ret != 0 only if we get interrupted -arca */
-                        ret = 0;
-                break;
-        }
-        schedule();
-        DOWN_TAIL(TASK_INTERRUPTIBLE)
-        return ret;
-}
-
-int __down_trylock(struct semaphore * sem)
-{
-        return waking_non_zero_trylock(sem);
-}
@@ -6,7 +6,7 @@ extra-y := head.o init_task.o vmlinux.lds
 
 obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
	 irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
-	 salinfo.o semaphore.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
+	 salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
	 unwind.o mca.o mca_asm.o topology.o
 
 obj-$(CONFIG_IA64_BRL_EMU)	+= brl_emu.o
@@ -19,12 +19,6 @@ EXPORT_SYMBOL_GPL(empty_zero_page);
 EXPORT_SYMBOL(ip_fast_csum);		/* hand-coded assembly */
 EXPORT_SYMBOL(csum_ipv6_magic);
 
-#include <asm/semaphore.h>
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__up);
-
 #include <asm/page.h>
 EXPORT_SYMBOL(clear_page);
 
@@ -1,165 +0,0 @@
-/*
- * IA-64 semaphore implementation (derived from x86 version).
- *
- * Copyright (C) 1999-2000, 2002 Hewlett-Packard Co
- *      David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-/*
- * Semaphores are implemented using a two-way counter: The "count"
- * variable is decremented for each process that tries to acquire the
- * semaphore, while the "sleepers" variable is a count of such
- * acquires.
- *
- * Notably, the inline "up()" and "down()" functions can efficiently
- * test if they need to do any extra work (up needs to do something
- * only if count was negative before the increment operation.
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is contention
- * on the lock, and as such all this is the "non-critical" part of the
- * whole semaphore business. The critical part is the inline stuff in
- * <asm/semaphore.h> where we want to avoid any extra jumps and calls.
- */
-#include <linux/sched.h>
-#include <linux/init.h>
-
-#include <asm/errno.h>
-#include <asm/semaphore.h>
-
-/*
- * Logic:
- *  - Only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - When we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleepers" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void
-__up (struct semaphore *sem)
-{
-        wake_up(&sem->wait);
-}
-
-void __sched __down (struct semaphore *sem)
-{
-        struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
-        unsigned long flags;
-
-        tsk->state = TASK_UNINTERRUPTIBLE;
-        spin_lock_irqsave(&sem->wait.lock, flags);
-        add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-        sem->sleepers++;
-        for (;;) {
-                int sleepers = sem->sleepers;
-
-                /*
-                 * Add "everybody else" into it. They aren't
-                 * playing, because we own the spinlock in
-                 * the wait_queue_head.
-                 */
-                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-                        sem->sleepers = 0;
-                        break;
-                }
-                sem->sleepers = 1;      /* us - see -1 above */
-                spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-                schedule();
-
-                spin_lock_irqsave(&sem->wait.lock, flags);
-                tsk->state = TASK_UNINTERRUPTIBLE;
-        }
-        remove_wait_queue_locked(&sem->wait, &wait);
-        wake_up_locked(&sem->wait);
-        spin_unlock_irqrestore(&sem->wait.lock, flags);
-        tsk->state = TASK_RUNNING;
-}
-
-int __sched __down_interruptible (struct semaphore * sem)
-{
-        int retval = 0;
-        struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
-        unsigned long flags;
-
-        tsk->state = TASK_INTERRUPTIBLE;
-        spin_lock_irqsave(&sem->wait.lock, flags);
-        add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-        sem->sleepers ++;
-        for (;;) {
-                int sleepers = sem->sleepers;
-
-                /*
-                 * With signals pending, this turns into
-                 * the trylock failure case - we won't be
-                 * sleeping, and we* can't get the lock as
-                 * it has contention. Just correct the count
-                 * and exit.
-                 */
-                if (signal_pending(current)) {
-                        retval = -EINTR;
-                        sem->sleepers = 0;
-                        atomic_add(sleepers, &sem->count);
-                        break;
-                }
-
-                /*
-                 * Add "everybody else" into it. They aren't
-                 * playing, because we own the spinlock in
-                 * wait_queue_head. The "-1" is because we're
-                 * still hoping to get the semaphore.
-                 */
-                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-                        sem->sleepers = 0;
-                        break;
-                }
-                sem->sleepers = 1;      /* us - see -1 above */
-                spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-                schedule();
-
-                spin_lock_irqsave(&sem->wait.lock, flags);
-                tsk->state = TASK_INTERRUPTIBLE;
-        }
-        remove_wait_queue_locked(&sem->wait, &wait);
-        wake_up_locked(&sem->wait);
-        spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-        tsk->state = TASK_RUNNING;
-        return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for having decremented the
- * count.
- */
-int
-__down_trylock (struct semaphore *sem)
-{
-        unsigned long flags;
-        int sleepers;
-
-        spin_lock_irqsave(&sem->wait.lock, flags);
-        sleepers = sem->sleepers + 1;
-        sem->sleepers = 0;
-
-        /*
-         * Add "everybody else" and us into it. They aren't
-         * playing, because we own the spinlock in the
-         * wait_queue_head.
-         */
-        if (!atomic_add_negative(sleepers, &sem->count)) {
-                wake_up_locked(&sem->wait);
-        }
-
-        spin_unlock_irqrestore(&sem->wait.lock, flags);
-        return 1;
-}
@@ -5,7 +5,7 @@
 extra-y := head.o init_task.o vmlinux.lds
 
 obj-y := process.o entry.o traps.o align.o irq.o setup.o time.o \
-	m32r_ksyms.o sys_m32r.o semaphore.o signal.o ptrace.o
+	m32r_ksyms.o sys_m32r.o signal.o ptrace.o
 
 obj-$(CONFIG_SMP)		+= smp.o smpboot.o
 obj-$(CONFIG_MODULES)		+= module.o
@@ -7,7 +7,6 @@
 #include <linux/interrupt.h>
 #include <linux/string.h>
 
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/checksum.h>
@@ -22,10 +21,6 @@ EXPORT_SYMBOL(dump_fpu);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down_trylock);
 
 /* Networking helper routines. */
 /* Delay loops */
@ -1,185 +0,0 @@
|
||||||
/*
|
|
||||||
* linux/arch/m32r/semaphore.c
|
|
||||||
* orig : i386 2.6.4
|
|
||||||
*
|
|
||||||
* M32R semaphore implementation.
|
|
||||||
*
|
|
||||||
* Copyright (c) 2002 - 2004 Hitoshi Yamamoto
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
* i386 semaphore implementation.
|
|
||||||
*
|
|
||||||
* (C) Copyright 1999 Linus Torvalds
|
|
||||||
*
|
|
||||||
* Portions Copyright 1999 Red Hat, Inc.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation; either version
|
|
||||||
* 2 of the License, or (at your option) any later version.
|
|
||||||
*
|
|
||||||
* rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
|
|
||||||
*/
|
|
||||||
#include <linux/sched.h>
|
|
||||||
#include <linux/err.h>
|
|
||||||
#include <linux/init.h>
|
|
||||||
#include <asm/semaphore.h>
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Semaphores are implemented using a two-way counter:
|
|
||||||
* The "count" variable is decremented for each process
|
|
||||||
* that tries to acquire the semaphore, while the "sleeping"
|
|
||||||
* variable is a count of such acquires.
|
|
||||||
*
|
|
||||||
* Notably, the inline "up()" and "down()" functions can
|
|
||||||
* efficiently test if they need to do any extra work (up
|
|
||||||
* needs to do something only if count was negative before
|
|
||||||
* the increment operation.
|
|
||||||
*
|
|
||||||
* "sleeping" and the contention routine ordering is protected
|
|
||||||
* by the spinlock in the semaphore's waitqueue head.
|
|
||||||
*
|
|
||||||
* Note that these functions are only called when there is
|
|
||||||
* contention on the lock, and as such all this is the
|
|
||||||
* "non-critical" part of the whole semaphore business. The
|
|
||||||
* critical part is the inline stuff in <asm/semaphore.h>
|
|
||||||
* where we want to avoid any extra jumps and calls.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Logic:
|
|
||||||
* - only on a boundary condition do we need to care. When we go
|
|
||||||
* from a negative count to a non-negative, we wake people up.
|
|
||||||
* - when we go from a non-negative count to a negative do we
|
|
||||||
* (a) synchronize with the "sleeper" count and (b) make sure
|
|
||||||
* that we're on the wakeup list before we synchronize so that
|
|
||||||
* we cannot lose wakeup events.
|
|
||||||
*/
|
|
||||||
|
|
||||||
asmlinkage void __up(struct semaphore *sem)
|
|
||||||
{
|
|
||||||
wake_up(&sem->wait);
|
|
||||||
}
|
|
||||||
|
|
||||||
asmlinkage void __sched __down(struct semaphore * sem)
|
|
||||||
{
|
|
||||||
struct task_struct *tsk = current;
|
|
||||||
DECLARE_WAITQUEUE(wait, tsk);
|
|
||||||
unsigned long flags;
|
|
||||||
|
|
||||||
tsk->state = TASK_UNINTERRUPTIBLE;
|
|
||||||
spin_lock_irqsave(&sem->wait.lock, flags);
|
|
||||||
add_wait_queue_exclusive_locked(&sem->wait, &wait);
|
|
||||||
|
|
||||||
sem->sleepers++;
|
|
||||||
for (;;) {
|
|
||||||
int sleepers = sem->sleepers;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Add "everybody else" into it. They aren't
|
|
||||||
* playing, because we own the spinlock in
|
|
||||||
* the wait_queue_head.
|
|
||||||
*/
|
|
||||||
if (!atomic_add_negative(sleepers - 1, &sem->count)) {
|
|
||||||
sem->sleepers = 0;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
sem->sleepers = 1; /* us - see -1 above */
|
|
||||||
spin_unlock_irqrestore(&sem->wait.lock, flags);
|
|
||||||
|
|
||||||
schedule();
|
|
||||||
|
|
||||||
spin_lock_irqsave(&sem->wait.lock, flags);
|
|
||||||
tsk->state = TASK_UNINTERRUPTIBLE;
|
|
||||||
}
|
|
||||||
remove_wait_queue_locked(&sem->wait, &wait);
|
|
||||||
wake_up_locked(&sem->wait);
|
|
||||||
spin_unlock_irqrestore(&sem->wait.lock, flags);
|
|
||||||
tsk->state = TASK_RUNNING;
|
|
||||||
}

asmlinkage int __sched __down_interruptible(struct semaphore * sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	unsigned long flags;

	tsk->state = TASK_INTERRUPTIBLE;
	spin_lock_irqsave(&sem->wait.lock, flags);
	add_wait_queue_exclusive_locked(&sem->wait, &wait);

	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * With signals pending, this turns into
		 * the trylock failure case - we won't be
		 * sleeping, and we can't get the lock as
		 * it has contention. Just correct the count
		 * and exit.
		 */
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			atomic_add(sleepers, &sem->count);
			break;
		}

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock in
		 * wait_queue_head. The "-1" is because we're
		 * still hoping to get the semaphore.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		schedule();

		spin_lock_irqsave(&sem->wait.lock, flags);
		tsk->state = TASK_INTERRUPTIBLE;
	}
	remove_wait_queue_locked(&sem->wait, &wait);
	wake_up_locked(&sem->wait);
	spin_unlock_irqrestore(&sem->wait.lock, flags);

	tsk->state = TASK_RUNNING;
	return retval;
}

/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 *
 * We could have done the trylock with a
 * single "cmpxchg" without failure cases,
 * but then it wouldn't work on a 386.
 */
asmlinkage int __down_trylock(struct semaphore * sem)
{
	int sleepers;
	unsigned long flags;

	spin_lock_irqsave(&sem->wait.lock, flags);
	sleepers = sem->sleepers + 1;
	sem->sleepers = 0;

	/*
	 * Add "everybody else" and us into it. They aren't
	 * playing, because we own the spinlock in the
	 * wait_queue_head.
	 */
	if (!atomic_add_negative(sleepers, &sem->count)) {
		wake_up_locked(&sem->wait);
	}

	spin_unlock_irqrestore(&sem->wait.lock, flags);
	return 1;
}
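
For context, and not part of the diff: these slow paths sit behind the classic semaphore API, whose typical calling patterns looked roughly like this (a minimal sketch; `my_sem` and `example` are illustrative names):

static DECLARE_MUTEX(my_sem);	/* a semaphore initialized to 1 */

static void example(void)
{
	down(&my_sem);			/* may sleep, not interruptible */
	/* ... critical section ... */
	up(&my_sem);

	if (down_interruptible(&my_sem))
		return;			/* -EINTR: a signal arrived */
	/* ... critical section ... */
	up(&my_sem);

	if (down_trylock(&my_sem) == 0) {	/* 0 means acquired */
		/* ... critical section ... */
		up(&my_sem);
	}
}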

@ -10,7 +10,7 @@ endif
extra-y += vmlinux.lds

obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o module.o \
-	sys_m68k.o time.o semaphore.o setup.o m68k_ksyms.o devres.o
+	sys_m68k.o time.o setup.o m68k_ksyms.o devres.o

devres-y = ../../../kernel/irq/devres.o

@ -1,5 +1,4 @@
#include <linux/module.h>
-#include <asm/semaphore.h>

asmlinkage long long __ashldi3 (long long, int);
asmlinkage long long __ashrdi3 (long long, int);

@ -15,8 +14,3 @@ EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__muldi3);
-
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__down_failed_trylock);
-EXPORT_SYMBOL(__up_wakeup);

@ -1,132 +0,0 @@
/*
 * Generic semaphore code. Buyer beware. Do your own
 * specific changes in <asm/semaphore-helper.h>
 */

#include <linux/sched.h>
#include <linux/init.h>
#include <asm/semaphore-helper.h>

#ifndef CONFIG_RMW_INSNS
spinlock_t semaphore_wake_lock;
#endif

/*
 * Semaphores are implemented using a two-way counter:
 * The "count" variable is decremented for each process
 * that tries to sleep, while the "waking" variable is
 * incremented when the "up()" code goes to wake up waiting
 * processes.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * waking_non_zero() (from asm/semaphore.h) must execute
 * atomically.
 *
 * When __up() is called, the count was negative before
 * incrementing it, and we need to wake up somebody.
 *
 * This routine adds one to the count of processes that need to
 * wake up and exit.  ALL waiting processes actually wake up but
 * only the one that gets to the "waking" field first will gate
 * through and acquire the semaphore.  The others will go back
 * to sleep.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */
void __up(struct semaphore *sem)
{
	wake_one_more(sem);
	wake_up(&sem->wait);
}

/*
 * Perform the "down" function.  Return zero for semaphore acquired,
 * return negative for signalled out of the function.
 *
 * If called from __down, the return is ignored and the wait loop is
 * not interruptible.  This means that a task waiting on a semaphore
 * using "down()" cannot be killed until someone does an "up()" on
 * the semaphore.
 *
 * If called from __down_interruptible, the return value gets checked
 * upon return.  If the return value is negative then the task continues
 * with the negative value in the return register (it can be tested by
 * the caller).
 *
 * Either form may be used in conjunction with "up()".
 */

#define DOWN_HEAD(task_state)						\
									\
									\
	current->state = (task_state);					\
	add_wait_queue(&sem->wait, &wait);				\
									\
	/*								\
	 * Ok, we're set up.  sem->count is known to be less than zero	\
	 * so we must wait.						\
	 *								\
	 * We can let go the lock for purposes of waiting.		\
	 * We re-acquire it after awaking so as to protect		\
	 * all semaphore operations.					\
	 *								\
	 * If "up()" is called before we call waking_non_zero() then	\
	 * we will catch it right away.  If it is called later then	\
	 * we will have to go through a wakeup cycle to catch it.	\
	 *								\
	 * Multiple waiters contend for the semaphore lock to see	\
	 * who gets to gate through and who has to wait some more.	\
	 */								\
	for (;;) {

#define DOWN_TAIL(task_state)			\
		current->state = (task_state);	\
	}					\
	current->state = TASK_RUNNING;		\
	remove_wait_queue(&sem->wait, &wait);

void __sched __down(struct semaphore * sem)
{
	DECLARE_WAITQUEUE(wait, current);

	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
	if (waking_non_zero(sem))
		break;
	schedule();
	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
}

int __sched __down_interruptible(struct semaphore * sem)
{
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	DOWN_HEAD(TASK_INTERRUPTIBLE)

	ret = waking_non_zero_interruptible(sem, current);
	if (ret)
	{
		if (ret == 1)
			/* ret != 0 only if we get interrupted -arca */
			ret = 0;
		break;
	}
	schedule();
	DOWN_TAIL(TASK_INTERRUPTIBLE)
	return ret;
}

int __down_trylock(struct semaphore * sem)
{
	return waking_non_zero_trylock(sem);
}
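
The actual gating above happens in the per-arch waking_non_zero*() helpers from <asm/semaphore-helper.h>. Their contract, sketched as simplified plain C (illustrative only; the real versions are atomic and arch-specific):

/* Sketch of the helper contract, not the real implementation. */
static int waking_non_zero_sketch(struct semaphore *sem)
{
	int granted = 0;

	/* must be atomic with respect to wake_one_more() in __up() */
	if (sem->waking > 0) {		/* a wakeup is pending */
		sem->waking--;		/* consume it...       */
		granted = 1;		/* ...and gate through */
	}
	return granted;			/* 0: go back to sleep */
}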

@ -5,4 +5,4 @@
EXTRA_AFLAGS := -traditional

lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
-	checksum.o string.o semaphore.o uaccess.o
+	checksum.o string.o uaccess.o

@ -1,53 +0,0 @@
/*
 *  linux/arch/m68k/lib/semaphore.S
 *
 *  Copyright (C) 1996  Linus Torvalds
 *
 *  m68k version by Andreas Schwab
 */

#include <linux/linkage.h>
#include <asm/semaphore.h>

/*
 * The semaphore operations have a special calling sequence that
 * allow us to do a simpler in-line version of them.  These routines
 * need to convert that sequence back into the C sequence when
 * there is contention on the semaphore.
 */
ENTRY(__down_failed)
	moveml %a0/%d0/%d1,-(%sp)
	movel %a1,-(%sp)
	jbsr __down
	movel (%sp)+,%a1
	moveml (%sp)+,%a0/%d0/%d1
	rts

ENTRY(__down_failed_interruptible)
	movel %a0,-(%sp)
	movel %d1,-(%sp)
	movel %a1,-(%sp)
	jbsr __down_interruptible
	movel (%sp)+,%a1
	movel (%sp)+,%d1
	movel (%sp)+,%a0
	rts

ENTRY(__down_failed_trylock)
	movel %a0,-(%sp)
	movel %d1,-(%sp)
	movel %a1,-(%sp)
	jbsr __down_trylock
	movel (%sp)+,%a1
	movel (%sp)+,%d1
	movel (%sp)+,%a0
	rts

ENTRY(__up_wakeup)
	moveml %a0/%d0/%d1,-(%sp)
	movel %a1,-(%sp)
	jbsr __up
	movel (%sp)+,%a1
	moveml (%sp)+,%a0/%d0/%d1
	rts

@ -5,7 +5,7 @@
extra-y := vmlinux.lds

obj-y += dma.o entry.o init_task.o irq.o m68k_ksyms.o process.o ptrace.o \
-	 semaphore.o setup.o signal.o syscalltable.o sys_m68k.o time.o traps.o
+	 setup.o signal.o syscalltable.o sys_m68k.o time.o traps.o

obj-$(CONFIG_MODULES)	+= module.o
obj-$(CONFIG_COMEMPCI)	+= comempci.o

@ -13,7 +13,6 @@
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/io.h>
-#include <asm/semaphore.h>
#include <asm/checksum.h>
#include <asm/current.h>

@ -39,11 +38,6 @@ EXPORT_SYMBOL(csum_partial_copy_nocheck);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
-
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__down_failed_trylock);
-EXPORT_SYMBOL(__up_wakeup);

/*
 * libgcc functions - functions that are used internally by the
 * compiler...  (prototypes are not correct though, but that

@ -1,133 +0,0 @@
/*
 * Generic semaphore code. Buyer beware. Do your own
 * specific changes in <asm/semaphore-helper.h>
 */

#include <linux/sched.h>
#include <linux/err.h>
#include <linux/init.h>
#include <asm/semaphore-helper.h>

#ifndef CONFIG_RMW_INSNS
spinlock_t semaphore_wake_lock;
#endif

/*
 * Semaphores are implemented using a two-way counter:
 * The "count" variable is decremented for each process
 * that tries to sleep, while the "waking" variable is
 * incremented when the "up()" code goes to wake up waiting
 * processes.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * waking_non_zero() (from asm/semaphore.h) must execute
 * atomically.
 *
 * When __up() is called, the count was negative before
 * incrementing it, and we need to wake up somebody.
 *
 * This routine adds one to the count of processes that need to
 * wake up and exit.  ALL waiting processes actually wake up but
 * only the one that gets to the "waking" field first will gate
 * through and acquire the semaphore.  The others will go back
 * to sleep.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */
void __up(struct semaphore *sem)
{
	wake_one_more(sem);
	wake_up(&sem->wait);
}

/*
 * Perform the "down" function.  Return zero for semaphore acquired,
 * return negative for signalled out of the function.
 *
 * If called from __down, the return is ignored and the wait loop is
 * not interruptible.  This means that a task waiting on a semaphore
 * using "down()" cannot be killed until someone does an "up()" on
 * the semaphore.
 *
 * If called from __down_interruptible, the return value gets checked
 * upon return.  If the return value is negative then the task continues
 * with the negative value in the return register (it can be tested by
 * the caller).
 *
 * Either form may be used in conjunction with "up()".
 */

#define DOWN_HEAD(task_state)						\
									\
									\
	current->state = (task_state);					\
	add_wait_queue(&sem->wait, &wait);				\
									\
	/*								\
	 * Ok, we're set up.  sem->count is known to be less than zero	\
	 * so we must wait.						\
	 *								\
	 * We can let go the lock for purposes of waiting.		\
	 * We re-acquire it after awaking so as to protect		\
	 * all semaphore operations.					\
	 *								\
	 * If "up()" is called before we call waking_non_zero() then	\
	 * we will catch it right away.  If it is called later then	\
	 * we will have to go through a wakeup cycle to catch it.	\
	 *								\
	 * Multiple waiters contend for the semaphore lock to see	\
	 * who gets to gate through and who has to wait some more.	\
	 */								\
	for (;;) {

#define DOWN_TAIL(task_state)			\
		current->state = (task_state);	\
	}					\
	current->state = TASK_RUNNING;		\
	remove_wait_queue(&sem->wait, &wait);

void __sched __down(struct semaphore * sem)
{
	DECLARE_WAITQUEUE(wait, current);

	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
	if (waking_non_zero(sem))
		break;
	schedule();
	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
}

int __sched __down_interruptible(struct semaphore * sem)
{
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	DOWN_HEAD(TASK_INTERRUPTIBLE)

	ret = waking_non_zero_interruptible(sem, current);
	if (ret)
	{
		if (ret == 1)
			/* ret != 0 only if we get interrupted -arca */
			ret = 0;
		break;
	}
	schedule();
	DOWN_TAIL(TASK_INTERRUPTIBLE)
	return ret;
}

int __down_trylock(struct semaphore * sem)
{
	return waking_non_zero_trylock(sem);
}

@ -4,4 +4,4 @@

lib-y	:= ashldi3.o ashrdi3.o lshrdi3.o \
	   muldi3.o mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o \
-	   checksum.o semaphore.o memcpy.o memset.o delay.o
+	   checksum.o memcpy.o memset.o delay.o

@ -1,66 +0,0 @@
/*
 *  linux/arch/m68k/lib/semaphore.S
 *
 *  Copyright (C) 1996  Linus Torvalds
 *
 *  m68k version by Andreas Schwab
 *
 *  MAR/1999 -- modified to support ColdFire (gerg@snapgear.com)
 */

#include <linux/linkage.h>
#include <asm/semaphore.h>

/*
 * "down_failed" is called with the eventual return address
 * in %a0, and the address of the semaphore in %a1. We need
 * to increment the number of waiters on the semaphore,
 * call "__down()", and then eventually return to try again.
 */
ENTRY(__down_failed)
#ifdef CONFIG_COLDFIRE
	subl #12,%sp
	moveml %a0/%d0/%d1,(%sp)
#else
	moveml %a0/%d0/%d1,-(%sp)
#endif
	movel %a1,-(%sp)
	jbsr __down
	movel (%sp)+,%a1
	movel (%sp)+,%d0
	movel (%sp)+,%d1
	rts

ENTRY(__down_failed_interruptible)
	movel %a0,-(%sp)
	movel %d1,-(%sp)
	movel %a1,-(%sp)
	jbsr __down_interruptible
	movel (%sp)+,%a1
	movel (%sp)+,%d1
	rts

ENTRY(__up_wakeup)
#ifdef CONFIG_COLDFIRE
	subl #12,%sp
	moveml %a0/%d0/%d1,(%sp)
#else
	moveml %a0/%d0/%d1,-(%sp)
#endif
	movel %a1,-(%sp)
	jbsr __up
	movel (%sp)+,%a1
	movel (%sp)+,%d0
	movel (%sp)+,%d1
	rts

ENTRY(__down_failed_trylock)
	movel %a0,-(%sp)
	movel %d1,-(%sp)
	movel %a1,-(%sp)
	jbsr __down_trylock
	movel (%sp)+,%a1
	movel (%sp)+,%d1
	movel (%sp)+,%a0
	rts

@ -5,7 +5,7 @@
extra-y		:= head.o init_task.o vmlinux.lds

obj-y		+= cpu-probe.o branch.o entry.o genex.o irq.o process.o \
-		   ptrace.o reset.o semaphore.o setup.o signal.o syscall.o \
+		   ptrace.o reset.o setup.o signal.o syscall.o \
		   time.o topology.o traps.o unaligned.o

obj-$(CONFIG_CEVT_BCM1480)	+= cevt-bcm1480.o

@ -1,168 +0,0 @@
/*
 * MIPS-specific semaphore code.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 * Copyright (C) 2004 Ralf Baechle <ralf@linux-mips.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
 * to eliminate the SMP races in the old version between the updates
 * of `count' and `waking'.  Now we use negative `count' values to
 * indicate that some process(es) are waiting for the semaphore.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/atomic.h>
#include <asm/cpu-features.h>
#include <asm/errno.h>
#include <asm/semaphore.h>
#include <asm/war.h>
/*
 * Atomically update sem->count.
 * This does the equivalent of the following:
 *
 *	old_count = sem->count;
 *	tmp = MAX(old_count, 0) + incr;
 *	sem->count = tmp;
 *	return old_count;
 *
 * On machines without lld/scd we need a spinlock to make the manipulation of
 * sem->count and sem->waking atomic.  Scalability isn't an issue because
 * this lock is used on UP only so it's just an empty variable.
 */
static inline int __sem_update_count(struct semaphore *sem, int incr)
{
	int old_count, tmp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %2		# __sem_update_count	\n"
		"	sra	%1, %0, 31				\n"
		"	not	%1					\n"
		"	and	%1, %0, %1				\n"
		"	addu	%1, %1, %3				\n"
		"	sc	%1, %2					\n"
		"	beqzl	%1, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
		: "r" (incr), "m" (sem->count));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %2		# __sem_update_count	\n"
		"	sra	%1, %0, 31				\n"
		"	not	%1					\n"
		"	and	%1, %0, %1				\n"
		"	addu	%1, %1, %3				\n"
		"	sc	%1, %2					\n"
		"	beqz	%1, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
		: "r" (incr), "m" (sem->count));
	} else {
		static DEFINE_SPINLOCK(semaphore_lock);
		unsigned long flags;

		spin_lock_irqsave(&semaphore_lock, flags);
		old_count = atomic_read(&sem->count);
		tmp = max_t(int, old_count, 0) + incr;
		atomic_set(&sem->count, tmp);
		spin_unlock_irqrestore(&semaphore_lock, flags);
	}

	return old_count;
}
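
An aside for readers who don't read ll/sc assembler: each of the branches above implements the same pseudocode from the comment. A portable sketch using a GCC atomic builtin (illustration only, not part of the patch):

/* Sketch only: a CAS loop with the semantics of __sem_update_count(). */
static int sem_update_count_sketch(int *count, int incr)
{
	int old, new;

	do {
		old = *count;				/* snapshot         */
		new = (old > 0 ? old : 0) + incr;	/* MAX(old,0)+incr  */
	} while (__sync_val_compare_and_swap(count, old, new) != old);

	return old;					/* pre-update value */
}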

void __up(struct semaphore *sem)
{
	/*
	 * Note that we incremented count in up() before we came here,
	 * but that was ineffective since the result was <= 0, and
	 * any negative value of count is equivalent to 0.
	 * This ends up setting count to 1, unless count is now > 0
	 * (i.e. because some other cpu has called up() in the meantime),
	 * in which case we just increment count.
	 */
	__sem_update_count(sem, 1);
	wake_up(&sem->wait);
}

EXPORT_SYMBOL(__up);

/*
 * Note that when we come in to __down or __down_interruptible,
 * we have already decremented count, but that decrement was
 * ineffective since the result was < 0, and any negative value
 * of count is equivalent to 0.
 * Thus it is only when we decrement count from some value > 0
 * that we have actually got the semaphore.
 */
void __sched __down(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	add_wait_queue_exclusive(&sem->wait, &wait);

	/*
	 * Try to get the semaphore.  If the count is > 0, then we've
	 * got the semaphore; we decrement count and exit the loop.
	 * If the count is 0 or negative, we set it to -1, indicating
	 * that we are asleep, and then sleep.
	 */
	while (__sem_update_count(sem, -1) <= 0) {
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}
	remove_wait_queue(&sem->wait, &wait);
	__set_task_state(tsk, TASK_RUNNING);

	/*
	 * If there are any more sleepers, wake one of them up so
	 * that it can either get the semaphore, or set count to -1
	 * indicating that there are still processes sleeping.
	 */
	wake_up(&sem->wait);
}

EXPORT_SYMBOL(__down);

int __sched __down_interruptible(struct semaphore * sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	__set_task_state(tsk, TASK_INTERRUPTIBLE);
	add_wait_queue_exclusive(&sem->wait, &wait);

	while (__sem_update_count(sem, -1) <= 0) {
		if (signal_pending(current)) {
			/*
			 * A signal is pending - give up trying.
			 * Set sem->count to 0 if it is negative,
			 * since we are no longer sleeping.
			 */
			__sem_update_count(sem, 0);
			retval = -EINTR;
			break;
		}
		schedule();
		set_task_state(tsk, TASK_INTERRUPTIBLE);
	}
	remove_wait_queue(&sem->wait, &wait);
	__set_task_state(tsk, TASK_RUNNING);

	wake_up(&sem->wait);
	return retval;
}

EXPORT_SYMBOL(__down_interruptible);

@ -3,7 +3,7 @@
#
extra-y := head.o init_task.o vmlinux.lds

-obj-y   := process.o semaphore.o signal.o entry.o fpu.o traps.o irq.o \
+obj-y   := process.o signal.o entry.o fpu.o traps.o irq.o \
	   ptrace.o setup.o time.o sys_mn10300.o io.o kthread.o \
	   switch_to.o mn10300_ksyms.o kernel_execve.o

@ -1,149 +0,0 @@
/* MN10300 Semaphore implementation
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/module.h>
#include <asm/semaphore.h>

struct sem_waiter {
	struct list_head	list;
	struct task_struct	*task;
};

#if SEMAPHORE_DEBUG
void semtrace(struct semaphore *sem, const char *str)
{
	if (sem->debug)
		printk(KERN_DEBUG "[%d] %s({%d,%d})\n",
		       current->pid,
		       str,
		       atomic_read(&sem->count),
		       list_empty(&sem->wait_list) ? 0 : 1);
}
#else
#define semtrace(SEM, STR) do { } while (0)
#endif

/*
 * wait for a token to be granted from a semaphore
 * - entered with lock held and interrupts disabled
 */
void __down(struct semaphore *sem, unsigned long flags)
{
	struct task_struct *tsk = current;
	struct sem_waiter waiter;

	semtrace(sem, "Entering __down");

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irqrestore(&sem->wait_lock, flags);

	/* wait to be given the semaphore */
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
	semtrace(sem, "Leaving __down");
}
EXPORT_SYMBOL(__down);

/*
 * interruptibly wait for a token to be granted from a semaphore
 * - entered with lock held and interrupts disabled
 */
int __down_interruptible(struct semaphore *sem, unsigned long flags)
{
	struct task_struct *tsk = current;
	struct sem_waiter waiter;
	int ret;

	semtrace(sem, "Entering __down_interruptible");

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	set_task_state(tsk, TASK_INTERRUPTIBLE);

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	/* wait to be given the semaphore */
	ret = 0;
	for (;;) {
		if (!waiter.task)
			break;
		if (unlikely(signal_pending(current)))
			goto interrupted;
		schedule();
		set_task_state(tsk, TASK_INTERRUPTIBLE);
	}

 out:
	tsk->state = TASK_RUNNING;
	semtrace(sem, "Leaving __down_interruptible");
	return ret;

 interrupted:
	spin_lock_irqsave(&sem->wait_lock, flags);
	list_del(&waiter.list);
	spin_unlock_irqrestore(&sem->wait_lock, flags);

	ret = 0;
	if (!waiter.task) {
		put_task_struct(current);
		ret = -EINTR;
	}
	goto out;
}
EXPORT_SYMBOL(__down_interruptible);

/*
 * release a single token back to a semaphore
 * - entered with lock held and interrupts disabled
 */
void __up(struct semaphore *sem)
{
	struct task_struct *tsk;
	struct sem_waiter *waiter;

	semtrace(sem, "Entering __up");

	/* grant the token to the process at the front of the queue */
	waiter = list_entry(sem->wait_list.next, struct sem_waiter, list);

	/* We must be careful not to touch 'waiter' after we set ->task = NULL.
	 * It is allocated on the waiter's stack and may become invalid at
	 * any time after that point (due to a wakeup from another source).
	 */
	list_del_init(&waiter->list);
	tsk = waiter->task;
	smp_mb();
	waiter->task = NULL;
	wake_up_process(tsk);
	put_task_struct(tsk);

	semtrace(sem, "Leaving __up");
}
EXPORT_SYMBOL(__up);

@ -9,7 +9,7 @@ AFLAGS_pacache.o := -traditional
obj-y	     := cache.o pacache.o setup.o traps.o time.o irq.o \
		pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \
-		ptrace.o hardware.o inventory.o drivers.o semaphore.o \
+		ptrace.o hardware.o inventory.o drivers.o \
		signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \
		process.o processor.o pdc_cons.o pdc_chassis.o unwind.o \
		topology.o

@ -69,11 +69,6 @@ EXPORT_SYMBOL(memcpy_toio);
EXPORT_SYMBOL(memcpy_fromio);
EXPORT_SYMBOL(memset_io);

-#include <asm/semaphore.h>
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down);
-
extern void $$divI(void);
extern void $$divU(void);
extern void $$remI(void);

@ -1,102 +0,0 @@
/*
 * Semaphore implementation Copyright (c) 2001 Matthew Wilcox, Hewlett-Packard
 */

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/init.h>

/*
 * Semaphores are complex as we wish to avoid using two variables.
 * `count' has multiple roles, depending on its value.  If it is positive
 * or zero, there are no waiters.  The functions here will never be
 * called; see <asm/semaphore.h>
 *
 * When count is -1 it indicates there is at least one task waiting
 * for the semaphore.
 *
 * When count is less than that, there are '- count - 1' wakeups
 * pending.  ie if it has value -3, there are 2 wakeups pending.
 *
 * Note that these functions are only called when there is contention
 * on the lock, and as such all this is the "non-critical" part of the
 * whole semaphore business. The critical part is the inline stuff in
 * <asm/semaphore.h> where we want to avoid any extra jumps and calls.
 */
void __up(struct semaphore *sem)
{
	sem->count--;
	wake_up(&sem->wait);
}

#define wakers(count) (-1 - count)

#define DOWN_HEAD							\
	int ret = 0;							\
	DECLARE_WAITQUEUE(wait, current);				\
									\
	/* Note that someone is waiting */				\
	if (sem->count == 0)						\
		sem->count = -1;					\
									\
	/* protected by the sentry still -- use unlocked version */	\
	wait.flags = WQ_FLAG_EXCLUSIVE;					\
	__add_wait_queue_tail(&sem->wait, &wait);			\
lost_race:								\
	spin_unlock_irq(&sem->sentry);					\

#define DOWN_TAIL							\
	spin_lock_irq(&sem->sentry);					\
	if (wakers(sem->count) == 0 && ret == 0)			\
		goto lost_race;	/* Someone stole our wakeup */		\
	__remove_wait_queue(&sem->wait, &wait);				\
	current->state = TASK_RUNNING;					\
	if (!waitqueue_active(&sem->wait) && (sem->count < 0))		\
		sem->count = wakers(sem->count);

#define UPDATE_COUNT							\
	sem->count += (sem->count < 0) ? 1 : - 1;
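
A quick worked trace of this single-variable encoding (annotation only, not part of the patch):

/* Illustration of the count encoding used above. */
int count = -1;		/* one waiter, no wakeups: wakers(-1) == 0 */
count--;		/* __up(): count == -2, wakers(-2) == 1    */
count--;		/* __up(): count == -3, wakers(-3) == 2    */

/* a waiter leaving __down() consumes one pending wakeup: */
count += (count < 0) ? 1 : -1;	/* UPDATE_COUNT: back to -2 */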

void __sched __down(struct semaphore * sem)
{
	DOWN_HEAD

	for(;;) {
		set_task_state(current, TASK_UNINTERRUPTIBLE);
		/* we can _read_ this without the sentry */
		if (sem->count != -1)
			break;
		schedule();
	}

	DOWN_TAIL
	UPDATE_COUNT
}

int __sched __down_interruptible(struct semaphore * sem)
{
	DOWN_HEAD

	for(;;) {
		set_task_state(current, TASK_INTERRUPTIBLE);
		/* we can _read_ this without the sentry */
		if (sem->count != -1)
			break;

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		schedule();
	}

	DOWN_TAIL

	if (!ret) {
		UPDATE_COUNT
	}

	return ret;
}

@ -12,7 +12,7 @@ CFLAGS_prom_init.o += -fPIC
CFLAGS_btext.o += -fPIC
endif

-obj-y				:= semaphore.o cputable.o ptrace.o syscalls.o \
+obj-y				:= cputable.o ptrace.o syscalls.o \
				   irq.o align.o signal_32.o pmc.o vdso.o \
				   init_task.o process.o systbl.o idle.o \
				   signal.o

@ -15,7 +15,6 @@
#include <linux/bitops.h>

#include <asm/page.h>
-#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/uaccess.h>

@ -1,135 +0,0 @@
/*
 * PowerPC-specific semaphore code.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
 * to eliminate the SMP races in the old version between the updates
 * of `count' and `waking'.  Now we use negative `count' values to
 * indicate that some process(es) are waiting for the semaphore.
 */

#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>

#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <asm/errno.h>

/*
 * Atomically update sem->count.
 * This does the equivalent of the following:
 *
 *	old_count = sem->count;
 *	tmp = MAX(old_count, 0) + incr;
 *	sem->count = tmp;
 *	return old_count;
 */
static inline int __sem_update_count(struct semaphore *sem, int incr)
{
	int old_count, tmp;

	__asm__ __volatile__("\n"
"1:	lwarx	%0,0,%3\n"
"	srawi	%1,%0,31\n"
"	andc	%1,%0,%1\n"
"	add	%1,%1,%4\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n"
"	bne	1b"
	: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
	: "r" (&sem->count), "r" (incr), "m" (sem->count)
	: "cc");

	return old_count;
}

void __up(struct semaphore *sem)
{
	/*
	 * Note that we incremented count in up() before we came here,
	 * but that was ineffective since the result was <= 0, and
	 * any negative value of count is equivalent to 0.
	 * This ends up setting count to 1, unless count is now > 0
	 * (i.e. because some other cpu has called up() in the meantime),
	 * in which case we just increment count.
	 */
	__sem_update_count(sem, 1);
	wake_up(&sem->wait);
}
EXPORT_SYMBOL(__up);

/*
 * Note that when we come in to __down or __down_interruptible,
 * we have already decremented count, but that decrement was
 * ineffective since the result was < 0, and any negative value
 * of count is equivalent to 0.
 * Thus it is only when we decrement count from some value > 0
 * that we have actually got the semaphore.
 */
void __sched __down(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	add_wait_queue_exclusive(&sem->wait, &wait);

	/*
	 * Try to get the semaphore.  If the count is > 0, then we've
	 * got the semaphore; we decrement count and exit the loop.
	 * If the count is 0 or negative, we set it to -1, indicating
	 * that we are asleep, and then sleep.
	 */
	while (__sem_update_count(sem, -1) <= 0) {
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}
	remove_wait_queue(&sem->wait, &wait);
	__set_task_state(tsk, TASK_RUNNING);

	/*
	 * If there are any more sleepers, wake one of them up so
	 * that it can either get the semaphore, or set count to -1
	 * indicating that there are still processes sleeping.
	 */
	wake_up(&sem->wait);
}
EXPORT_SYMBOL(__down);

int __sched __down_interruptible(struct semaphore * sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	__set_task_state(tsk, TASK_INTERRUPTIBLE);
	add_wait_queue_exclusive(&sem->wait, &wait);

	while (__sem_update_count(sem, -1) <= 0) {
		if (signal_pending(current)) {
			/*
			 * A signal is pending - give up trying.
			 * Set sem->count to 0 if it is negative,
			 * since we are no longer sleeping.
			 */
			__sem_update_count(sem, 0);
			retval = -EINTR;
			break;
		}
		schedule();
		set_task_state(tsk, TASK_INTERRUPTIBLE);
	}
	remove_wait_queue(&sem->wait, &wait);
	__set_task_state(tsk, TASK_RUNNING);

	wake_up(&sem->wait);
	return retval;
}
EXPORT_SYMBOL(__down_interruptible);

@ -1,131 +0,0 @@
/*
 * PowerPC-specific semaphore code.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
 * to eliminate the SMP races in the old version between the updates
 * of `count' and `waking'.  Now we use negative `count' values to
 * indicate that some process(es) are waiting for the semaphore.
 */

#include <linux/sched.h>
#include <linux/init.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <asm/errno.h>

/*
 * Atomically update sem->count.
 * This does the equivalent of the following:
 *
 *	old_count = sem->count;
 *	tmp = MAX(old_count, 0) + incr;
 *	sem->count = tmp;
 *	return old_count;
 */
static inline int __sem_update_count(struct semaphore *sem, int incr)
{
	int old_count, tmp;

	__asm__ __volatile__("\n"
"1:	lwarx	%0,0,%3\n"
"	srawi	%1,%0,31\n"
"	andc	%1,%0,%1\n"
"	add	%1,%1,%4\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n"
"	bne	1b"
	: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
	: "r" (&sem->count), "r" (incr), "m" (sem->count)
	: "cc");

	return old_count;
}

void __up(struct semaphore *sem)
{
	/*
	 * Note that we incremented count in up() before we came here,
	 * but that was ineffective since the result was <= 0, and
	 * any negative value of count is equivalent to 0.
	 * This ends up setting count to 1, unless count is now > 0
	 * (i.e. because some other cpu has called up() in the meantime),
	 * in which case we just increment count.
	 */
	__sem_update_count(sem, 1);
	wake_up(&sem->wait);
}

/*
 * Note that when we come in to __down or __down_interruptible,
 * we have already decremented count, but that decrement was
 * ineffective since the result was < 0, and any negative value
 * of count is equivalent to 0.
 * Thus it is only when we decrement count from some value > 0
 * that we have actually got the semaphore.
 */
void __sched __down(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	tsk->state = TASK_UNINTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);
	smp_wmb();

	/*
	 * Try to get the semaphore.  If the count is > 0, then we've
	 * got the semaphore; we decrement count and exit the loop.
	 * If the count is 0 or negative, we set it to -1, indicating
	 * that we are asleep, and then sleep.
	 */
	while (__sem_update_count(sem, -1) <= 0) {
		schedule();
		tsk->state = TASK_UNINTERRUPTIBLE;
	}
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;

	/*
	 * If there are any more sleepers, wake one of them up so
	 * that it can either get the semaphore, or set count to -1
	 * indicating that there are still processes sleeping.
	 */
	wake_up(&sem->wait);
}

int __sched __down_interruptible(struct semaphore * sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	tsk->state = TASK_INTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);
	smp_wmb();

	while (__sem_update_count(sem, -1) <= 0) {
		if (signal_pending(current)) {
			/*
			 * A signal is pending - give up trying.
			 * Set sem->count to 0 if it is negative,
			 * since we are no longer sleeping.
			 */
			__sem_update_count(sem, 0);
			retval = -EINTR;
			break;
		}
		schedule();
		tsk->state = TASK_INTERRUPTIBLE;
	}
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
	wake_up(&sem->wait);
	return retval;
}

@ -11,7 +11,7 @@ CFLAGS_smp.o := -Wno-nonnull

obj-y	:=  bitmap.o traps.o time.o process.o base.o early.o \
	    setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
-	    semaphore.o s390_ext.o debug.o irq.o ipl.o dis.o diag.o
+	    s390_ext.o debug.o irq.o ipl.o dis.o diag.o

obj-y	+= $(if $(CONFIG_64BIT),entry64.o,entry.o)
obj-y	+= $(if $(CONFIG_64BIT),reipl64.o,reipl.o)

@ -26,13 +26,6 @@ EXPORT_SYMBOL(_ni_bitmap);
EXPORT_SYMBOL(_zb_findmap);
EXPORT_SYMBOL(_sb_findmap);

-/*
- * semaphore ops
- */
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-
/*
 * binfmt_elf loader
 */

@ -1,108 +0,0 @@
/*
 *  linux/arch/s390/kernel/semaphore.c
 *
 *  S390 version
 *    Copyright (C) 1998-2000 IBM Corporation
 *    Author(s): Martin Schwidefsky
 *
 *  Derived from "linux/arch/i386/kernel/semaphore.c
 *    Copyright (C) 1999, Linus Torvalds
 *
 */
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/init.h>

#include <asm/semaphore.h>

/*
 * Atomically update sem->count. Equivalent to:
 *   old_val = sem->count.counter;
 *   new_val = ((old_val >= 0) ? old_val : 0) + incr;
 *   sem->count.counter = new_val;
 *   return old_val;
 */
static inline int __sem_update_count(struct semaphore *sem, int incr)
{
	int old_val, new_val;

	asm volatile(
		"	l	%0,0(%3)\n"
		"0:	ltr	%1,%0\n"
		"	jhe	1f\n"
		"	lhi	%1,0\n"
		"1:	ar	%1,%4\n"
		"	cs	%0,%1,0(%3)\n"
		"	jl	0b\n"
		: "=&d" (old_val), "=&d" (new_val), "=m" (sem->count)
		: "a" (&sem->count), "d" (incr), "m" (sem->count)
		: "cc");
	return old_val;
}
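
For readers unfamiliar with s390 assembler, the loop above reads roughly as follows; `compare_and_swap` here is a hypothetical helper standing in for the `cs` instruction (annotation only, not part of the patch):

/* Annotation of the cs loop above, not part of the patch. */
old_val = sem->count.counter;			/* l   %0,0(%3)       */
do {
	new_val = old_val < 0 ? 0 : old_val;	/* ltr/jhe/lhi: clamp */
	new_val += incr;			/* ar  %1,%4          */
	/* cs stores new_val if the counter still equals old_val;
	 * on failure it reloads old_val and we retry (jl 0b).     */
} while (!compare_and_swap(&sem->count.counter, &old_val, new_val));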

/*
 * The inline function up() incremented count but the result
 * was <= 0. This indicates that some process is waiting on
 * the semaphore. The semaphore is free and we'll wake the
 * first sleeping process, so we set count to 1 unless some
 * other cpu has called up in the meantime in which case
 * we just increment count by 1.
 */
void __up(struct semaphore *sem)
{
	__sem_update_count(sem, 1);
	wake_up(&sem->wait);
}

/*
 * The inline function down() decremented count and the result
 * was < 0. The wait loop will atomically test and update the
 * semaphore counter following the rules:
 *   count > 0: decrement count, wake up queue and exit.
 *   count <= 0: set count to -1, go to sleep.
 */
void __sched __down(struct semaphore * sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	add_wait_queue_exclusive(&sem->wait, &wait);
	while (__sem_update_count(sem, -1) <= 0) {
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}
	remove_wait_queue(&sem->wait, &wait);
	__set_task_state(tsk, TASK_RUNNING);
	wake_up(&sem->wait);
}

/*
 * Same as __down() with an additional test for signals.
 * If a signal is pending the count is updated as follows:
 *   count > 0: wake up queue and exit.
 *   count <= 0: set count to 0, wake up queue and exit.
 */
int __sched __down_interruptible(struct semaphore * sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	__set_task_state(tsk, TASK_INTERRUPTIBLE);
	add_wait_queue_exclusive(&sem->wait, &wait);
	while (__sem_update_count(sem, -1) <= 0) {
		if (signal_pending(current)) {
			__sem_update_count(sem, 0);
			retval = -EINTR;
			break;
		}
		schedule();
		set_task_state(tsk, TASK_INTERRUPTIBLE);
	}
	remove_wait_queue(&sem->wait, &wait);
	__set_task_state(tsk, TASK_RUNNING);
	wake_up(&sem->wait);
	return retval;
}

@ -5,7 +5,7 @@
extra-y	:= head_32.o init_task.o vmlinux.lds

obj-y	:= debugtraps.o io.o io_generic.o irq.o machvec.o process_32.o \
-	   ptrace_32.o semaphore.o setup.o signal_32.o sys_sh.o sys_sh32.o \
+	   ptrace_32.o setup.o signal_32.o sys_sh.o sys_sh32.o \
	   syscalls_32.o time_32.o topology.o traps.o traps_32.o

obj-y	+= cpu/ timers/

@ -1,7 +1,7 @@
extra-y	:= head_64.o init_task.o vmlinux.lds

obj-y	:= debugtraps.o io.o io_generic.o irq.o machvec.o process_64.o \
-	   ptrace_64.o semaphore.o setup.o signal_64.o sys_sh.o sys_sh64.o \
+	   ptrace_64.o setup.o signal_64.o sys_sh.o sys_sh64.o \
	   syscalls_64.o time_64.o topology.o traps.o traps_64.o

obj-y	+= cpu/ timers/

@@ -1,139 +0,0 @@
-/*
- * Just taken from alpha implementation.
- * This can't work well, perhaps.
- */
-/*
- *	Generic semaphore code. Buyer beware. Do your own
- *	specific changes in <asm/semaphore-helper.h>
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/init.h>
-#include <asm/semaphore.h>
-#include <asm/semaphore-helper.h>
-
-DEFINE_SPINLOCK(semaphore_wake_lock);
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to sleep, while the "waking" variable is
- * incremented when the "up()" code goes to wake up waiting
- * processes.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * waking_non_zero() (from asm/semaphore.h) must execute
- * atomically.
- *
- * When __up() is called, the count was negative before
- * incrementing it, and we need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to
- * wake up and exit.  ALL waiting processes actually wake up but
- * only the one that gets to the "waking" field first will gate
- * through and acquire the semaphore.  The others will go back
- * to sleep.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-void __up(struct semaphore *sem)
-{
-	wake_one_more(sem);
-	wake_up(&sem->wait);
-}
-
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from __down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from __down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- *
- */
-
-#define DOWN_VAR				\
-	struct task_struct *tsk = current;	\
-	wait_queue_t wait;			\
-	init_waitqueue_entry(&wait, tsk);
-
-#define DOWN_HEAD(task_state)					\
-								\
-								\
-	tsk->state = (task_state);				\
-	add_wait_queue(&sem->wait, &wait);			\
-								\
-	/* \
-	 * Ok, we're set up.  sem->count is known to be less than zero \
-	 * so we must wait. \
-	 * \
-	 * We can let go the lock for purposes of waiting. \
-	 * We re-acquire it after awaking so as to protect \
-	 * all semaphore operations. \
-	 * \
-	 * If "up()" is called before we call waking_non_zero() then \
-	 * we will catch it right away.  If it is called later then \
-	 * we will have to go through a wakeup cycle to catch it. \
-	 * \
-	 * Multiple waiters contend for the semaphore lock to see \
-	 * who gets to gate through and who has to wait some more. \
-	 */ \
-	for (;;) {
-
-#define DOWN_TAIL(task_state)			\
-		tsk->state = (task_state);	\
-	}					\
-	tsk->state = TASK_RUNNING;		\
-	remove_wait_queue(&sem->wait, &wait);
-
-void __sched __down(struct semaphore * sem)
-{
-	DOWN_VAR
-	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
-	if (waking_non_zero(sem))
-		break;
-	schedule();
-	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	int ret = 0;
-	DOWN_VAR
-	DOWN_HEAD(TASK_INTERRUPTIBLE)
-
-	ret = waking_non_zero_interruptible(sem, tsk);
-	if (ret)
-	{
-		if (ret == 1)
-			/* ret != 0 only if we get interrupted -arca */
-			ret = 0;
-		break;
-	}
-	schedule();
-	DOWN_TAIL(TASK_INTERRUPTIBLE)
-	return ret;
-}
-
-int __down_trylock(struct semaphore * sem)
-{
-	return waking_non_zero_trylock(sem);
-}
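Note, added for illustration and not part of the commit: the deleted file above implements the classic count/waking handoff. Below is a minimal user-space sketch of that handoff, assuming C11 atomics and a pthread mutex standing in for semaphore_wake_lock; all names are invented and nothing here is kernel code.

	#include <pthread.h>
	#include <stdatomic.h>

	struct sem_sketch {
		atomic_int count;          /* decremented by every down()    */
		int waking;                /* wakeup credits granted by up() */
		pthread_mutex_t wake_lock;
	};

	/* up() slow path: grant one wakeup credit ("wake one more"). */
	static void wake_one_more_sketch(struct sem_sketch *s)
	{
		pthread_mutex_lock(&s->wake_lock);
		if (atomic_load(&s->count) <= 0)
			s->waking++;
		pthread_mutex_unlock(&s->wake_lock);
	}

	/* down() slow path: all sleepers wake, but only the one that
	 * consumes a credit gates through; the rest go back to sleep. */
	static int waking_non_zero_sketch(struct sem_sketch *s)
	{
		int got = 0;

		pthread_mutex_lock(&s->wake_lock);
		if (s->waking > 0) {
			s->waking--;
			got = 1;
		}
		pthread_mutex_unlock(&s->wake_lock);
		return got;
	}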
@@ -9,7 +9,6 @@
 #include <linux/pci.h>
 #include <linux/irq.h>
 #include <asm/sections.h>
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/checksum.h>
@@ -48,12 +47,6 @@ EXPORT_SYMBOL(__copy_user);
 EXPORT_SYMBOL(get_vm_area);
 #endif

-/* semaphore exports */
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
-
 EXPORT_SYMBOL(__udelay);
 EXPORT_SYMBOL(__ndelay);
 EXPORT_SYMBOL(__const_udelay);
@@ -16,7 +16,6 @@
 #include <linux/in6.h>
 #include <linux/interrupt.h>
 #include <linux/screen_info.h>
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/checksum.h>
@@ -37,9 +36,6 @@ EXPORT_SYMBOL(csum_partial_copy_nocheck);
 EXPORT_SYMBOL(screen_info);
 #endif

-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__up);
 EXPORT_SYMBOL(__put_user_asm_l);
 EXPORT_SYMBOL(__get_user_asm_l);
 EXPORT_SYMBOL(copy_page);
@@ -12,7 +12,7 @@ obj-y := entry.o wof.o wuf.o etrap.o rtrap.o traps.o $(IRQ_OBJS) \
 	    sys_sparc.o sunos_asm.o systbls.o \
 	    time.o windows.o cpu.o devices.o sclow.o \
 	    tadpole.o tick14.o ptrace.o sys_solaris.o \
-	    unaligned.o una_asm.o muldiv.o semaphore.o \
+	    unaligned.o una_asm.o muldiv.o \
 	    prom.o of_device.o devres.o

 devres-y = ../../../kernel/irq/devres.o
@@ -1,155 +0,0 @@
-/* $Id: semaphore.c,v 1.7 2001/04/18 21:06:05 davem Exp $ */
-
-/* sparc32 semaphore implementation, based on i386 version */
-
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is
- * protected by the semaphore spinlock.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
-	wake_up(&sem->wait);
-}
-
-static DEFINE_SPINLOCK(semaphore_lock);
-
-void __sched __down(struct semaphore * sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	spin_lock_irq(&semaphore_lock);
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock.
-		 */
-		if (!atomic24_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
-
-		schedule();
-		tsk->state = TASK_UNINTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
-	}
-	spin_unlock_irq(&semaphore_lock);
-	remove_wait_queue(&sem->wait, &wait);
-	tsk->state = TASK_RUNNING;
-	wake_up(&sem->wait);
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	tsk->state = TASK_INTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	spin_lock_irq(&semaphore_lock);
-	sem->sleepers ++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * With signals pending, this turns into
-		 * the trylock failure case - we won't be
-		 * sleeping, and we* can't get the lock as
-		 * it has contention. Just correct the count
-		 * and exit.
-		 */
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			sem->sleepers = 0;
-			atomic24_add(sleepers, &sem->count);
-			break;
-		}
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock. The
-		 * "-1" is because we're still hoping to get
-		 * the lock.
-		 */
-		if (!atomic24_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
-
-		schedule();
-		tsk->state = TASK_INTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
-	}
-	spin_unlock_irq(&semaphore_lock);
-	tsk->state = TASK_RUNNING;
-	remove_wait_queue(&sem->wait, &wait);
-	wake_up(&sem->wait);
-	return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- */
-int __down_trylock(struct semaphore * sem)
-{
-	int sleepers;
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_lock, flags);
-	sleepers = sem->sleepers + 1;
-	sem->sleepers = 0;
-
-	/*
-	 * Add "everybody else" and us into it. They aren't
-	 * playing, because we own the spinlock.
-	 */
-	if (!atomic24_add_negative(sleepers, &sem->count))
-		wake_up(&sem->wait);
-
-	spin_unlock_irqrestore(&semaphore_lock, flags);
-	return 1;
-}
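Note, added for illustration and not part of the commit: the "sleepers" bookkeeping in the file above is easiest to follow with a concrete trace. The stand-in helper and the hypothetical two-task trace below assume C11 atomics; the names are invented.

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Portable stand-in for the sparc32 atomic24_add_negative() used
	 * above: add i to *v and report whether the result went negative. */
	static bool add_negative_sketch(int i, atomic_int *v)
	{
		return atomic_fetch_add(v, i) + i < 0;
	}

	/*
	 * Hypothetical trace, two tasks, semaphore starts at
	 * count = 1, sleepers = 0:
	 *
	 *   A: down() fast path:  count 1 -> 0      (A holds the semaphore)
	 *   B: down() fast path:  count 0 -> -1, B enters __down()
	 *   B: sleepers = 1; add_negative(sleepers - 1 = 0) leaves count
	 *      at -1 (negative), so B sets sleepers = 1 and sleeps.
	 *   A: up():              count -1 -> 0, wakes B
	 *   B: add_negative(0) leaves count at 0 (not negative), so B
	 *      sets sleepers = 0 and now holds the semaphore.
	 *
	 * However many tasks pile up, the spinlocked loop folds all but
	 * one pending decrement back into "count", so the counter never
	 * drifts.
	 */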
@@ -107,11 +107,6 @@ EXPORT_SYMBOL(___rw_read_try);
 EXPORT_SYMBOL(___rw_read_exit);
 EXPORT_SYMBOL(___rw_write_enter);
 #endif
-/* semaphores */
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__down_interruptible);

 EXPORT_SYMBOL(sparc_valid_addr_bitmap);
 EXPORT_SYMBOL(phys_base);
@@ -10,7 +10,7 @@ extra-y := head.o init_task.o vmlinux.lds
 obj-y		:= process.o setup.o cpu.o idprom.o \
 		   traps.o auxio.o una_asm.o sysfs.o iommu.o \
 		   irq.o ptrace.o time.o sys_sparc.o signal.o \
-		   unaligned.o central.o pci.o starfire.o semaphore.o \
+		   unaligned.o central.o pci.o starfire.o \
 		   power.o sbus.o sparc64_ksyms.o chmc.o \
 		   visemul.o prom.o of_device.o hvapi.o sstate.o mdesc.o
@@ -1,254 +0,0 @@
-/* semaphore.c: Sparc64 semaphore implementation.
- *
- * This is basically the PPC semaphore scheme ported to use
- * the sparc64 atomic instructions, so see the PPC code for
- * credits.
- */
-
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-
-/*
- * Atomically update sem->count.
- * This does the equivalent of the following:
- *
- *	old_count = sem->count;
- *	tmp = MAX(old_count, 0) + incr;
- *	sem->count = tmp;
- *	return old_count;
- */
-static inline int __sem_update_count(struct semaphore *sem, int incr)
-{
-	int old_count, tmp;
-
-	__asm__ __volatile__("\n"
-"	! __sem_update_count old_count(%0) tmp(%1) incr(%4) &sem->count(%3)\n"
-"1:	ldsw	[%3], %0\n"
-"	mov	%0, %1\n"
-"	cmp	%0, 0\n"
-"	movl	%%icc, 0, %1\n"
-"	add	%1, %4, %1\n"
-"	cas	[%3], %0, %1\n"
-"	cmp	%0, %1\n"
-"	membar	#StoreLoad | #StoreStore\n"
-"	bne,pn	%%icc, 1b\n"
-"	 nop\n"
-	: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
-	: "r" (&sem->count), "r" (incr), "m" (sem->count)
-	: "cc");
-
-	return old_count;
-}
-
-static void __up(struct semaphore *sem)
-{
-	__sem_update_count(sem, 1);
-	wake_up(&sem->wait);
-}
-
-void up(struct semaphore *sem)
-{
-	/* This atomically does:
-	 *	old_val = sem->count;
-	 *	new_val = sem->count + 1;
-	 *	sem->count = new_val;
-	 *	if (old_val < 0)
-	 *		__up(sem);
-	 *
-	 * The (old_val < 0) test is equivalent to
-	 * the more straightforward (new_val <= 0),
-	 * but it is easier to test the former because
-	 * of how the CAS instruction works.
-	 */
-
-	__asm__ __volatile__("\n"
-"	! up sem(%0)\n"
-"	membar	#StoreLoad | #LoadLoad\n"
-"1:	lduw	[%0], %%g1\n"
-"	add	%%g1, 1, %%g7\n"
-"	cas	[%0], %%g1, %%g7\n"
-"	cmp	%%g1, %%g7\n"
-"	bne,pn	%%icc, 1b\n"
-"	 addcc	%%g7, 1, %%g0\n"
-"	membar	#StoreLoad | #StoreStore\n"
-"	ble,pn	%%icc, 3f\n"
-"	 nop\n"
-"2:\n"
-"	.subsection 2\n"
-"3:	mov	%0, %%g1\n"
-"	save	%%sp, -160, %%sp\n"
-"	call	%1\n"
-"	 mov	%%g1, %%o0\n"
-"	ba,pt	%%xcc, 2b\n"
-"	 restore\n"
-"	.previous\n"
-	: : "r" (sem), "i" (__up)
-	: "g1", "g2", "g3", "g7", "memory", "cc");
-}
-
-static void __sched __down(struct semaphore * sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	while (__sem_update_count(sem, -1) <= 0) {
-		schedule();
-		tsk->state = TASK_UNINTERRUPTIBLE;
-	}
-	remove_wait_queue(&sem->wait, &wait);
-	tsk->state = TASK_RUNNING;
-
-	wake_up(&sem->wait);
-}
-
-void __sched down(struct semaphore *sem)
-{
-	might_sleep();
-	/* This atomically does:
-	 *	old_val = sem->count;
-	 *	new_val = sem->count - 1;
-	 *	sem->count = new_val;
-	 *	if (old_val < 1)
-	 *		__down(sem);
-	 *
-	 * The (old_val < 1) test is equivalent to
-	 * the more straightforward (new_val < 0),
-	 * but it is easier to test the former because
-	 * of how the CAS instruction works.
-	 */
-
-	__asm__ __volatile__("\n"
-"	! down sem(%0)\n"
-"1:	lduw	[%0], %%g1\n"
-"	sub	%%g1, 1, %%g7\n"
-"	cas	[%0], %%g1, %%g7\n"
-"	cmp	%%g1, %%g7\n"
-"	bne,pn	%%icc, 1b\n"
-"	 cmp	%%g7, 1\n"
-"	membar	#StoreLoad | #StoreStore\n"
-"	bl,pn	%%icc, 3f\n"
-"	 nop\n"
-"2:\n"
-"	.subsection 2\n"
-"3:	mov	%0, %%g1\n"
-"	save	%%sp, -160, %%sp\n"
-"	call	%1\n"
-"	 mov	%%g1, %%o0\n"
-"	ba,pt	%%xcc, 2b\n"
-"	 restore\n"
-"	.previous\n"
-	: : "r" (sem), "i" (__down)
-	: "g1", "g2", "g3", "g7", "memory", "cc");
-}
-
-int down_trylock(struct semaphore *sem)
-{
-	int ret;
-
-	/* This atomically does:
-	 *	old_val = sem->count;
-	 *	new_val = sem->count - 1;
-	 *	if (old_val < 1) {
-	 *		ret = 1;
-	 *	} else {
-	 *		sem->count = new_val;
-	 *		ret = 0;
-	 *	}
-	 *
-	 * The (old_val < 1) test is equivalent to
-	 * the more straightforward (new_val < 0),
-	 * but it is easier to test the former because
-	 * of how the CAS instruction works.
-	 */
-
-	__asm__ __volatile__("\n"
-"	! down_trylock sem(%1) ret(%0)\n"
-"1:	lduw	[%1], %%g1\n"
-"	sub	%%g1, 1, %%g7\n"
-"	cmp	%%g1, 1\n"
-"	bl,pn	%%icc, 2f\n"
-"	 mov	1, %0\n"
-"	cas	[%1], %%g1, %%g7\n"
-"	cmp	%%g1, %%g7\n"
-"	bne,pn	%%icc, 1b\n"
-"	 mov	0, %0\n"
-"	membar	#StoreLoad | #StoreStore\n"
-"2:\n"
-	: "=&r" (ret)
-	: "r" (sem)
-	: "g1", "g7", "memory", "cc");
-
-	return ret;
-}
-
-static int __sched __down_interruptible(struct semaphore * sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-
-	tsk->state = TASK_INTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	while (__sem_update_count(sem, -1) <= 0) {
-		if (signal_pending(current)) {
-			__sem_update_count(sem, 0);
-			retval = -EINTR;
-			break;
-		}
-		schedule();
-		tsk->state = TASK_INTERRUPTIBLE;
-	}
-	tsk->state = TASK_RUNNING;
-	remove_wait_queue(&sem->wait, &wait);
-	wake_up(&sem->wait);
-	return retval;
-}
-
-int __sched down_interruptible(struct semaphore *sem)
-{
-	int ret = 0;
-
-	might_sleep();
-	/* This atomically does:
-	 *	old_val = sem->count;
-	 *	new_val = sem->count - 1;
-	 *	sem->count = new_val;
-	 *	if (old_val < 1)
-	 *		ret = __down_interruptible(sem);
-	 *
-	 * The (old_val < 1) test is equivalent to
-	 * the more straightforward (new_val < 0),
-	 * but it is easier to test the former because
-	 * of how the CAS instruction works.
-	 */
-
-	__asm__ __volatile__("\n"
-"	! down_interruptible sem(%2) ret(%0)\n"
-"1:	lduw	[%2], %%g1\n"
-"	sub	%%g1, 1, %%g7\n"
-"	cas	[%2], %%g1, %%g7\n"
-"	cmp	%%g1, %%g7\n"
-"	bne,pn	%%icc, 1b\n"
-"	 cmp	%%g7, 1\n"
-"	membar	#StoreLoad | #StoreStore\n"
-"	bl,pn	%%icc, 3f\n"
-"	 nop\n"
-"2:\n"
-"	.subsection 2\n"
-"3:	mov	%2, %%g1\n"
-"	save	%%sp, -160, %%sp\n"
-"	call	%3\n"
-"	 mov	%%g1, %%o0\n"
-"	ba,pt	%%xcc, 2b\n"
-"	 restore\n"
-"	.previous\n"
-	: "=r" (ret)
-	: "0" (ret), "r" (sem), "i" (__down_interruptible)
-	: "g1", "g2", "g3", "g7", "memory", "cc");
-	return ret;
-}
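Note, added for illustration and not part of the commit: the __sem_update_count() contract in the deleted file above (clamp the old count at zero, add incr, return the old value) is the whole trick of the PPC-style scheme. A portable C11 compare-exchange loop standing in for the sparc64 cas sequence might look like this; the name is invented and this is not kernel code.

	#include <stdatomic.h>

	/* Sketch of the __sem_update_count() semantics described above:
	 * *count = MAX(*count, 0) + incr, returning the old value. */
	static int sem_update_count_sketch(atomic_int *count, int incr)
	{
		int old = atomic_load(count);
		int new;

		do {
			new = (old > 0 ? old : 0) + incr;
		} while (!atomic_compare_exchange_weak(count, &old, new));

		return old;	/* old <= 0 on the down() path means: sleep */
	}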
@@ -130,12 +130,6 @@ EXPORT_SYMBOL(_mcount);

 EXPORT_SYMBOL(sparc64_get_clock_tick);

-/* semaphores */
-EXPORT_SYMBOL(down);
-EXPORT_SYMBOL(down_trylock);
-EXPORT_SYMBOL(down_interruptible);
-EXPORT_SYMBOL(up);
-
 /* RW semaphores */
 EXPORT_SYMBOL(__down_read);
 EXPORT_SYMBOL(__down_read_trylock);
@@ -19,10 +19,6 @@ config 64BIT
 	bool
 	default n

-config SEMAPHORE_SLEEPERS
-	bool
-	default y
-
 config 3_LEVEL_PGTABLES
 	bool "Three-level pagetables (EXPERIMENTAL)"
 	default n
@@ -11,10 +11,6 @@ config RWSEM_GENERIC_SPINLOCK
 	bool
 	default y

-config SEMAPHORE_SLEEPERS
-	bool
-	default y
-
 config 3_LEVEL_PGTABLES
 	bool
 	default y
@@ -1,17 +1,5 @@
 #include "linux/module.h"
-#include "linux/in6.h"
-#include "linux/rwsem.h"
-#include "asm/byteorder.h"
-#include "asm/delay.h"
-#include "asm/semaphore.h"
-#include "asm/uaccess.h"
 #include "asm/checksum.h"
-#include "asm/errno.h"
-
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__down_failed_trylock);
-EXPORT_SYMBOL(__up_wakeup);

 /* Networking helper routines. */
 EXPORT_SYMBOL(csum_partial);
@@ -3,7 +3,7 @@ OBJ = built-in.o
 .S.o:
	$(CC) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o

-OBJS = ptrace.o sigcontext.o semaphore.o checksum.o miscthings.o misc.o \
+OBJS = ptrace.o sigcontext.o checksum.o miscthings.o misc.o \
	ptrace_user.o sysrq.o

 EXTRA_AFLAGS := -DCONFIG_PPC32 -I. -I$(srctree)/arch/ppc/kernel
@@ -20,10 +20,6 @@ ptrace_user.o: ptrace_user.c
 sigcontext.o: sigcontext.c
	$(CC) $(USER_CFLAGS) $(EXTRA_CFLAGS) -c -o $@ $<

-semaphore.c:
-	rm -f $@
-	ln -s $(srctree)/arch/ppc/kernel/$@ $@
-
 checksum.S:
	rm -f $@
	ln -s $(srctree)/arch/ppc/lib/$@ $@
@@ -66,4 +62,4 @@ misc.o: misc.S ppc_defs.h
	$(CC) $(EXTRA_AFLAGS) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o
	rm -f asm

-clean-files := $(OBJS) ppc_defs.h checksum.S semaphore.c mk_defs.c
+clean-files := $(OBJS) ppc_defs.h checksum.S mk_defs.c
@@ -1,16 +1,5 @@
 #include "linux/module.h"
-#include "linux/in6.h"
-#include "linux/rwsem.h"
-#include "asm/byteorder.h"
-#include "asm/semaphore.h"
-#include "asm/uaccess.h"
-#include "asm/checksum.h"
-#include "asm/errno.h"
-
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__down_failed_trylock);
-EXPORT_SYMBOL(__up_wakeup);
+#include "asm/string.h"

 /*XXX: we need them because they would be exported by x86_64 */
 EXPORT_SYMBOL(__memcpy);
@@ -11,7 +11,7 @@

 extra-y := head.o init_task.o vmlinux.lds

-obj-y += intv.o entry.o process.o syscalls.o time.o semaphore.o setup.o \
+obj-y += intv.o entry.o process.o syscalls.o time.o setup.o \
	  signal.o irq.o mach.o ptrace.o bug.o
 obj-$(CONFIG_MODULES) += module.o v850_ksyms.o
 # chip-specific code
@@ -1,166 +0,0 @@
-/*
- * arch/v850/kernel/semaphore.c -- Semaphore support
- *
- * Copyright (C) 1998-2000  IBM Corporation
- * Copyright (C) 1999  Linus Torvalds
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License.  See the file COPYING in the main directory of this
- * archive for more details.
- *
- * This file is a copy of the s390 version, arch/s390/kernel/semaphore.c
- *    Author(s): Martin Schwidefsky
- * which was derived from the i386 version, linux/arch/i386/kernel/semaphore.c
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is
- * protected by the semaphore spinlock.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
-	wake_up(&sem->wait);
-}
-
-static DEFINE_SPINLOCK(semaphore_lock);
-
-void __sched __down(struct semaphore * sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	spin_lock_irq(&semaphore_lock);
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
-
-		schedule();
-		tsk->state = TASK_UNINTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
-	}
-	spin_unlock_irq(&semaphore_lock);
-	remove_wait_queue(&sem->wait, &wait);
-	tsk->state = TASK_RUNNING;
-	wake_up(&sem->wait);
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	tsk->state = TASK_INTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	spin_lock_irq(&semaphore_lock);
-	sem->sleepers ++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * With signals pending, this turns into
-		 * the trylock failure case - we won't be
-		 * sleeping, and we* can't get the lock as
-		 * it has contention. Just correct the count
-		 * and exit.
-		 */
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			sem->sleepers = 0;
-			atomic_add(sleepers, &sem->count);
-			break;
-		}
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock. The
-		 * "-1" is because we're still hoping to get
-		 * the lock.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
-
-		schedule();
-		tsk->state = TASK_INTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
-	}
-	spin_unlock_irq(&semaphore_lock);
-	tsk->state = TASK_RUNNING;
-	remove_wait_queue(&sem->wait, &wait);
-	wake_up(&sem->wait);
-	return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- */
-int __down_trylock(struct semaphore * sem)
-{
-	unsigned long flags;
-	int sleepers;
-
-	spin_lock_irqsave(&semaphore_lock, flags);
-	sleepers = sem->sleepers + 1;
-	sem->sleepers = 0;
-
-	/*
-	 * Add "everybody else" and us into it. They aren't
-	 * playing, because we own the spinlock.
-	 */
-	if (!atomic_add_negative(sleepers, &sem->count))
-		wake_up(&sem->wait);
-
-	spin_unlock_irqrestore(&semaphore_lock, flags);
-	return 1;
-}
@@ -11,7 +11,6 @@
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/current.h>

@@ -34,12 +33,6 @@ EXPORT_SYMBOL (memset);
 EXPORT_SYMBOL (memcpy);
 EXPORT_SYMBOL (memmove);

-/* semaphores */
-EXPORT_SYMBOL (__down);
-EXPORT_SYMBOL (__down_interruptible);
-EXPORT_SYMBOL (__down_trylock);
-EXPORT_SYMBOL (__up);
-
 /*
  * libgcc functions - functions that are used internally by the
  * compiler... (prototypes are not correct though, but that
@@ -53,9 +53,6 @@ config STACKTRACE_SUPPORT
 config HAVE_LATENCYTOP_SUPPORT
	def_bool y

-config SEMAPHORE_SLEEPERS
-	def_bool y
-
 config FAST_CMPXCHG_LOCAL
	bool
	default y
@@ -1,13 +1,8 @@
 #include <linux/module.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/desc.h>
 #include <asm/pgtable.h>

-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__down_failed_trylock);
-EXPORT_SYMBOL(__up_wakeup);
 /* Networking helper routines. */
 EXPORT_SYMBOL(csum_partial_copy_generic);
@@ -4,7 +4,6 @@
 #include <linux/module.h>
 #include <linux/smp.h>

-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -12,11 +11,6 @@

 EXPORT_SYMBOL(kernel_thread);

-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__down_failed_trylock);
-EXPORT_SYMBOL(__up_wakeup);
-
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
 EXPORT_SYMBOL(__get_user_4);
@@ -30,89 +30,6 @@
  * value or just clobbered..
  */
	.section .sched.text, "ax"
-ENTRY(__down_failed)
-	CFI_STARTPROC
-	FRAME
-	pushl %edx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET edx,0
-	pushl %ecx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET ecx,0
-	call __down
-	popl %ecx
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE ecx
-	popl %edx
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE edx
-	ENDFRAME
-	ret
-	CFI_ENDPROC
-	ENDPROC(__down_failed)
-
-ENTRY(__down_failed_interruptible)
-	CFI_STARTPROC
-	FRAME
-	pushl %edx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET edx,0
-	pushl %ecx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET ecx,0
-	call __down_interruptible
-	popl %ecx
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE ecx
-	popl %edx
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE edx
-	ENDFRAME
-	ret
-	CFI_ENDPROC
-	ENDPROC(__down_failed_interruptible)
-
-ENTRY(__down_failed_trylock)
-	CFI_STARTPROC
-	FRAME
-	pushl %edx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET edx,0
-	pushl %ecx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET ecx,0
-	call __down_trylock
-	popl %ecx
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE ecx
-	popl %edx
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE edx
-	ENDFRAME
-	ret
-	CFI_ENDPROC
-	ENDPROC(__down_failed_trylock)
-
-ENTRY(__up_wakeup)
-	CFI_STARTPROC
-	FRAME
-	pushl %edx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET edx,0
-	pushl %ecx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET ecx,0
-	call __up
-	popl %ecx
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE ecx
-	popl %edx
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE edx
-	ENDFRAME
-	ret
-	CFI_ENDPROC
-	ENDPROC(__up_wakeup)

 /*
  * rw spinlock fallbacks
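Note, added for illustration and not part of the commit: the stubs deleted above exist because the inline down()/up() fast paths branch to the C contention handlers through a non-standard calling convention, so the asm wrappers preserve the caller-clobbered %ecx/%edx around the call. The shape of that fast/slow split, in a hypothetical C11 sketch (invented names, not kernel code):

	#include <stdatomic.h>

	struct sem_sketch { atomic_int count; };

	/* Hypothetical out-of-line contention path (body elided). */
	static void down_slow_sketch(struct sem_sketch *s) { (void)s; }

	/* Inline fast path: one atomic decrement; the branch is taken
	 * only under contention.  On i386 that branch was a bare call
	 * into the register-preserving stubs removed above. */
	static inline void down_sketch(struct sem_sketch *s)
	{
		if (atomic_fetch_sub(&s->count, 1) <= 0)
			down_slow_sketch(s);
	}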
@@ -41,11 +41,6 @@
	thunk rwsem_downgrade_thunk,rwsem_downgrade_wake
 #endif

-	thunk __down_failed,__down
-	thunk_retrax __down_failed_interruptible,__down_interruptible
-	thunk_retrax __down_failed_trylock,__down_trylock
-	thunk __up_wakeup,__up
-
 #ifdef CONFIG_TRACE_IRQFLAGS
	thunk trace_hardirqs_on_thunk,trace_hardirqs_on
	thunk trace_hardirqs_off_thunk,trace_hardirqs_off
@@ -5,7 +5,7 @@

 extra-y := head.o vmlinux.lds

-obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o semaphore.o \
+obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o \
	 setup.o signal.o syscall.o time.o traps.o vectors.o platform.o \
	 pci-dma.o init_task.o io.o
@@ -1,226 +0,0 @@
-/*
- * arch/xtensa/kernel/semaphore.c
- *
- * Generic semaphore code. Buyer beware. Do your own specific changes
- * in <asm/semaphore-helper.h>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- *
- * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
- * Chris Zankel	<chris@zankel.net>
- * Marc Gauthier<marc@tensilica.com, marc@alumni.uwaterloo.ca>
- * Kevin Chea
- */
-
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/init.h>
-#include <asm/semaphore.h>
-#include <asm/errno.h>
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-
-static __inline__ void wake_one_more(struct semaphore * sem)
-{
-	atomic_inc((atomic_t *)&sem->sleepers);
-}
-
-static __inline__ int waking_non_zero(struct semaphore *sem)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (sem->sleepers > 0) {
-		sem->sleepers--;
-		ret = 1;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- *	1	got the lock
- *	0	go to sleep
- *	-EINTR	interrupted
- *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-
-static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
-						struct task_struct *tsk)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (sem->sleepers > 0) {
-		sem->sleepers--;
-		ret = 1;
-	} else if (signal_pending(tsk)) {
-		atomic_inc(&sem->count);
-		ret = -EINTR;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- *	1	failed to lock
- *	0	got the lock
- *
- * We must undo the sem->count down_trylock() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-
-static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
-{
-	unsigned long flags;
-	int ret = 1;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (sem->sleepers <= 0)
-		atomic_inc(&sem->count);
-	else {
-		sem->sleepers--;
-		ret = 0;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-DEFINE_SPINLOCK(semaphore_wake_lock);
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to sleep, while the "waking" variable is
- * incremented when the "up()" code goes to wake up waiting
- * processes.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * waking_non_zero() (from asm/semaphore.h) must execute
- * atomically.
- *
- * When __up() is called, the count was negative before
- * incrementing it, and we need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to
- * wake up and exit.  ALL waiting processes actually wake up but
- * only the one that gets to the "waking" field first will gate
- * through and acquire the semaphore.  The others will go back
- * to sleep.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-void __up(struct semaphore *sem)
-{
-	wake_one_more(sem);
-	wake_up(&sem->wait);
-}
-
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from __down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from __down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- *
- */
-
-#define DOWN_VAR				\
-	struct task_struct *tsk = current;	\
-	wait_queue_t wait;			\
-	init_waitqueue_entry(&wait, tsk);
-
-#define DOWN_HEAD(task_state)					\
-								\
-								\
-	tsk->state = (task_state);				\
-	add_wait_queue(&sem->wait, &wait);			\
-								\
-	/* \
-	 * Ok, we're set up.  sem->count is known to be less than zero \
-	 * so we must wait. \
-	 * \
-	 * We can let go the lock for purposes of waiting. \
-	 * We re-acquire it after awaking so as to protect \
-	 * all semaphore operations. \
-	 * \
-	 * If "up()" is called before we call waking_non_zero() then \
-	 * we will catch it right away.  If it is called later then \
-	 * we will have to go through a wakeup cycle to catch it. \
-	 * \
-	 * Multiple waiters contend for the semaphore lock to see \
-	 * who gets to gate through and who has to wait some more. \
-	 */ \
-	for (;;) {
-
-#define DOWN_TAIL(task_state)			\
-		tsk->state = (task_state);	\
-	}					\
-	tsk->state = TASK_RUNNING;		\
-	remove_wait_queue(&sem->wait, &wait);
-
-void __sched __down(struct semaphore * sem)
-{
-	DOWN_VAR
-	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
-	if (waking_non_zero(sem))
-		break;
-	schedule();
-	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	int ret = 0;
-	DOWN_VAR
-	DOWN_HEAD(TASK_INTERRUPTIBLE)
-
-	ret = waking_non_zero_interruptible(sem, tsk);
-	if (ret)
-	{
-		if (ret == 1)
-			/* ret != 0 only if we get interrupted -arca */
-			ret = 0;
-		break;
-	}
-	schedule();
-	DOWN_TAIL(TASK_INTERRUPTIBLE)
-	return ret;
-}
-
-int __down_trylock(struct semaphore * sem)
-{
-	return waking_non_zero_trylock(sem);
-}
@@ -26,7 +26,6 @@
 #include <asm/io.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
-#include <asm/semaphore.h>
 #ifdef CONFIG_BLK_DEV_FD
 #include <asm/floppy.h>
 #endif
@@ -71,14 +70,6 @@ EXPORT_SYMBOL(__umodsi3);
 EXPORT_SYMBOL(__udivdi3);
 EXPORT_SYMBOL(__umoddi3);

-/*
- * Semaphore operations
- */
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__up);
-
 #ifdef CONFIG_NET
 /*
  * Networking support
@@ -1,149 +1 @@
-#ifndef _ALPHA_SEMAPHORE_H
-#define _ALPHA_SEMAPHORE_H
-
-/*
- * SMP- and interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1996, 2000 Richard Henderson
- */
-
-#include <asm/current.h>
-#include <asm/system.h>
-#include <asm/atomic.h>
-#include <linux/compiler.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
-	atomic_t count;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n)			\
-{								\
-	.count	= ATOMIC_INIT(n),				\
-	.wait	= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait),	\
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count)		\
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name)	__DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
-	/*
-	 * Logically,
-	 *   *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
-	 * except that gcc produces better initializing by parts yet.
-	 */
-
-	atomic_set(&sem->count, val);
-	init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-extern void down(struct semaphore *);
-extern void __down_failed(struct semaphore *);
-extern int  down_interruptible(struct semaphore *);
-extern int  __down_failed_interruptible(struct semaphore *);
-extern int  down_trylock(struct semaphore *);
-extern void up(struct semaphore *);
-extern void __up_wakeup(struct semaphore *);
-
-/*
- * Hidden out of line code is fun, but extremely messy.  Rely on newer
- * compilers to do a respectable job with this.  The contention cases
- * are handled out of line in arch/alpha/kernel/semaphore.c.
- */
-
-static inline void __down(struct semaphore *sem)
-{
-	long count;
-	might_sleep();
-	count = atomic_dec_return(&sem->count);
-	if (unlikely(count < 0))
-		__down_failed(sem);
-}
-
-static inline int __down_interruptible(struct semaphore *sem)
-{
-	long count;
-	might_sleep();
-	count = atomic_dec_return(&sem->count);
-	if (unlikely(count < 0))
-		return __down_failed_interruptible(sem);
-	return 0;
-}
-
-/*
- * down_trylock returns 0 on success, 1 if we failed to get the lock.
- */
-
-static inline int __down_trylock(struct semaphore *sem)
-{
-	long ret;
-
-	/* "Equivalent" C:
-
-	   do {
-		ret = ldl_l;
-		--ret;
-		if (ret < 0)
-			break;
-		ret = stl_c = ret;
-	   } while (ret == 0);
-	*/
-	__asm__ __volatile__(
-		"1:	ldl_l	%0,%1\n"
-		"	subl	%0,1,%0\n"
-		"	blt	%0,2f\n"
-		"	stl_c	%0,%1\n"
-		"	beq	%0,3f\n"
-		"	mb\n"
-		"2:\n"
-		".subsection 2\n"
-		"3:	br	1b\n"
-		".previous"
-		: "=&r" (ret), "=m" (sem->count)
-		: "m" (sem->count));
-
-	return ret < 0;
-}
-
-static inline void __up(struct semaphore *sem)
-{
-	if (unlikely(atomic_inc_return(&sem->count) <= 0))
-		__up_wakeup(sem);
-}
-
-#if !defined(CONFIG_DEBUG_SEMAPHORE)
-extern inline void down(struct semaphore *sem)
-{
-	__down(sem);
-}
-extern inline int down_interruptible(struct semaphore *sem)
-{
-	return __down_interruptible(sem);
-}
-extern inline int down_trylock(struct semaphore *sem)
-{
-	return __down_trylock(sem);
-}
-extern inline void up(struct semaphore *sem)
-{
-	__up(sem);
-}
-#endif
-
-#endif
+#include <linux/semaphore.h>
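Note, added for illustration and not part of the commit: the alpha header above documents its ll/sc trylock with an "Equivalent C" comment. Written as a real C11 compare-exchange loop, that contract (decrement the count but never let it go negative; 0 means acquired, 1 means failed, matching the down_trylock() convention) might look like this. The name is invented; this is a sketch, not kernel code.

	#include <stdatomic.h>

	/* Try to take one unit without ever driving the count negative. */
	static int down_trylock_sketch(atomic_int *count)
	{
		int old = atomic_load(count);

		do {
			if (old - 1 < 0)
				return 1;	/* would block: fail */
		} while (!atomic_compare_exchange_weak(count, &old, old - 1));

		return 0;			/* acquired */
	}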
@@ -1,84 +0,0 @@
-#ifndef ASMARM_SEMAPHORE_HELPER_H
-#define ASMARM_SEMAPHORE_HELPER_H
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (atomic_read(&sem->count) <= 0)
-		sem->waking++;
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (sem->waking > 0) {
-		sem->waking--;
-		ret = 1;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking non zero interruptible
- *	1	got the lock
- *	0	go to sleep
- *	-EINTR	interrupted
- *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
-						struct task_struct *tsk)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (sem->waking > 0) {
-		sem->waking--;
-		ret = 1;
-	} else if (signal_pending(tsk)) {
-		atomic_inc(&sem->count);
-		ret = -EINTR;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking_non_zero_try_lock:
- *	1	failed to lock
- *	0	got the lock
- *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
-	unsigned long flags;
-	int ret = 1;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (sem->waking <= 0)
-		atomic_inc(&sem->count);
-	else {
-		sem->waking--;
-		ret = 0;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-#endif
include/asm-arm/semaphore.h
@@ -1,98 +1 @@
-/*
- * linux/include/asm-arm/semaphore.h
- */
-#ifndef __ASM_ARM_SEMAPHORE_H
-#define __ASM_ARM_SEMAPHORE_H
-
-#include <linux/linkage.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-#include <asm/atomic.h>
-#include <asm/locks.h>
-
-struct semaphore {
-	atomic_t count;
-	int sleepers;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INIT(name, cnt) \
-{ \
-	.count	= ATOMIC_INIT(cnt), \
-	.wait	= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-	struct semaphore name = __SEMAPHORE_INIT(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
-	atomic_set(&sem->count, val);
-	sem->sleepers = 0;
-	init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX(struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED(struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-/*
- * special register calling convention
- */
-asmlinkage void __down_failed(void);
-asmlinkage int __down_interruptible_failed(void);
-asmlinkage int __down_trylock_failed(void);
-asmlinkage void __up_wakeup(void);
-
-extern void __down(struct semaphore * sem);
-extern int __down_interruptible(struct semaphore * sem);
-extern int __down_trylock(struct semaphore * sem);
-extern void __up(struct semaphore * sem);
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down" is the actual routine that waits...
- */
-static inline void down(struct semaphore * sem)
-{
-	might_sleep();
-	__down_op(sem, __down_failed);
-}
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down_interruptible" is the actual routine that waits...
- */
-static inline int down_interruptible (struct semaphore * sem)
-{
-	might_sleep();
-	return __down_op_ret(sem, __down_interruptible_failed);
-}
-
-static inline int down_trylock(struct semaphore *sem)
-{
-	return __down_op_ret(sem, __down_trylock_failed);
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
-	__up_op(sem, __up_wakeup);
-}
-
-#endif
+#include <linux/semaphore.h>
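Every header deleted by this commit implements the same counting-semaphore contract; only the fast path differs per architecture. For readers coming to the diff cold, here is a minimal user-space analogue of that contract. This is a sketch only: pthreads stand in for the kernel's spinlock and wait queue, and the ksem_* names are invented for the example, not kernel API.

/* Minimal user-space analogue of the down()/up() contract above.
 * pthreads stand in for the kernel's wait queue and spinlock;
 * everything here is illustrative, not kernel code. */
#include <pthread.h>

struct ksem {
        pthread_mutex_t lock;   /* protects count, like a wait_lock */
        pthread_cond_t waitq;   /* stands in for the wait queue */
        int count;              /* > 0: tokens free; <= 0: down() sleeps */
};

static void ksem_init(struct ksem *s, int val)
{
        pthread_mutex_init(&s->lock, NULL);
        pthread_cond_init(&s->waitq, NULL);
        s->count = val;
}

static void ksem_down(struct ksem *s)  /* like down() */
{
        pthread_mutex_lock(&s->lock);
        while (s->count <= 0)           /* no token: sleep until up() */
                pthread_cond_wait(&s->waitq, &s->lock);
        s->count--;                     /* consume a token */
        pthread_mutex_unlock(&s->lock);
}

static void ksem_up(struct ksem *s)    /* like up() */
{
        pthread_mutex_lock(&s->lock);
        s->count++;                     /* return a token */
        pthread_cond_signal(&s->waitq); /* wake one sleeper, if any */
        pthread_mutex_unlock(&s->lock);
}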
include/asm-avr32/semaphore.h
@@ -1,108 +1 @@
-/*
- * SMP- and interrupt-safe semaphores.
- *
- * Copyright (C) 2006 Atmel Corporation
- *
- * Based on include/asm-i386/semaphore.h
- *   Copyright (C) 1996 Linus Torvalds
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_AVR32_SEMAPHORE_H
-#define __ASM_AVR32_SEMAPHORE_H
-
-#include <linux/linkage.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
-	atomic_t count;
-	int sleepers;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
-	.count	= ATOMIC_INIT(n), \
-	.wait	= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-	atomic_set(&sem->count, val);
-	sem->sleepers = 0;
-	init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-void __down(struct semaphore * sem);
-int __down_interruptible(struct semaphore * sem);
-void __up(struct semaphore * sem);
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/i386/kernel/semaphore.c
- */
-static inline void down(struct semaphore * sem)
-{
-	might_sleep();
-	if (unlikely(atomic_dec_return (&sem->count) < 0))
-		__down (sem);
-}
-
-/*
- * Interruptible try to acquire a semaphore.  If we obtained
- * it, return zero.  If we were interrupted, returns -EINTR
- */
-static inline int down_interruptible(struct semaphore * sem)
-{
-	int ret = 0;
-
-	might_sleep();
-	if (unlikely(atomic_dec_return (&sem->count) < 0))
-		ret = __down_interruptible (sem);
-	return ret;
-}
-
-/*
- * Non-blockingly attempt to down() a semaphore.
- * Returns zero if we acquired it
- */
-static inline int down_trylock(struct semaphore * sem)
-{
-	return atomic_dec_if_positive(&sem->count) < 0;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
-	if (unlikely(atomic_inc_return (&sem->count) <= 0))
-		__up (sem);
-}
-
-#endif /*__ASM_AVR32_SEMAPHORE_H */
+#include <linux/semaphore.h>
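The AVR32 header above, like most of the atomic-fastpath ports in this diff, encodes contention in the sign of the count: down() decrements unconditionally, and only a negative result drops into the __down() slow path; up() mirrors it. A hedged C11 rendering of the same scheme follows; the slow-path functions are empty stubs here, not the kernel's.

/* Sketch of the negative-count fast path, in C11 atomics. */
#include <stdatomic.h>

static atomic_int count = 1;            /* binary-semaphore example */

static void slow_down(void) { /* would sleep until a token appears */ }
static void slow_up(void) { /* would wake one sleeper */ }

static void fast_down(void)
{
        /* fetch_sub returns the old value: old <= 0 means the count
         * just went negative and we must take the slow path. */
        if (atomic_fetch_sub(&count, 1) <= 0)
                slow_down();
}

static void fast_up(void)
{
        /* old < 0 means at least one thread is recorded as waiting */
        if (atomic_fetch_add(&count, 1) < 0)
                slow_up();
}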
include/asm-blackfin/semaphore-helper.h
@@ -1,82 +0,0 @@
-/* Based on M68K version, Lineo Inc. May 2001 */
-
-#ifndef _BFIN_SEMAPHORE_HELPER_H
-#define _BFIN_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- */
-
-#include <asm/errno.h>
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore *sem)
-{
-	atomic_inc(&sem->waking);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
-	int ret;
-	unsigned long flags = 0;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	ret = 0;
-	if (atomic_read(&sem->waking) > 0) {
-		atomic_dec(&sem->waking);
-		ret = 1;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- *	1	got the lock
- *	0	go to sleep
- *	-EINTR	interrupted
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
-						struct task_struct *tsk)
-{
-	int ret = 0;
-	unsigned long flags = 0;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (atomic_read(&sem->waking) > 0) {
-		atomic_dec(&sem->waking);
-		ret = 1;
-	} else if (signal_pending(tsk)) {
-		atomic_inc(&sem->count);
-		ret = -EINTR;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- *	1	failed to lock
- *	0	got the lock
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
-	int ret = 1;
-	unsigned long flags = 0;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (atomic_read(&sem->waking) > 0) {
-		atomic_dec(&sem->waking);
-		ret = 0;
-	} else
-		atomic_inc(&sem->count);
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-#endif /* _BFIN_SEMAPHORE_HELPER_H */
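The semaphore-helper headers being deleted here are the other half of a two-phase wakeup: up() banks a credit via wake_one_more(), and a sleeper may only leave down() once waking_non_zero() consumes a credit, so a wakeup issued before the sleeper has fully gone to sleep is not lost. A compressed, lock-based sketch of that handshake; a plain pthread mutex stands in for semaphore_wake_lock, and the trailing underscores mark the names as illustrative.

/* Sketch of the wakeup-credit handshake used by the helper headers:
 * the waker banks a credit, sleepers consume credits. */
#include <pthread.h>

static pthread_mutex_t wake_lock = PTHREAD_MUTEX_INITIALIZER;
static int waking;      /* banked wakeup credits, like sem->waking */

static void wake_one_more_(void)        /* called from up() */
{
        pthread_mutex_lock(&wake_lock);
        waking++;
        pthread_mutex_unlock(&wake_lock);
}

static int waking_non_zero_(void)       /* polled by each sleeper */
{
        int got = 0;

        pthread_mutex_lock(&wake_lock);
        if (waking > 0) {
                waking--;       /* consume exactly one credit */
                got = 1;        /* this sleeper may return from down() */
        }
        pthread_mutex_unlock(&wake_lock);
        return got;
}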
include/asm-blackfin/semaphore.h
@@ -1,105 +1 @@
-#ifndef _BFIN_SEMAPHORE_H
-#define _BFIN_SEMAPHORE_H
-
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-#include <asm/atomic.h>
-
-/*
- * Interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * BFIN version by akbar hussain Lineo Inc April 2001
- *
- */
-
-struct semaphore {
-	atomic_t count;
-	int sleepers;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
-	.count		= ATOMIC_INIT(n), \
-	.sleepers	= 0, \
-	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
-	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void init_MUTEX(struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED(struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-asmlinkage void __down(struct semaphore *sem);
-asmlinkage int __down_interruptible(struct semaphore *sem);
-asmlinkage int __down_trylock(struct semaphore *sem);
-asmlinkage void __up(struct semaphore *sem);
-
-extern spinlock_t semaphore_wake_lock;
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "down_failed" is a special asm handler that calls the C
- * routine that actually waits.
- */
-static inline void down(struct semaphore *sem)
-{
-	might_sleep();
-	if (atomic_dec_return(&sem->count) < 0)
-		__down(sem);
-}
-
-static inline int down_interruptible(struct semaphore *sem)
-{
-	int ret = 0;
-
-	might_sleep();
-	if (atomic_dec_return(&sem->count) < 0)
-		ret = __down_interruptible(sem);
-	return (ret);
-}
-
-static inline int down_trylock(struct semaphore *sem)
-{
-	int ret = 0;
-
-	if (atomic_dec_return(&sem->count) < 0)
-		ret = __down_trylock(sem);
-	return ret;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore *sem)
-{
-	if (atomic_inc_return(&sem->count) <= 0)
-		__up(sem);
-}
-
-#endif /* __ASSEMBLY__ */
-#endif /* _BFIN_SEMAPHORE_H */
+#include <linux/semaphore.h>
include/asm-cris/semaphore-helper.h
@@ -1,78 +0,0 @@
-/* $Id: semaphore-helper.h,v 1.3 2001/03/26 15:00:33 orjanf Exp $
- *
- * SMP- and interrupt-safe semaphores helper functions. Generic versions, no
- * optimizations whatsoever...
- *
- */
-
-#ifndef _ASM_SEMAPHORE_HELPER_H
-#define _ASM_SEMAPHORE_HELPER_H
-
-#include <asm/atomic.h>
-#include <linux/errno.h>
-
-#define read(a) ((a)->counter)
-#define inc(a) (((a)->counter)++)
-#define dec(a) (((a)->counter)--)
-
-#define count_inc(a) ((*(a))++)
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
-	atomic_inc(&sem->waking);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	local_irq_save(flags);
-	if (read(&sem->waking) > 0) {
-		dec(&sem->waking);
-		ret = 1;
-	}
-	local_irq_restore(flags);
-	return ret;
-}
-
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
-						struct task_struct *tsk)
-{
-	int ret = 0;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	if (read(&sem->waking) > 0) {
-		dec(&sem->waking);
-		ret = 1;
-	} else if (signal_pending(tsk)) {
-		inc(&sem->count);
-		ret = -EINTR;
-	}
-	local_irq_restore(flags);
-	return ret;
-}
-
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
-	int ret = 1;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	if (read(&sem->waking) <= 0)
-		inc(&sem->count);
-	else {
-		dec(&sem->waking);
-		ret = 0;
-	}
-	local_irq_restore(flags);
-	return ret;
-}
-
-#endif /* _ASM_SEMAPHORE_HELPER_H */
include/asm-cris/semaphore.h
@@ -1,133 +1 @@
-/* $Id: semaphore.h,v 1.3 2001/05/08 13:54:09 bjornw Exp $ */
-
-/* On the i386 these are coded in asm, perhaps we should as well. Later.. */
-
-#ifndef _CRIS_SEMAPHORE_H
-#define _CRIS_SEMAPHORE_H
-
-#define RW_LOCK_BIAS 0x01000000
-
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-/*
- * CRIS semaphores, implemented in C-only so far.
- */
-
-struct semaphore {
-	atomic_t count;
-	atomic_t waking;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
-	.count		= ATOMIC_INIT(n), \
-	.waking		= ATOMIC_INIT(0), \
-	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
-	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-extern void __down(struct semaphore * sem);
-extern int __down_interruptible(struct semaphore * sem);
-extern int __down_trylock(struct semaphore * sem);
-extern void __up(struct semaphore * sem);
-
-/* notice - we probably can do cli/sti here instead of saving */
-
-static inline void down(struct semaphore * sem)
-{
-	unsigned long flags;
-	int failed;
-
-	might_sleep();
-
-	/* atomically decrement the semaphores count, and if its negative, we wait */
-	cris_atomic_save(sem, flags);
-	failed = --(sem->count.counter) < 0;
-	cris_atomic_restore(sem, flags);
-	if(failed) {
-		__down(sem);
-	}
-}
-
-/*
- * This version waits in interruptible state so that the waiting
- * process can be killed.  The down_interruptible routine
- * returns negative for signalled and zero for semaphore acquired.
- */
-
-static inline int down_interruptible(struct semaphore * sem)
-{
-	unsigned long flags;
-	int failed;
-
-	might_sleep();
-
-	/* atomically decrement the semaphores count, and if its negative, we wait */
-	cris_atomic_save(sem, flags);
-	failed = --(sem->count.counter) < 0;
-	cris_atomic_restore(sem, flags);
-	if(failed)
-		failed = __down_interruptible(sem);
-	return(failed);
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
-	unsigned long flags;
-	int failed;
-
-	cris_atomic_save(sem, flags);
-	failed = --(sem->count.counter) < 0;
-	cris_atomic_restore(sem, flags);
-	if(failed)
-		failed = __down_trylock(sem);
-	return(failed);
-
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
-	unsigned long flags;
-	int wakeup;
-
-	/* atomically increment the semaphores count, and if it was negative, we wake people */
-	cris_atomic_save(sem, flags);
-	wakeup = ++(sem->count.counter) <= 0;
-	cris_atomic_restore(sem, flags);
-	if(wakeup) {
-		__up(sem);
-	}
-}
-
-#endif
include/asm-frv/semaphore.h
@@ -1,155 +1 @@
-/* semaphore.h: semaphores for the FR-V
- *
- * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifndef _ASM_SEMAPHORE_H
-#define _ASM_SEMAPHORE_H
-
-#define RW_LOCK_BIAS 0x01000000
-
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-
-/*
- * the semaphore definition
- * - if counter is >0 then there are tokens available on the semaphore for down to collect
- * - if counter is <=0 then there are no spare tokens, and anyone that wants one must wait
- * - if wait_list is not empty, then there are processes waiting for the semaphore
- */
-struct semaphore {
-	unsigned		counter;
-	spinlock_t		wait_lock;
-	struct list_head	wait_list;
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	unsigned		__magic;
-#endif
-};
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-# define __SEM_DEBUG_INIT(name) , (long)&(name).__magic
-#else
-# define __SEM_DEBUG_INIT(name)
-#endif
-
-
-#define __SEMAPHORE_INITIALIZER(name,count) \
-{ count, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __SEM_DEBUG_INIT(name) }
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-	*sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-extern void __down(struct semaphore *sem, unsigned long flags);
-extern int __down_interruptible(struct semaphore *sem, unsigned long flags);
-extern void __up(struct semaphore *sem);
-
-static inline void down(struct semaphore *sem)
-{
-	unsigned long flags;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	CHECK_MAGIC(sem->__magic);
-#endif
-
-	spin_lock_irqsave(&sem->wait_lock, flags);
-	if (likely(sem->counter > 0)) {
-		sem->counter--;
-		spin_unlock_irqrestore(&sem->wait_lock, flags);
-	}
-	else {
-		__down(sem, flags);
-	}
-}
-
-static inline int down_interruptible(struct semaphore *sem)
-{
-	unsigned long flags;
-	int ret = 0;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	CHECK_MAGIC(sem->__magic);
-#endif
-
-	spin_lock_irqsave(&sem->wait_lock, flags);
-	if (likely(sem->counter > 0)) {
-		sem->counter--;
-		spin_unlock_irqrestore(&sem->wait_lock, flags);
-	}
-	else {
-		ret = __down_interruptible(sem, flags);
-	}
-	return ret;
-}
-
-/*
- * non-blockingly attempt to down() a semaphore.
- * - returns zero if we acquired it
- */
-static inline int down_trylock(struct semaphore *sem)
-{
-	unsigned long flags;
-	int success = 0;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	CHECK_MAGIC(sem->__magic);
-#endif
-
-	spin_lock_irqsave(&sem->wait_lock, flags);
-	if (sem->counter > 0) {
-		sem->counter--;
-		success = 1;
-	}
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
-	return !success;
-}
-
-static inline void up(struct semaphore *sem)
-{
-	unsigned long flags;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	CHECK_MAGIC(sem->__magic);
-#endif
-
-	spin_lock_irqsave(&sem->wait_lock, flags);
-	if (!list_empty(&sem->wait_list))
-		__up(sem);
-	else
-		sem->counter++;
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
-}
-
-static inline int sem_getcount(struct semaphore *sem)
-{
-	return sem->counter;
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif
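The FR-V port is worth pausing on: unlike the atomic-fastpath ports it already keeps a plain counter and a wait list under a per-semaphore spinlock, which is essentially the shape a generic C implementation can take on every architecture. A user-space sketch of its down_trylock() logic, with a pthread mutex standing in for sem->wait_lock; the fsem_* names are illustrative, not kernel code.

/* FR-V-style trylock: take a token only under the lock. */
#include <pthread.h>

struct fsem {
        pthread_mutex_t wait_lock;
        unsigned counter;       /* tokens available, as in the header */
};

static int fsem_trylock(struct fsem *s)
{
        int success = 0;

        pthread_mutex_lock(&s->wait_lock);
        if (s->counter > 0) {   /* a token is free: take it */
                s->counter--;
                success = 1;
        }
        pthread_mutex_unlock(&s->wait_lock);
        return !success;        /* 0 on success, like down_trylock() */
}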
include/asm-h8300/semaphore-helper.h
@@ -1,85 +0,0 @@
-#ifndef _H8300_SEMAPHORE_HELPER_H
-#define _H8300_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * based on
- * m68k version by Andreas Schwab
- */
-
-#include <linux/errno.h>
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
-	atomic_inc((atomic_t *)&sem->sleepers);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
-	int ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	ret = 0;
-	if (sem->sleepers > 0) {
-		sem->sleepers--;
-		ret = 1;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- *	1	got the lock
- *	0	go to sleep
- *	-EINTR	interrupted
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
-						struct task_struct *tsk)
-{
-	int ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	ret = 0;
-	if (sem->sleepers > 0) {
-		sem->sleepers--;
-		ret = 1;
-	} else if (signal_pending(tsk)) {
-		atomic_inc(&sem->count);
-		ret = -EINTR;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- *	1	failed to lock
- *	0	got the lock
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
-	int ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	ret = 1;
-	if (sem->sleepers <= 0)
-		atomic_inc(&sem->count);
-	else {
-		sem->sleepers--;
-		ret = 0;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-#endif
include/asm-h8300/semaphore.h
@@ -1,190 +1 @@
-#ifndef _H8300_SEMAPHORE_H
-#define _H8300_SEMAPHORE_H
-
-#define RW_LOCK_BIAS 0x01000000
-
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-/*
- * Interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * H8/300 version by Yoshinori Sato
- */
-
-
-struct semaphore {
-	atomic_t count;
-	int sleepers;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
-	.count		= ATOMIC_INIT(n), \
-	.sleepers	= 0, \
-	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int __down_failed_interruptible(void /* params in registers */);
-asmlinkage int __down_failed_trylock(void /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage int __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-extern spinlock_t semaphore_wake_lock;
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/m68k/lib/semaphore.S
- */
-static inline void down(struct semaphore * sem)
-{
-	register atomic_t *count asm("er0");
-
-	might_sleep();
-
-	count = &(sem->count);
-	__asm__ __volatile__(
-		"stc ccr,r3l\n\t"
-		"orc #0x80,ccr\n\t"
-		"mov.l %2, er1\n\t"
-		"dec.l #1,er1\n\t"
-		"mov.l er1,%0\n\t"
-		"bpl 1f\n\t"
-		"ldc r3l,ccr\n\t"
-		"mov.l %1,er0\n\t"
-		"jsr @___down\n\t"
-		"bra 2f\n"
-		"1:\n\t"
-		"ldc r3l,ccr\n"
-		"2:"
-		: "=m"(*count)
-		: "g"(sem),"m"(*count)
-		: "cc", "er1", "er2", "er3");
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
-	register atomic_t *count asm("er0");
-
-	might_sleep();
-
-	count = &(sem->count);
-	__asm__ __volatile__(
-		"stc ccr,r1l\n\t"
-		"orc #0x80,ccr\n\t"
-		"mov.l %3, er2\n\t"
-		"dec.l #1,er2\n\t"
-		"mov.l er2,%1\n\t"
-		"bpl 1f\n\t"
-		"ldc r1l,ccr\n\t"
-		"mov.l %2,er0\n\t"
-		"jsr @___down_interruptible\n\t"
-		"bra 2f\n"
-		"1:\n\t"
-		"ldc r1l,ccr\n\t"
-		"sub.l %0,%0\n\t"
-		"2:\n\t"
-		: "=r" (count),"=m" (*count)
-		: "g"(sem),"m"(*count)
-		: "cc", "er1", "er2", "er3");
-	return (int)count;
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
-	register atomic_t *count asm("er0");
-
-	count = &(sem->count);
-	__asm__ __volatile__(
-		"stc ccr,r3l\n\t"
-		"orc #0x80,ccr\n\t"
-		"mov.l %3,er2\n\t"
-		"dec.l #1,er2\n\t"
-		"mov.l er2,%0\n\t"
-		"bpl 1f\n\t"
-		"ldc r3l,ccr\n\t"
-		"jmp @3f\n\t"
-		LOCK_SECTION_START(".align 2\n\t")
-		"3:\n\t"
-		"mov.l %2,er0\n\t"
-		"jsr @___down_trylock\n\t"
-		"jmp @2f\n\t"
-		LOCK_SECTION_END
-		"1:\n\t"
-		"ldc r3l,ccr\n\t"
-		"sub.l %1,%1\n"
-		"2:"
-		: "=m" (*count),"=r"(count)
-		: "g"(sem),"m"(*count)
-		: "cc", "er1","er2", "er3");
-	return (int)count;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
-	register atomic_t *count asm("er0");
-
-	count = &(sem->count);
-	__asm__ __volatile__(
-		"stc ccr,r3l\n\t"
-		"orc #0x80,ccr\n\t"
-		"mov.l %2,er1\n\t"
-		"inc.l #1,er1\n\t"
-		"mov.l er1,%0\n\t"
-		"ldc r3l,ccr\n\t"
-		"sub.l er2,er2\n\t"
-		"cmp.l er2,er1\n\t"
-		"bgt 1f\n\t"
-		"mov.l %1,er0\n\t"
-		"jsr @___up\n"
-		"1:"
-		: "=m"(*count)
-		: "g"(sem),"m"(*count)
-		: "cc", "er1", "er2", "er3");
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif
include/asm-ia64/semaphore.h
@@ -1,99 +1 @@
-#ifndef _ASM_IA64_SEMAPHORE_H
-#define _ASM_IA64_SEMAPHORE_H
-
-/*
- * Copyright (C) 1998-2000 Hewlett-Packard Co
- * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-#include <asm/atomic.h>
-
-struct semaphore {
-	atomic_t count;
-	int sleepers;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
-	.count		= ATOMIC_INIT(n), \
-	.sleepers	= 0, \
-	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
-
-static inline void
-sema_init (struct semaphore *sem, int val)
-{
-	*sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void
-init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void
-init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-extern void __down (struct semaphore * sem);
-extern int __down_interruptible (struct semaphore * sem);
-extern int __down_trylock (struct semaphore * sem);
-extern void __up (struct semaphore * sem);
-
-/*
- * Atomically decrement the semaphore's count.  If it goes negative,
- * block the calling thread in the TASK_UNINTERRUPTIBLE state.
- */
-static inline void
-down (struct semaphore *sem)
-{
-	might_sleep();
-	if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
-		__down(sem);
-}
-
-/*
- * Atomically decrement the semaphore's count.  If it goes negative,
- * block the calling thread in the TASK_INTERRUPTIBLE state.
- */
-static inline int
-down_interruptible (struct semaphore * sem)
-{
-	int ret = 0;
-
-	might_sleep();
-	if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
-		ret = __down_interruptible(sem);
-	return ret;
-}
-
-static inline int
-down_trylock (struct semaphore *sem)
-{
-	int ret = 0;
-
-	if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
-		ret = __down_trylock(sem);
-	return ret;
-}
-
-static inline void
-up (struct semaphore * sem)
-{
-	if (ia64_fetchadd(1, &sem->count.counter, rel) <= -1)
-		__up(sem);
-}
-
-#endif /* _ASM_IA64_SEMAPHORE_H */
+#include <linux/semaphore.h>
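Note the ordering arguments to ia64_fetchadd() above: acq on the down() side keeps the critical section from being reordered before the decrement, rel on the up() side keeps it from sinking below the increment. The same pairing in portable C11, as a sketch with stubbed slow paths, not the ia64 intrinsic:

/* Acquire on down, release on up -- the ordering the ia64 header
 * requests with the acq/rel arguments to ia64_fetchadd(). */
#include <stdatomic.h>

static atomic_int sem_count = 1;

static void down_slow(void) { /* would sleep */ }
static void up_slow(void) { /* would wake a sleeper */ }

static void sem_down(void)
{
        /* acquire: later accesses stay after the decrement */
        if (atomic_fetch_sub_explicit(&sem_count, 1,
                                      memory_order_acquire) < 1)
                down_slow();
}

static void sem_up(void)
{
        /* release: earlier accesses stay before the increment */
        if (atomic_fetch_add_explicit(&sem_count, 1,
                                      memory_order_release) <= -1)
                up_slow();
}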
include/asm-m32r/semaphore.h
@@ -1,144 +1 @@
-#ifndef _ASM_M32R_SEMAPHORE_H
-#define _ASM_M32R_SEMAPHORE_H
-
-#include <linux/linkage.h>
-
-#ifdef __KERNEL__
-
-/*
- * SMP- and interrupt-safe semaphores..
- *
- * Copyright (C) 1996  Linus Torvalds
- * Copyright (C) 2004, 2006  Hirokazu Takata <takata at linux-m32r.org>
- */
-
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-#include <asm/assembler.h>
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-struct semaphore {
-	atomic_t count;
-	int sleepers;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
-	.count		= ATOMIC_INIT(n), \
-	.sleepers	= 0, \
-	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-/*
- *	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
- *
- * i'd rather use the more flexible initialization above, but sadly
- * GCC 2.7.2.3 emits a bogus warning. EGCS doesnt. Oh well.
- */
-	atomic_set(&sem->count, val);
-	sem->sleepers = 0;
-	init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int __down_failed_interruptible(void /* params in registers */);
-asmlinkage int __down_failed_trylock(void /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage int __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-/*
- * Atomically decrement the semaphore's count.  If it goes negative,
- * block the calling thread in the TASK_UNINTERRUPTIBLE state.
- */
-static inline void down(struct semaphore * sem)
-{
-	might_sleep();
-	if (unlikely(atomic_dec_return(&sem->count) < 0))
-		__down(sem);
-}
-
-/*
- * Interruptible try to acquire a semaphore.  If we obtained
- * it, return zero.  If we were interrupted, returns -EINTR
- */
-static inline int down_interruptible(struct semaphore * sem)
-{
-	int result = 0;
-
-	might_sleep();
-	if (unlikely(atomic_dec_return(&sem->count) < 0))
-		result = __down_interruptible(sem);
-
-	return result;
-}
-
-/*
- * Non-blockingly attempt to down() a semaphore.
- * Returns zero if we acquired it
- */
-static inline int down_trylock(struct semaphore * sem)
-{
-	unsigned long flags;
-	long count;
-	int result = 0;
-
-	local_irq_save(flags);
-	__asm__ __volatile__ (
-		"# down_trylock		\n\t"
-		DCACHE_CLEAR("%0", "r4", "%1")
-		M32R_LOCK" %0, @%1;	\n\t"
-		"addi	%0, #-1;	\n\t"
-		M32R_UNLOCK" %0, @%1;	\n\t"
-		: "=&r" (count)
-		: "r" (&sem->count)
-		: "memory"
-#ifdef CONFIG_CHIP_M32700_TS1
-		, "r4"
-#endif	/* CONFIG_CHIP_M32700_TS1 */
-	);
-	local_irq_restore(flags);
-
-	if (unlikely(count < 0))
-		result = __down_trylock(sem);
-
-	return result;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
-	if (unlikely(atomic_inc_return(&sem->count) <= 0))
-		__up(sem);
-}
-
-#endif  /* __KERNEL__ */
-
-#endif  /* _ASM_M32R_SEMAPHORE_H */
+#include <linux/semaphore.h>
include/asm-m68k/semaphore-helper.h
@@ -1,142 +0,0 @@
-#ifndef _M68K_SEMAPHORE_HELPER_H
-#define _M68K_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * m68k version by Andreas Schwab
- */
-
-#include <linux/errno.h>
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
-	atomic_inc(&sem->waking);
-}
-
-#ifndef CONFIG_RMW_INSNS
-extern spinlock_t semaphore_wake_lock;
-#endif
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
-	int ret;
-#ifndef CONFIG_RMW_INSNS
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	ret = 0;
-	if (atomic_read(&sem->waking) > 0) {
-		atomic_dec(&sem->waking);
-		ret = 1;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-#else
-	int tmp1, tmp2;
-
-	__asm__ __volatile__
-	  ("1:	movel	%1,%2\n"
-	   "	jle	2f\n"
-	   "	subql	#1,%2\n"
-	   "	casl	%1,%2,%3\n"
-	   "	jne	1b\n"
-	   "	moveq	#1,%0\n"
-	   "2:"
-	   : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
-	   : "m" (sem->waking), "0" (0), "1" (sem->waking));
-#endif
-
-	return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- *	1	got the lock
- *	0	go to sleep
- *	-EINTR	interrupted
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
-						struct task_struct *tsk)
-{
-	int ret;
-#ifndef CONFIG_RMW_INSNS
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	ret = 0;
-	if (atomic_read(&sem->waking) > 0) {
-		atomic_dec(&sem->waking);
-		ret = 1;
-	} else if (signal_pending(tsk)) {
-		atomic_inc(&sem->count);
-		ret = -EINTR;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-#else
-	int tmp1, tmp2;
-
-	__asm__ __volatile__
-	  ("1:	movel	%1,%2\n"
-	   "	jle	2f\n"
-	   "	subql	#1,%2\n"
-	   "	casl	%1,%2,%3\n"
-	   "	jne	1b\n"
-	   "	moveq	#1,%0\n"
-	   "	jra	%a4\n"
-	   "2:"
-	   : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
-	   : "m" (sem->waking), "i" (&&next), "0" (0), "1" (sem->waking));
-	if (signal_pending(tsk)) {
-		atomic_inc(&sem->count);
-		ret = -EINTR;
-	}
-next:
-#endif
-
-	return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- *	1	failed to lock
- *	0	got the lock
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
-	int ret;
-#ifndef CONFIG_RMW_INSNS
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	ret = 1;
-	if (atomic_read(&sem->waking) > 0) {
-		atomic_dec(&sem->waking);
-		ret = 0;
-	} else
-		atomic_inc(&sem->count);
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-#else
-	int tmp1, tmp2;
-
-	__asm__ __volatile__
-	  ("1:	movel	%1,%2\n"
-	   "	jle	2f\n"
-	   "	subql	#1,%2\n"
-	   "	casl	%1,%2,%3\n"
-	   "	jne	1b\n"
-	   "	moveq	#0,%0\n"
-	   "2:"
-	   : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
-	   : "m" (sem->waking), "0" (1), "1" (sem->waking));
-	if (ret)
-		atomic_inc(&sem->count);
-#endif
-	return ret;
-}
-
-#endif
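Under CONFIG_RMW_INSNS the helper above swaps the global semaphore_wake_lock for a casl compare-and-swap loop on sem->waking. For readers who don't speak 68k assembler, the loop is equivalent to this C11 sketch (an assumed rendering, not kernel code):

/* C11 rendering of the casl loop: atomically decrement waking
 * only while it is positive, reporting whether we succeeded. */
#include <stdatomic.h>

static int try_take_waking(atomic_int *waking)
{
        int old = atomic_load(waking);

        while (old > 0) {
                /* on failure, 'old' is reloaded with the fresh value */
                if (atomic_compare_exchange_weak(waking, &old, old - 1))
                        return 1;       /* consumed a wakeup credit */
        }
        return 0;                       /* nothing to consume */
}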
include/asm-m68k/semaphore.h
@@ -1,163 +1 @@
-#ifndef _M68K_SEMAPHORE_H
-#define _M68K_SEMAPHORE_H
-
-#define RW_LOCK_BIAS 0x01000000
-
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-#include <linux/stringify.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-/*
- * Interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * m68k version by Andreas Schwab
- */
-
-
-struct semaphore {
-	atomic_t count;
-	atomic_t waking;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
-	.count		= ATOMIC_INIT(n), \
-	.waking		= ATOMIC_INIT(0), \
-	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
-	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int __down_failed_interruptible(void /* params in registers */);
-asmlinkage int __down_failed_trylock(void /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage int __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/m68k/lib/semaphore.S
- */
-static inline void down(struct semaphore *sem)
-{
-	register struct semaphore *sem1 __asm__ ("%a1") = sem;
-
-	might_sleep();
-	__asm__ __volatile__(
-		"| atomic down operation\n\t"
-		"subql #1,%0@\n\t"
-		"jmi 2f\n\t"
-		"1:\n"
-		LOCK_SECTION_START(".even\n\t")
-		"2:\tpea 1b\n\t"
-		"jbra __down_failed\n"
-		LOCK_SECTION_END
-		: /* no outputs */
-		: "a" (sem1)
-		: "memory");
-}
-
-static inline int down_interruptible(struct semaphore *sem)
-{
-	register struct semaphore *sem1 __asm__ ("%a1") = sem;
-	register int result __asm__ ("%d0");
-
-	might_sleep();
-	__asm__ __volatile__(
-		"| atomic interruptible down operation\n\t"
-		"subql #1,%1@\n\t"
-		"jmi 2f\n\t"
-		"clrl %0\n"
-		"1:\n"
-		LOCK_SECTION_START(".even\n\t")
-		"2:\tpea 1b\n\t"
-		"jbra __down_failed_interruptible\n"
-		LOCK_SECTION_END
-		: "=d" (result)
-		: "a" (sem1)
-		: "memory");
-	return result;
-}
-
-static inline int down_trylock(struct semaphore *sem)
-{
-	register struct semaphore *sem1 __asm__ ("%a1") = sem;
-	register int result __asm__ ("%d0");
-
-	__asm__ __volatile__(
-		"| atomic down trylock operation\n\t"
-		"subql #1,%1@\n\t"
-		"jmi 2f\n\t"
-		"clrl %0\n"
-		"1:\n"
-		LOCK_SECTION_START(".even\n\t")
-		"2:\tpea 1b\n\t"
-		"jbra __down_failed_trylock\n"
-		LOCK_SECTION_END
-		: "=d" (result)
-		: "a" (sem1)
-		: "memory");
-	return result;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore *sem)
-{
-	register struct semaphore *sem1 __asm__ ("%a1") = sem;
-
-	__asm__ __volatile__(
-		"| atomic up operation\n\t"
-		"addql #1,%0@\n\t"
-		"jle 2f\n"
-		"1:\n"
-		LOCK_SECTION_START(".even\n\t")
-		"2:\t"
-		"pea 1b\n\t"
-		"jbra __up_wakeup\n"
-		LOCK_SECTION_END
-		: /* no outputs */
-		: "a" (sem1)
-		: "memory");
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif
+#include <linux/semaphore.h>
include/asm-m68knommu/semaphore-helper.h
@@ -1,82 +0,0 @@
-#ifndef _M68K_SEMAPHORE_HELPER_H
-#define _M68K_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * m68k version by Andreas Schwab
- */
-
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
-	atomic_inc(&sem->waking);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
-	int ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	ret = 0;
-	if (atomic_read(&sem->waking) > 0) {
-		atomic_dec(&sem->waking);
-		ret = 1;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- *	1	got the lock
- *	0	go to sleep
- *	-EINTR	interrupted
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
-						struct task_struct *tsk)
-{
-	int ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	ret = 0;
-	if (atomic_read(&sem->waking) > 0) {
-		atomic_dec(&sem->waking);
-		ret = 1;
-	} else if (signal_pending(tsk)) {
-		atomic_inc(&sem->count);
-		ret = -EINTR;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- *	1	failed to lock
- *	0	got the lock
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
-	int ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	ret = 1;
-	if (atomic_read(&sem->waking) > 0) {
-		atomic_dec(&sem->waking);
-		ret = 0;
-	} else
-		atomic_inc(&sem->count);
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-#endif
include/asm-m68knommu/semaphore.h
@@ -1,153 +1 @@
-#ifndef _M68K_SEMAPHORE_H
-#define _M68K_SEMAPHORE_H
-
-#define RW_LOCK_BIAS 0x01000000
-
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-/*
- * Interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * m68k version by Andreas Schwab
- */
-
-
-struct semaphore {
-	atomic_t count;
-	atomic_t waking;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
-	.count		= ATOMIC_INIT(n), \
-	.waking		= ATOMIC_INIT(0), \
-	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int __down_failed_interruptible(void /* params in registers */);
-asmlinkage int __down_failed_trylock(void /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage int __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-extern spinlock_t semaphore_wake_lock;
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/m68k/lib/semaphore.S
- */
-static inline void down(struct semaphore * sem)
-{
-	might_sleep();
-	__asm__ __volatile__(
-		"| atomic down operation\n\t"
-		"movel	%0, %%a1\n\t"
-		"lea	%%pc@(1f), %%a0\n\t"
-		"subql	#1, %%a1@\n\t"
-		"jmi __down_failed\n"
-		"1:"
-		: /* no outputs */
-		: "g" (sem)
-		: "cc", "%a0", "%a1", "memory");
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
-	int ret;
-
-	might_sleep();
-	__asm__ __volatile__(
-		"| atomic down operation\n\t"
-		"movel	%1, %%a1\n\t"
-		"lea	%%pc@(1f), %%a0\n\t"
-		"subql	#1, %%a1@\n\t"
-		"jmi __down_failed_interruptible\n\t"
-		"clrl	%%d0\n"
-		"1: movel	%%d0, %0\n"
-		: "=d" (ret)
-		: "g" (sem)
-		: "cc", "%d0", "%a0", "%a1", "memory");
-	return(ret);
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
-	register struct semaphore *sem1 __asm__ ("%a1") = sem;
-	register int result __asm__ ("%d0");
-
-	__asm__ __volatile__(
-		"| atomic down trylock operation\n\t"
-		"subql #1,%1@\n\t"
-		"jmi 2f\n\t"
-		"clrl %0\n"
-		"1:\n"
-		".section .text.lock,\"ax\"\n"
-		".even\n"
-		"2:\tpea 1b\n\t"
-		"jbra __down_failed_trylock\n"
-		".previous"
-		: "=d" (result)
-		: "a" (sem1)
-		: "memory");
-	return result;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
-	__asm__ __volatile__(
-		"| atomic up operation\n\t"
-		"movel	%0, %%a1\n\t"
-		"lea	%%pc@(1f), %%a0\n\t"
-		"addql	#1, %%a1@\n\t"
-		"jle __up_wakeup\n"
-		"1:"
-		: /* no outputs */
-		: "g" (sem)
-		: "cc", "%a0", "%a1", "memory");
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif
+#include <linux/semaphore.h>
@@ -1,108 +1 @@
#include <linux/semaphore.h>
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996  Linus Torvalds
 * Copyright (C) 1998, 99, 2000, 01, 04  Ralf Baechle
 * Copyright (C) 1999, 2000, 01  Silicon Graphics, Inc.
 * Copyright (C) 2000, 01 MIPS Technologies, Inc.
 *
 * In all honesty, little of the old MIPS code left - the PPC64 variant was
 * just looking nice and portable so I ripped it.  Credits to whoever wrote
 * it.
 */
#ifndef __ASM_SEMAPHORE_H
#define __ASM_SEMAPHORE_H

/*
 * Remove spinlock-based RW semaphores; RW semaphore definitions are
 * now in rwsem.h and we use the generic lib/rwsem.c implementation.
 * Rework semaphores to use atomic_dec_if_positive.
 * -- Paul Mackerras (paulus@samba.org)
 */

#ifdef __KERNEL__

#include <asm/atomic.h>
#include <asm/system.h>
#include <linux/wait.h>
#include <linux/rwsem.h>

struct semaphore {
	/*
	 * Note that any negative value of count is equivalent to 0,
	 * but additionally indicates that some process(es) might be
	 * sleeping on `wait'.
	 */
	atomic_t count;
	wait_queue_head_t wait;
};

#define __SEMAPHORE_INITIALIZER(name, n)				\
{									\
	.count	= ATOMIC_INIT(n),					\
	.wait	= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)		\
}

#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)

#define DECLARE_MUTEX(name)	__DECLARE_SEMAPHORE_GENERIC(name, 1)

static inline void sema_init(struct semaphore *sem, int val)
{
	atomic_set(&sem->count, val);
	init_waitqueue_head(&sem->wait);
}

static inline void init_MUTEX(struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED(struct semaphore *sem)
{
	sema_init(sem, 0);
}

extern void __down(struct semaphore * sem);
extern int __down_interruptible(struct semaphore * sem);
extern void __up(struct semaphore * sem);

static inline void down(struct semaphore * sem)
{
	might_sleep();

	/*
	 * Try to get the semaphore, take the slow path if we fail.
	 */
	if (unlikely(atomic_dec_return(&sem->count) < 0))
		__down(sem);
}

static inline int down_interruptible(struct semaphore * sem)
{
	int ret = 0;

	might_sleep();

	if (unlikely(atomic_dec_return(&sem->count) < 0))
		ret = __down_interruptible(sem);
	return ret;
}

static inline int down_trylock(struct semaphore * sem)
{
	return atomic_dec_if_positive(&sem->count) < 0;
}

static inline void up(struct semaphore * sem)
{
	if (unlikely(atomic_inc_return(&sem->count) <= 0))
		__up(sem);
}

#endif /* __KERNEL__ */

#endif /* __ASM_SEMAPHORE_H */
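The down_trylock() above leans on atomic_dec_if_positive(), which decrements only when the result would stay non-negative and returns that (possibly unstored) result, so a failed trylock never disturbs the count. A standalone C11 sketch of those semantics follows; the real MIPS and PowerPC versions use ll/sc or lwarx/stwcx. loops rather than a portable CAS.

/* Sketch of atomic_dec_if_positive(): returns old - 1, storing the
 * decrement only if that result is >= 0. */
#include <stdatomic.h>

static int dec_if_positive(atomic_int *v)
{
	int old = atomic_load(v);

	while (old > 0 &&
	       !atomic_compare_exchange_weak(v, &old, old - 1))
		;	/* 'old' is reloaded by each failed CAS */
	return old - 1;
}

Hence atomic_dec_if_positive(&sem->count) < 0 is non-zero exactly when the semaphore was unavailable, which is the return convention down_trylock() wants.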
@@ -1,169 +1 @@
#include <linux/semaphore.h>
/* MN10300 Semaphores
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_SEMAPHORE_H
#define _ASM_SEMAPHORE_H

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>

#define SEMAPHORE_DEBUG		0

/*
 * the semaphore definition
 * - if count is >0 then there are tokens available on the semaphore for down
 *   to collect
 * - if count is <=0 then there are no spare tokens, and anyone that wants one
 *   must wait
 * - if wait_list is not empty, then there are processes waiting for the
 *   semaphore
 */
struct semaphore {
	atomic_t		count;		/* it's not really atomic, it's
						 * just that certain modules
						 * expect to be able to access
						 * it directly */
	spinlock_t		wait_lock;
	struct list_head	wait_list;
#if SEMAPHORE_DEBUG
	unsigned		__magic;
#endif
};

#if SEMAPHORE_DEBUG
# define __SEM_DEBUG_INIT(name) , (long)&(name).__magic
#else
# define __SEM_DEBUG_INIT(name)
#endif


#define __SEMAPHORE_INITIALIZER(name, init_count)			\
{									\
	.count		= ATOMIC_INIT(init_count),			\
	.wait_lock	= __SPIN_LOCK_UNLOCKED((name).wait_lock),	\
	.wait_list	= LIST_HEAD_INIT((name).wait_list)		\
	__SEM_DEBUG_INIT(name)						\
}

#define __DECLARE_SEMAPHORE_GENERIC(name,count)			\
	struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)

#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0)

static inline void sema_init(struct semaphore *sem, int val)
{
	*sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
}

static inline void init_MUTEX(struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED(struct semaphore *sem)
{
	sema_init(sem, 0);
}

extern void __down(struct semaphore *sem, unsigned long flags);
extern int __down_interruptible(struct semaphore *sem, unsigned long flags);
extern void __up(struct semaphore *sem);

static inline void down(struct semaphore *sem)
{
	unsigned long flags;
	int count;

#if SEMAPHORE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	spin_lock_irqsave(&sem->wait_lock, flags);
	count = atomic_read(&sem->count);
	if (likely(count > 0)) {
		atomic_set(&sem->count, count - 1);
		spin_unlock_irqrestore(&sem->wait_lock, flags);
	} else {
		__down(sem, flags);
	}
}

static inline int down_interruptible(struct semaphore *sem)
{
	unsigned long flags;
	int count, ret = 0;

#if SEMAPHORE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	spin_lock_irqsave(&sem->wait_lock, flags);
	count = atomic_read(&sem->count);
	if (likely(count > 0)) {
		atomic_set(&sem->count, count - 1);
		spin_unlock_irqrestore(&sem->wait_lock, flags);
	} else {
		ret = __down_interruptible(sem, flags);
	}
	return ret;
}

/*
 * non-blockingly attempt to down() a semaphore.
 * - returns zero if we acquired it
 */
static inline int down_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int count, success = 0;

#if SEMAPHORE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	spin_lock_irqsave(&sem->wait_lock, flags);
	count = atomic_read(&sem->count);
	if (likely(count > 0)) {
		atomic_set(&sem->count, count - 1);
		success = 1;
	}
	spin_unlock_irqrestore(&sem->wait_lock, flags);
	return !success;
}

static inline void up(struct semaphore *sem)
{
	unsigned long flags;

#if SEMAPHORE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	spin_lock_irqsave(&sem->wait_lock, flags);
	if (!list_empty(&sem->wait_list))
		__up(sem);
	else
		atomic_set(&sem->count, atomic_read(&sem->count) + 1);
	spin_unlock_irqrestore(&sem->wait_lock, flags);
}

static inline int sem_getcount(struct semaphore *sem)
{
	return atomic_read(&sem->count);
}

#endif /* __ASSEMBLY__ */

#endif
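Two details of the MN10300 scheme are worth spelling out. First, on contention the fast paths call __down() with wait_lock still held, handing over `flags` so the slow path can queue the task and release the lock itself. Second, down_trylock() returns zero on success (note the `return !success;`), the opposite sense of pthread-style trylocks. A usage sketch in kernel context, using the API declared above; the helper name and the -EAGAIN choice are illustrative, not from the source:

#include <linux/errno.h>

static int try_do_work(struct semaphore *sem)	/* hypothetical helper */
{
	if (down_trylock(sem))	/* non-zero: count was 0, lock not taken */
		return -EAGAIN;
	/* ... critical section; must not sleep if irqs are disabled ... */
	up(sem);
	return 0;
}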
@@ -1,89 +0,0 @@
#ifndef _ASM_PARISC_SEMAPHORE_HELPER_H
#define _ASM_PARISC_SEMAPHORE_HELPER_H

/*
 * SMP- and interrupt-safe semaphores helper functions.
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1999 Andrea Arcangeli
 */

/*
 * These two _must_ execute atomically wrt each other.
 *
 * This is trivially done with load_locked/store_cond,
 * which we have.  Let the rest of the losers suck eggs.
 */
static __inline__ void wake_one_more(struct semaphore * sem)
{
	atomic_inc((atomic_t *)&sem->waking);
}

static __inline__ int waking_non_zero(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->waking > 0) {
		sem->waking--;
		ret = 1;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}

/*
 * waking_non_zero_interruptible:
 *	1	got the lock
 *	0	go to sleep
 *	-EINTR	interrupted
 *
 * We must undo the sem->count down_interruptible() increment while we are
 * protected by the spinlock in order to make atomic this atomic_inc() with the
 * atomic_read() in wake_one_more(), otherwise we can race. -arca
 */
static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
						struct task_struct *tsk)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->waking > 0) {
		sem->waking--;
		ret = 1;
	} else if (signal_pending(tsk)) {
		atomic_inc(&sem->count);
		ret = -EINTR;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}

/*
 * waking_non_zero_trylock:
 *	1	failed to lock
 *	0	got the lock
 *
 * We must undo the sem->count down_trylock() increment while we are
 * protected by the spinlock in order to make atomic this atomic_inc() with the
 * atomic_read() in wake_one_more(), otherwise we can race. -arca
 */
static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->waking <= 0)
		atomic_inc(&sem->count);
	else {
		sem->waking--;
		ret = 0;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}

#endif /* _ASM_PARISC_SEMAPHORE_HELPER_H */
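The idea behind this deleted helper is a wakeup-token handshake: up() banks a token in `waking` via wake_one_more(), and a sleeper must consume exactly one token before it may claim the semaphore, so a spurious wakeup cannot steal the lock. A standalone C11 sketch of the handshake; names are hypothetical, and the real code serialises with the global semaphore_wake_lock spinlock instead of a CAS loop:

#include <stdatomic.h>
#include <stdbool.h>

struct sketch_sem { atomic_int waking; };

static void sketch_wake_one_more(struct sketch_sem *s)
{
	atomic_fetch_add(&s->waking, 1);	/* up() banks one token */
}

static bool sketch_waking_non_zero(struct sketch_sem *s)
{
	int w = atomic_load(&s->waking);

	/* consume one token iff one is available */
	while (w > 0 && !atomic_compare_exchange_weak(&s->waking, &w, w - 1))
		;	/* 'w' is reloaded by each failed CAS */
	return w > 0;	/* true: we own a token, the down() may complete */
}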
@@ -1,145 +1 @@
#include <linux/semaphore.h>
/* SMP- and interrupt-safe semaphores.
 * PA-RISC version by Matthew Wilcox
 *
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 * Copyright (C) 1996 Linus Torvalds
 * Copyright (C) 1999-2001 Matthew Wilcox < willy at debian d0T org >
 * Copyright (C) 2000 Grant Grundler < grundler a debian org >
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#ifndef _ASM_PARISC_SEMAPHORE_H
#define _ASM_PARISC_SEMAPHORE_H

#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/rwsem.h>

#include <asm/system.h>

/*
 * The `count' is initialised to the number of people who are allowed to
 * take the lock.  (Normally we want a mutex, so this is `1').  if
 * `count' is positive, the lock can be taken.  if it's 0, no-one is
 * waiting on it.  if it's -1, at least one task is waiting.
 */
struct semaphore {
	spinlock_t	sentry;
	int		count;
	wait_queue_head_t wait;
};

#define __SEMAPHORE_INITIALIZER(name, n)				\
{									\
	.sentry	= SPIN_LOCK_UNLOCKED,					\
	.count	= n,							\
	.wait	= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)		\
}

#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)

static inline void sema_init (struct semaphore *sem, int val)
{
	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
}

static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}

static inline int sem_getcount(struct semaphore *sem)
{
	return sem->count;
}

asmlinkage void __down(struct semaphore * sem);
asmlinkage int __down_interruptible(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);

/* Semaphores can be `tried' from irq context.  So we have to disable
 * interrupts while we're messing with the semaphore.  Sorry.
 */

static inline void down(struct semaphore * sem)
{
	might_sleep();
	spin_lock_irq(&sem->sentry);
	if (sem->count > 0) {
		sem->count--;
	} else {
		__down(sem);
	}
	spin_unlock_irq(&sem->sentry);
}

static inline int down_interruptible(struct semaphore * sem)
{
	int ret = 0;

	might_sleep();
	spin_lock_irq(&sem->sentry);
	if (sem->count > 0) {
		sem->count--;
	} else {
		ret = __down_interruptible(sem);
	}
	spin_unlock_irq(&sem->sentry);
	return ret;
}

/*
 * down_trylock returns 0 on success, 1 if we failed to get the lock.
 * May not sleep, but must preserve irq state
 */
static inline int down_trylock(struct semaphore * sem)
{
	unsigned long flags;
	int count;

	spin_lock_irqsave(&sem->sentry, flags);
	count = sem->count - 1;
	if (count >= 0)
		sem->count = count;
	spin_unlock_irqrestore(&sem->sentry, flags);
	return (count < 0);
}

/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 */
static inline void up(struct semaphore * sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->sentry, flags);
	if (sem->count < 0) {
		__up(sem);
	} else {
		sem->count++;
	}
	spin_unlock_irqrestore(&sem->sentry, flags);
}

#endif /* _ASM_PARISC_SEMAPHORE_H */
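The mixed locking above follows from the "tried from irq context" comment: down() may sleep, so it only ever runs in process context where spin_lock_irq()/spin_unlock_irq() (which unconditionally re-enables interrupts on exit) is safe, while down_trylock() and up() can run with interrupts already disabled and therefore use the irqsave/irqrestore variants to leave the interrupt state exactly as they found it. A hedged sketch of the irq-context case, in kernel context; the handler name and wiring are hypothetical:

#include <linux/interrupt.h>

static irqreturn_t sample_irq_handler(int irq, void *dev)
{
	struct semaphore *sem = dev;	/* registered elsewhere */

	if (!down_trylock(sem)) {	/* 0 == acquired, even with irqs off */
		/* ... brief exclusive access, no sleeping here ... */
		up(sem);	/* irqsave variant keeps irqs disabled */
	}
	return IRQ_HANDLED;
}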
@@ -1,94 +1 @@
#include <linux/semaphore.h>
#ifndef _ASM_POWERPC_SEMAPHORE_H
#define _ASM_POWERPC_SEMAPHORE_H

/*
 * Remove spinlock-based RW semaphores; RW semaphore definitions are
 * now in rwsem.h and we use the generic lib/rwsem.c implementation.
 * Rework semaphores to use atomic_dec_if_positive.
 * -- Paul Mackerras (paulus@samba.org)
 */

#ifdef __KERNEL__

#include <asm/atomic.h>
#include <asm/system.h>
#include <linux/wait.h>
#include <linux/rwsem.h>

struct semaphore {
	/*
	 * Note that any negative value of count is equivalent to 0,
	 * but additionally indicates that some process(es) might be
	 * sleeping on `wait'.
	 */
	atomic_t count;
	wait_queue_head_t wait;
};

#define __SEMAPHORE_INITIALIZER(name, n)				\
{									\
	.count	= ATOMIC_INIT(n),					\
	.wait	= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)		\
}

#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name)	__DECLARE_SEMAPHORE_GENERIC(name, 1)

static inline void sema_init (struct semaphore *sem, int val)
{
	atomic_set(&sem->count, val);
	init_waitqueue_head(&sem->wait);
}

static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}

extern void __down(struct semaphore * sem);
extern int __down_interruptible(struct semaphore * sem);
extern void __up(struct semaphore * sem);

static inline void down(struct semaphore * sem)
{
	might_sleep();

	/*
	 * Try to get the semaphore, take the slow path if we fail.
	 */
	if (unlikely(atomic_dec_return(&sem->count) < 0))
		__down(sem);
}

static inline int down_interruptible(struct semaphore * sem)
{
	int ret = 0;

	might_sleep();

	if (unlikely(atomic_dec_return(&sem->count) < 0))
		ret = __down_interruptible(sem);
	return ret;
}

static inline int down_trylock(struct semaphore * sem)
{
	return atomic_dec_if_positive(&sem->count) < 0;
}

static inline void up(struct semaphore * sem)
{
	if (unlikely(atomic_inc_return(&sem->count) <= 0))
		__up(sem);
}

#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_SEMAPHORE_H */
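This PowerPC version is the scheme the MIPS header above admits to having borrowed, so a short worked trace of the count protocol covers both. Assume a DECLARE_MUTEX (count = 1) and three tasks A, B, C; the trace is schematic, ignoring how __down() re-tallies the count for woken sleepers:

	count	event
	  1	initial DECLARE_MUTEX state
	  0	A: down()  - atomic_dec_return() == 0, fast path, A owns it
	 -1	B: down()  - result < 0, B sleeps in __down()
	 -2	C: down()  - result < 0, C sleeps too
	 -1	A: up()    - atomic_inc_return() <= 0, __up() wakes B
	  0	B: up()    - result still <= 0, __up() wakes C
	  1	C: up()    - result > 0, nobody left waiting

Note how a negative count roughly tracks the number of sleepers, which is exactly what the comment in struct semaphore hints at.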
@@ -1,107 +1 @@
#include <linux/semaphore.h>
/*
 *  include/asm-s390/semaphore.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *
 *  Derived from "include/asm-i386/semaphore.h"
 *    (C) Copyright 1996 Linus Torvalds
 */

#ifndef _S390_SEMAPHORE_H
#define _S390_SEMAPHORE_H

#include <asm/system.h>
#include <asm/atomic.h>
#include <linux/wait.h>
#include <linux/rwsem.h>

struct semaphore {
	/*
	 * Note that any negative value of count is equivalent to 0,
	 * but additionally indicates that some process(es) might be
	 * sleeping on `wait'.
	 */
	atomic_t count;
	wait_queue_head_t wait;
};

#define __SEMAPHORE_INITIALIZER(name,count) \
	{ ATOMIC_INIT(count), __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) }

#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)

static inline void sema_init (struct semaphore *sem, int val)
{
	atomic_set(&sem->count, val);
	init_waitqueue_head(&sem->wait);
}

static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}

asmlinkage void __down(struct semaphore * sem);
asmlinkage int __down_interruptible(struct semaphore * sem);
asmlinkage int __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);

static inline void down(struct semaphore * sem)
{
	might_sleep();
	if (atomic_dec_return(&sem->count) < 0)
		__down(sem);
}

static inline int down_interruptible(struct semaphore * sem)
{
	int ret = 0;

	might_sleep();
	if (atomic_dec_return(&sem->count) < 0)
		ret = __down_interruptible(sem);
	return ret;
}

static inline int down_trylock(struct semaphore * sem)
{
	int old_val, new_val;

	/*
	 * This inline assembly atomically implements the equivalent
	 * to the following C code:
	 *   old_val = sem->count.counter;
	 *   if ((new_val = old_val) > 0)
	 *       sem->count.counter = --new_val;
	 * In the ppc code this is called atomic_dec_if_positive.
	 */
	asm volatile(
		"	l	%0,0(%3)\n"
		"0:	ltr	%1,%0\n"
		"	jle	1f\n"
		"	ahi	%1,-1\n"
		"	cs	%0,%1,0(%3)\n"
		"	jl	0b\n"
		"1:"
		: "=&d" (old_val), "=&d" (new_val), "=m" (sem->count.counter)
		: "a" (&sem->count.counter), "m" (sem->count.counter)
		: "cc", "memory");
	return old_val <= 0;
}

static inline void up(struct semaphore * sem)
{
	if (atomic_inc_return(&sem->count) <= 0)
		__up(sem);
}

#endif
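The s390 trylock is the same atomic_dec_if_positive idea, implemented as a compare-and-swap retry loop around the cs instruction. The standalone C11 sketch below maps the asm line by line; the function name is hypothetical. The key subtlety is that it tests the *old* value: "old_val <= 0" can only be true when no decrement was stored, i.e. when the trylock failed.

#include <stdatomic.h>

static int s390_style_trylock(atomic_int *count)
{
	int old = atomic_load(count);			/* "l   %0,0(%3)" */
	int new;

	do {
		if (old <= 0)				/* "ltr; jle 1f"  */
			break;
		new = old - 1;				/* "ahi %1,-1"    */
	} while (!atomic_compare_exchange_weak(count, &old, new)); /* "cs" */

	return old <= 0;	/* 1 = failed, matching down_trylock() */
}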
@@ -1,89 +0,0 @@
#ifndef __ASM_SH_SEMAPHORE_HELPER_H
#define __ASM_SH_SEMAPHORE_HELPER_H

/*
 * SMP- and interrupt-safe semaphores helper functions.
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1999 Andrea Arcangeli
 */

/*
 * These two _must_ execute atomically wrt each other.
 *
 * This is trivially done with load_locked/store_cond,
 * which we have.  Let the rest of the losers suck eggs.
 */
static __inline__ void wake_one_more(struct semaphore * sem)
{
	atomic_inc((atomic_t *)&sem->sleepers);
}

static __inline__ int waking_non_zero(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers > 0) {
		sem->sleepers--;
		ret = 1;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}

/*
 * waking_non_zero_interruptible:
 *	1	got the lock
 *	0	go to sleep
 *	-EINTR	interrupted
 *
 * We must undo the sem->count down_interruptible() increment while we are
 * protected by the spinlock in order to make atomic this atomic_inc() with the
 * atomic_read() in wake_one_more(), otherwise we can race. -arca
 */
static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
						struct task_struct *tsk)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers > 0) {
		sem->sleepers--;
		ret = 1;
	} else if (signal_pending(tsk)) {
		atomic_inc(&sem->count);
		ret = -EINTR;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}

/*
 * waking_non_zero_trylock:
 *	1	failed to lock
 *	0	got the lock
 *
 * We must undo the sem->count down_trylock() increment while we are
 * protected by the spinlock in order to make atomic this atomic_inc() with the
 * atomic_read() in wake_one_more(), otherwise we can race. -arca
 */
static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers <= 0)
		atomic_inc(&sem->count);
	else {
		sem->sleepers--;
		ret = 0;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}

#endif /* __ASM_SH_SEMAPHORE_HELPER_H */
@@ -1,115 +1 @@
#include <linux/semaphore.h>
#ifndef __ASM_SH_SEMAPHORE_H
#define __ASM_SH_SEMAPHORE_H

#include <linux/linkage.h>

#ifdef __KERNEL__
/*
 * SMP- and interrupt-safe semaphores.
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * SuperH version by Niibe Yutaka
 *  (Currently no asm implementation but generic C code...)
 */

#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/wait.h>

#include <asm/system.h>
#include <asm/atomic.h>

struct semaphore {
	atomic_t count;
	int sleepers;
	wait_queue_head_t wait;
};

#define __SEMAPHORE_INITIALIZER(name, n)				\
{									\
	.count		= ATOMIC_INIT(n),				\
	.sleepers	= 0,						\
	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
}

#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)

static inline void sema_init (struct semaphore *sem, int val)
{
/*
 *	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
 *
 * i'd rather use the more flexible initialization above, but sadly
 * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
 */
	atomic_set(&sem->count, val);
	sem->sleepers = 0;
	init_waitqueue_head(&sem->wait);
}

static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}

#if 0
asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int __down_failed_interruptible(void /* params in registers */);
asmlinkage int __down_failed_trylock(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);
#endif

asmlinkage void __down(struct semaphore * sem);
asmlinkage int __down_interruptible(struct semaphore * sem);
asmlinkage int __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);

extern spinlock_t semaphore_wake_lock;

static inline void down(struct semaphore * sem)
{
	might_sleep();
	if (atomic_dec_return(&sem->count) < 0)
		__down(sem);
}

static inline int down_interruptible(struct semaphore * sem)
{
	int ret = 0;

	might_sleep();
	if (atomic_dec_return(&sem->count) < 0)
		ret = __down_interruptible(sem);
	return ret;
}

static inline int down_trylock(struct semaphore * sem)
{
	int ret = 0;

	if (atomic_dec_return(&sem->count) < 0)
		ret = __down_trylock(sem);
	return ret;
}

/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 */
static inline void up(struct semaphore * sem)
{
	if (atomic_inc_return(&sem->count) <= 0)
		__up(sem);
}

#endif
#endif /* __ASM_SH_SEMAPHORE_H */
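After this commit every per-arch header above collapses to a single line, #include <linux/semaphore.h>, so all architectures share one C implementation with one set of semantics to debug and extend. A minimal usage sketch of that generic API; the device names are hypothetical, and returning -EINTR directly (rather than propagating down_interruptible()'s return value) is an illustrative choice:

#include <linux/errno.h>
#include <linux/semaphore.h>

static DECLARE_MUTEX(mydev_sem);	/* generic semaphore, count = 1 */

static int mydev_do_io(void)		/* hypothetical driver entry point */
{
	if (down_interruptible(&mydev_sem))
		return -EINTR;		/* a signal arrived while sleeping */
	/* ... exclusive access to the device ... */
	up(&mydev_sem);
	return 0;
}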