author     Linus Torvalds <torvalds@linux-foundation.org>   2008-08-03 10:39:02 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-08-03 10:39:02 -0700
commit     7e31aa11fc672bbe0dd0da59513c9efe3809ced7 (patch)
tree       0aafe6a0045230950ce6b26579a053da12614a49 /arch/arm/include/asm/locks.h
parent     071f4924844c435a3ae0cdbab7d7df2f1da85713 (diff)
parent     9cb7117fa4858468014f76bd996076985111e955 (diff)
download   linux-7e31aa11fc672bbe0dd0da59513c9efe3809ced7.tar.gz
           linux-7e31aa11fc672bbe0dd0da59513c9efe3809ced7.tar.bz2
           linux-7e31aa11fc672bbe0dd0da59513c9efe3809ced7.zip
Merge branch 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm
* 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm:
[ARM] 5182/1: pxa: Fix pcm990 compilation
[ARM] Fix explicit asm(-arm)?/arch-foo references
[ARM] move include/asm-arm to arch/arm/include/asm
[ARM] Remove explicit dependency for misc.o from compressed/Makefile
[ARM] initrd: claim initrd memory exclusively
[ARM] pxa: add support for L2 outer cache on XScale3 (attempt 2)
[ARM] 5180/1: at91: Fix at91_nand -> atmel_nand rename fallout
[ARM] add Sascha Hauer as Freescale i.MX Maintainer
[ARM] i.MX: add missing clock functions exports
[ARM] i.MX: remove set_imx_fb_info() export
[ARM] mx1ads: make mmc platform data available for modules
[ARM] mx2: add missing Kconfig dependency
Diffstat (limited to 'arch/arm/include/asm/locks.h')
-rw-r--r-- | arch/arm/include/asm/locks.h | 274
1 files changed, 274 insertions, 0 deletions
diff --git a/arch/arm/include/asm/locks.h b/arch/arm/include/asm/locks.h
new file mode 100644
index 000000000000..ef4c897772d1
--- /dev/null
+++ b/arch/arm/include/asm/locks.h
@@ -0,0 +1,274 @@
+/*
+ * arch/arm/include/asm/locks.h
+ *
+ * Copyright (C) 2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Interrupt safe locking assembler.
+ */
+#ifndef __ASM_PROC_LOCKS_H
+#define __ASM_PROC_LOCKS_H
+
+#if __LINUX_ARM_ARCH__ >= 6
+
+#define __down_op(ptr,fail) \
+ ({ \
+ __asm__ __volatile__( \
+ "@ down_op\n" \
+"1: ldrex lr, [%0]\n" \
+" sub lr, lr, %1\n" \
+" strex ip, lr, [%0]\n" \
+" teq ip, #0\n" \
+" bne 1b\n" \
+" teq lr, #0\n" \
+" movmi ip, %0\n" \
+" blmi " #fail \
+ : \
+ : "r" (ptr), "I" (1) \
+ : "ip", "lr", "cc"); \
+ smp_mb(); \
+ })
+
+#define __down_op_ret(ptr,fail) \
+ ({ \
+ unsigned int ret; \
+ __asm__ __volatile__( \
+ "@ down_op_ret\n" \
+"1: ldrex lr, [%1]\n" \
+" sub lr, lr, %2\n" \
+" strex ip, lr, [%1]\n" \
+" teq ip, #0\n" \
+" bne 1b\n" \
+" teq lr, #0\n" \
+" movmi ip, %1\n" \
+" movpl ip, #0\n" \
+" blmi " #fail "\n" \
+" mov %0, ip" \
+ : "=&r" (ret) \
+ : "r" (ptr), "I" (1) \
+ : "ip", "lr", "cc"); \
+ smp_mb(); \
+ ret; \
+ })
+
+#define __up_op(ptr,wake) \
+ ({ \
+ smp_mb(); \
+ __asm__ __volatile__( \
+ "@ up_op\n" \
+"1: ldrex lr, [%0]\n" \
+" add lr, lr, %1\n" \
+" strex ip, lr, [%0]\n" \
+" teq ip, #0\n" \
+" bne 1b\n" \
+" cmp lr, #0\n" \
+" movle ip, %0\n" \
+" blle " #wake \
+ : \
+ : "r" (ptr), "I" (1) \
+ : "ip", "lr", "cc"); \
+ })
+
+/*
+ * The value 0x01000000 supports up to 128 processors and
+ * lots of processes. BIAS must be chosen such that sub'ing
+ * BIAS once per CPU will result in the long remaining
+ * negative.
+ */
+#define RW_LOCK_BIAS 0x01000000
+#define RW_LOCK_BIAS_STR "0x01000000"
+
+#define __down_op_write(ptr,fail) \
+ ({ \
+ __asm__ __volatile__( \
+ "@ down_op_write\n" \
+"1: ldrex lr, [%0]\n" \
+" sub lr, lr, %1\n" \
+" strex ip, lr, [%0]\n" \
+" teq ip, #0\n" \
+" bne 1b\n" \
+" teq lr, #0\n" \
+" movne ip, %0\n" \
+" blne " #fail \
+ : \
+ : "r" (ptr), "I" (RW_LOCK_BIAS) \
+ : "ip", "lr", "cc"); \
+ smp_mb(); \
+ })
+
+#define __up_op_write(ptr,wake) \
+ ({ \
+ smp_mb(); \
+ __asm__ __volatile__( \
+ "@ up_op_write\n" \
+"1: ldrex lr, [%0]\n" \
+" adds lr, lr, %1\n" \
+" strex ip, lr, [%0]\n" \
+" teq ip, #0\n" \
+" bne 1b\n" \
+" movcs ip, %0\n" \
+" blcs " #wake \
+ : \
+ : "r" (ptr), "I" (RW_LOCK_BIAS) \
+ : "ip", "lr", "cc"); \
+ })
+
+#define __down_op_read(ptr,fail) \
+ __down_op(ptr, fail)
+
+#define __up_op_read(ptr,wake) \
+ ({ \
+ smp_mb(); \
+ __asm__ __volatile__( \
+ "@ up_op_read\n" \
+"1: ldrex lr, [%0]\n" \
+" add lr, lr, %1\n" \
+" strex ip, lr, [%0]\n" \
+" teq ip, #0\n" \
+" bne 1b\n" \
+" teq lr, #0\n" \
+" moveq ip, %0\n" \
+" bleq " #wake \
+ : \
+ : "r" (ptr), "I" (1) \
+ : "ip", "lr", "cc"); \
+ })
+
+#else
+
+#define __down_op(ptr,fail) \
+ ({ \
+ __asm__ __volatile__( \
+ "@ down_op\n" \
+" mrs ip, cpsr\n" \
+" orr lr, ip, #128\n" \
+" msr cpsr_c, lr\n" \
+" ldr lr, [%0]\n" \
+" subs lr, lr, %1\n" \
+" str lr, [%0]\n" \
+" msr cpsr_c, ip\n" \
+" movmi ip, %0\n" \
+" blmi " #fail \
+ : \
+ : "r" (ptr), "I" (1) \
+ : "ip", "lr", "cc"); \
+ smp_mb(); \
+ })
+
+#define __down_op_ret(ptr,fail) \
+ ({ \
+ unsigned int ret; \
+ __asm__ __volatile__( \
+ "@ down_op_ret\n" \
+" mrs ip, cpsr\n" \
+" orr lr, ip, #128\n" \
+" msr cpsr_c, lr\n" \
+" ldr lr, [%1]\n" \
+" subs lr, lr, %2\n" \
+" str lr, [%1]\n" \
+" msr cpsr_c, ip\n" \
+" movmi ip, %1\n" \
+" movpl ip, #0\n" \
+" blmi " #fail "\n" \
+" mov %0, ip" \
+ : "=&r" (ret) \
+ : "r" (ptr), "I" (1) \
+ : "ip", "lr", "cc"); \
+ smp_mb(); \
+ ret; \
+ })
+
+#define __up_op(ptr,wake) \
+ ({ \
+ smp_mb(); \
+ __asm__ __volatile__( \
+ "@ up_op\n" \
+" mrs ip, cpsr\n" \
+" orr lr, ip, #128\n" \
+" msr cpsr_c, lr\n" \
+" ldr lr, [%0]\n" \
+" adds lr, lr, %1\n" \
+" str lr, [%0]\n" \
+" msr cpsr_c, ip\n" \
+" movle ip, %0\n" \
+" blle " #wake \
+ : \
+ : "r" (ptr), "I" (1) \
+ : "ip", "lr", "cc"); \
+ })
+
+/*
+ * The value 0x01000000 supports up to 128 processors and
+ * lots of processes. BIAS must be chosen such that sub'ing
+ * BIAS once per CPU will result in the long remaining
+ * negative.
+ */
+#define RW_LOCK_BIAS 0x01000000
+#define RW_LOCK_BIAS_STR "0x01000000"
+
+#define __down_op_write(ptr,fail) \
+ ({ \
+ __asm__ __volatile__( \
+ "@ down_op_write\n" \
+" mrs ip, cpsr\n" \
+" orr lr, ip, #128\n" \
+" msr cpsr_c, lr\n" \
+" ldr lr, [%0]\n" \
+" subs lr, lr, %1\n" \
+" str lr, [%0]\n" \
+" msr cpsr_c, ip\n" \
+" movne ip, %0\n" \
+" blne " #fail \
+ : \
+ : "r" (ptr), "I" (RW_LOCK_BIAS) \
+ : "ip", "lr", "cc"); \
+ smp_mb(); \
+ })
+
+#define __up_op_write(ptr,wake) \
+ ({ \
+ __asm__ __volatile__( \
+ "@ up_op_write\n" \
+" mrs ip, cpsr\n" \
+" orr lr, ip, #128\n" \
+" msr cpsr_c, lr\n" \
+" ldr lr, [%0]\n" \
+" adds lr, lr, %1\n" \
+" str lr, [%0]\n" \
+" msr cpsr_c, ip\n" \
+" movcs ip, %0\n" \
+" blcs " #wake \
+ : \
+ : "r" (ptr), "I" (RW_LOCK_BIAS) \
+ : "ip", "lr", "cc"); \
+ smp_mb(); \
+ })
+
+#define __down_op_read(ptr,fail) \
+ __down_op(ptr, fail)
+
+#define __up_op_read(ptr,wake) \
+ ({ \
+ smp_mb(); \
+ __asm__ __volatile__( \
+ "@ up_op_read\n" \
+" mrs ip, cpsr\n" \
+" orr lr, ip, #128\n" \
+" msr cpsr_c, lr\n" \
+" ldr lr, [%0]\n" \
+" adds lr, lr, %1\n" \
+" str lr, [%0]\n" \
+" msr cpsr_c, ip\n" \
+" moveq ip, %0\n" \
+" bleq " #wake \
+ : \
+ : "r" (ptr), "I" (1) \
+ : "ip", "lr", "cc"); \
+ })
+
+#endif
+
+#endif
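A note on how these macros were consumed, since the header itself only defines them: the old ARM semaphore and rwlock code expanded them with a pointer to the lock counter and the name of an out-of-line slow-path routine, and the inline fast path only branches to that routine (blmi/blne/blcs/bleq) when the counted update indicates contention. On ARMv6 and later the update is retried with ldrex/strex until the store-exclusive succeeds; on earlier cores the same read-modify-write runs with IRQs masked by setting the I bit in the CPSR (orr lr, ip, #128 / msr cpsr_c, lr). The sketch below is an illustration with hypothetical names (my_sem, my_down, my_up, my_down_failed, my_wake), not code from this commit; in the kernel the slow-path targets were small assembly thunks that receive the counter address in ip and preserve the caller's registers, and they appear here as plain C prototypes only to keep the sketch self-contained.

/* Hypothetical caller of __down_op()/__up_op(); assumes a kernel build
 * environment where <asm/locks.h> and smp_mb() are available. */
#include <asm/locks.h>

struct my_sem {
	int count;		/* > 0: free, <= 0: held/contended */
};

/* Out-of-line slow paths.  In the real kernel these were asm stubs that
 * take the counter address in ip; plain prototypes are used here only so
 * the example is complete. */
void my_down_failed(void);
void my_wake(void);

static inline void my_down(struct my_sem *sem)
{
	/* Atomic decrement; the macro calls my_down_failed (blmi) only if
	 * the new count went negative, i.e. someone already holds it. */
	__down_op(&sem->count, my_down_failed);
}

static inline void my_up(struct my_sem *sem)
{
	/* Atomic increment; the macro calls my_wake (blle) only if the
	 * count suggests there are sleepers to wake. */
	__up_op(&sem->count, my_wake);
}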
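The RW_LOCK_BIAS comment compresses its arithmetic into one sentence, so a quick check may help (my own illustration, not part of the commit): the rwlock counter is a signed 32-bit value, a reader subtracts 1 (__down_op_read passes "I" (1)) and a writer subtracts the full 0x01000000 bias, so in the worst case every CPU has a writer that has subtracted the bias while readers have already pulled the value toward zero. 128 * 0x01000000 = 0x80000000 is exactly the largest negative magnitude a 32-bit long can still hold, which is where "up to 128 processors" comes from, and the 24 low-order bits below one bias step are what allows "lots of processes" holding the lock for read. A small userspace check of that bound, purely illustrative:

/* Illustrative only: verifies the "128 processors" bound implied by
 * RW_LOCK_BIAS = 0x01000000 with a 32-bit signed lock counter. */
#include <stdio.h>
#include <stdint.h>

#define RW_LOCK_BIAS 0x01000000

int main(void)
{
	/* Worst case: readers have already dragged the counter toward 0 and
	 * every CPU subtracts one full bias for a pending write lock. */
	int64_t worst_128 = -(int64_t)128 * RW_LOCK_BIAS;	/* exactly -2^31 */
	int64_t worst_129 = -(int64_t)129 * RW_LOCK_BIAS;

	printf("128 CPUs: %lld  fits in int32: %s\n",
	       (long long)worst_128, worst_128 >= INT32_MIN ? "yes" : "no");
	printf("129 CPUs: %lld  fits in int32: %s\n",
	       (long long)worst_129, worst_129 >= INT32_MIN ? "yes" : "no");

	/* "Lots of processes": each reader only costs 1, so one bias step
	 * leaves room for 2^24 - 1 concurrent reader holders. */
	printf("reader slots per bias step: %d\n", RW_LOCK_BIAS - 1);
	return 0;
}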