/*
    Copyright (C) 2001 Paul Davis and others (see below)
    Code derived from various headers from the Linux kernel.
    Copyright attributions maintained where present.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __libpbd_atomic_h__
#define __libpbd_atomic_h__

#ifdef HAVE_SMP		/* a macro we control, to manage ... */
#define CONFIG_SMP	/* ... the macro the kernel headers use */
#endif
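/*
 * Every architecture branch below ends up providing the same small API:
 * atomic_read(), atomic_set(), atomic_inc(), atomic_dec(),
 * atomic_dec_and_test() and friends.  An illustrative sketch of the
 * typical reference-counting use (editorial example, not part of the
 * original kernel headers; destroy() is a hypothetical cleanup hook):
 *
 *	atomic_t refcount = ATOMIC_INIT(1);
 *
 *	void ref (void)   { atomic_inc (&refcount); }
 *	void unref (void) { if (atomic_dec_and_test (&refcount)) destroy (); }
 */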
#if defined(__powerpc__) || defined(__ppc__)

/*
 * BK Id: SCCS/s.atomic.h 1.15 10/28/01 10:37:22 trini
 */
/*
 * PowerPC atomic operations
 */

#ifndef _ASM_PPC_ATOMIC_H_
#define _ASM_PPC_ATOMIC_H_

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		(((v)->counter) = (i))

extern void atomic_clear_mask(unsigned long mask, unsigned long *addr);
extern void atomic_set_mask(unsigned long mask, unsigned long *addr);

#ifdef CONFIG_SMP
#define SMP_ISYNC	"\n\tisync"
#else
#define SMP_ISYNC
#endif
static __inline__ void atomic_add(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_add\n\
	add	%0,%2,%0\n\
	stwcx.	%0,0,%3\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (a), "r" (&v->counter), "m" (v->counter)
	: "cc");
}

static __inline__ int atomic_add_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_add_return\n\
	add	%0,%1,%0\n\
	stwcx.	%0,0,%2\n\
	bne-	1b"
	SMP_ISYNC
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}
static __inline__ void atomic_sub(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_sub\n\
	subf	%0,%2,%0\n\
	stwcx.	%0,0,%3\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (a), "r" (&v->counter), "m" (v->counter)
	: "cc");
}

static __inline__ int atomic_sub_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
	subf	%0,%1,%0\n\
	stwcx.	%0,0,%2\n\
	bne-	1b"
	SMP_ISYNC
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}
static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n\
	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (&v->counter), "m" (v->counter)
	: "cc");
}

static __inline__ int atomic_inc_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%1		# atomic_inc_return\n\
	addic	%0,%0,1\n\
	stwcx.	%0,0,%1\n\
	bne-	1b"
	SMP_ISYNC
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n\
	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (&v->counter), "m" (v->counter)
	: "cc");
}

static __inline__ int atomic_dec_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%1		# atomic_dec_return\n\
	addic	%0,%0,-1\n\
	stwcx.	%0,0,%1\n\
	bne-	1b"
	SMP_ISYNC
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stwcx.	%0,0,%1\n\
	bne-	1b"
	SMP_ISYNC
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
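/*
 * Illustrative use (editorial sketch, not from the kernel sources): a
 * counting-semaphore style "try-down" that never pushes the stored
 * count below zero; sem_count is a hypothetical atomic_t.
 *
 *	if (atomic_dec_if_positive (&sem_count) < 0) {
 *		// count was already 0: nothing was acquired and the
 *		// stored value is left unchanged
 *	}
 */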
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* _ASM_PPC_ATOMIC_H_ */

/***********************************************************************/

#else
#if defined(__i386__) || defined(__x86_64__)

#ifndef __ARCH_I386_ATOMIC__
#define __ARCH_I386_ATOMIC__

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

#ifdef CONFIG_SMP
#define SMP_LOCK "lock ; "
#else
#define SMP_LOCK ""
#endif

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }
/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_set(v,i)		(((v)->counter) = (i))
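/*
 * Editorial note: the "24 bits" caveat repeated in these comments is
 * not an x86 limitation.  It is the portable lower bound across all of
 * the implementations in this header: the sparc version further down
 * steals the low 8 bits of the word for a spin-lock byte, leaving only
 * a signed 24-bit counter.
 */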
/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.  Note that the guaranteed useful range
 * of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__(
		SMP_LOCK "addl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}

/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__(
		SMP_LOCK "subl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}
/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		SMP_LOCK "subl %2,%0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
		SMP_LOCK "incl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__(
		SMP_LOCK "decl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}
/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		SMP_LOCK "decl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}
/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		SMP_LOCK "incl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}
/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		SMP_LOCK "addl %2,%0; sets %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}
/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
	__asm__ __volatile__(SMP_LOCK "andl %0,%1" \
	: : "r" (~(mask)), "m" (*addr) : "memory")

#define atomic_set_mask(mask, addr) \
	__asm__ __volatile__(SMP_LOCK "orl %0,%1" \
	: : "r" (mask), "m" (*addr) : "memory")
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* __ARCH_I386_ATOMIC__ */

/***********************************************************************/

#else /* !PPC && !i386 */

#ifdef __sparc__
/* atomic.h: These still suck, but the I-cache hit rate is higher.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
 */

#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__

typedef struct { volatile int counter; } atomic_t;

#ifndef CONFIG_SMP

#define ATOMIC_INIT(i)	{ (i) }
#define atomic_read(v)		((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))
#else

/* We do the bulk of the actual work out of line in two common
 * routines in assembler, see arch/sparc/lib/atomic.S for the
 * "fn_and_return" type stuff.
 *
 * For SMP the trick is you embed the spin lock byte within
 * the word, use the low byte so signedness is easily retained
 * via a quick arithmetic shift.  It looks like this:
 *
 *	----------------------------------------
 *	| signed 24-bit counter value |  lock  |  atomic_t
 *	----------------------------------------
 *	 31                          8 7      0
 */

#define ATOMIC_INIT(i)	{ ((i) << 8) }

static __inline__ int atomic_read(atomic_t *v)
{
	int ret = v->counter;

	/* a set low byte means an update is in flight; wait it out */
	while (ret & 0xff)
		ret = v->counter;

	return ret >> 8;
}

#define atomic_set(v, i)	(((v)->counter) = ((i) << 8))

#endif /* CONFIG_SMP */
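/*
 * Worked example (editorial): with the layout above, a counter value
 * of 5 is stored as 0x00000500.  atomic_read() spins until the lock
 * byte is clear, then recovers 5 as 0x00000500 >> 8; because the shift
 * is arithmetic on a signed int, negative counter values survive too.
 */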
static __inline__ int __atomic_add(int i, atomic_t *v)
{
	register volatile int *ptr asm("g1");
	register int increment asm("g2");

	ptr = &v->counter;
	increment = i;

	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___atomic_add\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (increment)
	: "0" (increment), "r" (ptr)
	: "g3", "g4", "g7", "memory", "cc");

	return increment;
}
static __inline__ int __atomic_sub(int i, atomic_t *v)
{
	register volatile int *ptr asm("g1");
	register int increment asm("g2");

	ptr = &v->counter;
	increment = i;

	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___atomic_sub\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (increment)
	: "0" (increment), "r" (ptr)
	: "g3", "g4", "g7", "memory", "cc");

	return increment;
}
#define atomic_add(i, v)	((void)__atomic_add((i), (v)))
#define atomic_sub(i, v)	((void)__atomic_sub((i), (v)))

#define atomic_dec_return(v)	__atomic_sub(1, (v))
#define atomic_inc_return(v)	__atomic_add(1, (v))

#define atomic_sub_and_test(i, v)	(__atomic_sub((i), (v)) == 0)
#define atomic_dec_and_test(v)		(__atomic_sub(1, (v)) == 0)

#define atomic_inc(v)	((void)__atomic_add(1, (v)))
#define atomic_dec(v)	((void)__atomic_sub(1, (v)))

#define atomic_add_negative(i, v)	(__atomic_add((i), (v)) < 0)
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* !(__ARCH_SPARC_ATOMIC__) */
/***********************************************************************/

#else

#ifdef __ia64__

#ifndef __ARCH_IA64_ATOMIC__
#define __ARCH_IA64_ATOMIC__

typedef volatile int atomic_t;

static __inline__ int
atomic_read (const atomic_t * a)
{
	return *a;
}

static __inline__ void
atomic_set (atomic_t *a, int v)
{
	*a = v;
}

static __inline__ void
atomic_inc (atomic_t *v)
{
	int old, r;

	do {
		old = atomic_read(v);
		__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO" (old));
		__asm__ __volatile__ ("cmpxchg4.acq %0=[%1],%2,ar.ccv"
				      : "=r"(r) : "r"(v), "r"(old + 1)
				      : "memory");
	} while (r != old);
}

static __inline__ void
atomic_dec (atomic_t *v)
{
	int old, r;

	do {
		old = atomic_read(v);
		__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO" (old));
		__asm__ __volatile__ ("cmpxchg4.acq %0=[%1],%2,ar.ccv"
				      : "=r"(r) : "r"(v), "r"(old - 1)
				      : "memory");
	} while (r != old);
}

static __inline__ int
atomic_dec_and_test (atomic_t *v)
{
	int old, r;

	do {
		old = atomic_read(v);
		__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO" (old));
		__asm__ __volatile__ ("cmpxchg4.acq %0=[%1],%2,ar.ccv"
				      : "=r"(r) : "r"(v), "r"(old - 1)
				      : "memory");
	} while (r != old);

	return old - 1 == 0;
}

#endif /* !(__ARCH_IA64_ATOMIC__) */
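/*
 * Editorial note: the three ia64 routines above share one
 * compare-and-swap retry idiom: load the current value, put it in
 * ar.ccv, then let cmpxchg4.acq store (old +/- 1) only if the word
 * still equals ar.ccv, returning the prior contents in r.  If another
 * CPU modified the word in between (r != old), the loop retries.
 */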
#else

#ifdef __alpha__

#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

/*
 * Counter is volatile to make sure gcc doesn't try to be clever
 * and move things around on us. We need to use _exactly_ the address
 * the user gave us, not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		((v)->counter = (i))
/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}
/*
 * Same as above, but return the result value.
 */
static __inline__ long atomic_add_return(int i, atomic_t * v)
{
	long temp, result;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%3,%0\n"
	"	mov %0,%2\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	return result;
}

static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
	long temp, result;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%3,%0\n"
	"	mov %0,%2\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	return result;
}
#define atomic_dec_return(v)	atomic_sub_return(1,(v))
#define atomic_inc_return(v)	atomic_add_return(1,(v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)

#define atomic_inc(v)	atomic_add(1,(v))
#define atomic_dec(v)	atomic_sub(1,(v))

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* _ALPHA_ATOMIC_H */
#else

#ifdef __s390__

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

/*
 *  include/asm-s390/atomic.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 * S390 uses 'Compare And Swap' for atomicity in an SMP environment.
 */

typedef struct { volatile int counter; } __attribute__ ((aligned (4))) atomic_t;
#define ATOMIC_INIT(i)	{ (i) }

#define atomic_eieio()	__asm__ __volatile__ ("BCR 15,0")

#define __CS_LOOP(old_val, new_val, ptr, op_val, op_string)		\
	__asm__ __volatile__("   l     %0,0(%2)\n"			\
			     "0: lr    %1,%0\n"				\
			     op_string "    %1,%3\n"			\
			     "   cs    %0,%1,0(%2)\n"			\
			     "   jl    0b"				\
			     : "=&d" (old_val), "=&d" (new_val)		\
			     : "a" (ptr), "d" (op_val) : "cc" );

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		(((v)->counter) = (i))
static __inline__ void atomic_add(int i, atomic_t *v)
{
	int old_val, new_val;
	__CS_LOOP(old_val, new_val, v, i, "ar");
}

static __inline__ int atomic_add_return (int i, atomic_t *v)
{
	int old_val, new_val;
	__CS_LOOP(old_val, new_val, v, i, "ar");
	return new_val;
}

static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
	int old_val, new_val;
	__CS_LOOP(old_val, new_val, v, i, "ar");
	return new_val < 0;
}

static __inline__ void atomic_sub(int i, atomic_t *v)
{
	int old_val, new_val;
	__CS_LOOP(old_val, new_val, v, i, "sr");
}
static __inline__ void atomic_inc(volatile atomic_t *v)
{
	int old_val, new_val;
	__CS_LOOP(old_val, new_val, v, 1, "ar");
}

static __inline__ int atomic_inc_return(volatile atomic_t *v)
{
	int old_val, new_val;
	__CS_LOOP(old_val, new_val, v, 1, "ar");
	return new_val;
}

static __inline__ int atomic_inc_and_test(volatile atomic_t *v)
{
	int old_val, new_val;
	__CS_LOOP(old_val, new_val, v, 1, "ar");
	return new_val == 0;
}

static __inline__ void atomic_dec(volatile atomic_t *v)
{
	int old_val, new_val;
	__CS_LOOP(old_val, new_val, v, 1, "sr");
}

static __inline__ int atomic_dec_return(volatile atomic_t *v)
{
	int old_val, new_val;
	__CS_LOOP(old_val, new_val, v, 1, "sr");
	return new_val;
}

static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
{
	int old_val, new_val;
	__CS_LOOP(old_val, new_val, v, 1, "sr");
	return new_val == 0;
}
static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
	int old_val, new_val;
	__CS_LOOP(old_val, new_val, v, ~mask, "nr");
}

static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *v)
{
	int old_val, new_val;
	__CS_LOOP(old_val, new_val, v, mask, "or");
}
/*
 * returns 0 if expected_oldval==value in *v ( swap was successful )
 * returns 1 if unsuccessful.
 */
static __inline__ int
atomic_compare_and_swap(int expected_oldval, int new_val, atomic_t *v)
{
	int retval;

	__asm__ __volatile__(
		"  lr   %0,%2\n"
		"  cs   %0,%3,0(%1)\n"
		"  ipm  %0\n"
		"  srl  %0,28\n"
		: "=&d" (retval)
		: "a" (v), "d" (expected_oldval), "d" (new_val)
		: "memory", "cc");
	return retval;
}
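/*
 * Illustrative use of the primitive above (editorial sketch): an
 * arbitrary atomic read-modify-write built from the CAS, here doubling
 * the counter.  The loop retries while the swap reports failure (1)
 * and exits once it succeeds (0).
 *
 *	int old;
 *	do {
 *		old = atomic_read (v);
 *	} while (atomic_compare_and_swap (old, old * 2, v));
 */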
/*
 * Spin till *v = expected_oldval then swap with newval.
 */
static __inline__ void
atomic_compare_and_swap_spin(int expected_oldval, int new_val, atomic_t *v)
{
	__asm__ __volatile__(
		"0: lr   0,%1\n"
		"   cs   0,%2,0(%0)\n"
		"   jl   0b\n"
		: : "a" (v), "d" (expected_oldval), "d" (new_val)
		: "cc", "0", "memory");
}
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* __ARCH_S390_ATOMIC__ */
#else

#ifdef __mips__

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 2000 by Ralf Baechle
 */
#ifndef __ASM_ATOMIC_H
#define __ASM_ATOMIC_H
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }

/*
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_read(v)	((v)->counter)

/*
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_set(v,i)	((v)->counter = (i))
/*
 * ... while for MIPS II and better we can use ll/sc instructions.  This
 * implementation is SMP safe ...
 */
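/*
 * Editorial note: ll (load-linked) and sc (store-conditional) form an
 * optimistic pair: sc writes the word back only if no other CPU has
 * touched it since the ll, and leaves 0 in its source register on
 * failure, in which case the beqz branches back to retry the sequence.
 */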
/*
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.  Note that the guaranteed useful range
 * of an atomic_t is only 24 bits.
 */
extern __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;

	__asm__ __volatile__(
		".set push                # atomic_add\n"
		".set mips2               \n"
		"1:   ll      %0, %1      \n"
		"     addu    %0, %2      \n"
		"     sc      %0, %1      \n"
		"     beqz    %0, 1b      \n"
		".set pop                 \n"
		: "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter));
}
/*
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
extern __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long temp;

	__asm__ __volatile__(
		".set push                # atomic_sub\n"
		".set mips2               \n"
		"1:   ll      %0, %1      \n"
		"     subu    %0, %2      \n"
		"     sc      %0, %1      \n"
		"     beqz    %0, 1b      \n"
		".set pop                 \n"
		: "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter));
}
/*
 * Same as above, but return the result value.
 */
extern __inline__ int atomic_add_return(int i, atomic_t * v)
{
	unsigned long temp, result;

	__asm__ __volatile__(
		".set push               # atomic_add_return\n"
		".set noreorder                             \n"
		".set mips2                                 \n"
		"1:   ll      %1, %2                        \n"
		"     addu    %0, %1, %3                    \n"
		"     sc      %0, %2                        \n"
		"     beqz    %0, 1b                        \n"
		"     addu    %0, %1, %3                    \n"
		".set pop                                   \n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");

	return result;
}
extern __inline__ int atomic_sub_return(int i, atomic_t * v)
{
	unsigned long temp, result;

	__asm__ __volatile__(
		".set push               # atomic_sub_return\n"
		".set noreorder                             \n"
		".set mips2                                 \n"
		"1:   ll      %1, %2                        \n"
		"     subu    %0, %1, %3                    \n"
		"     sc      %0, %2                        \n"
		"     beqz    %0, 1b                        \n"
		"     subu    %0, %1, %3                    \n"
		".set pop                                   \n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");

	return result;
}
#define atomic_dec_return(v)	atomic_sub_return(1,(v))
#define atomic_inc_return(v)	atomic_add_return(1,(v))

/*
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)
/*
 * atomic_dec_and_test - decrement by 1 and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_dec_and_test(v)	(atomic_sub_return(1, (v)) == 0)

/*
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_inc(v)	atomic_add(1,(v))

/*
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_dec(v)	atomic_sub(1,(v))
/*
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 *
 * Currently not implemented for MIPS.
 */

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* __ASM_ATOMIC_H */
#else

#if defined(__m68k__)

#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

typedef struct { int counter; } atomic_t;
#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))
static __inline__ void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__("addl %1,%0" : "=m" (*v) : "id" (i), "0" (*v));
}

static __inline__ void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__("subl %1,%0" : "=m" (*v) : "id" (i), "0" (*v));
}

static __inline__ void atomic_inc(volatile atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "=m" (*v) : "0" (*v));
}

static __inline__ void atomic_dec(volatile atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "=m" (*v) : "0" (*v));
}

static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "=m" (*v) : "1" (*v));
	return c != 0;
}
#define atomic_clear_mask(mask, v) \
	__asm__ __volatile__("andl %1,%0" : "=m" (*v) : "id" (~(mask)), "0" (*v))

#define atomic_set_mask(mask, v) \
	__asm__ __volatile__("orl %1,%0" : "=m" (*v) : "id" (mask), "0" (*v))

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* __ARCH_M68K_ATOMIC__ */
#else

#warning libs/pbd has no implementation of strictly atomic operations for your hardware.

#define __NO_STRICT_ATOMIC
#ifdef __NO_STRICT_ATOMIC

/*
 * Because the implementations from the kernel (where all these come
 * from) use cli and spinlocks for hppa and arm...
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )

#define atomic_read(v)	((v)->counter)
#define atomic_set(v,i)	((v)->counter = (i))

static __inline__ void atomic_inc(atomic_t *v)
{
	v->counter++;
}

static __inline__ void atomic_dec(atomic_t *v)
{
	v->counter--;
}

static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	v->counter--;
	return v->counter == 0;
}

static __inline__ int atomic_inc_and_test(atomic_t *v)
{
	v->counter++;
	return v->counter == 0;
}

# endif /* __NO_STRICT_ATOMIC */

#endif /* m68k */
#endif /* mips */
#endif /* s390 */
#endif /* alpha */
#endif /* ia64 */
#endif /* sparc */
#endif /* i386 */
#endif /* ppc */

#endif /* __libpbd_atomic_h__ */