/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2.  This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#ifndef __ASM_OPENRISC_ATOMIC_H
#define __ASM_OPENRISC_ATOMIC_H

#include <linux/types.h>

/* Atomically perform op with v->counter and i */
#define ATOMIC_OP(op)							\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	int tmp;							\
									\
	__asm__ __volatile__(						\
		"1:	l.lwa	%0,0(%1)	\n"			\
		"	l." #op " %0,%0,%2	\n"			\
		"	l.swa	0(%1),%0	\n"			\
		"	l.bnf	1b		\n"			\
		"	 l.nop			\n"			\
		: "=&r"(tmp)						\
		: "r"(&v->counter), "r"(i)				\
		: "cc", "memory");					\
}

/* Atomically perform op with v->counter and i, return the result */
#define ATOMIC_OP_RETURN(op)						\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	int tmp;							\
									\
	__asm__ __volatile__(						\
		"1:	l.lwa	%0,0(%1)	\n"			\
		"	l." #op " %0,%0,%2	\n"			\
		"	l.swa	0(%1),%0	\n"			\
		"	l.bnf	1b		\n"			\
		"	 l.nop			\n"			\
		: "=&r"(tmp)						\
		: "r"(&v->counter), "r"(i)				\
		: "cc", "memory");					\
									\
	return tmp;							\
}

/* Atomically perform op with v->counter and i, return orig v->counter */
#define ATOMIC_FETCH_OP(op)						\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int tmp, old;							\
									\
	__asm__ __volatile__(						\
		"1:	l.lwa	%0,0(%2)	\n"			\
		"	l." #op " %1,%0,%3	\n"			\
		"	l.swa	0(%2),%1	\n"			\
		"	l.bnf	1b		\n"			\
		"	 l.nop			\n"			\
		: "=&r"(old), "=&r"(tmp)				\
		: "r"(&v->counter), "r"(i)				\
		: "cc", "memory");					\
									\
	return old;							\
}

ATOMIC_OP_RETURN(add)
ATOMIC_OP_RETURN(sub)

ATOMIC_FETCH_OP(add)
ATOMIC_FETCH_OP(sub)
ATOMIC_FETCH_OP(and)
ATOMIC_FETCH_OP(or)
ATOMIC_FETCH_OP(xor)

ATOMIC_OP(add)
ATOMIC_OP(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define arch_atomic_add_return	arch_atomic_add_return
#define arch_atomic_sub_return	arch_atomic_sub_return
#define arch_atomic_fetch_add	arch_atomic_fetch_add
#define arch_atomic_fetch_sub	arch_atomic_fetch_sub
#define arch_atomic_fetch_and	arch_atomic_fetch_and
#define arch_atomic_fetch_or	arch_atomic_fetch_or
#define arch_atomic_fetch_xor	arch_atomic_fetch_xor
#define arch_atomic_add		arch_atomic_add
#define arch_atomic_sub		arch_atomic_sub
#define arch_atomic_and		arch_atomic_and
#define arch_atomic_or		arch_atomic_or
#define arch_atomic_xor		arch_atomic_xor

/*
 * Atomically add a to v->counter as long as v is not u.
 * Returns the original value at v->counter.
 *
 * This is often used through atomic_inc_not_zero()
 */
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int old, tmp;

	__asm__ __volatile__(
		"1:	l.lwa %0, 0(%2)		\n"
		"	l.sfeq %0, %4		\n"
		"	l.bf 2f			\n"
		"	 l.add %1, %0, %3	\n"
		"	l.swa 0(%2), %1		\n"
		"	l.bnf 1b		\n"
		"	 l.nop			\n"
		"2:				\n"
		: "=&r"(old), "=&r" (tmp)
		: "r"(&v->counter), "r"(a), "r"(u)
		: "cc", "memory");

	return old;
}
#define arch_atomic_fetch_add_unless	arch_atomic_fetch_add_unless

#define arch_atomic_read(v)		READ_ONCE((v)->counter)
#define arch_atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))

#include <asm/cmpxchg.h>

#endif /* __ASM_OPENRISC_ATOMIC_H */
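
/*
 * Usage sketch (an illustrative addition, not part of the upstream header):
 * every routine above is a load-linked/store-conditional retry loop --
 * l.lwa load-links v->counter, l.swa conditionally stores the new value,
 * and l.bnf branches back to label 1: when the store fails because another
 * CPU modified the word in between.  A caller might look like:
 *
 *	static atomic_t refs = ATOMIC_INIT(1);
 *
 *	arch_atomic_add(1, &refs);		// refs++; no return value
 *	int old = arch_atomic_fetch_sub(1, &refs);
 *						// value before the decrement
 *	old = arch_atomic_fetch_add_unless(&refs, 1, 0);
 *						// skip the add if refs == 0
 */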