/* Copyright 2018 Canaan Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef _BSP_ATOMIC_H
#define _BSP_ATOMIC_H

#ifdef __cplusplus
extern "C" {
#endif

#define SPINLOCK_INIT \
    {                 \
        0             \
    }

#define CORELOCK_INIT          \
    {                          \
        .lock = SPINLOCK_INIT, \
        .count = 0,            \
        .core = -1             \
    }

/* Definition of the memory barrier macro */
#define mb()                                \
    do                                      \
    {                                       \
        asm volatile("fence" ::: "memory"); \
    } while(0)

#define atomic_set(ptr, val) (*(volatile typeof(*(ptr)) *)(ptr) = (val))
#define atomic_read(ptr) (*(volatile typeof(*(ptr)) *)(ptr))

#ifndef __riscv_atomic
#error "atomic extension is required."
#endif
#define atomic_add(ptr, inc) __sync_fetch_and_add(ptr, inc)
#define atomic_or(ptr, inc) __sync_fetch_and_or(ptr, inc)
#define atomic_swap(ptr, swp) __sync_lock_test_and_set(ptr, swp)
#define atomic_cas(ptr, cmp, swp) __sync_val_compare_and_swap(ptr, cmp, swp)
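
/*
 * Illustrative sketch (not part of this BSP's API): building a lock-free
 * increment out of atomic_read()/atomic_cas(). The function name and the
 * `value` parameter are hypothetical, introduced only for this example.
 * __sync_val_compare_and_swap returns the previous value, so the swap
 * succeeded exactly when the return value equals the expected old value.
 *
 *     static int lockfree_increment(int *value)
 *     {
 *         int old;
 *         do
 *         {
 *             old = atomic_read(value);
 *         } while(atomic_cas(value, old, old + 1) != old);
 *         return old + 1;
 *     }
 */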

/* Simple test-and-set spinlock */
typedef struct _spinlock
{
    int lock;
} spinlock_t;

/* Counting semaphore guarded by a spinlock; waiting tracks the number of blocked callers */
typedef struct _semaphore
{
    spinlock_t lock;
    int count;
    int waiting;
} semaphore_t;

/* Recursive per-core lock: core is the owning hart ID, count the recursion depth */
typedef struct _corelock
{
    spinlock_t lock;
    int count;
    int core;
} corelock_t;

/* Try to take the lock; returns 0 on success, nonzero if it is already held */
static inline int spinlock_trylock(spinlock_t *lock)
{
    int res = atomic_swap(&lock->lock, -1);
    /* Use memory barrier to keep coherency */
    mb();
    return res;
}

static inline void spinlock_lock(spinlock_t *lock)
{
    while(spinlock_trylock(lock))
        ;
}

static inline void spinlock_unlock(spinlock_t *lock)
{
    /* Use memory barrier to keep coherency */
    mb();
    atomic_set(&lock->lock, 0);
    asm volatile("nop");
}
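
/*
 * Illustrative sketch (not part of this BSP's API): protecting a shared
 * counter with a spinlock. example_lock and shared_counter are
 * hypothetical names introduced only for this example.
 *
 *     static spinlock_t example_lock = SPINLOCK_INIT;
 *     static int shared_counter = 0;
 *
 *     static void increment_counter(void)
 *     {
 *         spinlock_lock(&example_lock);
 *         shared_counter++;
 *         spinlock_unlock(&example_lock);
 *     }
 */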

/* Raise the semaphore count by i */
static inline void semaphore_signal(semaphore_t *semaphore, int i)
{
    spinlock_lock(&(semaphore->lock));
    semaphore->count += i;
    spinlock_unlock(&(semaphore->lock));
}

/* Busy-wait until the count reaches i, then consume i units */
static inline void semaphore_wait(semaphore_t *semaphore, int i)
{
    atomic_add(&(semaphore->waiting), 1);
    while(1)
    {
        spinlock_lock(&(semaphore->lock));
        if(semaphore->count >= i)
        {
            semaphore->count -= i;
            atomic_add(&(semaphore->waiting), -1);
            spinlock_unlock(&(semaphore->lock));
            break;
        }
        spinlock_unlock(&(semaphore->lock));
    }
}

static inline int semaphore_count(semaphore_t *semaphore)
{
    int res = 0;

    spinlock_lock(&(semaphore->lock));
    res = semaphore->count;
    spinlock_unlock(&(semaphore->lock));
    return res;
}

static inline int semaphore_waiting(semaphore_t *semaphore)
{
    return atomic_read(&(semaphore->waiting));
}
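
/*
 * Illustrative sketch (not part of this BSP's API): handing one unit of
 * work from a producer to a consumer. work_ready is a hypothetical
 * semaphore introduced only for this example. Note that semaphore_wait()
 * spins rather than sleeps, so the waiting core burns cycles until
 * enough count is available.
 *
 *     static semaphore_t work_ready = {SPINLOCK_INIT, 0, 0};
 *
 *     static void producer(void)
 *     {
 *         semaphore_signal(&work_ready, 1);
 *     }
 *
 *     static void consumer(void)
 *     {
 *         semaphore_wait(&work_ready, 1);
 *     }
 */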

static inline int corelock_trylock(corelock_t *lock)
{
    int res = 0;
    unsigned long core;

    /* Read the current hart (core) ID */
    asm volatile("csrr %0, mhartid;"
                 : "=r"(core));
    if(spinlock_trylock(&lock->lock))
    {
        return -1;
    }

    if(lock->count == 0)
    {
        /* First acquisition of this lock */
        lock->count++;
        lock->core = core;
        res = 0;
    } else if(lock->core == core)
    {
        /* Same core takes the lock again */
        lock->count++;
        res = 0;
    } else
    {
        /* Lock is held by another core */
        res = -1;
    }
    spinlock_unlock(&lock->lock);

    return res;
}

static inline void corelock_lock(corelock_t *lock)
{
    unsigned long core;

    asm volatile("csrr %0, mhartid;"
                 : "=r"(core));
    spinlock_lock(&lock->lock);

    if(lock->count == 0)
    {
        /* First acquisition of this lock */
        lock->count++;
        lock->core = core;
    } else if(lock->core == core)
    {
        /* Same core takes the lock again */
        lock->count++;
    } else
    {
        /* Lock is held by another core: drop the spinlock and wait */
        spinlock_unlock(&lock->lock);

        do
        {
            while(atomic_read(&lock->count))
                ;
        } while(corelock_trylock(lock));
        return;
    }
    spinlock_unlock(&lock->lock);
}

static inline void corelock_unlock(corelock_t *lock)
{
    unsigned long core;

    asm volatile("csrr %0, mhartid;"
                 : "=r"(core));
    spinlock_lock(&lock->lock);

    if(lock->core == core)
    {
        /* Same core releases the lock */
        lock->count--;
        if(lock->count <= 0)
        {
            lock->core = -1;
            lock->count = 0;
        }
    } else
    {
        /* Releasing a lock held by another core is a fatal error:
         * raise syscall 93 (SYS_exit on RISC-V) to abort */
        spinlock_unlock(&lock->lock);

        register unsigned long a7 asm("a7") = 93;
        register unsigned long a0 asm("a0") = 0;
        register unsigned long a1 asm("a1") = 0;
        register unsigned long a2 asm("a2") = 0;

        asm volatile("scall"
                     : "+r"(a0)
                     : "r"(a1), "r"(a2), "r"(a7));
        /* The spinlock was already released above; do not release it again */
        return;
    }
    spinlock_unlock(&lock->lock);
}
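
/*
 * Illustrative sketch (not part of this BSP's API): unlike the plain
 * spinlock, a corelock may be taken recursively by the same hart, so
 * outer_operation() below can call inner_operation() without deadlocking
 * while it already holds the lock. device_lock and both function names
 * are hypothetical, introduced only for this example.
 *
 *     static corelock_t device_lock = CORELOCK_INIT;
 *
 *     static void inner_operation(void)
 *     {
 *         corelock_lock(&device_lock);
 *         corelock_unlock(&device_lock);
 *     }
 *
 *     static void outer_operation(void)
 *     {
 *         corelock_lock(&device_lock);
 *         inner_operation();
 *         corelock_unlock(&device_lock);
 *     }
 */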

#ifdef __cplusplus
}
#endif

#endif /* _BSP_ATOMIC_H */