diff options
| author | Aditya Naik | 2021-01-31 06:05:55 -0500 |
|---|---|---|
| committer | Aditya Naik | 2021-01-31 06:05:55 -0500 |
| commit | 951e300c10613c8500705d7b54467169be53a7f5 (patch) | |
| tree | 0ea92fc4b6f75e6c6bb96e37bf26ffe371135f08 /kernel/spinlock.c | |
| parent | 077323a8f0b3440fcc3d082096a2d83fe5461d70 (diff) | |
With only RISCV64-I extension
Diffstat (limited to 'kernel/spinlock.c')
| -rw-r--r-- | kernel/spinlock.c | 11 |
1 file changed, 8 insertions, 3 deletions
```diff
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 9840302..9603578 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -25,18 +25,23 @@ acquire(struct spinlock *lk)
   if(holding(lk))
     panic("acquire");
 
+  // TODO This depends on a macro that is only available in the Atomic extension that is
+  // not being implemented. Fix this.
+  // When NCPU is set to 1 (as is currently the case), disabling this shouldn't make a difference.
+
   // On RISC-V, sync_lock_test_and_set turns into an atomic swap:
   //   a5 = 1
   //   s1 = &lk->locked
   //   amoswap.w.aq a5, a5, (s1)
-  while(__sync_lock_test_and_set(&lk->locked, 1) != 0)
-    ;
+  while (lk->locked == 1);
+  lk->locked = 1;
+  /* while(__sync_lock_test_and_set(&lk->locked, 1) != 0); */
 
   // Tell the C compiler and the processor to not move loads or stores
   // past this point, to ensure that the critical section's memory
   // references happen strictly after the lock is acquired.
   // On RISC-V, this emits a fence instruction.
-  __sync_synchronize();
+  /* __sync_synchronize(); */
 
   // Record info about lock acquisition for holding() and debugging.
   lk->cpu = mycpu();
```
