summaryrefslogtreecommitdiff
path: root/kernel/spinlock.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/spinlock.c')
-rw-r--r--kernel/spinlock.c11
1 file changed, 8 insertions, 3 deletions
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 9840302..9603578 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -25,18 +25,23 @@ acquire(struct spinlock *lk)
if(holding(lk))
panic("acquire");
+ // TODO This depends on a macro that is only available in the Atomic extension that is
+ // not being implemented. Fix this.
+ // When NCPU is set to 1 (as is currently the case), disabling this shouldn't make a difference.
+
// On RISC-V, sync_lock_test_and_set turns into an atomic swap:
// a5 = 1
// s1 = &lk->locked
// amoswap.w.aq a5, a5, (s1)
- while(__sync_lock_test_and_set(&lk->locked, 1) != 0)
- ;
+ while (lk->locked == 1);
+ lk->locked = 1;
+ /* while(__sync_lock_test_and_set(&lk->locked, 1) != 0); */
// Tell the C compiler and the processor to not move loads or stores
// past this point, to ensure that the critical section's memory
// references happen strictly after the lock is acquired.
// On RISC-V, this emits a fence instruction.
- __sync_synchronize();
+ /* __sync_synchronize(); */
// Record info about lock acquisition for holding() and debugging.
lk->cpu = mycpu();