From e5c1fdf12929db4af6225c19b91710f7c4420e58 Mon Sep 17 00:00:00 2001
From: Balazs Gerofi
Date: Mon, 18 Mar 2019 11:29:10 +0900
Subject: [PATCH] MCS lock: make implementation arch independent

Change-Id: Ie5b2182555bbe1a11a005988db069d4b38f85401
---
 arch/arm64/kernel/include/arch-lock.h  |  84 -------------------
 arch/x86_64/kernel/include/arch-lock.h |  87 --------------------
 lib/include/ihk/lock.h                 | 107 +++++++++++++++++++++++++
 3 files changed, 107 insertions(+), 171 deletions(-)

diff --git a/arch/arm64/kernel/include/arch-lock.h b/arch/arm64/kernel/include/arch-lock.h
index b464fd8b..0086671f 100644
--- a/arch/arm64/kernel/include/arch-lock.h
+++ b/arch/arm64/kernel/include/arch-lock.h
@@ -255,90 +255,6 @@ static void __ihk_mc_spinlock_unlock(ihk_spinlock_t *lock, unsigned long flags)
 	cpu_restore_interrupt(flags);
 }
 
-/* An implementation of the Mellor-Crummey Scott (MCS) lock */
-typedef struct mcs_lock_node {
-	unsigned long locked;
-	struct mcs_lock_node *next;
-	unsigned long irqsave;
-#ifndef ENABLE_UBSAN
-} __aligned(64) mcs_lock_node_t;
-#else
-} mcs_lock_node_t;
-#endif
-
-typedef mcs_lock_node_t mcs_lock_t;
-
-static void mcs_lock_init(struct mcs_lock_node *node)
-{
-	node->locked = 0;
-	node->next = NULL;
-}
-
-static void __mcs_lock_lock(struct mcs_lock_node *lock,
-		struct mcs_lock_node *node)
-{
-	struct mcs_lock_node *pred;
-
-	node->next = NULL;
-	node->locked = 0;
-	pred = xchg8(&(lock->next), node);
-
-	if (pred) {
-		node->locked = 1;
-		pred->next = node;
-		while (node->locked != 0) {
-			cpu_pause();
-		}
-	}
-}
-
-static void __mcs_lock_unlock(struct mcs_lock_node *lock,
-		struct mcs_lock_node *node)
-{
-	if (node->next == NULL) {
-		struct mcs_lock_node *old = atomic_cmpxchg8(&(lock->next), node, 0);
-
-		if (old == node) {
-			return;
-		}
-
-		while (node->next == NULL) {
-			cpu_pause();
-		}
-	}
-
-	node->next->locked = 0;
-}
-
-static void mcs_lock_lock_noirq(struct mcs_lock_node *lock,
-		struct mcs_lock_node *node)
-{
-	preempt_disable();
-	__mcs_lock_lock(lock, node);
-}
-
-static void mcs_lock_unlock_noirq(struct mcs_lock_node *lock,
-		struct mcs_lock_node *node)
-{
-	__mcs_lock_unlock(lock, node);
-	preempt_enable();
-}
-
-static void mcs_lock_lock(struct mcs_lock_node *lock,
-		struct mcs_lock_node *node)
-{
-	node->irqsave = cpu_disable_interrupt_save();
-	mcs_lock_lock_noirq(lock, node);
-}
-
-static void mcs_lock_unlock(struct mcs_lock_node *lock,
-		struct mcs_lock_node *node)
-{
-	mcs_lock_unlock_noirq(lock, node);
-	cpu_restore_interrupt(node->irqsave);
-}
-
-
 #define SPINLOCK_IN_MCS_RWLOCK
 
 // reader/writer lock
diff --git a/arch/x86_64/kernel/include/arch-lock.h b/arch/x86_64/kernel/include/arch-lock.h
index e464fa19..dbc93ab0 100644
--- a/arch/x86_64/kernel/include/arch-lock.h
+++ b/arch/x86_64/kernel/include/arch-lock.h
@@ -183,93 +183,6 @@ static inline void __ihk_mc_spinlock_unlock(ihk_spinlock_t *lock,
 	cpu_restore_interrupt(flags);
 }
 
-/* An implementation of the Mellor-Crummey Scott (MCS) lock */
-typedef struct mcs_lock_node {
-	unsigned long locked;
-	struct mcs_lock_node *next;
-	unsigned long irqsave;
-#ifndef ENABLE_UBSAN
-} __aligned(64) mcs_lock_node_t;
-#else
-} mcs_lock_node_t;
-#endif
-
-typedef mcs_lock_node_t mcs_lock_t;
-
-static inline void mcs_lock_init(struct mcs_lock_node *node)
-{
-	node->locked = 0;
-	node->next = NULL;
-}
-
-static inline void __mcs_lock_lock(struct mcs_lock_node *lock,
-		struct mcs_lock_node *node)
-{
-	struct mcs_lock_node *pred;
-
-	node->next = NULL;
-	node->locked = 0;
-	pred = (struct mcs_lock_node *)xchg8((unsigned long *)&lock->next,
-					(unsigned long)node);
-
-	if (pred) {
-		node->locked = 1;
-		pred->next = node;
-		while (node->locked != 0) {
-			cpu_pause();
-		}
-	}
-}
-
-static inline void __mcs_lock_unlock(struct mcs_lock_node *lock,
-		struct mcs_lock_node *node)
-{
-	if (node->next == NULL) {
-		struct mcs_lock_node *old = (struct mcs_lock_node *)
-			atomic_cmpxchg8((unsigned long *)&lock->next,
-				(unsigned long)node, (unsigned long)0);
-
-		if (old == node) {
-			return;
-		}
-
-		while (node->next == NULL) {
-			cpu_pause();
-		}
-	}
-
-	node->next->locked = 0;
-}
-
-static inline void mcs_lock_lock_noirq(struct mcs_lock_node *lock,
-		struct mcs_lock_node *node)
-{
-	preempt_disable();
-	__mcs_lock_lock(lock, node);
-}
-
-static inline void mcs_lock_unlock_noirq(struct mcs_lock_node *lock,
-		struct mcs_lock_node *node)
-{
-	__mcs_lock_unlock(lock, node);
-	preempt_enable();
-}
-
-static inline void mcs_lock_lock(struct mcs_lock_node *lock,
-		struct mcs_lock_node *node)
-{
-	node->irqsave = cpu_disable_interrupt_save();
-	mcs_lock_lock_noirq(lock, node);
-}
-
-static inline void mcs_lock_unlock(struct mcs_lock_node *lock,
-		struct mcs_lock_node *node)
-{
-	mcs_lock_unlock_noirq(lock, node);
-	cpu_restore_interrupt(node->irqsave);
-}
-
-
 #define SPINLOCK_IN_MCS_RWLOCK
 
 // reader/writer lock
diff --git a/lib/include/ihk/lock.h b/lib/include/ihk/lock.h
index 47ca8acb..4a25e085 100644
--- a/lib/include/ihk/lock.h
+++ b/lib/include/ihk/lock.h
@@ -15,6 +15,113 @@
 
 #include 
 
+#ifndef ARCH_MCS_LOCK
+/* An architecture independent implementation of the
+ * Mellor-Crummey Scott (MCS) lock */
+
+typedef struct mcs_lock_node {
+#ifndef SPIN_LOCK_IN_MCS
+	unsigned long locked;
+	struct mcs_lock_node *next;
+#endif
+	unsigned long irqsave;
+#ifdef SPIN_LOCK_IN_MCS
+	ihk_spinlock_t spinlock;
+#endif
+#ifndef ENABLE_UBSAN
+} __aligned(64) mcs_lock_node_t;
+#else
+} mcs_lock_node_t;
+#endif
+
+typedef mcs_lock_node_t mcs_lock_t;
+
+static void mcs_lock_init(struct mcs_lock_node *node)
+{
+#ifdef SPIN_LOCK_IN_MCS
+	ihk_mc_spinlock_init(&node->spinlock);
+#else
+	node->locked = 0;
+	node->next = NULL;
+#endif // SPIN_LOCK_IN_MCS
+}
+
+static void __mcs_lock_lock(struct mcs_lock_node *lock,
+		struct mcs_lock_node *node)
+{
+#ifdef SPIN_LOCK_IN_MCS
+	ihk_mc_spinlock_lock_noirq(&lock->spinlock);
+#else
+	struct mcs_lock_node *pred;
+
+	node->next = NULL;
+	node->locked = 0;
+	__atomic_exchange(&(lock->next), &node, &pred, __ATOMIC_SEQ_CST);
+
+	if (pred) {
+		node->locked = 1;
+		pred->next = node;
+		while (node->locked != 0) {
+			cpu_pause();
+		}
+	}
+#endif // SPIN_LOCK_IN_MCS
+}
+
+static void __mcs_lock_unlock(struct mcs_lock_node *lock,
+		struct mcs_lock_node *node)
+{
+#ifdef SPIN_LOCK_IN_MCS
+	ihk_mc_spinlock_unlock_noirq(&lock->spinlock);
+#else
+	if (node->next == NULL) {
+		struct mcs_lock_node *desired = NULL;
+		struct mcs_lock_node *expected = node;
+		if (__atomic_compare_exchange(&(lock->next), &expected, &desired, 0,
+				__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
+			return;
+		}
+
+		while (node->next == NULL) {
+			cpu_pause();
+		}
+	}
+
+	node->next->locked = 0;
+#endif // SPIN_LOCK_IN_MCS
+}
+
+static void mcs_lock_lock_noirq(struct mcs_lock_node *lock,
+		struct mcs_lock_node *node)
+{
+	preempt_disable();
+	__mcs_lock_lock(lock, node);
+}
+
+static void mcs_lock_unlock_noirq(struct mcs_lock_node *lock,
+		struct mcs_lock_node *node)
+{
+	__mcs_lock_unlock(lock, node);
+	preempt_enable();
+}
+
+static void mcs_lock_lock(struct mcs_lock_node *lock,
+		struct mcs_lock_node *node)
+{
+	node->irqsave = cpu_disable_interrupt_save();
+	mcs_lock_lock_noirq(lock, node);
+}
+
+static void mcs_lock_unlock(struct mcs_lock_node *lock,
+		struct mcs_lock_node *node)
+{
+	mcs_lock_unlock_noirq(lock, node);
+	cpu_restore_interrupt(node->irqsave);
+}
+#endif // ARCH_MCS_LOCK
+
+
+
 #ifndef IHK_STATIC_SPINLOCK_FUNCS
 void ihk_mc_spinlock_init(ihk_spinlock_t *);
 void ihk_mc_spinlock_lock(ihk_spinlock_t *, unsigned long *);
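
Usage sketch (not part of the patch): the generic code keeps the calling
convention of the removed arch headers, where the lock word and each
acquisition share the mcs_lock_node type; every acquirer supplies its own
node and must pass the same node to the matching unlock. A minimal caller
could look like the following, assuming the IHK kernel environment and the
<ihk/lock.h> include path; the lock name and helper function below are
illustrative only.

	/* Illustrative sketch only; names are hypothetical. */
	#include <ihk/lock.h>

	static mcs_lock_t runq_lock;		/* shared lock word, mcs_lock_init() once */

	void runq_touch(void)
	{
		mcs_lock_node_t node;		/* per-acquisition queue node, on the stack */

		/* saves IRQ state into node->irqsave, disables preemption,
		 * then queues behind any predecessor until it hands the lock over */
		mcs_lock_lock(&runq_lock, &node);

		/* ... critical section ... */

		/* passes ownership to node->next (or clears lock->next when no
		 * successor is queued), re-enables preemption, restores IRQ state */
		mcs_lock_unlock(&runq_lock, &node);
	}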