CK_SPINLOCK(3)

NAME

ck_spinlock_init, ck_spinlock_lock, ck_spinlock_unlock, ck_spinlock_locked, ck_spinlock_trylock, ck_spinlock_anderson_init, ck_spinlock_anderson_locked - spinlock implementations

LIBRARY

Concurrency Kit (libck, -lck)

SYNOPSIS

#include <ck_spinlock.h>

ck_spinlock_t spinlock = CK_SPINLOCK_INITIALIZER;

void ck_spinlock_init(ck_spinlock_t *lock);
void ck_spinlock_lock(ck_spinlock_t *lock);
void ck_spinlock_unlock(ck_spinlock_t *lock);
bool ck_spinlock_locked(ck_spinlock_t *lock);
bool ck_spinlock_trylock(ck_spinlock_t *lock);

void ck_spinlock_anderson_init(ck_spinlock_anderson_t *lock, ck_spinlock_anderson_thread_t *slots, unsigned int count);
bool ck_spinlock_anderson_locked(ck_spinlock_anderson_t *lock);
void ck_spinlock_anderson_lock(ck_spinlock_anderson_t *lock, ck_spinlock_anderson_thread_t **slot);
void ck_spinlock_anderson_unlock(ck_spinlock_anderson_t *lock, ck_spinlock_anderson_thread_t *slot);

ck_spinlock_cas_t spinlock = CK_SPINLOCK_CAS_INITIALIZER;

void ck_spinlock_cas_init(ck_spinlock_cas_t *lock);
bool ck_spinlock_cas_locked(ck_spinlock_cas_t *lock);
void ck_spinlock_cas_lock(ck_spinlock_cas_t *lock);
void ck_spinlock_cas_lock_eb(ck_spinlock_cas_t *lock);
bool ck_spinlock_cas_trylock(ck_spinlock_cas_t *lock);
void ck_spinlock_cas_unlock(ck_spinlock_cas_t *lock);

void ck_spinlock_clh_init(ck_spinlock_clh_t **lock, ck_spinlock_clh_t *unowned);
bool ck_spinlock_clh_locked(ck_spinlock_clh_t **lock);
void ck_spinlock_clh_lock(ck_spinlock_clh_t **lock, ck_spinlock_clh_t *node);
void ck_spinlock_clh_unlock(ck_spinlock_clh_t **node);

ck_spinlock_dec_t spinlock = CK_SPINLOCK_DEC_INITIALIZER;

void ck_spinlock_dec_init(ck_spinlock_dec_t *lock);
bool ck_spinlock_dec_locked(ck_spinlock_dec_t *lock);
void ck_spinlock_dec_lock(ck_spinlock_dec_t *lock);
void ck_spinlock_dec_lock_eb(ck_spinlock_dec_t *lock);
bool ck_spinlock_dec_trylock(ck_spinlock_dec_t *lock);
void ck_spinlock_dec_unlock(ck_spinlock_dec_t *lock);

ck_spinlock_fas_t spinlock = CK_SPINLOCK_FAS_INITIALIZER;

void ck_spinlock_fas_init(ck_spinlock_fas_t *lock);
void ck_spinlock_fas_lock(ck_spinlock_fas_t *lock);
void ck_spinlock_fas_lock_eb(ck_spinlock_fas_t *lock);
bool ck_spinlock_fas_locked(ck_spinlock_fas_t *lock);
bool ck_spinlock_fas_trylock(ck_spinlock_fas_t *lock);
void ck_spinlock_fas_unlock(ck_spinlock_fas_t *lock);

void ck_spinlock_hclh_init(ck_spinlock_hclh_t **lock, ck_spinlock_hclh_t *unowned);
bool ck_spinlock_hclh_locked(ck_spinlock_hclh_t **lock);
void ck_spinlock_hclh_lock(ck_spinlock_hclh_t **lock, ck_spinlock_hclh_t *node);
void ck_spinlock_hclh_unlock(ck_spinlock_hclh_t **node);

ck_spinlock_mcs_t spinlock = CK_SPINLOCK_MCS_INITIALIZER;

void ck_spinlock_mcs_init(ck_spinlock_mcs_t **lock);
bool ck_spinlock_mcs_locked(ck_spinlock_mcs_t **lock);
void ck_spinlock_mcs_lock(ck_spinlock_mcs_t **lock, ck_spinlock_mcs_t *node);
bool ck_spinlock_mcs_trylock(ck_spinlock_mcs_t **lock, ck_spinlock_mcs_t *node);
void ck_spinlock_mcs_unlock(ck_spinlock_mcs_t **lock, ck_spinlock_mcs_t *node);

ck_spinlock_ticket_t spinlock = CK_SPINLOCK_TICKET_INITIALIZER;

void ck_spinlock_ticket_init(ck_spinlock_ticket_t *lock);
bool ck_spinlock_ticket_locked(ck_spinlock_ticket_t *lock);
void ck_spinlock_ticket_lock(ck_spinlock_ticket_t *lock);
void ck_spinlock_ticket_lock_pb(ck_spinlock_ticket_t *lock, unsigned int period);
bool ck_spinlock_ticket_trylock(ck_spinlock_ticket_t *lock);
void ck_spinlock_ticket_unlock(ck_spinlock_ticket_t *lock);

DESCRIPTION

A family of busy-wait spinlock implementations. The ck_spinlock_t implementation is simply a wrapper around the fetch-and-store (ck_spinlock_fas_t) implementation. The table below provides a summary of the current implementations.
Namespace              Algorithm                     Type          Restrictions              Fair
ck_spinlock_anderson   Anderson                      Array         Fixed number of threads   Yes
ck_spinlock_cas        Compare-and-Swap              Centralized   None                      No
ck_spinlock_clh        Craig, Landin and Hagersten   Queue         Lifetime requirements     Yes
ck_spinlock_dec        Decrement (Linux kernel)      Centralized   UINT_MAX concurrency      No
ck_spinlock_fas        Fetch-and-store               Centralized   None                      No
ck_spinlock_hclh       Hierarchical CLH              Queue         Lifetime requirements     Yes *
ck_spinlock_mcs        Mellor-Crummey and Scott      Queue         None                      Yes
ck_spinlock_ticket     Ticket                        Centralized   None                      Yes

* Hierarchical CLH only offers weak fairness for threads across cluster nodes.
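
For the array-based Anderson lock, the fixed-number-of-threads restriction listed above comes from its per-thread slot array. The following is a minimal sketch based only on the prototypes in the SYNOPSIS; the thread count of 8 and the function names are illustrative.

#include <ck_spinlock.h>

#define THREAD_COUNT 8

static ck_spinlock_anderson_t array_lock;
static ck_spinlock_anderson_thread_t slots[THREAD_COUNT];

void
anderson_setup(void)
{
        /* One slot must be provided for every thread that may contend. */
        ck_spinlock_anderson_init(&array_lock, slots, THREAD_COUNT);
}

void
anderson_example(void)
{
        ck_spinlock_anderson_thread_t *slot;

        /* The lock operation returns the slot this thread spins on. */
        ck_spinlock_anderson_lock(&array_lock, &slot);
        /*
         * Critical section.
         */
        ck_spinlock_anderson_unlock(&array_lock, slot);
}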

If contention is low and there is no hard requirement for starvation-freedom, a centralized greedy (unfair) spinlock is recommended. If contention is high and there is no requirement for starvation-freedom, a centralized greedy spinlock combined with an exponential back-off mechanism is recommended. If contention is generally low and starvation-freedom is a hard requirement, the ticket lock is recommended. If contention is high and starvation-freedom is a hard requirement, the Craig, Landin and Hagersten (CLH) queue spinlock is recommended, unless stack allocation is necessary or the NUMA factor is high, in which case the Mellor-Crummey and Scott (MCS) spinlock is recommended. If the O(n) space usage of the array and queue spinlocks is unaffordable but fairness under high contention is still required, the ticket lock with proportional back-off is recommended. If the NUMA factor is high but a greedy lock is preferred, see ck_cohort(3).
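
As a concrete illustration of the ticket lock with proportional back-off recommended above, the sketch below follows the prototypes in the SYNOPSIS; the back-off period of 1 and the function name are illustrative only.

#include <ck_spinlock.h>
#include <stdbool.h>

static ck_spinlock_ticket_t ticket = CK_SPINLOCK_TICKET_INITIALIZER;

void
ticket_example(void)
{
        /* Acquire in FIFO order, spinning with proportional back-off. */
        ck_spinlock_ticket_lock_pb(&ticket, 1);
        /*
         * Critical section.
         */
        ck_spinlock_ticket_unlock(&ticket);

        /* A single, non-blocking acquisition attempt is also available. */
        if (ck_spinlock_ticket_trylock(&ticket) == true) {
                /*
                 * Critical section.
                 */
                ck_spinlock_ticket_unlock(&ticket);
        }
}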

EXAMPLE

#include <ck_spinlock.h>
#include <stdbool.h>
/*
 * Alternatively, the mutex may be initialized at run-time with
 * ck_spinlock_init(&mutex).
 */
ck_spinlock_t mutex = CK_SPINLOCK_INITIALIZER;
void
example(void)
{
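        /* Busy-wait until the lock is acquired. */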
        ck_spinlock_lock(&mutex);
        /*
         * Critical section.
         */
        ck_spinlock_unlock(&mutex);
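
        /* Acquire the lock using exponential back-off while it is held. */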
        ck_spinlock_lock_eb(&mutex);
        /*
         * Critical section.
         */
        ck_spinlock_unlock(&mutex);
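
        /* Make a single, non-blocking acquisition attempt. */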
        if (ck_spinlock_trylock(&mutex) == true) {
                /*
                 * Critical section.
                 */
                ck_spinlock_unlock(&mutex);
        }
}