Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/common/rwlock.c
 Line   Count  Source
    1          #include <xen/rwlock.h>
    2          #include <xen/irq.h>
    3
    4          /*
    5           * rspin_until_writer_unlock - spin until writer is gone.
    6           * @lock  : Pointer to queue rwlock structure.
    7           * @cnts: Current queue rwlock writer status byte.
    8           *
    9           * In interrupt context or at the head of the queue, the reader will just
   10           * increment the reader count & wait until the writer releases the lock.
   11           */
   12          static inline void rspin_until_writer_unlock(rwlock_t *lock, u32 cnts)
   13     138  {
   14     138      while ( (cnts & _QW_WMASK) == _QW_LOCKED )
   15       0      {
   16       0          cpu_relax();
   17       0          smp_rmb();
   18       0          cnts = atomic_read(&lock->cnts);
   19       0      }
   20     138  }
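
The helper above only has work to do while a writer actually holds the lock; the zero counts on lines 15-19 show that in this run every caller found the writer byte already clear. For illustration, here is a minimal userspace model of the same wait loop, using C11 atomics in place of Xen's atomic_t, cpu_relax() and smp_rmb(). The QW_*/QR_BIAS values follow the conventional qrwlock layout (writer state in the low byte, reader count from bit 8 up) and are assumptions of this sketch, not values copied from xen/include/xen/rwlock.h.

/* Hypothetical stand-alone model -- not the Xen implementation. */
#include <stdatomic.h>
#include <stdint.h>

#define QW_LOCKED  0xffu    /* a writer holds the lock (assumed layout) */
#define QW_WMASK   0xffu    /* writer state mask, low byte (assumed)    */
#define QR_BIAS    0x100u   /* one reader, counted from bit 8 (assumed) */

/* Spin until the writer byte no longer reads "locked". */
void model_rspin_until_writer_unlock(atomic_uint *cnts_p, uint32_t cnts)
{
    while ( (cnts & QW_WMASK) == QW_LOCKED )
        cnts = atomic_load_explicit(cnts_p, memory_order_acquire);
}
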
   21
   22          /*
   23           * queue_read_lock_slowpath - acquire read lock of a queue rwlock.
   24           * @lock: Pointer to queue rwlock structure.
   25           */
   26          void queue_read_lock_slowpath(rwlock_t *lock)
   27     138  {
   28     138      u32 cnts;
   29     138
   30     138      /*
   31     138       * Readers come here when they cannot get the lock without waiting.
   32     138       */
   33     138      atomic_sub(_QR_BIAS, &lock->cnts);
   34     138
   35     138      /*
   36     138       * Put the reader into the wait queue.
   37     138       */
   38     138      spin_lock(&lock->lock);
   39     138
   40     138      /*
   41     138       * At the head of the wait queue now, wait until the writer state
   42     138       * goes to 0 and then try to increment the reader count and get
   43     138       * the lock. It is possible that an incoming writer may steal the
   44     138       * lock in the interim, so it is necessary to check the writer byte
   45     138       * to make sure that the write lock isn't taken.
   46     138       */
   47    547k      while ( atomic_read(&lock->cnts) & _QW_WMASK )
   48    547k          cpu_relax();
   49     138
   50     138      cnts = atomic_add_return(_QR_BIAS, &lock->cnts) - _QR_BIAS;
   51     138      rspin_until_writer_unlock(lock, cnts);
   52     138
   53     138      /*
   54     138       * Signal the next one in queue to become queue head.
   55     138       */
   56     138      spin_unlock(&lock->lock);
   57     138  }
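
A reader only reaches this slow path after a failed fast path: it optimistically added _QR_BIAS to lock->cnts, found a writer in the low byte, and must now back the bias out, queue on the internal spinlock, and re-add it once the writer state clears. Below is a hypothetical userspace sketch of that sequence under the same assumed qrwlock layout as above; a pthread mutex stands in for lock->lock, C11 atomics for atomic_t, and the model_* names are invented here, so none of this is Xen's actual API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

#define QW_LOCKED  0xffu    /* assumed layout, as in the sketch above */
#define QW_WMASK   0xffu
#define QR_BIAS    0x100u

struct model_rwlock {
    atomic_uint cnts;               /* stands in for lock->cnts */
    pthread_mutex_t wait_lock;      /* stands in for lock->lock */
};

void model_read_lock_slowpath(struct model_rwlock *l)
{
    uint32_t cnts;

    /* Back out the bias added speculatively on the fast path (line 33). */
    atomic_fetch_sub(&l->cnts, QR_BIAS);

    /* Queue behind other waiters (line 38). */
    pthread_mutex_lock(&l->wait_lock);

    /* Wait until no writer holds or is waiting for the lock (line 47). */
    while ( atomic_load(&l->cnts) & QW_WMASK )
        ;                           /* cpu_relax() in the real code */

    /* Re-announce ourselves; a writer may still slip in between the check
     * and the add, so keep spinning while the low byte reads "locked"
     * (lines 50-51). */
    cnts = atomic_fetch_add(&l->cnts, QR_BIAS) + QR_BIAS;
    while ( (cnts & QW_WMASK) == QW_LOCKED )
        cnts = atomic_load(&l->cnts);

    /* Let the next waiter become queue head (line 56). */
    pthread_mutex_unlock(&l->wait_lock);
}

void model_read_lock(struct model_rwlock *l)
{
    /* Fast path: add a reader and check the writer byte in one step. */
    uint32_t cnts = atomic_fetch_add(&l->cnts, QR_BIAS) + QR_BIAS;

    if ( !(cnts & QW_WMASK) )
        return;                     /* no writer: read lock acquired */

    model_read_lock_slowpath(l);    /* writer present or pending */
}

In both the sketch and the listing, the inner lock is what provides the queueing: contended readers and writers serialise on it in arrival order, which is how a steady stream of readers is kept from starving a waiting writer.
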
   58
   59          /*
   60           * queue_write_lock_slowpath - acquire write lock of a queue rwlock
   61           * @lock : Pointer to queue rwlock structure.
   62           */
   63          void queue_write_lock_slowpath(rwlock_t *lock)
   64     736  {
   65     736      u32 cnts;
   66     736
   67     736      /* Put the writer into the wait queue. */
   68     736      spin_lock(&lock->lock);
   69     736
   70     736      /* Try to acquire the lock directly if no reader is present. */
   71     736      if ( !atomic_read(&lock->cnts) &&
   72      35           (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0) )
   73      34          goto unlock;
   74     736
   75     736      /*
   76     736       * Set the waiting flag to notify readers that a writer is pending,
   77     736       * or wait for a previous writer to go away.
   78     736       */
   79     702      for ( ; ; )
   80   2.29M      {
   81   2.29M          cnts = atomic_read(&lock->cnts);
   82   2.29M          if ( !(cnts & _QW_WMASK) &&
   83     751               (atomic_cmpxchg(&lock->cnts, cnts,
   84     751                               cnts | _QW_WAITING) == cnts) )
   85     739              break;
   86   2.29M
   87   2.29M          cpu_relax();
   88   2.29M      }
   89     702
   90     702      /* When no more readers, set the locked flag. */
   91     702      for ( ; ; )
   92     739      {
   93     739          cnts = atomic_read(&lock->cnts);
   94     739          if ( (cnts == _QW_WAITING) &&
   95     739               (atomic_cmpxchg(&lock->cnts, _QW_WAITING,
   96     739                               _QW_LOCKED) == _QW_WAITING) )
   97     739              break;
   98     739
   99       0          cpu_relax();
  100       0      }
  101     773   unlock:
  102     773      spin_unlock(&lock->lock);
  103     773  }
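
The writer slow path above is a two-stage handover: take the lock outright if cnts is zero, otherwise publish _QW_WAITING in the writer byte and, once the count has collapsed to exactly _QW_WAITING, flip it to _QW_LOCKED. The hit counts make the contention visible: of 736 entries, 34 took the direct-acquisition exit, the rest spent roughly 2.29M iterations in the flag-setting loop, and the final _QW_WAITING to _QW_LOCKED transition never needed a retry (lines 99-100 show zero hits). The sketch below is a hypothetical userspace model of those two stages, with the same stand-ins and assumed constants as the reader sketch; it is an illustration, not Xen's implementation.

#include <pthread.h>
#include <stdatomic.h>

#define QW_WAITING 0x01u    /* a writer is waiting (assumed layout) */
#define QW_LOCKED  0xffu    /* a writer holds the lock (assumed)    */
#define QW_WMASK   0xffu

struct model_rwlock {
    atomic_uint cnts;               /* stands in for lock->cnts */
    pthread_mutex_t wait_lock;      /* stands in for lock->lock */
};

void model_write_lock_slowpath(struct model_rwlock *l)
{
    unsigned int cnts;              /* u32 in the Xen code */

    /* Serialise with queued readers and other writers (line 68). */
    pthread_mutex_lock(&l->wait_lock);

    /* No readers, no writer: take the lock directly (lines 71-73). */
    cnts = 0;
    if ( atomic_compare_exchange_strong(&l->cnts, &cnts, QW_LOCKED) )
        goto unlock;

    /* Stage 1: set the waiting flag so incoming readers divert to their
     * slow path (lines 79-88). */
    for ( ; ; )
    {
        cnts = atomic_load(&l->cnts);
        if ( !(cnts & QW_WMASK) &&
             atomic_compare_exchange_strong(&l->cnts, &cnts,
                                            cnts | QW_WAITING) )
            break;
        /* cpu_relax() here in the real code */
    }

    /* Stage 2: once the last reader has gone, flip WAITING to LOCKED
     * (lines 91-100). */
    cnts = QW_WAITING;
    while ( !atomic_compare_exchange_strong(&l->cnts, &cnts, QW_LOCKED) )
        cnts = QW_WAITING;          /* readers still draining; retry */

 unlock:
    pthread_mutex_unlock(&l->wait_lock);
}

Splitting acquisition into WAITING and LOCKED stages is what lets the read slow path (line 47) spin on the whole writer byte: a pending writer already diverts new slow-path readers before it actually owns the lock.
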
  104
  105
  106          static DEFINE_PER_CPU(cpumask_t, percpu_rwlock_readers);
  107
  108          void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
  109                          percpu_rwlock_t *percpu_rwlock)
  110    510k  {
  111    510k      unsigned int cpu;
  112    510k      cpumask_t *rwlock_readers = &this_cpu(percpu_rwlock_readers);
  113    510k
  114    510k      /* Validate the correct per_cpudata variable has been provided. */
  115    510k      _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
  116    510k
  117    510k      /*
  118    510k       * First take the write lock to protect against other writers or slow
  119    510k       * path readers.
  120    510k       */
  121    510k      write_lock(&percpu_rwlock->rwlock);
  122    510k
  123    510k      /* Now set the global variable so that readers start using read_lock. */
  124    510k      percpu_rwlock->writer_activating = 1;
  125    510k      smp_mb();
  126    510k
  127    510k      /* Using a per cpu cpumask is only safe if there is no nesting. */
  128    510k      ASSERT(!in_irq());
  129    510k      cpumask_copy(rwlock_readers, &cpu_online_map);
  130    510k
  131    510k      /* Check if there are any percpu readers in progress on this rwlock. */
  132    510k      for ( ; ; )
  133    511k      {
  134    511k          for_each_cpu(cpu, rwlock_readers)
  135   6.13M          {
  136   6.13M              /*
  137   6.13M               * Remove any percpu readers not contending on this rwlock
  138   6.13M               * from our check mask.
  139   6.13M               */
  140   6.13M              if ( per_cpu_ptr(per_cpudata, cpu) != percpu_rwlock )
  141   6.13M                  __cpumask_clear_cpu(cpu, rwlock_readers);
  142   6.13M          }
  143    511k          /* Check if we've cleared all percpu readers from check mask. */
  144    511k          if ( cpumask_empty(rwlock_readers) )
  145    510k              break;
  146    511k          /* Give the coherency fabric a break. */
  147      45          cpu_relax();
  148      45      };
  149    510k  }
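
_percpu_write_lock() combines an ordinary write_lock() with a drain of fast-path readers: once writer_activating is set and the full barrier has executed, any CPU whose per_cpudata pointer still references this lock is a reader still in flight, so the writer spins until the mask of such CPUs empties. The counts show the drain is almost always immediate (510k calls, only 45 passes through cpu_relax()). Below is a hypothetical userspace model of the drain only: a fixed MODEL_NR_CPUS array replaces the per-CPU pointer and cpu_online_map, a pthread rwlock stands in for percpu_rwlock->rwlock, a simple per-CPU wait replaces the cpumask bookkeeping above, and the matching read-lock/unlock sides are omitted, so treat it purely as an illustration.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

#define MODEL_NR_CPUS 8             /* assumed; stands in for cpu_online_map */

struct model_percpu_rwlock {
    pthread_rwlock_t rwlock;        /* stands in for percpu_rwlock->rwlock */
    atomic_bool writer_activating;
    /* Per-CPU "I am fast-path reading this lock" pointer (per_cpudata). */
    _Atomic(struct model_percpu_rwlock *) reader_owner[MODEL_NR_CPUS];
};

void model_percpu_write_lock(struct model_percpu_rwlock *l)
{
    unsigned int cpu;

    /* Exclude other writers and slow-path readers (line 121). */
    pthread_rwlock_wrlock(&l->rwlock);

    /* Divert new fast-path readers to the slow path; seq_cst ordering
     * plays the role of smp_mb() (lines 124-125). */
    atomic_store_explicit(&l->writer_activating, true, memory_order_seq_cst);

    /* Wait until no CPU is still inside a fast-path read section of this
     * lock -- the cpumask loop on lines 129-148, done per CPU here. */
    for ( cpu = 0; cpu < MODEL_NR_CPUS; cpu++ )
        while ( atomic_load(&l->reader_owner[cpu]) == l )
            ;                       /* cpu_relax() in the real code */
}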