/root/src/xen/xen/common/softirq.c
| Line | Count | Source | 
| 1 |  | /****************************************************************************** | 
| 2 |  |  * common/softirq.c | 
| 3 |  |  *  | 
| 4 |  |  * Softirqs in Xen are only executed in an outermost activation (e.g., never  | 
| 5 |  |  * within an interrupt activation). This simplifies some things and generally  | 
| 6 |  |  * seems a good thing. | 
| 7 |  |  *  | 
| 8 |  |  * Copyright (c) 2003, K A Fraser | 
| 9 |  |  * Copyright (c) 1992, Linus Torvalds | 
| 10 |  |  */ | 
| 11 |  |  | 
| 12 |  | #include <xen/init.h> | 
| 13 |  | #include <xen/mm.h> | 
| 14 |  | #include <xen/preempt.h> | 
| 15 |  | #include <xen/sched.h> | 
| 16 |  | #include <xen/rcupdate.h> | 
| 17 |  | #include <xen/softirq.h> | 
| 18 |  |  | 
| 19 |  | #ifndef __ARCH_IRQ_STAT | 
| 20 |  | irq_cpustat_t irq_stat[NR_CPUS]; | 
| 21 |  | #endif | 
| 22 |  |  | 
| 23 |  | static softirq_handler softirq_handlers[NR_SOFTIRQS]; | 
| 24 |  |  | 
| 25 |  | static DEFINE_PER_CPU(cpumask_t, batch_mask); | 
| 26 |  | static DEFINE_PER_CPU(unsigned int, batching); | 
| 27 |  |  | 
| 28 |  | static void __do_softirq(unsigned long ignore_mask) | 
| 29 | 8.57M | { | 
| 30 | 8.57M |     unsigned int i, cpu; | 
| 31 | 8.57M |     unsigned long pending; | 
| 32 | 8.57M |  | 
| 33 | 8.57M |     for ( ; ; ) | 
| 34 | 11.9M |     { | 
| 35 | 11.9M |         /* | 
| 36 | 11.9M |          * Initialise @cpu on every iteration: SCHEDULE_SOFTIRQ may move | 
| 37 | 11.9M |          * us to another processor. | 
| 38 | 11.9M |          */ | 
| 39 | 11.9M |         cpu = smp_processor_id(); | 
| 40 | 11.9M |  | 
| 41 | 11.9M |         if ( rcu_pending(cpu) ) | 
| 42 | 0 |             rcu_check_callbacks(cpu); | 
| 43 | 11.9M |  | 
| 44 | 11.9M |         if ( ((pending = (softirq_pending(cpu) & ~ignore_mask)) == 0) | 
| 45 | 9.17M |              || cpu_is_offline(cpu) ) | 
| 46 | 3.83M |             break; | 
| 47 | 11.9M |  | 
| 48 | 8.15M |         i = find_first_set_bit(pending); | 
| 49 | 8.15M |         clear_bit(i, &softirq_pending(cpu)); | 
| 50 | 8.15M |         (*softirq_handlers[i])(); | 
| 51 | 8.15M |     } | 
| 52 | 8.57M | } | 
| 53 |  |  | 
| 54 |  | void process_pending_softirqs(void) | 
| 55 | 1.92M | { | 
| 56 | 1.92M |     ASSERT(!in_irq() && local_irq_is_enabled()); | 
| 57 | 1.92M |     /* Do not enter scheduler as it can preempt the calling context. */ | 
| 58 | 1.92M |     __do_softirq(1ul<<SCHEDULE_SOFTIRQ); | 
| 59 | 1.92M | } | 
| 60 |  |  | 
| 61 |  | void do_softirq(void) | 
| 62 | 6.61M | { | 
| 63 | 6.61M |     ASSERT_NOT_IN_ATOMIC(); | 
| 64 | 6.61M |     __do_softirq(0); | 
| 65 | 6.61M | } | 
| 66 |  |  | 
| 67 |  | void open_softirq(int nr, softirq_handler handler) | 
| 68 | 11 | { | 
| 69 | 11 |     ASSERT(nr < NR_SOFTIRQS); | 
| 70 | 11 |     softirq_handlers[nr] = handler; | 
| 71 | 11 | } | 
| 72 |  |  | 
| 73 |  | void cpumask_raise_softirq(const cpumask_t *mask, unsigned int nr) | 
| 74 | 66.8k | { | 
| 75 | 66.8k |     unsigned int cpu, this_cpu = smp_processor_id(); | 
| 76 | 66.8k |     cpumask_t send_mask, *raise_mask; | 
| 77 | 66.8k |  | 
| 78 | 66.8k |     if ( !per_cpu(batching, this_cpu) || in_irq() ) | 
| 79 | 66.8k |     { | 
| 80 | 66.8k |         cpumask_clear(&send_mask); | 
| 81 | 66.8k |         raise_mask = &send_mask; | 
| 82 | 66.8k |     } | 
| 83 | 66.8k |     else | 
| 84 | 18.4E |         raise_mask = &per_cpu(batch_mask, this_cpu); | 
| 85 | 66.8k |  | 
| 86 | 66.8k |     for_each_cpu(cpu, mask) | 
| 87 | 66.8k |         if ( !test_and_set_bit(nr, &softirq_pending(cpu)) && | 
| 88 | 66.8k |              cpu != this_cpu && | 
| 89 | 64.2k |              !arch_skip_send_event_check(cpu) ) | 
| 90 | 1.12k |             __cpumask_set_cpu(cpu, raise_mask); | 
| 91 | 66.8k |  | 
| 92 | 66.8k |     if ( raise_mask == &send_mask ) | 
| 93 | 66.8k |         smp_send_event_check_mask(raise_mask); | 
| 94 | 66.8k | } | 
| 95 |  |  | 
| 96 |  | void cpu_raise_softirq(unsigned int cpu, unsigned int nr) | 
| 97 | 3.77M | { | 
| 98 | 3.77M |     unsigned int this_cpu = smp_processor_id(); | 
| 99 | 3.77M |  | 
| 100 | 3.77M |     if ( test_and_set_bit(nr, &softirq_pending(cpu)) | 
| 101 | 4.16M |          || (cpu == this_cpu) | 
| 102 | 34.4k |          || arch_skip_send_event_check(cpu) ) | 
| 103 | 4.10M |         return; | 
| 104 | 3.77M |  | 
| 105 | 18.4E |     if ( !per_cpu(batching, this_cpu) || in_irq() ) | 
| 106 | 34.4k |         smp_send_event_check_cpu(cpu); | 
| 107 | 18.4E |     else | 
| 108 | 18.4E |         __cpumask_set_cpu(cpu, &per_cpu(batch_mask, this_cpu)); | 
| 109 | 18.4E | } | 
| 110 |  |  | 
| 111 |  | void cpu_raise_softirq_batch_begin(void) | 
| 112 | 0 | { | 
| 113 | 0 |     ++this_cpu(batching); | 
| 114 | 0 | } | 
| 115 |  |  | 
| 116 |  | void cpu_raise_softirq_batch_finish(void) | 
| 117 | 0 | { | 
| 118 | 0 |     unsigned int cpu, this_cpu = smp_processor_id(); | 
| 119 | 0 |     cpumask_t *mask = &per_cpu(batch_mask, this_cpu); | 
| 120 | 0 |  | 
| 121 | 0 |     ASSERT(per_cpu(batching, this_cpu)); | 
| 122 | 0 |     for_each_cpu ( cpu, mask ) | 
| 123 | 0 |         if ( !softirq_pending(cpu) ) | 
| 124 | 0 |             __cpumask_clear_cpu(cpu, mask); | 
| 125 | 0 |     smp_send_event_check_mask(mask); | 
| 126 | 0 |     cpumask_clear(mask); | 
| 127 | 0 |     --per_cpu(batching, this_cpu); | 
| 128 | 0 | } | 
| 129 |  |  | 
| 130 |  | void raise_softirq(unsigned int nr) | 
| 131 | 4.73M | { | 
| 132 | 4.73M |     set_bit(nr, &softirq_pending(smp_processor_id())); | 
| 133 | 4.73M | } | 
| 134 |  |  | 
| 135 |  | void __init softirq_init(void) | 
| 136 | 1 | { | 
| 137 | 1 | } | 
| 138 |  |  | 
| 139 |  | /* | 
| 140 |  |  * Local variables: | 
| 141 |  |  * mode: C | 
| 142 |  |  * c-file-style: "BSD" | 
| 143 |  |  * c-basic-offset: 4 | 
| 144 |  |  * tab-width: 4 | 
| 145 |  |  * indent-tabs-mode: nil | 
| 146 |  |  * End: | 
| 147 |  |  */ |
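
The listing above is the softirq core itself; the functions it exports (`open_softirq`, `raise_softirq`, `cpu_raise_softirq`, and the batching pair) are consumed by other Xen subsystems. As a rough, illustrative sketch of that usage, assuming a hypothetical softirq number `MY_EXAMPLE_SOFTIRQ` and hypothetical functions `example_handler`, `example_init`, `example_kick`, and `example_kick_many` (none of which exist in the Xen tree), a caller might look like this:

```c
/*
 * Illustrative sketch only -- not code from the Xen tree.
 * MY_EXAMPLE_SOFTIRQ would have to be added to the softirq enumeration
 * in xen/include/xen/softirq.h before NR_SOFTIRQS.
 */
#include <xen/softirq.h>
#include <xen/cpumask.h>
#include <xen/smp.h>

static void example_handler(void)
{
    /* Handlers run in an outermost activation, never inside an IRQ. */
}

static void example_init(void)
{
    /* Bind the handler to the softirq number once, early at boot. */
    open_softirq(MY_EXAMPLE_SOFTIRQ, example_handler);
}

static void example_kick(unsigned int target_cpu)
{
    /* Mark the softirq pending locally; it runs at the next do_softirq(). */
    raise_softirq(MY_EXAMPLE_SOFTIRQ);

    /* Or target another CPU; an event-check IPI is sent unless skippable. */
    cpu_raise_softirq(target_cpu, MY_EXAMPLE_SOFTIRQ);
}

static void example_kick_many(const cpumask_t *cpus)
{
    unsigned int cpu;

    /*
     * Batching: pending bits are set immediately, but remote CPUs are
     * collected in this CPU's batch_mask and notified with a single
     * event-check IPI when the batch is finished.
     */
    cpu_raise_softirq_batch_begin();
    for_each_cpu ( cpu, cpus )
        cpu_raise_softirq(cpu, MY_EXAMPLE_SOFTIRQ);
    cpu_raise_softirq_batch_finish();
}
```

This matches the behaviour visible in the coverage counts: the common paths (`raise_softirq`, `cpu_raise_softirq` with batching disabled) are hit millions of times, while the batching begin/finish paths show zero executions in this run.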