Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/arch/x86/machine_kexec.c
Line
Count
Source (jump to first uncovered line)
1
/******************************************************************************
2
 * machine_kexec.c
3
 *
4
 * Copyright (C) 2013 Citrix Systems R&D Ltd.
5
 *
6
 * Portions derived from Linux's arch/x86/kernel/machine_kexec_64.c.
7
 *
8
 *   Copyright (C) 2002-2005 Eric Biederman  <ebiederm@xmission.com>
9
 *
10
 * Xen port written by:
11
 * - Simon 'Horms' Horman <horms@verge.net.au>
12
 * - Magnus Damm <magnus@valinux.co.jp>
13
 *
14
 * This source code is licensed under the GNU General Public License,
15
 * Version 2.  See the file COPYING for more details.
16
 */
17
18
#include <xen/types.h>
19
#include <xen/kexec.h>
20
#include <xen/guest_access.h>
21
#include <asm/fixmap.h>
22
#include <asm/hpet.h>
23
#include <asm/page.h>
24
#include <asm/machine_kexec.h>
25
26
/*
27
 * Add a mapping for a page to the page tables used during kexec.
28
 */
29
int machine_kexec_add_page(struct kexec_image *image, unsigned long vaddr,
30
                           unsigned long maddr)
31
0
{
32
0
    struct page_info *l4_page;
33
0
    struct page_info *l3_page;
34
0
    struct page_info *l2_page;
35
0
    struct page_info *l1_page;
36
0
    l4_pgentry_t *l4 = NULL;
37
0
    l3_pgentry_t *l3 = NULL;
38
0
    l2_pgentry_t *l2 = NULL;
39
0
    l1_pgentry_t *l1 = NULL;
40
0
    int ret = -ENOMEM;
41
0
42
0
    l4_page = image->aux_page;
43
0
    if ( !l4_page )
44
0
    {
45
0
        l4_page = kimage_alloc_control_page(image, 0);
46
0
        if ( !l4_page )
47
0
            goto out;
48
0
        image->aux_page = l4_page;
49
0
    }
50
0
51
0
    l4 = __map_domain_page(l4_page);
52
0
    l4 += l4_table_offset(vaddr);
53
0
    if ( !(l4e_get_flags(*l4) & _PAGE_PRESENT) )
54
0
    {
55
0
        l3_page = kimage_alloc_control_page(image, 0);
56
0
        if ( !l3_page )
57
0
            goto out;
58
0
        l4e_write(l4, l4e_from_page(l3_page, __PAGE_HYPERVISOR));
59
0
    }
60
0
    else
61
0
        l3_page = l4e_get_page(*l4);
62
0
63
0
    l3 = __map_domain_page(l3_page);
64
0
    l3 += l3_table_offset(vaddr);
65
0
    if ( !(l3e_get_flags(*l3) & _PAGE_PRESENT) )
66
0
    {
67
0
        l2_page = kimage_alloc_control_page(image, 0);
68
0
        if ( !l2_page )
69
0
            goto out;
70
0
        l3e_write(l3, l3e_from_page(l2_page, __PAGE_HYPERVISOR));
71
0
    }
72
0
    else
73
0
        l2_page = l3e_get_page(*l3);
74
0
75
0
    l2 = __map_domain_page(l2_page);
76
0
    l2 += l2_table_offset(vaddr);
77
0
    if ( !(l2e_get_flags(*l2) & _PAGE_PRESENT) )
78
0
    {
79
0
        l1_page = kimage_alloc_control_page(image, 0);
80
0
        if ( !l1_page )
81
0
            goto out;
82
0
        l2e_write(l2, l2e_from_page(l1_page, __PAGE_HYPERVISOR));
83
0
    }
84
0
    else
85
0
        l1_page = l2e_get_page(*l2);
86
0
87
0
    l1 = __map_domain_page(l1_page);
88
0
    l1 += l1_table_offset(vaddr);
89
0
    l1e_write(l1, l1e_from_pfn(maddr >> PAGE_SHIFT, __PAGE_HYPERVISOR));
90
0
91
0
    ret = 0;
92
0
out:
93
0
    if ( l1 )
94
0
        unmap_domain_page(l1);
95
0
    if ( l2 )
96
0
        unmap_domain_page(l2);
97
0
    if ( l3 )
98
0
        unmap_domain_page(l3);
99
0
    if ( l4 )
100
0
        unmap_domain_page(l4);
101
0
    return ret;
102
0
}
103
104
int machine_kexec_load(struct kexec_image *image)
105
0
{
106
0
    void *code_page;
107
0
    int ret;
108
0
109
0
    switch ( image->arch )
110
0
    {
111
0
    case EM_386:
112
0
    case EM_X86_64:
113
0
        break;
114
0
    default:
115
0
        return -EINVAL;
116
0
    }
117
0
118
0
    code_page = __map_domain_page(image->control_code_page);
119
0
    memcpy(code_page, kexec_reloc, kexec_reloc_size);
120
0
    unmap_domain_page(code_page);
121
0
122
0
    /*
123
0
     * Add a mapping for the control code page to the same virtual
124
0
     * address as kexec_reloc.  This allows us to keep running after
125
0
     * these page tables are loaded in kexec_reloc.
126
0
     */
127
0
    ret = machine_kexec_add_page(image, (unsigned long)kexec_reloc,
128
0
                                 page_to_maddr(image->control_code_page));
129
0
    if ( ret < 0 )
130
0
        return ret;
131
0
132
0
    return 0;
133
0
}
134
135
void machine_kexec_unload(struct kexec_image *image)
136
0
{
137
0
    /* no-op. kimage_free() frees all control pages. */
138
0
}
139
140
void machine_reboot_kexec(struct kexec_image *image)
141
0
{
142
0
    BUG_ON(smp_processor_id() != 0);
143
0
    smp_send_stop();
144
0
    machine_kexec(image);
145
0
    BUG();
146
0
}
147
148
void machine_kexec(struct kexec_image *image)
149
0
{
150
0
    int i;
151
0
    unsigned long reloc_flags = 0;
152
0
153
0
    /* We are about to permanently jump out of the Xen context into the kexec
154
0
     * purgatory code.  We really don't want to be still servicing interrupts.
155
0
     */
156
0
    local_irq_disable();
157
0
158
0
    /* Now regular interrupts are disabled, we need to reduce the impact
159
0
     * of interrupts not disabled by 'cli'.
160
0
     *
161
0
     * The NMI handlers have already been set up by nmi_shootdown_cpus().  All
162
0
     * pcpus other than us have the nmi_crash handler, while we have the nop
163
0
     * handler.
164
0
     *
165
0
     * The MCE handlers touch extensive areas of Xen code and data.  At this
166
0
     * point, there is nothing we can usefully do, so set the nop handler.
167
0
     */
168
0
    for ( i = 0; i < nr_cpu_ids; i++ )
169
0
    {
170
0
        if ( idt_tables[i] == NULL )
171
0
            continue;
172
0
        _update_gate_addr_lower(&idt_tables[i][TRAP_machine_check], &trap_nop);
173
0
    }
174
0
175
0
    /* Explicitly enable NMIs on this CPU.  Some crashdump kernels do
176
0
     * not like running with NMIs disabled. */
177
0
    enable_nmis();
178
0
179
0
    if ( image->arch == EM_386 )
180
0
        reloc_flags |= KEXEC_RELOC_FLAG_COMPAT;
181
0
182
0
    kexec_reloc(page_to_maddr(image->control_code_page),
183
0
                page_to_maddr(image->aux_page),
184
0
                image->head, image->entry_maddr, reloc_flags);
185
0
}
186
187
int machine_kexec_get(xen_kexec_range_t *range)
188
0
{
189
0
  if (range->range != KEXEC_RANGE_MA_XEN)
190
0
    return -EINVAL;
191
0
  return machine_kexec_get_xen(range);
192
0
}
193
194
void arch_crash_save_vmcoreinfo(void)
195
0
{
196
0
  VMCOREINFO_SYMBOL(dom_xen);
197
0
  VMCOREINFO_SYMBOL(dom_io);
198
0
199
0
  VMCOREINFO_SYMBOL_ALIAS(pgd_l4, idle_pg_table);
200
0
}
201
202
/*
203
 * Local variables:
204
 * mode: C
205
 * c-file-style: "BSD"
206
 * c-basic-offset: 4
207
 * tab-width: 4
208
 * indent-tabs-mode: nil
209
 * End:
210
 */