debuggers.hg

view linux-2.6.10-xen-sparse/drivers/xen/privcmd/privcmd.c @ 3668:d55d523078f7

bitkeeper revision 1.1159.212.77 (4202221693AFbvFZWeMHHIjQfbzTIQ)

More x86_64 progress. Many more gaps filled in. Next step is DOM0
construction.
Signed-off-by: keir.fraser@cl.cam.ac.uk
author kaf24@scramble.cl.cam.ac.uk
date Thu Feb 03 13:07:34 2005 +0000 (2005-02-03)
parents 39a7a74fd6f9
children bbe8541361dd
line source
1 /******************************************************************************
2 * privcmd.c
3 *
4 * Interface to privileged domain-0 commands.
5 *
6 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
7 */
9 #include <linux/config.h>
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/slab.h>
13 #include <linux/string.h>
14 #include <linux/errno.h>
15 #include <linux/mm.h>
16 #include <linux/mman.h>
17 #include <linux/swap.h>
18 #include <linux/smp_lock.h>
19 #include <linux/highmem.h>
20 #include <linux/pagemap.h>
21 #include <linux/seq_file.h>
23 #include <asm/pgalloc.h>
24 #include <asm/pgtable.h>
25 #include <asm/uaccess.h>
26 #include <asm/tlb.h>
27 #include <asm-xen/linux-public/privcmd.h>
28 #include <asm-xen/xen-public/dom0_ops.h>
29 #include <asm-xen/xen_proc.h>
/* /proc entry (created in privcmd_init) through which dom0 user tools issue
 * privileged commands; NULL on unprivileged guests where it is never made. */
static struct proc_dir_entry *privcmd_intf;
33 static int privcmd_ioctl(struct inode *inode, struct file *file,
34 unsigned int cmd, unsigned long data)
35 {
36 int ret = -ENOSYS;
38 switch ( cmd )
39 {
40 case IOCTL_PRIVCMD_HYPERCALL:
41 {
42 privcmd_hypercall_t hypercall;
44 if ( copy_from_user(&hypercall, (void *)data, sizeof(hypercall)) )
45 return -EFAULT;
47 __asm__ __volatile__ (
48 "pushl %%ebx; pushl %%ecx; pushl %%edx; pushl %%esi; pushl %%edi; "
49 "movl 4(%%eax),%%ebx ;"
50 "movl 8(%%eax),%%ecx ;"
51 "movl 12(%%eax),%%edx ;"
52 "movl 16(%%eax),%%esi ;"
53 "movl 20(%%eax),%%edi ;"
54 "movl (%%eax),%%eax ;"
55 TRAP_INSTR "; "
56 "popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
57 : "=a" (ret) : "0" (&hypercall) : "memory" );
59 }
60 break;
62 case IOCTL_PRIVCMD_INITDOMAIN_EVTCHN:
63 {
64 extern int initdom_ctrlif_domcontroller_port;
65 ret = initdom_ctrlif_domcontroller_port;
66 }
67 break;
69 #if defined(CONFIG_XEN_PRIVILEGED_GUEST)
70 case IOCTL_PRIVCMD_MMAP:
71 {
72 #define PRIVCMD_MMAP_SZ 32
73 privcmd_mmap_t mmapcmd;
74 privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ], *p;
75 int i, rc;
77 if ( copy_from_user(&mmapcmd, (void *)data, sizeof(mmapcmd)) )
78 return -EFAULT;
80 p = mmapcmd.entry;
82 for (i=0; i<mmapcmd.num; i+=PRIVCMD_MMAP_SZ, p+=PRIVCMD_MMAP_SZ)
83 {
84 int j, n = ((mmapcmd.num-i)>PRIVCMD_MMAP_SZ)?
85 PRIVCMD_MMAP_SZ:(mmapcmd.num-i);
86 if ( copy_from_user(&msg, p, n*sizeof(privcmd_mmap_entry_t)) )
87 return -EFAULT;
89 for ( j = 0; j < n; j++ )
90 {
91 struct vm_area_struct *vma =
92 find_vma( current->mm, msg[j].va );
94 if ( !vma )
95 return -EINVAL;
97 if ( msg[j].va > PAGE_OFFSET )
98 return -EINVAL;
100 if ( (msg[j].va + (msg[j].npages<<PAGE_SHIFT)) > vma->vm_end )
101 return -EINVAL;
103 if ( (rc = direct_remap_area_pages(vma->vm_mm,
104 msg[j].va&PAGE_MASK,
105 msg[j].mfn<<PAGE_SHIFT,
106 msg[j].npages<<PAGE_SHIFT,
107 vma->vm_page_prot,
108 mmapcmd.dom)) < 0 )
109 return rc;
110 }
111 }
112 ret = 0;
113 }
114 break;
116 case IOCTL_PRIVCMD_MMAPBATCH:
117 {
118 #define MAX_DIRECTMAP_MMU_QUEUE 130
119 mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;
120 privcmd_mmapbatch_t m;
121 struct vm_area_struct *vma = NULL;
122 unsigned long *p, addr;
123 unsigned long mfn;
124 int i;
126 if ( copy_from_user(&m, (void *)data, sizeof(m)) )
127 { ret = -EFAULT; goto batch_err; }
129 vma = find_vma( current->mm, m.addr );
131 if ( !vma )
132 { ret = -EINVAL; goto batch_err; }
134 if ( m.addr > PAGE_OFFSET )
135 { ret = -EFAULT; goto batch_err; }
137 if ( (m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end )
138 { ret = -EFAULT; goto batch_err; }
140 u[0].ptr = MMU_EXTENDED_COMMAND;
141 u[0].val = MMUEXT_SET_FOREIGNDOM;
142 u[0].val |= (unsigned long)m.dom << 16;
143 v = w = &u[1];
145 p = m.arr;
146 addr = m.addr;
147 for ( i = 0; i < m.num; i++, addr += PAGE_SIZE, p++ )
148 {
149 if ( get_user(mfn, p) )
150 return -EFAULT;
152 v->val = (mfn << PAGE_SHIFT) | pgprot_val(vma->vm_page_prot);
154 __direct_remap_area_pages(vma->vm_mm,
155 addr,
156 PAGE_SIZE,
157 v);
159 if ( unlikely(HYPERVISOR_mmu_update(u, v - u + 1, NULL) < 0) )
160 put_user( 0xF0000000 | mfn, p );
162 v = w;
163 }
164 ret = 0;
165 break;
167 batch_err:
168 printk("batch_err ret=%d vma=%p addr=%lx num=%d arr=%p %lx-%lx\n",
169 ret, vma, m.addr, m.num, m.arr, vma->vm_start, vma->vm_end);
170 break;
171 }
172 break;
173 #endif
175 case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN:
176 {
177 unsigned long m2pv = (unsigned long)machine_to_phys_mapping;
178 pgd_t *pgd = pgd_offset_k(m2pv);
179 pmd_t *pmd = pmd_offset(pgd, m2pv);
180 unsigned long m2p_start_mfn = pmd_val(*pmd) >> PAGE_SHIFT;
181 ret = put_user(m2p_start_mfn, (unsigned long *)data) ? -EFAULT: 0;
182 }
183 break;
185 default:
186 ret = -EINVAL;
187 break;
188 }
189 return ret;
190 }
192 static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
193 {
194 /* DONTCOPY is essential for Xen as copy_page_range is broken. */
195 vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
197 return 0;
198 }
200 static struct file_operations privcmd_file_ops = {
201 ioctl : privcmd_ioctl,
202 mmap: privcmd_mmap
203 };
206 static int __init privcmd_init(void)
207 {
208 if ( !(xen_start_info.flags & SIF_PRIVILEGED) )
209 return 0;
211 privcmd_intf = create_xen_proc_entry("privcmd", 0400);
212 if ( privcmd_intf != NULL )
213 privcmd_intf->proc_fops = &privcmd_file_ops;
215 return 0;
216 }
218 __initcall(privcmd_init);