debuggers.hg

xenolinux-2.4.21-sparse/drivers/char/mem.c @ 648:cda951fc1bef

bitkeeper revision 1.341 (3f1120a2WW6KGE81TArq_p654xy38Q)

Merge labyrinth.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into labyrinth.cl.cam.ac.uk:/auto/anfs/scratch/labyrinth/iap10/xeno-clone/xeno.bk
author iap10@labyrinth.cl.cam.ac.uk
date Sun Jul 13 09:04:34 2003 +0000 (2003-07-13)
parents 9339f3942f4e
children
/*
 * linux/drivers/char/mem.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Added devfs support.
 * Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 *
 * MODIFIED FOR XENOLINUX by Keir Fraser, 10th July 2003.
 * Xenolinux has strange semantics for /dev/mem and /dev/kmem!!
 * 1. mmap will not work on /dev/kmem
 * 2. mmap on /dev/mem interprets the 'file offset' as a machine address
 *    rather than a physical address.
 * I don't believe anyone sane mmaps /dev/kmem, but /dev/mem is mmapped
 * to get at memory-mapped I/O spaces (e.g. the VESA X server does this).
 * For this to work at all we need to expect machine addresses.
 * Reading/writing of /dev/kmem expects kernel virtual addresses, as usual.
 * Reading/writing of /dev/mem expects 'physical addresses' as usual -- this
 * is because /dev/mem can only read/write existing kernel mappings, which
 * will be normal RAM, and we should present pseudo-physical layout for all
 * except I/O (which is the sticky case that mmap is hacked to deal with).
 */
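
/*
 * [Illustrative sketch -- editor's addition, not part of the original
 * source] The machine-address semantics described above are what a
 * privileged user-space program relies on when it maps MMIO through
 * /dev/mem. The 0xA0000 frame-buffer address and mapping length are
 * hypothetical examples.
 *
 *   #include <fcntl.h>
 *   #include <sys/mman.h>
 *
 *   int main(void)
 *   {
 *       int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *       if (fd < 0)
 *           return 1;
 *       // Under Xenolinux the offset is a *machine* address, not a
 *       // pseudo-physical one.
 *       void *io = mmap(0, 0x10000, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, 0xA0000);
 *       return (io == MAP_FAILED) ? 1 : 0;
 *   }
 */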

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/tpqic02.h>
#include <linux/ftape.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgalloc.h>

#ifdef CONFIG_I2C
extern int i2c_init_all(void);
#endif
#ifdef CONFIG_FB
extern void fbmem_init(void);
#endif
#ifdef CONFIG_PROM_CONSOLE
extern void prom_con_init(void);
#endif
#ifdef CONFIG_MDA_CONSOLE
extern void mda_console_init(void);
#endif
#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
extern void tapechar_init(void);
#endif

static ssize_t do_write_mem(struct file * file, void *p, unsigned long realp,
                            const char * buf, size_t count, loff_t *ppos)
{
        ssize_t written;

        written = 0;
#if defined(__sparc__) || defined(__mc68000__)
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (realp < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE-realp;
                if (sz > count) sz = count;
                /* Hmm. Do something? */
                buf+=sz;
                p+=sz;
                count-=sz;
                written+=sz;
        }
#endif
        if (copy_from_user(p, buf, count))
                return -EFAULT;
        written += count;
        *ppos += written;
        return written;
}

/*
 * This function reads *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file * file, char * buf,
                        size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        unsigned long end_mem;
        ssize_t read;

        end_mem = __pa(high_memory);
        if (p >= end_mem)
                return 0;
        if (count > end_mem - p)
                count = end_mem - p;
        read = 0;
#if defined(__sparc__) || defined(__mc68000__)
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE-p;
                if (sz > count)
                        sz = count;
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif
        if (copy_to_user(buf, __va(p), count))
                return -EFAULT;
        read += count;
        *ppos += read;
        return read;
}
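
/*
 * [Illustrative sketch -- editor's addition, not part of the original
 * source] read_mem() above is what services an ordinary read() on
 * /dev/mem, with the file offset used as the (pseudo-)physical address.
 * A hypothetical user-space probe of the first page of RAM:
 *
 *   #include <fcntl.h>
 *   #include <stdio.h>
 *   #include <unistd.h>
 *
 *   int main(void)
 *   {
 *       unsigned char page[4096];
 *       int fd = open("/dev/mem", O_RDONLY);
 *       // pread's offset becomes *ppos, i.e. the physical address 0
 *       if (fd < 0 || pread(fd, page, sizeof(page), 0) < 0)
 *           return 1;
 *       printf("first bytes: %02x %02x\n", page[0], page[1]);
 *       return 0;
 *   }
 */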

static ssize_t write_mem(struct file * file, const char * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        unsigned long end_mem;

        end_mem = __pa(high_memory);
        if (p >= end_mem)
                return 0;
        if (count > end_mem - p)
                count = end_mem - p;
        return do_write_mem(file, __va(p), p, buf, count, ppos);
}

#ifndef pgprot_noncached

/*
 * This should probably be per-architecture in <asm/pgtable.h>
 */
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
        unsigned long prot = pgprot_val(_prot);

#if defined(__i386__) || defined(__x86_64__)
        /* On PPro and successors, PCD alone doesn't always mean
           uncached because of interactions with the MTRRs. PCD | PWT
           means definitely uncached. */
        if (boot_cpu_data.x86 > 3)
                prot |= _PAGE_PCD | _PAGE_PWT;
#elif defined(__powerpc__)
        prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
#elif defined(__mc68000__)
#ifdef SUN3_PAGE_NOCACHE
        if (MMU_IS_SUN3)
                prot |= SUN3_PAGE_NOCACHE;
        else
#endif
        if (MMU_IS_851 || MMU_IS_030)
                prot |= _PAGE_NOCACHE030;
        /* Use no-cache mode, serialized */
        else if (MMU_IS_040 || MMU_IS_060)
                prot = (prot & _CACHEMASK040) | _PAGE_NOCACHE_S;
#endif

        return __pgprot(prot);
}

#endif /* !pgprot_noncached */

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int noncached_address(unsigned long addr)
{
#if defined(__i386__)
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting PCD or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        return !( test_bit(X86_FEATURE_MTRR, &boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_K6_MTRR, &boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_CYRIX_ARR, &boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_CENTAUR_MCR, &boot_cpu_data.x86_capability) )
                && addr >= __pa(high_memory);
#else
        return addr >= __pa(high_memory);
#endif
}

static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;

#if defined(CONFIG_XENO) && defined(CONFIG_XENO_PRIV)
        if (!(start_info.flags & SIF_PRIVILEGED))
                return -ENXIO;

        /* DONTCOPY is essential for Xenolinux as copy_page_range is broken. */
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        if (direct_remap_area_pages(vma->vm_mm, vma->vm_start, offset,
                                    vma->vm_end-vma->vm_start, vma->vm_page_prot))
                return -EAGAIN;
        return 0;
#elif defined(CONFIG_XENO)
        return -ENXIO;
#else
        /*
         * Accessing memory above the top the kernel knows about or
         * through a file pointer that was marked O_SYNC will be
         * done non-cached.
         */
        if (noncached_address(offset) || (file->f_flags & O_SYNC))
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        /* Don't try to swap out physical pages.. */
        vma->vm_flags |= VM_RESERVED;

        /*
         * Don't dump addresses that are not real memory to a core file.
         */
        if (offset >= __pa(high_memory) || (file->f_flags & O_SYNC))
                vma->vm_flags |= VM_IO;

        if (remap_page_range(vma->vm_start, offset, vma->vm_end-vma->vm_start,
                             vma->vm_page_prot))
                return -EAGAIN;
        return 0;
#endif
}

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t read = 0;
        ssize_t virtr = 0;
        char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

        if (p < (unsigned long) high_memory) {
                read = count;
                if (count > (unsigned long) high_memory - p)
                        read = (unsigned long) high_memory - p;

#if defined(__sparc__) || defined(__mc68000__)
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && read > 0) {
                        size_t tmp = PAGE_SIZE - p;
                        if (tmp > read) tmp = read;
                        if (clear_user(buf, tmp))
                                return -EFAULT;
                        buf += tmp;
                        p += tmp;
                        read -= tmp;
                        count -= tmp;
                }
#endif
                if (copy_to_user(buf, (char *)p, read))
                        return -EFAULT;
                p += read;
                buf += read;
                count -= read;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        len = vread(kbuf, (char *)p, len);
                        if (!len)
                                break;
                        if (copy_to_user(buf, kbuf, len)) {
                                free_page((unsigned long)kbuf);
                                return -EFAULT;
                        }
                        count -= len;
                        buf += len;
                        virtr += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return virtr + read;
}
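
/*
 * [Illustrative sketch -- editor's addition, not part of the original
 * source] Reading a kernel virtual address through /dev/kmem, as the
 * header comment describes. On a 2.4 system the address might be parsed
 * from /proc/ksyms; the 0xc0100000 value here is a hypothetical example.
 * Kernel VAs exceed a 32-bit off_t, hence the large-file define.
 *
 *   #define _FILE_OFFSET_BITS 64
 *   #include <fcntl.h>
 *   #include <stdio.h>
 *   #include <unistd.h>
 *
 *   int main(void)
 *   {
 *       unsigned long addr = 0xc0100000UL;  // e.g. taken from /proc/ksyms
 *       unsigned char b[64];
 *       int fd = open("/dev/kmem", O_RDONLY);
 *       if (fd < 0 || pread(fd, b, sizeof(b), addr) < 0)
 *           return 1;
 *       printf("byte at %#lx: %02x\n", addr, b[0]);
 *       return 0;
 *   }
 */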

extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char * buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

        if (p < (unsigned long) high_memory) {
                wrote = count;
                if (count > (unsigned long) high_memory - p)
                        wrote = (unsigned long) high_memory - p;

                wrote = do_write_mem(file, (void*)p, p, buf, wrote, ppos);

                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        if (len && copy_from_user(kbuf, buf, len)) {
                                free_page((unsigned long)kbuf);
                                return -EFAULT;
                        }
                        len = vwrite(kbuf, (char *)p, len);
                        count -= len;
                        buf += len;
                        virtr += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote;
}

#if defined(CONFIG_ISA) || !defined(__mc68000__)
static ssize_t read_port(struct file * file, char * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char *tmp = buf;

        if (verify_area(VERIFY_WRITE,buf,count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i),tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file * file, const char * buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char * tmp = buf;

        if (verify_area(VERIFY_READ,buf,count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;
                if (__get_user(c, tmp))
                        return -EFAULT;
                outb(c,i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}
#endif
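
/*
 * [Illustrative sketch -- editor's addition, not part of the original
 * source] read_port()/write_port() above back /dev/port, where the file
 * offset selects the I/O port and each byte is one inb()/outb(). Reading
 * the keyboard-controller status port (0x64, a hypothetical choice)
 * requires CAP_SYS_RAWIO, which open_port() below enforces.
 *
 *   #include <fcntl.h>
 *   #include <stdio.h>
 *   #include <unistd.h>
 *
 *   int main(void)
 *   {
 *       unsigned char status;
 *       int fd = open("/dev/port", O_RDONLY);
 *       if (fd < 0 || pread(fd, &status, 1, 0x64) < 0)
 *           return 1;
 *       printf("kbd controller status: %02x\n", status);
 *       return 0;
 *   }
 */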

static ssize_t read_null(struct file * file, char * buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file * file, const char * buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

/*
 * For fun, we are using the MMU for this.
 */
static inline size_t read_zero_pagealigned(char * buf, size_t size)
{
        struct mm_struct *mm;
        struct vm_area_struct * vma;
        unsigned long addr=(unsigned long)buf;

        mm = current->mm;
        /* Oops, this was forgotten before. -ben */
        down_read(&mm->mmap_sem);

        /* For private mappings, just map in zero pages. */
        for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
                unsigned long count;

                if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
                        goto out_up;
                if (vma->vm_flags & VM_SHARED)
                        break;
#if defined(CONFIG_XENO_PRIV)
                if (vma->vm_flags & VM_IO)
                        break;
#endif
                count = vma->vm_end - addr;
                if (count > size)
                        count = size;

                zap_page_range(mm, addr, count);
                zeromap_page_range(addr, count, PAGE_COPY);

                size -= count;
                buf += count;
                addr += count;
                if (size == 0)
                        goto out_up;
        }

        up_read(&mm->mmap_sem);

        /* The shared case is hard. Let's do the conventional zeroing. */
        do {
                unsigned long unwritten = clear_user(buf, PAGE_SIZE);
                if (unwritten)
                        return size + unwritten - PAGE_SIZE;
                if (current->need_resched)
                        schedule();
                buf += PAGE_SIZE;
                size -= PAGE_SIZE;
        } while (size);

        return size;
out_up:
        up_read(&mm->mmap_sem);
        return size;
}

static ssize_t read_zero(struct file * file, char * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long left, unwritten, written = 0;

        if (!count)
                return 0;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;

        left = count;

        /* do we want to be clever? Arbitrary cut-off */
        if (count >= PAGE_SIZE*4) {
                unsigned long partial;

                /* How much left of the page? */
                partial = (PAGE_SIZE-1) & -(unsigned long) buf;
                unwritten = clear_user(buf, partial);
                written = partial - unwritten;
                if (unwritten)
                        goto out;
                left -= partial;
                buf += partial;
                unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
                written += (left & PAGE_MASK) - unwritten;
                if (unwritten)
                        goto out;
                buf += left & PAGE_MASK;
                left &= ~PAGE_MASK;
        }
        unwritten = clear_user(buf, left);
        written += left - unwritten;
out:
        return written ? written : -EFAULT;
}
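
/*
 * [Illustrative sketch -- editor's addition, not part of the original
 * source] Per the PAGE_SIZE*4 cut-off above, any read of four or more
 * pages from /dev/zero takes the read_zero_pagealigned() path, which
 * remaps private pages instead of zeroing byte by byte:
 *
 *   #include <fcntl.h>
 *   #include <unistd.h>
 *
 *   int main(void)
 *   {
 *       static char buf[64 * 1024];   // >= PAGE_SIZE*4, so the MMU path runs
 *       int fd = open("/dev/zero", O_RDONLY);
 *       return (fd < 0 || read(fd, buf, sizeof(buf)) < 0);
 *   }
 */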

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}
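
/*
 * [Illustrative sketch -- editor's addition, not part of the original
 * source] mmap_zero() is what makes the classic pre-MAP_ANONYMOUS idiom
 * work: a MAP_PRIVATE mapping of /dev/zero yields demand-zeroed memory.
 *
 *   #include <fcntl.h>
 *   #include <sys/mman.h>
 *
 *   int main(void)
 *   {
 *       int fd = open("/dev/zero", O_RDWR);
 *       if (fd < 0)
 *           return 1;
 *       char *mem = mmap(0, 1 << 20, PROT_READ | PROT_WRITE,
 *                        MAP_PRIVATE, fd, 0);    // 1 MB, demand-zeroed
 *       return (mem == MAP_FAILED) ? 1 : mem[0]; // mem[0] reads as 0
 *   }
 */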

static ssize_t write_full(struct file * file, const char * buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
 * can fopen() both devices with "a" now. This was previously impossible.
 * -- SRB.
 */

static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value in that
 * case (0) is weird, though.
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
        switch (orig) {
                case 0:
                        file->f_pos = offset;
                        return file->f_pos;
                case 1:
                        file->f_pos += offset;
                        return file->f_pos;
                default:
                        return -EINVAL;
        }
}
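
/*
 * [Illustrative sketch -- editor's addition, not part of the original
 * source] The switch above means SEEK_SET and SEEK_CUR behave normally on
 * the memory devices while SEEK_END is rejected:
 *
 *   #include <assert.h>
 *   #include <errno.h>
 *   #include <fcntl.h>
 *   #include <unistd.h>
 *
 *   int main(void)
 *   {
 *       int fd = open("/dev/mem", O_RDONLY);
 *       if (fd < 0)
 *           return 1;
 *       assert(lseek(fd, 0x1000, SEEK_SET) == 0x1000);
 *       assert(lseek(fd, 0, SEEK_END) == -1 && errno == EINVAL);
 *       return 0;
 *   }
 */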

static int open_port(struct inode * inode, struct file * filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

struct page *kmem_vm_nopage(struct vm_area_struct *vma, unsigned long address, int write)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        unsigned long kaddr;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *ptep, pte;
        struct page *page = NULL;

        /* address is user VA; convert to kernel VA of desired page */
        kaddr = (address - vma->vm_start) + offset;
        kaddr = VMALLOC_VMADDR(kaddr);

        spin_lock(&init_mm.page_table_lock);

        /* Lookup page structure for kernel VA */
        pgd = pgd_offset(&init_mm, kaddr);
        if (pgd_none(*pgd) || pgd_bad(*pgd))
                goto out;
        pmd = pmd_offset(pgd, kaddr);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                goto out;
        ptep = pte_offset(pmd, kaddr);
        if (!ptep)
                goto out;
        pte = *ptep;
        if (!pte_present(pte))
                goto out;
        if (write && !pte_write(pte))
                goto out;
        page = pte_page(pte);
        if (!VALID_PAGE(page)) {
                page = NULL;
                goto out;
        }

        /* Increment reference count on page */
        get_page(page);

out:
        spin_unlock(&init_mm.page_table_lock);

        return page;
}

struct vm_operations_struct kmem_vm_ops = {
        nopage: kmem_vm_nopage,
};

static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        unsigned long size = vma->vm_end - vma->vm_start;

#if defined(CONFIG_XENO)
        return -ENXIO;
#endif

        /*
         * If the user is not attempting to mmap a high memory address then
         * the standard mmap_mem mechanism will work. High memory addresses
         * need special handling, as remap_page_range expects a physically-
         * contiguous range of kernel addresses (such as obtained in kmalloc).
         */
        if ((offset + size) < (unsigned long) high_memory)
                return mmap_mem(file, vma);

        /*
         * Accessing memory above the top the kernel knows about or
         * through a file pointer that was marked O_SYNC will be
         * done non-cached.
         */
        if (noncached_address(offset) || (file->f_flags & O_SYNC))
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        /* Don't do anything here; "nopage" will fill the holes */
        vma->vm_ops = &kmem_vm_ops;

        /* Don't try to swap out physical pages.. */
        vma->vm_flags |= VM_RESERVED;

        /*
         * Don't dump addresses that are not real memory to a core file.
         */
        vma->vm_flags |= VM_IO;

        return 0;
}

#define zero_lseek   null_lseek
#define full_lseek   null_lseek
#define write_zero   write_null
#define read_full    read_zero
#define open_mem     open_port
#define open_kmem    open_mem

static struct file_operations mem_fops = {
        llseek:  memory_lseek,
        read:    read_mem,
        write:   write_mem,
        mmap:    mmap_mem,
        open:    open_mem,
};

static struct file_operations kmem_fops = {
        llseek:  memory_lseek,
        read:    read_kmem,
        write:   write_kmem,
        mmap:    mmap_kmem,
        open:    open_kmem,
};

static struct file_operations null_fops = {
        llseek:  null_lseek,
        read:    read_null,
        write:   write_null,
};

#if defined(CONFIG_ISA) || !defined(__mc68000__)
static struct file_operations port_fops = {
        llseek:  memory_lseek,
        read:    read_port,
        write:   write_port,
        open:    open_port,
};
#endif

static struct file_operations zero_fops = {
        llseek:  zero_lseek,
        read:    read_zero,
        write:   write_zero,
        mmap:    mmap_zero,
};

static struct file_operations full_fops = {
        llseek:  full_lseek,
        read:    read_full,
        write:   write_full,
};
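
/*
 * [Editor's note, added for orientation] memory_open() below dispatches on
 * the character-device minor number to the file_operations defined above:
 * 1 = /dev/mem, 2 = /dev/kmem, 3 = /dev/null, 4 = /dev/port,
 * 5 = /dev/zero, 7 = /dev/full, 8 = /dev/random, 9 = /dev/urandom.
 * Minor 6 (historically /dev/core) is not handled here.
 */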

static int memory_open(struct inode * inode, struct file * filp)
{
        switch (MINOR(inode->i_rdev)) {
                case 1:
                        filp->f_op = &mem_fops;
                        break;
                case 2:
                        filp->f_op = &kmem_fops;
                        break;
                case 3:
                        filp->f_op = &null_fops;
                        break;
#if defined(CONFIG_ISA) || !defined(__mc68000__)
                case 4:
#if defined(CONFIG_XENO)
#if defined(CONFIG_XENO_PRIV)
                        if (!(start_info.flags & SIF_PRIVILEGED))
#endif
                                return -ENXIO;
#endif
                        filp->f_op = &port_fops;
                        break;
#endif
                case 5:
                        filp->f_op = &zero_fops;
                        break;
                case 7:
                        filp->f_op = &full_fops;
                        break;
                case 8:
                        filp->f_op = &random_fops;
                        break;
                case 9:
                        filp->f_op = &urandom_fops;
                        break;
                default:
                        return -ENXIO;
        }
        if (filp->f_op && filp->f_op->open)
                return filp->f_op->open(inode,filp);
        return 0;
}

void __init memory_devfs_register (void)
{
        /* These are never unregistered */
        static const struct {
                unsigned short minor;
                char *name;
                umode_t mode;
                struct file_operations *fops;
        } list[] = { /* list of minor devices */
                {1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
                {2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
                {3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#if defined(CONFIG_ISA) || !defined(__mc68000__)
                {4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
                {5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
                {7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
                {8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
                {9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops}
        };
        int i;

        for (i=0; i<(sizeof(list)/sizeof(*list)); i++)
                devfs_register (NULL, list[i].name, DEVFS_FL_NONE,
                                MEM_MAJOR, list[i].minor,
                                list[i].mode | S_IFCHR,
                                list[i].fops, NULL);
}

static struct file_operations memory_fops = {
        open:    memory_open,   /* just a selector for the real open */
};

int __init chr_dev_init(void)
{
        if (devfs_register_chrdev(MEM_MAJOR,"mem",&memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);
        memory_devfs_register();
        rand_initialize();
#ifdef CONFIG_I2C
        i2c_init_all();
#endif
#if defined (CONFIG_FB)
        fbmem_init();
#endif
#if defined (CONFIG_PROM_CONSOLE)
        prom_con_init();
#endif
#if defined (CONFIG_MDA_CONSOLE)
        mda_console_init();
#endif
        tty_init();
#ifdef CONFIG_M68K_PRINTER
        lp_m68k_init();
#endif
        misc_init();
#if CONFIG_QIC02_TAPE
        qic02_tape_init();
#endif
#ifdef CONFIG_FTAPE
        ftape_init();
#endif
#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
        tapechar_init();
#endif
        return 0;
}

__initcall(chr_dev_init);