xen/include/asm-x86/p2m.h @ 22855:1d1eec7e1fb4

/******************************************************************************
 * include/asm-x86/p2m.h
 *
 * physical-to-machine mappings for automatically-translated domains.
 *
 * Copyright (c) 2007 Advanced Micro Devices (Wei Huang)
 * Parts of this code are Copyright (c) 2006-2007 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef _XEN_P2M_H
#define _XEN_P2M_H

#include <xen/config.h>
#include <xen/paging.h>
#include <asm/mem_sharing.h>
#include <asm/page.h>    /* for pagetable_t */

/*
 * The phys_to_machine_mapping maps guest physical frame numbers
 * to machine frame numbers.  It only exists for paging_mode_translate
 * guests.  It is organised in page-table format, which:
 *
 * (1) allows us to use it directly as the second pagetable in hardware-
 *     assisted paging and (hopefully) iommu support; and
 * (2) lets us map it directly into the guest vcpus' virtual address space
 *     as a linear pagetable, so we can read and write it easily.
 *
 * For (2) we steal the address space that would have normally been used
 * by the read-only MPT map in a non-translated guest.  (For
 * paging_mode_external() guests this mapping is in the monitor table.)
 */
#define phys_to_machine_mapping ((l1_pgentry_t *)RO_MPT_VIRT_START)

#ifdef __x86_64__
#define HAVE_GRANT_MAP_P2M
#endif
/*
 * The upper levels of the p2m pagetable always contain full rights; all
 * variation in the access control bits is made in the level-1 PTEs.
 *
 * In addition to the phys-to-machine translation, each p2m PTE contains
 * *type* information about the gfn it translates, helping Xen to decide
 * on the correct course of action when handling a page-fault to that
 * guest frame.  We store the type in the "available" bits of the PTEs
 * in the table, which gives us 8 possible types on 32-bit systems.
 * Further expansions of the type system will only be supported on
 * 64-bit Xen.
 */
typedef enum {
    p2m_invalid = 0,            /* Nothing mapped here */
    p2m_ram_rw = 1,             /* Normal read/write guest RAM */
    p2m_ram_logdirty = 2,       /* Temporarily read-only for log-dirty */
    p2m_ram_ro = 3,             /* Read-only; writes are silently dropped */
    p2m_mmio_dm = 4,            /* Reads and writes go to the device model */
    p2m_mmio_direct = 5,        /* Read/write mapping of genuine MMIO area */
    p2m_populate_on_demand = 6, /* Place-holder for empty memory */

    /* Note that these can only be used if HAVE_GRANT_MAP_P2M is
       defined.  They get defined anyway so as to avoid lots of
       #ifdef's everywhere else. */
    p2m_grant_map_rw = 7,       /* Read/write grant mapping */
    p2m_grant_map_ro = 8,       /* Read-only grant mapping */

    /* Likewise, although these are defined in all builds, they can only
     * be used in 64-bit builds */
    p2m_ram_paging_out = 9,       /* Memory that is being paged out */
    p2m_ram_paged = 10,           /* Memory that has been paged out */
    p2m_ram_paging_in = 11,       /* Memory that is being paged in */
    p2m_ram_paging_in_start = 12, /* Memory that is being paged in */
    p2m_ram_shared = 13,          /* Shared or sharable memory */
    p2m_ram_broken = 14,          /* Broken page; access causes domain crash */
} p2m_type_t;
/*
 * Additional access types, which are used to further restrict
 * the permissions given by the p2m_type_t memory type.  Violations
 * caused by p2m_access_t restrictions are sent to the mem_event
 * interface.
 *
 * The access permissions are soft state: when any ambiguous change of page
 * type or use occurs, or when pages are flushed, swapped, or at any other
 * convenient time, the access permissions can get reset to the p2m_domain
 * default.
 */
typedef enum {
    p2m_access_n = 0,     /* No access permissions allowed */
    p2m_access_r = 1,
    p2m_access_w = 2,
    p2m_access_rw = 3,
    p2m_access_x = 4,
    p2m_access_rx = 5,
    p2m_access_wx = 6,
    p2m_access_rwx = 7,
    p2m_access_rx2rw = 8, /* Special: page goes from RX to RW on write */

    /* NOTE: Assumed to be only 4 bits right now */
} p2m_access_t;
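
/*
 * Minimal illustration (hypothetical helper, not part of the interface
 * above): for the plain p2m_access_n..p2m_access_rwx encodings the enum
 * value also works as an r/w/x bitmask, so simple permission checks can be
 * written as bit tests.  p2m_access_rx2rw is special and is assumed to be
 * handled separately by the fault path.
 */
static inline int example_access_allows_write(p2m_access_t a)
{
    /* Bit 1 means "write permitted"; only meaningful for values 0..7. */
    return (a <= p2m_access_rwx) && (a & p2m_access_w);
}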
typedef enum {
    p2m_query = 0,              /* Do not populate PoD entries */
    p2m_alloc = 1,              /* Automatically populate PoD entries */
    p2m_guest = 2,              /* Guest demand-fault; implies alloc */
} p2m_query_t;

/* We use bitmaps and masks to handle groups of types */
#define p2m_to_mask(_t) (1UL << (_t))
/* RAM types, which map to real machine frames */
#define P2M_RAM_TYPES (p2m_to_mask(p2m_ram_rw)                \
                       | p2m_to_mask(p2m_ram_logdirty)        \
                       | p2m_to_mask(p2m_ram_ro)              \
                       | p2m_to_mask(p2m_ram_paging_out)      \
                       | p2m_to_mask(p2m_ram_paged)           \
                       | p2m_to_mask(p2m_ram_paging_in_start) \
                       | p2m_to_mask(p2m_ram_paging_in)       \
                       | p2m_to_mask(p2m_ram_shared))

/* Grant mapping types, which map to a real machine frame in another
 * VM */
#define P2M_GRANT_TYPES (p2m_to_mask(p2m_grant_map_rw)  \
                         | p2m_to_mask(p2m_grant_map_ro) )

/* MMIO types, which don't have to map to anything in the frametable */
#define P2M_MMIO_TYPES (p2m_to_mask(p2m_mmio_dm)        \
                        | p2m_to_mask(p2m_mmio_direct))

/* Read-only types, which must have the _PAGE_RW bit clear in their PTEs */
#define P2M_RO_TYPES (p2m_to_mask(p2m_ram_logdirty)     \
                      | p2m_to_mask(p2m_ram_ro)         \
                      | p2m_to_mask(p2m_grant_map_ro)   \
                      | p2m_to_mask(p2m_ram_shared) )

#define P2M_MAGIC_TYPES (p2m_to_mask(p2m_populate_on_demand))

/* Pageable types */
#define P2M_PAGEABLE_TYPES (p2m_to_mask(p2m_ram_rw))

#define P2M_PAGING_TYPES (p2m_to_mask(p2m_ram_paging_out)        \
                          | p2m_to_mask(p2m_ram_paged)           \
                          | p2m_to_mask(p2m_ram_paging_in_start) \
                          | p2m_to_mask(p2m_ram_paging_in))

#define P2M_PAGED_TYPES (p2m_to_mask(p2m_ram_paged))

/* Shared types */
/* XXX: Sharable types could include p2m_ram_ro too, but we would need to
 * reinit the type correctly after fault */
#define P2M_SHARABLE_TYPES (p2m_to_mask(p2m_ram_rw))
#define P2M_SHARED_TYPES   (p2m_to_mask(p2m_ram_shared))
#define P2M_BROKEN_TYPES   (p2m_to_mask(p2m_ram_broken))

/* Useful predicates */
#define p2m_is_ram(_t)      (p2m_to_mask(_t) & P2M_RAM_TYPES)
#define p2m_is_mmio(_t)     (p2m_to_mask(_t) & P2M_MMIO_TYPES)
#define p2m_is_readonly(_t) (p2m_to_mask(_t) & P2M_RO_TYPES)
#define p2m_is_magic(_t)    (p2m_to_mask(_t) & P2M_MAGIC_TYPES)
#define p2m_is_grant(_t)    (p2m_to_mask(_t) & P2M_GRANT_TYPES)
/* Grant types are *not* considered valid, because they can be
   unmapped at any time and, unless you happen to be the shadow or p2m
   implementations, there's no way of synchronising against that. */
#define p2m_is_valid(_t)    (p2m_to_mask(_t) & (P2M_RAM_TYPES | P2M_MMIO_TYPES))
#define p2m_has_emt(_t)     (p2m_to_mask(_t) & (P2M_RAM_TYPES | p2m_to_mask(p2m_mmio_direct)))
#define p2m_is_pageable(_t) (p2m_to_mask(_t) & P2M_PAGEABLE_TYPES)
#define p2m_is_paging(_t)   (p2m_to_mask(_t) & P2M_PAGING_TYPES)
#define p2m_is_paged(_t)    (p2m_to_mask(_t) & P2M_PAGED_TYPES)
#define p2m_is_sharable(_t) (p2m_to_mask(_t) & P2M_SHARABLE_TYPES)
#define p2m_is_shared(_t)   (p2m_to_mask(_t) & P2M_SHARED_TYPES)
#define p2m_is_broken(_t)   (p2m_to_mask(_t) & P2M_BROKEN_TYPES)
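
/*
 * Minimal illustration (hypothetical helper, not part of the original
 * interface): the predicates above are plain bitmask tests, so they compose
 * cheaply.  For example, a caller holding a p2m_type_t can ask whether a
 * guest write to such a frame would fault rather than hit memory.
 */
static inline int example_write_would_fault(p2m_type_t t)
{
    /* Read-only types keep _PAGE_RW clear, and emulated MMIO (p2m_mmio_dm)
     * never has a writable mapping; both make a guest write fault. */
    return p2m_is_readonly(t) || (p2m_to_mask(t) & p2m_to_mask(p2m_mmio_dm));
}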
/* Populate-on-demand */
#define POPULATE_ON_DEMAND_MFN  (1<<9)
#define POD_PAGE_ORDER 9

#define PAGING_MFN  INVALID_MFN

struct p2m_domain {
    /* Lock that protects updates to the p2m */
    spinlock_t         lock;
    int                locker;   /* processor which holds the lock */
    const char        *locker_function; /* Func that took it */

    /* Shadow translated domain: p2m mapping */
    pagetable_t        phys_table;

    struct domain     *domain;   /* back pointer to domain */

    /* Pages used to construct the p2m */
    struct page_list_head pages;

    int                (*set_entry   )(struct p2m_domain *p2m,
                                       unsigned long gfn,
                                       mfn_t mfn, unsigned int page_order,
                                       p2m_type_t p2mt,
                                       p2m_access_t p2ma);
    mfn_t              (*get_entry   )(struct p2m_domain *p2m,
                                       unsigned long gfn,
                                       p2m_type_t *p2mt,
                                       p2m_access_t *p2ma,
                                       p2m_query_t q);
    mfn_t              (*get_entry_current)(struct p2m_domain *p2m,
                                            unsigned long gfn,
                                            p2m_type_t *p2mt,
                                            p2m_access_t *p2ma,
                                            p2m_query_t q);
    void               (*change_entry_type_global)(struct p2m_domain *p2m,
                                                   p2m_type_t ot,
                                                   p2m_type_t nt);

    /* Default P2M access type for each page in the domain: new pages,
     * swapped in pages, cleared pages, and pages that are ambiguously
     * retyped get this access type.  See definition of p2m_access_t. */
    p2m_access_t default_access;

    /* If true, and an access fault comes in and there is no mem_event listener,
     * pause domain.  Otherwise, remove access restrictions. */
    bool_t       access_required;

    /* Highest guest frame that's ever been mapped in the p2m */
    unsigned long max_mapped_pfn;

    /* Populate-on-demand variables
     * NB on locking.  {super,single,count} are
     * covered by d->page_alloc_lock, since they're almost always used in
     * conjunction with that functionality.  {entry_count} is covered by
     * the domain p2m lock, since it's almost always used in conjunction
     * with changing the p2m tables.
     *
     * At this point, both locks are held in two places.  In both,
     * the order is [p2m,page_alloc]:
     * + p2m_pod_decrease_reservation() calls p2m_pod_cache_add(),
     *   which grabs page_alloc
     * + p2m_pod_demand_populate() grabs both; the p2m lock to avoid
     *   double-demand-populating of pages, the page_alloc lock to
     *   protect moving stuff from the PoD cache to the domain page list.
     */
    struct {
        struct page_list_head super,    /* List of superpages */
                              single;   /* Non-super lists */
        int                   count,    /* # of pages in cache lists */
                              entry_count; /* # of pages in p2m marked pod */
        unsigned              reclaim_super;  /* Last gpfn of a scan */
        unsigned              reclaim_single; /* Last gpfn of a scan */
        unsigned              max_guest; /* gpfn of max guest demand-populate */
    } pod;
};

/* get host p2m table */
#define p2m_get_hostp2m(d)      ((d)->arch.p2m)

#define p2m_get_pagetable(p2m)  ((p2m)->phys_table)
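
/*
 * Usage sketch (illustrative helper, not from the original source): callers
 * usually reach the p2m through its owning domain and then use the accessor
 * macros above; pagetable_get_pfn() comes from <asm/page.h>, included above.
 */
static inline unsigned long example_p2m_root_mfn(struct domain *d)
{
    /* Machine frame number of the root of the host p2m pagetable. */
    return pagetable_get_pfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
}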
/*
 * The P2M lock.  This protects all updates to the p2m table.
 * Updates are expected to be safe against concurrent reads,
 * which do *not* require the lock.
 *
 * Locking discipline: always acquire this lock before the shadow or HAP one
 */

#define p2m_lock_init(_p2m)                     \
    do {                                        \
        spin_lock_init(&(_p2m)->lock);          \
        (_p2m)->locker = -1;                    \
        (_p2m)->locker_function = "nobody";     \
    } while (0)

#define p2m_lock(_p2m)                                          \
    do {                                                        \
        if ( unlikely((_p2m)->locker == current->processor) )   \
        {                                                       \
            printk("Error: p2m lock held by %s\n",              \
                   (_p2m)->locker_function);                    \
            BUG();                                              \
        }                                                       \
        spin_lock(&(_p2m)->lock);                               \
        ASSERT((_p2m)->locker == -1);                           \
        (_p2m)->locker = current->processor;                    \
        (_p2m)->locker_function = __func__;                     \
    } while (0)

#define p2m_unlock(_p2m)                                \
    do {                                                \
        ASSERT((_p2m)->locker == current->processor);   \
        (_p2m)->locker = -1;                            \
        (_p2m)->locker_function = "nobody";             \
        spin_unlock(&(_p2m)->lock);                     \
    } while (0)

#define p2m_locked_by_me(_p2m)                 \
    (current->processor == (_p2m)->locker)
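
/*
 * Typical usage pattern (sketch of a hypothetical update path): writers
 * take the p2m lock around table modifications; readers of the table do not
 * need it (see the comment above), but cannot assume entries stay stable.
 */
static inline void example_locked_update(struct p2m_domain *p2m)
{
    p2m_lock(p2m);
    ASSERT(p2m_locked_by_me(p2m));
    /* ... modify entries via p2m->set_entry() or similar here ... */
    p2m_unlock(p2m);
}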
/* Extract the type from the PTE flags that store it */
static inline p2m_type_t p2m_flags_to_type(unsigned long flags)
{
    /* Type is stored in the "available" bits */
#ifdef __x86_64__
    return (flags >> 9) & 0x3fff;
#else
    return (flags >> 9) & 0x7;
#endif
}
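
/*
 * For illustration only: the inverse operation (packing a type into PTE
 * flags) lives in the p2m implementation rather than in this header.  A
 * simplified sketch of the encoding that p2m_flags_to_type() undoes:
 */
static inline unsigned long example_type_to_flags(p2m_type_t t)
{
    /* The type occupies the "available" bits starting at bit 9; the real
     * code also sets access and cacheability bits appropriate to the type. */
    return (unsigned long)t << 9;
}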
/* Read the current domain's p2m table.  Do not populate PoD pages. */
static inline mfn_t gfn_to_mfn_type_current(struct p2m_domain *p2m,
                                            unsigned long gfn, p2m_type_t *t,
                                            p2m_access_t *a,
                                            p2m_query_t q)
{
    return p2m->get_entry_current(p2m, gfn, t, a, q);
}

/* Read P2M table, mapping pages as we go.
 * Do not populate PoD pages. */
static inline mfn_t
gfn_to_mfn_type_p2m(struct p2m_domain *p2m, unsigned long gfn,
                    p2m_type_t *t, p2m_query_t q)
{
    p2m_access_t a = 0;
    return p2m->get_entry(p2m, gfn, t, &a, q);
}

/* General conversion function from gfn to mfn */
static inline mfn_t _gfn_to_mfn_type(struct p2m_domain *p2m,
                                     unsigned long gfn, p2m_type_t *t,
                                     p2m_query_t q)
{
    mfn_t mfn;
    p2m_access_t a;

    if ( !p2m || !paging_mode_translate(p2m->domain) )
    {
        /* Not necessarily true, but for non-translated guests, we claim
         * it's the most generic kind of memory */
        *t = p2m_ram_rw;
        mfn = _mfn(gfn);
    }
    else if ( likely(current->domain == p2m->domain) )
        mfn = gfn_to_mfn_type_current(p2m, gfn, t, &a, q);
    else
        mfn = gfn_to_mfn_type_p2m(p2m, gfn, t, q);

#ifdef __x86_64__
    if (unlikely((p2m_is_broken(*t))))
    {
        /* Return invalid_mfn to avoid caller's access */
        mfn = _mfn(INVALID_MFN);
        if (q == p2m_guest)
            domain_crash(p2m->domain);
    }
#endif

    return mfn;
}

#define gfn_to_mfn(p2m, g, t)       _gfn_to_mfn_type((p2m), (g), (t), p2m_alloc)
#define gfn_to_mfn_query(p2m, g, t) _gfn_to_mfn_type((p2m), (g), (t), p2m_query)
#define gfn_to_mfn_guest(p2m, g, t) _gfn_to_mfn_type((p2m), (g), (t), p2m_guest)
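
/*
 * Usage sketch (hypothetical caller, for illustration): a lookup that must
 * not disturb populate-on-demand entries uses the _query variant and checks
 * the returned type before trusting the mfn.
 */
static inline int example_gfn_has_valid_mapping(struct p2m_domain *p2m,
                                                unsigned long gfn)
{
    p2m_type_t t;
    mfn_t mfn = gfn_to_mfn_query(p2m, gfn, &t);

    /* Grant types are deliberately excluded by p2m_is_valid(); see above. */
    return p2m_is_valid(t) && mfn_x(mfn) != INVALID_MFN;
}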
static inline mfn_t gfn_to_mfn_unshare(struct p2m_domain *p2m,
                                       unsigned long gfn,
                                       p2m_type_t *p2mt,
                                       int must_succeed)
{
    mfn_t mfn;

    mfn = gfn_to_mfn(p2m, gfn, p2mt);
#ifdef __x86_64__
    if ( p2m_is_shared(*p2mt) )
    {
        if ( mem_sharing_unshare_page(p2m, gfn,
                                      must_succeed
                                      ? MEM_SHARING_MUST_SUCCEED : 0) )
        {
            BUG_ON(must_succeed);
            return mfn;
        }
        mfn = gfn_to_mfn(p2m, gfn, p2mt);
    }
#endif

    return mfn;
}

/* Compatibility function exporting the old untyped interface */
static inline unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
{
    mfn_t mfn;
    p2m_type_t t;
    mfn = gfn_to_mfn(d->arch.p2m, gpfn, &t);
    if ( p2m_is_valid(t) )
        return mfn_x(mfn);
    return INVALID_MFN;
}
/* General conversion function from mfn to gfn */
static inline unsigned long mfn_to_gfn(struct domain *d, mfn_t mfn)
{
    if ( paging_mode_translate(d) )
        return get_gpfn_from_mfn(mfn_x(mfn));
    else
        return mfn_x(mfn);
}

/* Init the datastructures for later use by the p2m code */
int p2m_init(struct domain *d);

/* Allocate a new p2m table for a domain.
 *
 * Returns 0 for success or -errno. */
int p2m_alloc_table(struct p2m_domain *p2m);

/* Return all the p2m resources to Xen. */
void p2m_teardown(struct p2m_domain *p2m);
void p2m_final_teardown(struct domain *d);

/* Dump PoD information about the domain */
void p2m_pod_dump_data(struct p2m_domain *p2m);

/* Move all pages from the populate-on-demand cache to the domain page_list
 * (usually in preparation for domain destruction) */
void p2m_pod_empty_cache(struct domain *d);

/* Set populate-on-demand cache size so that the total memory allocated to a
 * domain matches target */
int p2m_pod_set_mem_target(struct domain *d, unsigned long target);

/* Call when decreasing memory reservation to handle PoD entries properly.
 * Will return '1' if all entries were handled and nothing more need be done.*/
int
p2m_pod_decrease_reservation(struct domain *d,
                             xen_pfn_t gpfn,
                             unsigned int order);

/* Called by p2m code when demand-populating a PoD page */
int
p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn,
                        unsigned int order,
                        p2m_query_t q);

/* Add a page to a domain's p2m table */
int guest_physmap_add_entry(struct p2m_domain *p2m, unsigned long gfn,
                            unsigned long mfn, unsigned int page_order,
                            p2m_type_t t);

/* Remove a page from a domain's p2m table */
void guest_physmap_remove_entry(struct p2m_domain *p2m, unsigned long gfn,
                                unsigned long mfn, unsigned int page_order);

/* Set a p2m range as populate-on-demand */
int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
                                          unsigned int order);

/* Untyped version for RAM only, for compatibility
 *
 * Return 0 for success
 */
static inline int guest_physmap_add_page(struct domain *d,
                                         unsigned long gfn,
                                         unsigned long mfn,
                                         unsigned int page_order)
{
    return guest_physmap_add_entry(d->arch.p2m, gfn, mfn, page_order, p2m_ram_rw);
}
/* Remove a page from a domain's p2m table */
static inline void guest_physmap_remove_page(struct domain *d,
                                             unsigned long gfn,
                                             unsigned long mfn,
                                             unsigned int page_order)
{
    guest_physmap_remove_entry(d->arch.p2m, gfn, mfn, page_order);
}

/* Change types across all p2m entries in a domain */
void p2m_change_type_global(struct p2m_domain *p2m, p2m_type_t ot, p2m_type_t nt);
void p2m_change_entry_type_global(struct p2m_domain *p2m, p2m_type_t ot, p2m_type_t nt);

/* Compare-exchange the type of a single p2m entry */
p2m_type_t p2m_change_type(struct p2m_domain *p2m, unsigned long gfn,
                           p2m_type_t ot, p2m_type_t nt);

/* Set mmio addresses in the p2m table (for pass-through) */
int set_mmio_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn);
int clear_mmio_p2m_entry(struct p2m_domain *p2m, unsigned long gfn);

#ifdef __x86_64__
/* Modify p2m table for shared gfn */
int set_shared_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn);

/* Check if a nominated gfn is valid to be paged out */
int p2m_mem_paging_nominate(struct p2m_domain *p2m, unsigned long gfn);
/* Evict a frame */
int p2m_mem_paging_evict(struct p2m_domain *p2m, unsigned long gfn);
/* Tell xenpaging to drop a paged out frame */
void p2m_mem_paging_drop_page(struct p2m_domain *p2m, unsigned long gfn);
/* Start populating a paged out frame */
void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn);
/* Prepare the p2m for paging a frame in */
int p2m_mem_paging_prep(struct p2m_domain *p2m, unsigned long gfn);
/* Resume normal operation (in case a domain was paused) */
void p2m_mem_paging_resume(struct p2m_domain *p2m);
#else
static inline void p2m_mem_paging_drop_page(struct p2m_domain *p2m, unsigned long gfn)
{ }
static inline void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn)
{ }
#endif
#ifdef __x86_64__
/* Send mem event based on the access (gla is -1ull if not available).  Handles
 * the rx2rw conversion */
void p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla,
                          bool_t access_r, bool_t access_w, bool_t access_x);
/* Resumes the running of the VCPU, restarting the last instruction */
void p2m_mem_access_resume(struct p2m_domain *p2m);
#else
static inline void p2m_mem_access_check(unsigned long gpa, bool_t gla_valid,
                                        unsigned long gla, bool_t access_r,
                                        bool_t access_w, bool_t access_x)
{ }
#endif
struct page_info *p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type);
void p2m_free_ptp(struct p2m_domain *p2m, struct page_info *pg);

#endif /* _XEN_P2M_H */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */