debuggers.hg
changeset 16554:aa430556d33f
Merge with ia64.
| author | Keir Fraser <keir.fraser@citrix.com> |
|---|---|
| date | Tue Dec 04 11:06:05 2007 +0000 (2007-12-04) |
| parents | 32ec5dbe2978 0c234da66b4a |
| children | 62717554d4cb |
| files | |
line diff
1.1 --- a/Makefile Fri Nov 30 08:54:33 2007 -0700 1.2 +++ b/Makefile Tue Dec 04 11:06:05 2007 +0000 1.3 @@ -211,7 +211,7 @@ linux26: 1.4 # tboot targets 1.5 # 1.6 1.7 -TBOOT_TARFILE = tboot-20071029.tar.gz 1.8 +TBOOT_TARFILE = tboot-20071128.tar.gz 1.9 TBOOT_BASE_URL = http://downloads.sourceforge.net/tboot 1.10 1.11 .PHONY: build-tboot 1.12 @@ -222,6 +222,10 @@ build-tboot: download_tboot 1.13 install-tboot: download_tboot 1.14 $(MAKE) -C tboot install 1.15 1.16 +.PHONY: dist-tboot 1.17 +dist-tboot: download_tboot 1.18 + $(MAKE) DESTDIR=$(DISTDIR)/install -C tboot dist 1.19 + 1.20 .PHONY: clean-tboot 1.21 clean-tboot: 1.22 [ ! -d tboot ] || $(MAKE) -C tboot clean
2.1 --- a/extras/mini-os/arch/x86/arch.mk Fri Nov 30 08:54:33 2007 -0700 2.2 +++ b/extras/mini-os/arch/x86/arch.mk Tue Dec 04 11:06:05 2007 +0000 2.3 @@ -17,7 +17,7 @@ endif 2.4 endif 2.5 2.6 ifeq ($(TARGET_ARCH),x86_64) 2.7 -ARCH_CFLAGS := -m64 -mno-red-zone -fpic -fno-reorder-blocks 2.8 +ARCH_CFLAGS := -m64 -mno-red-zone -fno-reorder-blocks 2.9 ARCH_CFLAGS += -fno-asynchronous-unwind-tables 2.10 ARCH_ASFLAGS := -m64 2.11 ARCH_LDFLAGS := -m elf_x86_64
3.1 --- a/tools/blktap/drivers/blktapctrl.c Fri Nov 30 08:54:33 2007 -0700 3.2 +++ b/tools/blktap/drivers/blktapctrl.c Tue Dec 04 11:06:05 2007 +0000 3.3 @@ -41,7 +41,6 @@ 3.4 #include <err.h> 3.5 #include <errno.h> 3.6 #include <sys/types.h> 3.7 -#include <linux/types.h> 3.8 #include <sys/wait.h> 3.9 #include <signal.h> 3.10 #include <fcntl.h> 3.11 @@ -50,7 +49,6 @@ 3.12 #include <string.h> 3.13 #include <unistd.h> 3.14 #include <xs.h> 3.15 -#include <printf.h> 3.16 #include <sys/time.h> 3.17 #include <syslog.h> 3.18
4.1 --- a/tools/blktap/drivers/bswap.h Fri Nov 30 08:54:33 2007 -0700 4.2 +++ b/tools/blktap/drivers/bswap.h Tue Dec 04 11:06:05 2007 +0000 4.3 @@ -5,6 +5,11 @@ 4.4 4.5 #include <inttypes.h> 4.6 4.7 +#if defined(__NetBSD__) || defined(__OpenBSD__) 4.8 +#include <sys/endian.h> 4.9 +#include <sys/types.h> 4.10 +#else 4.11 + 4.12 #ifdef HAVE_BYTESWAP_H 4.13 #include <byteswap.h> 4.14 #else 4.15 @@ -73,6 +78,8 @@ static inline void bswap64s(uint64_t *s) 4.16 *s = bswap64(*s); 4.17 } 4.18 4.19 +#endif 4.20 + 4.21 #if defined(WORDS_BIGENDIAN) 4.22 #define be_bswap(v, size) (v) 4.23 #define le_bswap(v, size) bswap ## size(v)
5.1 --- a/tools/blktap/drivers/qcow-create.c Fri Nov 30 08:54:33 2007 -0700 5.2 +++ b/tools/blktap/drivers/qcow-create.c Tue Dec 04 11:06:05 2007 +0000 5.3 @@ -37,7 +37,6 @@ 5.4 #include <sys/statvfs.h> 5.5 #include <sys/stat.h> 5.6 #include <sys/ioctl.h> 5.7 -#include <linux/fs.h> 5.8 #include <string.h> 5.9 #include "tapdisk.h" 5.10
6.1 --- a/tools/blktap/drivers/tapdisk.c Fri Nov 30 08:54:33 2007 -0700 6.2 +++ b/tools/blktap/drivers/tapdisk.c Tue Dec 04 11:06:05 2007 +0000 6.3 @@ -27,7 +27,6 @@ 6.4 #include <poll.h> 6.5 #include <sys/statvfs.h> 6.6 #include <sys/ioctl.h> 6.7 -#include <linux/fs.h> 6.8 #include "blktaplib.h" 6.9 #include "tapdisk.h" 6.10
7.1 --- a/tools/blktap/lib/xenbus.c Fri Nov 30 08:54:33 2007 -0700 7.2 +++ b/tools/blktap/lib/xenbus.c Tue Dec 04 11:06:05 2007 +0000 7.3 @@ -37,7 +37,6 @@ 7.4 7.5 #include <stdio.h> 7.6 #include <stdlib.h> 7.7 -#include <printf.h> 7.8 #include <string.h> 7.9 #include <err.h> 7.10 #include <stdarg.h>
8.1 --- a/tools/blktap/lib/xs_api.c Fri Nov 30 08:54:33 2007 -0700 8.2 +++ b/tools/blktap/lib/xs_api.c Tue Dec 04 11:06:05 2007 +0000 8.3 @@ -34,7 +34,6 @@ 8.4 8.5 #include <stdio.h> 8.6 #include <stdlib.h> 8.7 -#include <printf.h> 8.8 #include <string.h> 8.9 #include <err.h> 8.10 #include <stdarg.h>
9.1 --- a/tools/libxc/xc_dom_core.c Fri Nov 30 08:54:33 2007 -0700 9.2 +++ b/tools/libxc/xc_dom_core.c Tue Dec 04 11:06:05 2007 +0000 9.3 @@ -352,15 +352,19 @@ void *xc_dom_pfn_to_ptr(struct xc_dom_im 9.4 } 9.5 else 9.6 { 9.7 + int err; 9.8 + 9.9 mode = "anonymous memory"; 9.10 phys->ptr = mmap(NULL, phys->count << page_shift, 9.11 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, 9.12 -1, 0); 9.13 if ( phys->ptr == MAP_FAILED ) 9.14 { 9.15 + err = errno; 9.16 xc_dom_panic(XC_OUT_OF_MEMORY, 9.17 - "%s: oom: can't allocate 0x%" PRIpfn " pages\n", 9.18 - __FUNCTION__, count); 9.19 + "%s: oom: can't allocate 0x%" PRIpfn " pages" 9.20 + " [mmap, errno=%i (%s)]\n", 9.21 + __FUNCTION__, count, err, strerror(err)); 9.22 return NULL; 9.23 } 9.24 dom->alloc_mem_map += phys->count << page_shift;
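The xc_dom_core.c hunk saves errno into a local immediately after mmap() fails, before calling xc_dom_panic(), because the logging path may itself clobber errno. A minimal standalone sketch of that save-errno-first pattern; the log_error() helper is hypothetical and stands in for xc_dom_panic():

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/* Hypothetical logging helper standing in for xc_dom_panic(); any call
 * made while logging may overwrite errno, so the caller captures it first. */
static void log_error(const char *msg, int err)
{
    fprintf(stderr, "%s [errno=%i (%s)]\n", msg, err, strerror(err));
}

static void *alloc_anon(size_t count, int page_shift)
{
    void *p = mmap(NULL, count << page_shift, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANON, -1, 0);
    if ( p == MAP_FAILED )
    {
        int err = errno;                 /* save before any other call */
        log_error("oom: can't allocate anonymous memory", err);
        return NULL;
    }
    return p;
}
```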
10.1 --- a/tools/python/xen/xend/XendNetwork.py Fri Nov 30 08:54:33 2007 -0700 10.2 +++ b/tools/python/xen/xend/XendNetwork.py Tue Dec 04 11:06:05 2007 +0000 10.3 @@ -65,7 +65,7 @@ class XendNetwork(XendBase): 10.4 return XendBase.getMethods() + methods 10.5 10.6 def getFuncs(self): 10.7 - funcs = ['create'] 10.8 + funcs = ['create', 'get_by_name_label'] 10.9 return XendBase.getFuncs() + funcs 10.10 10.11 getClass = classmethod(getClass) 10.12 @@ -133,9 +133,15 @@ class XendNetwork(XendBase): 10.13 10.14 return uuid 10.15 10.16 - create_phy = classmethod(create_phy) 10.17 - recreate = classmethod(recreate) 10.18 - create = classmethod(create) 10.19 + def get_by_name_label(cls, name): 10.20 + return [inst.get_uuid() 10.21 + for inst in XendAPIStore.get_all(cls.getClass()) 10.22 + if inst.get_name_label() == name] 10.23 + 10.24 + create_phy = classmethod(create_phy) 10.25 + recreate = classmethod(recreate) 10.26 + create = classmethod(create) 10.27 + get_by_name_label = classmethod(get_by_name_label) 10.28 10.29 def __init__(self, record, uuid): 10.30 XendBase.__init__(self, uuid, record)
11.1 --- a/tools/vnet/libxutil/Makefile Fri Nov 30 08:54:33 2007 -0700 11.2 +++ b/tools/vnet/libxutil/Makefile Tue Dec 04 11:06:05 2007 +0000 11.3 @@ -24,7 +24,7 @@ LIB_SRCS += util.c 11.4 LIB_OBJS := $(LIB_SRCS:.c=.o) 11.5 PIC_OBJS := $(LIB_SRCS:.c=.opic) 11.6 11.7 -CFLAGS += -Werror -fno-strict-aliasing 11.8 +CFLAGS += -Werror -fno-strict-aliasing $(call cc-option,$(CC),-fgnu89-inline,) 11.9 CFLAGS += -O3 11.10 #CFLAGS += -g 11.11
12.1 --- a/tools/vnet/vnet-module/Makefile.ver Fri Nov 30 08:54:33 2007 -0700 12.2 +++ b/tools/vnet/vnet-module/Makefile.ver Tue Dec 04 11:06:05 2007 +0000 12.3 @@ -20,14 +20,14 @@ 12.4 12.5 LINUX_SERIES?=2.6 12.6 12.7 -LINUX_VERSION?=$(shell (/bin/ls -d $(XEN_ROOT)/pristine-linux-$(LINUX_SERIES).* 2>/dev/null) | \ 12.8 - sed -e 's!^.*linux-\(.\+\)!\1!' ) 12.9 +LINUX_VERSION?=$(shell (/bin/ls -d $(XEN_ROOT)/linux-$(LINUX_SERIES).* 2>/dev/null) | \ 12.10 + sed -e 's!^.*linux-\(.\+\).hg!\1!' ) 12.11 12.12 ifeq ($(LINUX_VERSION),) 12.13 $(error Kernel source for linux $(LINUX_SERIES) not found) 12.14 endif 12.15 12.16 -KERNEL_VERSION?=$(shell (/bin/ls -d $(XEN_ROOT)/build-linux-$(LINUX_VERSION)-xen* 2>/dev/null) | \ 12.17 +KERNEL_VERSION?=$(shell (/bin/ls -d $(XEN_ROOT)/build-linux-$(LINUX_VERSION)* 2>/dev/null) | \ 12.18 grep -v -m 1 -e '-xenU' | \ 12.19 sed -e 's!^.*linux-\(.\+\)!\1!' ) 12.20
13.1 --- a/tools/vnet/vnet-module/sa.c Fri Nov 30 08:54:33 2007 -0700 13.2 +++ b/tools/vnet/vnet-module/sa.c Tue Dec 04 11:06:05 2007 +0000 13.3 @@ -16,7 +16,6 @@ 13.4 * 59 Temple Place, suite 330, Boston, MA 02111-1307 USA 13.5 * 13.6 */ 13.7 -#include <linux/config.h> 13.8 #include <linux/kernel.h> 13.9 13.10 #include <tunnel.h>
14.1 --- a/tools/vnet/vnet-module/varp_socket.c Fri Nov 30 08:54:33 2007 -0700 14.2 +++ b/tools/vnet/vnet-module/varp_socket.c Tue Dec 04 11:06:05 2007 +0000 14.3 @@ -77,9 +77,10 @@ static inline _syscall3(int, fcntl, 14.4 * Some architectures use socketcall() to multiplex the socket-related calls, 14.5 * but others define individual syscalls instead. 14.6 * Architectures using socketcall() define __ARCH_WANT_SYS_SOCKETCALL. 14.7 + * NB. x86_64 architecture asserts __ARCH_WANT_SYS_SOCKETCALL in error. 14.8 */ 14.9 14.10 -#ifdef __ARCH_WANT_SYS_SOCKETCALL 14.11 +#if defined(__ARCH_WANT_SYS_SOCKETCALL) && !defined(__x86_64__) 14.12 14.13 /* Define the socketcall() syscall. 14.14 * Multiplexes all the socket-related calls.
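The varp_socket.c guard now excludes x86_64, whose headers define __ARCH_WANT_SYS_SOCKETCALL even though the architecture provides individual socket syscalls. A rough userspace illustration of the two calling styles the comment contrasts, written with syscall(2) purely to clarify the distinction; it is not code from the module:

```c
#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>

/*
 * Illustration only: architectures that multiplex through socketcall()
 * pass a sub-op number plus an argument block; others (x86_64 included)
 * have a dedicated syscall per socket operation.
 */
#ifdef __NR_socketcall
static long my_socket(int domain, int type, int protocol)
{
    long args[3] = { domain, type, protocol };
    return syscall(__NR_socketcall, 1 /* SYS_SOCKET in <linux/net.h> */, args);
}
#else
static long my_socket(int domain, int type, int protocol)
{
    return syscall(__NR_socket, domain, type, protocol);
}
#endif
```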
15.1 --- a/tools/xenmon/xenmon.py Fri Nov 30 08:54:33 2007 -0700 15.2 +++ b/tools/xenmon/xenmon.py Tue Dec 04 11:06:05 2007 +0000 15.3 @@ -653,21 +653,36 @@ def writelog(): 15.4 # start xenbaked 15.5 def start_xenbaked(): 15.6 global options 15.7 - 15.8 - os.system("killall -9 xenbaked") 15.9 - # assumes that xenbaked is in your path 15.10 - os.system("xenbaked --ms_per_sample=%d &" % 15.11 + global kill_cmd 15.12 + global xenbaked_cmd 15.13 + 15.14 + os.system(kill_cmd) 15.15 + os.system(xenbaked_cmd + " --ms_per_sample=%d &" % 15.16 options.mspersample) 15.17 time.sleep(1) 15.18 15.19 # stop xenbaked 15.20 def stop_xenbaked(): 15.21 - os.system("killall -s INT xenbaked") 15.22 + global stop_cmd 15.23 + os.system(stop_cmd) 15.24 15.25 def main(): 15.26 global options 15.27 global args 15.28 global domains 15.29 + global stop_cmd 15.30 + global kill_cmd 15.31 + global xenbaked_cmd 15.32 + 15.33 + if os.uname()[0] == "SunOS": 15.34 + xenbaked_cmd = "/usr/lib/xenbaked" 15.35 + stop_cmd = "/usr/bin/pkill -INT -z global xenbaked" 15.36 + kill_cmd = "/usr/bin/pkill -KILL -z global xenbaked" 15.37 + else: 15.38 + # assumes that xenbaked is in your path 15.39 + xenbaked_cmd = "xenbaked" 15.40 + stop_cmd = "/usr/bin/pkill -INT xenbaked" 15.41 + kill_cmd = "/usr/bin/pkill -KILL xenbaked" 15.42 15.43 parser = setup_cmdline_parser() 15.44 (options, args) = parser.parse_args()
16.1 --- a/tools/xentrace/xentrace.c Fri Nov 30 08:54:33 2007 -0700 16.2 +++ b/tools/xentrace/xentrace.c Tue Dec 04 11:06:05 2007 +0000 16.3 @@ -572,24 +572,24 @@ int main(int argc, char **argv) 16.4 16.5 parse_args(argc, argv); 16.6 16.7 - if (opts.evt_mask != 0) { 16.8 + if ( opts.evt_mask != 0 ) 16.9 set_mask(opts.evt_mask, 0); 16.10 - } 16.11 16.12 - if (opts.cpu_mask != 0) { 16.13 + if ( opts.cpu_mask != 0 ) 16.14 set_mask(opts.cpu_mask, 1); 16.15 - } 16.16 16.17 if ( opts.outfile ) 16.18 - outfd = open(opts.outfile, O_WRONLY | O_CREAT | O_LARGEFILE, 0644); 16.19 + outfd = open(opts.outfile, 16.20 + O_WRONLY | O_CREAT | O_TRUNC | O_LARGEFILE, 16.21 + 0644); 16.22 16.23 - if(outfd < 0) 16.24 + if ( outfd < 0 ) 16.25 { 16.26 perror("Could not open output file"); 16.27 exit(EXIT_FAILURE); 16.28 } 16.29 16.30 - if(isatty(outfd)) 16.31 + if ( isatty(outfd) ) 16.32 { 16.33 fprintf(stderr, "Cannot output to a TTY, specify a log file.\n"); 16.34 exit(EXIT_FAILURE);
17.1 --- a/xen/arch/x86/domctl.c Fri Nov 30 08:54:33 2007 -0700 17.2 +++ b/xen/arch/x86/domctl.c Tue Dec 04 11:06:05 2007 +0000 17.3 @@ -546,7 +546,12 @@ long arch_do_domctl( 17.4 devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff; 17.5 17.6 if ( device_assigned(bus, devfn) ) 17.7 + { 17.8 + gdprintk(XENLOG_ERR, "XEN_DOMCTL_assign_device: " 17.9 + "%x:%x:%x already assigned\n", 17.10 + bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); 17.11 break; 17.12 + } 17.13 17.14 ret = assign_device(d, bus, devfn); 17.15 gdprintk(XENLOG_INFO, "XEN_DOMCTL_assign_device: bdf = %x:%x:%x\n",
18.1 --- a/xen/arch/x86/hvm/vmx/vtd/dmar.c Fri Nov 30 08:54:33 2007 -0700 18.2 +++ b/xen/arch/x86/hvm/vmx/vtd/dmar.c Tue Dec 04 11:06:05 2007 +0000 18.3 @@ -553,5 +553,7 @@ int acpi_dmar_init(void) 18.4 return -ENODEV; 18.5 } 18.6 18.7 + printk("Intel VT-d has been enabled\n"); 18.8 + 18.9 return 0; 18.10 }
19.1 --- a/xen/arch/x86/mm.c Fri Nov 30 08:54:33 2007 -0700 19.2 +++ b/xen/arch/x86/mm.c Tue Dec 04 11:06:05 2007 +0000 19.3 @@ -620,6 +620,7 @@ get_page_from_l1e( 19.4 unsigned long mfn = l1e_get_pfn(l1e); 19.5 struct page_info *page = mfn_to_page(mfn); 19.6 uint32_t l1f = l1e_get_flags(l1e); 19.7 + struct vcpu *curr = current; 19.8 int okay; 19.9 19.10 if ( !(l1f & _PAGE_PRESENT) ) 19.11 @@ -635,7 +636,7 @@ get_page_from_l1e( 19.12 { 19.13 /* DOMID_IO reverts to caller for privilege checks. */ 19.14 if ( d == dom_io ) 19.15 - d = current->domain; 19.16 + d = curr->domain; 19.17 19.18 if ( !iomem_access_permitted(d, mfn, mfn) ) 19.19 { 19.20 @@ -653,7 +654,7 @@ get_page_from_l1e( 19.21 * qemu-dm helper process in dom0 to map the domain's memory without 19.22 * messing up the count of "real" writable mappings.) */ 19.23 okay = (((l1f & _PAGE_RW) && 19.24 - !(unlikely(paging_mode_external(d) && (d != current->domain)))) 19.25 + !(unlikely(paging_mode_external(d) && (d != curr->domain)))) 19.26 ? get_page_and_type(page, d, PGT_writable_page) 19.27 : get_page(page, d)); 19.28 if ( !okay ) 19.29 @@ -673,7 +674,7 @@ get_page_from_l1e( 19.30 { 19.31 if ( (l1f & _PAGE_RW) && 19.32 !(unlikely(paging_mode_external(d) && 19.33 - (d != current->domain))) ) 19.34 + (d != curr->domain))) ) 19.35 put_page_type(page); 19.36 put_page(page); 19.37 MEM_LOG("Attempt to change cache attributes of Xen heap page"); 19.38 @@ -1384,14 +1385,15 @@ static int mod_l1_entry(l1_pgentry_t *pl 19.39 unsigned long gl1mfn) 19.40 { 19.41 l1_pgentry_t ol1e; 19.42 - struct domain *d = current->domain; 19.43 + struct vcpu *curr = current; 19.44 + struct domain *d = curr->domain; 19.45 unsigned long mfn; 19.46 19.47 if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) ) 19.48 return 0; 19.49 19.50 if ( unlikely(paging_mode_refcounts(d)) ) 19.51 - return UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, current); 19.52 + return UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr); 19.53 19.54 if ( l1e_get_flags(nl1e) & _PAGE_PRESENT ) 19.55 { 19.56 @@ -1413,12 +1415,12 @@ static int mod_l1_entry(l1_pgentry_t *pl 19.57 19.58 /* Fast path for identical mapping, r/w and presence. */ 19.59 if ( !l1e_has_changed(ol1e, nl1e, _PAGE_RW | _PAGE_PRESENT) ) 19.60 - return UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, current); 19.61 + return UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr); 19.62 19.63 if ( unlikely(!get_page_from_l1e(nl1e, FOREIGNDOM)) ) 19.64 return 0; 19.65 19.66 - if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, current)) ) 19.67 + if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr)) ) 19.68 { 19.69 put_page_from_l1e(nl1e, d); 19.70 return 0; 19.71 @@ -1426,7 +1428,7 @@ static int mod_l1_entry(l1_pgentry_t *pl 19.72 } 19.73 else 19.74 { 19.75 - if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, current)) ) 19.76 + if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr)) ) 19.77 return 0; 19.78 } 19.79 19.80 @@ -1442,7 +1444,8 @@ static int mod_l2_entry(l2_pgentry_t *pl 19.81 unsigned long type) 19.82 { 19.83 l2_pgentry_t ol2e; 19.84 - struct domain *d = current->domain; 19.85 + struct vcpu *curr = current; 19.86 + struct domain *d = curr->domain; 19.87 19.88 if ( unlikely(!is_guest_l2_slot(d, type, pgentry_ptr_to_slot(pl2e))) ) 19.89 { 19.90 @@ -1466,18 +1469,18 @@ static int mod_l2_entry(l2_pgentry_t *pl 19.91 19.92 /* Fast path for identical mapping and presence. 
*/ 19.93 if ( !l2e_has_changed(ol2e, nl2e, _PAGE_PRESENT)) 19.94 - return UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, current); 19.95 + return UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, curr); 19.96 19.97 if ( unlikely(!get_page_from_l2e(nl2e, pfn, d)) ) 19.98 return 0; 19.99 19.100 - if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, current)) ) 19.101 + if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, curr)) ) 19.102 { 19.103 put_page_from_l2e(nl2e, pfn); 19.104 return 0; 19.105 } 19.106 } 19.107 - else if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, current)) ) 19.108 + else if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, curr)) ) 19.109 { 19.110 return 0; 19.111 } 19.112 @@ -1494,7 +1497,8 @@ static int mod_l3_entry(l3_pgentry_t *pl 19.113 unsigned long pfn) 19.114 { 19.115 l3_pgentry_t ol3e; 19.116 - struct domain *d = current->domain; 19.117 + struct vcpu *curr = current; 19.118 + struct domain *d = curr->domain; 19.119 int okay; 19.120 19.121 if ( unlikely(!is_guest_l3_slot(pgentry_ptr_to_slot(pl3e))) ) 19.122 @@ -1528,18 +1532,18 @@ static int mod_l3_entry(l3_pgentry_t *pl 19.123 19.124 /* Fast path for identical mapping and presence. */ 19.125 if (!l3e_has_changed(ol3e, nl3e, _PAGE_PRESENT)) 19.126 - return UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, current); 19.127 + return UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, curr); 19.128 19.129 if ( unlikely(!get_page_from_l3e(nl3e, pfn, d)) ) 19.130 return 0; 19.131 19.132 - if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, current)) ) 19.133 + if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, curr)) ) 19.134 { 19.135 put_page_from_l3e(nl3e, pfn); 19.136 return 0; 19.137 } 19.138 } 19.139 - else if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, current)) ) 19.140 + else if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, curr)) ) 19.141 { 19.142 return 0; 19.143 } 19.144 @@ -1558,11 +1562,12 @@ static int mod_l3_entry(l3_pgentry_t *pl 19.145 #if CONFIG_PAGING_LEVELS >= 4 19.146 19.147 /* Update the L4 entry at pl4e to new value nl4e. pl4e is within frame pfn. */ 19.148 -static int mod_l4_entry(struct domain *d, 19.149 - l4_pgentry_t *pl4e, 19.150 +static int mod_l4_entry(l4_pgentry_t *pl4e, 19.151 l4_pgentry_t nl4e, 19.152 unsigned long pfn) 19.153 { 19.154 + struct vcpu *curr = current; 19.155 + struct domain *d = curr->domain; 19.156 l4_pgentry_t ol4e; 19.157 19.158 if ( unlikely(!is_guest_l4_slot(d, pgentry_ptr_to_slot(pl4e))) ) 19.159 @@ -1583,22 +1588,22 @@ static int mod_l4_entry(struct domain *d 19.160 return 0; 19.161 } 19.162 19.163 - adjust_guest_l4e(nl4e, current->domain); 19.164 + adjust_guest_l4e(nl4e, d); 19.165 19.166 /* Fast path for identical mapping and presence. 
*/ 19.167 if (!l4e_has_changed(ol4e, nl4e, _PAGE_PRESENT)) 19.168 - return UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, current); 19.169 - 19.170 - if ( unlikely(!get_page_from_l4e(nl4e, pfn, current->domain)) ) 19.171 + return UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, curr); 19.172 + 19.173 + if ( unlikely(!get_page_from_l4e(nl4e, pfn, d)) ) 19.174 return 0; 19.175 19.176 - if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, current)) ) 19.177 + if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, curr)) ) 19.178 { 19.179 put_page_from_l4e(nl4e, pfn); 19.180 return 0; 19.181 } 19.182 } 19.183 - else if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, current)) ) 19.184 + else if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, curr)) ) 19.185 { 19.186 return 0; 19.187 } 19.188 @@ -1609,6 +1614,58 @@ static int mod_l4_entry(struct domain *d 19.189 19.190 #endif 19.191 19.192 +void put_page(struct page_info *page) 19.193 +{ 19.194 + u32 nx, x, y = page->count_info; 19.195 + 19.196 + do { 19.197 + x = y; 19.198 + nx = x - 1; 19.199 + } 19.200 + while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) ); 19.201 + 19.202 + if ( unlikely((nx & PGC_count_mask) == 0) ) 19.203 + { 19.204 + cleanup_page_cacheattr(page); 19.205 + free_domheap_page(page); 19.206 + } 19.207 +} 19.208 + 19.209 + 19.210 +int get_page(struct page_info *page, struct domain *domain) 19.211 +{ 19.212 + u32 x, nx, y = page->count_info; 19.213 + u32 d, nd = page->u.inuse._domain; 19.214 + u32 _domain = pickle_domptr(domain); 19.215 + 19.216 + do { 19.217 + x = y; 19.218 + nx = x + 1; 19.219 + d = nd; 19.220 + if ( unlikely((x & PGC_count_mask) == 0) || /* Not allocated? */ 19.221 + unlikely((nx & PGC_count_mask) == 0) || /* Count overflow? */ 19.222 + unlikely(d != _domain) ) /* Wrong owner? */ 19.223 + { 19.224 + if ( !_shadow_mode_refcounts(domain) && !domain->is_dying ) 19.225 + gdprintk(XENLOG_INFO, 19.226 + "Error pfn %lx: rd=%p, od=%p, caf=%08x, taf=%" 19.227 + PRtype_info "\n", 19.228 + page_to_mfn(page), domain, unpickle_domptr(d), 19.229 + x, page->u.inuse.type_info); 19.230 + return 0; 19.231 + } 19.232 + asm volatile ( 19.233 + LOCK_PREFIX "cmpxchg8b %3" 19.234 + : "=d" (nd), "=a" (y), "=c" (d), 19.235 + "=m" (*(volatile u64 *)(&page->count_info)) 19.236 + : "0" (d), "1" (x), "c" (d), "b" (nx) ); 19.237 + } 19.238 + while ( unlikely(nd != d) || unlikely(y != x) ); 19.239 + 19.240 + return 1; 19.241 +} 19.242 + 19.243 + 19.244 static int alloc_page_type(struct page_info *page, unsigned long type) 19.245 { 19.246 struct domain *owner = page_get_owner(page); 19.247 @@ -1885,7 +1942,6 @@ int new_guest_cr3(unsigned long mfn) 19.248 okay = paging_mode_refcounts(d) 19.249 ? 0 /* Old code was broken, but what should it be? 
*/ 19.250 : mod_l4_entry( 19.251 - d, 19.252 __va(pagetable_get_paddr(v->arch.guest_table)), 19.253 l4e_from_pfn( 19.254 mfn, 19.255 @@ -2117,7 +2173,7 @@ int do_mmuext_op( 19.256 type = PGT_l4_page_table; 19.257 19.258 pin_page: 19.259 - rc = xsm_memory_pin_page(current->domain, page); 19.260 + rc = xsm_memory_pin_page(d, page); 19.261 if ( rc ) 19.262 break; 19.263 19.264 @@ -2407,14 +2463,14 @@ int do_mmu_update( 19.265 */ 19.266 case MMU_NORMAL_PT_UPDATE: 19.267 19.268 - rc = xsm_mmu_normal_update(current->domain, req.val); 19.269 + rc = xsm_mmu_normal_update(d, req.val); 19.270 if ( rc ) 19.271 break; 19.272 19.273 gmfn = req.ptr >> PAGE_SHIFT; 19.274 mfn = gmfn_to_mfn(d, gmfn); 19.275 19.276 - if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) ) 19.277 + if ( unlikely(!get_page_from_pagenr(mfn, d)) ) 19.278 { 19.279 MEM_LOG("Could not get page for normal update"); 19.280 break; 19.281 @@ -2468,7 +2524,7 @@ int do_mmu_update( 19.282 case PGT_l4_page_table: 19.283 { 19.284 l4_pgentry_t l4e = l4e_from_intpte(req.val); 19.285 - okay = mod_l4_entry(d, va, l4e, mfn); 19.286 + okay = mod_l4_entry(va, l4e, mfn); 19.287 } 19.288 break; 19.289 #endif 19.290 @@ -2501,7 +2557,7 @@ int do_mmu_update( 19.291 mfn = req.ptr >> PAGE_SHIFT; 19.292 gpfn = req.val; 19.293 19.294 - rc = xsm_mmu_machphys_update(current->domain, mfn); 19.295 + rc = xsm_mmu_machphys_update(d, mfn); 19.296 if ( rc ) 19.297 break; 19.298 19.299 @@ -2780,6 +2836,7 @@ int create_grant_host_mapping(uint64_t a 19.300 int replace_grant_host_mapping( 19.301 uint64_t addr, unsigned long frame, uint64_t new_addr, unsigned int flags) 19.302 { 19.303 + struct vcpu *curr = current; 19.304 l1_pgentry_t *pl1e, ol1e; 19.305 unsigned long gl1mfn; 19.306 int rc; 19.307 @@ -2787,16 +2844,16 @@ int replace_grant_host_mapping( 19.308 if ( flags & GNTMAP_contains_pte ) 19.309 { 19.310 if ( !new_addr ) 19.311 - return destroy_grant_pte_mapping(addr, frame, current->domain); 19.312 + return destroy_grant_pte_mapping(addr, frame, curr->domain); 19.313 19.314 MEM_LOG("Unsupported grant table operation"); 19.315 return GNTST_general_error; 19.316 } 19.317 19.318 if ( !new_addr ) 19.319 - return destroy_grant_va_mapping(addr, frame, current); 19.320 - 19.321 - pl1e = guest_map_l1e(current, new_addr, &gl1mfn); 19.322 + return destroy_grant_va_mapping(addr, frame, curr); 19.323 + 19.324 + pl1e = guest_map_l1e(curr, new_addr, &gl1mfn); 19.325 if ( !pl1e ) 19.326 { 19.327 MEM_LOG("Could not find L1 PTE for address %lx", 19.328 @@ -2805,19 +2862,18 @@ int replace_grant_host_mapping( 19.329 } 19.330 ol1e = *pl1e; 19.331 19.332 - if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, l1e_empty(), 19.333 - gl1mfn, current)) ) 19.334 + if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, l1e_empty(), gl1mfn, curr)) ) 19.335 { 19.336 MEM_LOG("Cannot delete PTE entry at %p", (unsigned long *)pl1e); 19.337 - guest_unmap_l1e(current, pl1e); 19.338 + guest_unmap_l1e(curr, pl1e); 19.339 return GNTST_general_error; 19.340 } 19.341 19.342 - guest_unmap_l1e(current, pl1e); 19.343 - 19.344 - rc = replace_grant_va_mapping(addr, frame, ol1e, current); 19.345 - if ( rc && !paging_mode_refcounts(current->domain) ) 19.346 - put_page_from_l1e(ol1e, current->domain); 19.347 + guest_unmap_l1e(curr, pl1e); 19.348 + 19.349 + rc = replace_grant_va_mapping(addr, frame, ol1e, curr); 19.350 + if ( rc && !paging_mode_refcounts(curr->domain) ) 19.351 + put_page_from_l1e(ol1e, curr->domain); 19.352 19.353 return rc; 19.354 } 19.355 @@ -2839,8 +2895,9 @@ int steal_page( 19.356 y = page->count_info; 
19.357 do { 19.358 x = y; 19.359 - if (unlikely((x & (PGC_count_mask|PGC_allocated)) != 19.360 - (1 | PGC_allocated)) || unlikely(_nd != _d)) { 19.361 + if ( unlikely((x & (PGC_count_mask|PGC_allocated)) != 19.362 + (1 | PGC_allocated)) || unlikely(_nd != _d) ) 19.363 + { 19.364 MEM_LOG("gnttab_transfer: Bad page %p: ed=%p(%u), sd=%p," 19.365 " caf=%08x, taf=%" PRtype_info "\n", 19.366 (void *) page_to_mfn(page), 19.367 @@ -2849,7 +2906,7 @@ int steal_page( 19.368 spin_unlock(&d->page_alloc_lock); 19.369 return -1; 19.370 } 19.371 - __asm__ __volatile__( 19.372 + asm volatile ( 19.373 LOCK_PREFIX "cmpxchg8b %2" 19.374 : "=d" (_nd), "=a" (y), 19.375 "=m" (*(volatile u64 *)(&page->count_info)) 19.376 @@ -2930,8 +2987,8 @@ int do_update_va_mapping(unsigned long v 19.377 switch ( (bmap_ptr = flags & ~UVMF_FLUSHTYPE_MASK) ) 19.378 { 19.379 case UVMF_LOCAL: 19.380 - if ( !paging_mode_enabled(d) 19.381 - || (paging_invlpg(current, va) != 0) ) 19.382 + if ( !paging_mode_enabled(d) || 19.383 + (paging_invlpg(v, va) != 0) ) 19.384 flush_tlb_one_local(va); 19.385 break; 19.386 case UVMF_ALL: 19.387 @@ -3040,6 +3097,7 @@ long do_set_gdt(XEN_GUEST_HANDLE(ulong) 19.388 { 19.389 int nr_pages = (entries + 511) / 512; 19.390 unsigned long frames[16]; 19.391 + struct vcpu *curr = current; 19.392 long ret; 19.393 19.394 /* Rechecked in set_gdt, but ensures a sane limit for copy_from_user(). */ 19.395 @@ -3049,12 +3107,12 @@ long do_set_gdt(XEN_GUEST_HANDLE(ulong) 19.396 if ( copy_from_guest(frames, frame_list, nr_pages) ) 19.397 return -EFAULT; 19.398 19.399 - LOCK_BIGLOCK(current->domain); 19.400 - 19.401 - if ( (ret = set_gdt(current, frames, entries)) == 0 ) 19.402 + LOCK_BIGLOCK(curr->domain); 19.403 + 19.404 + if ( (ret = set_gdt(curr, frames, entries)) == 0 ) 19.405 flush_tlb_local(); 19.406 19.407 - UNLOCK_BIGLOCK(current->domain); 19.408 + UNLOCK_BIGLOCK(curr->domain); 19.409 19.410 return ret; 19.411 }
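For context on the get_page() loop moved out-of-line in the mm.c hunk above: the owner check and the count increment must commit together, which is why one 8-byte cmpxchg covers the adjacent count_info and _domain words. A simplified sketch of that idea, using stand-in types, a GCC builtin instead of the raw cmpxchg8b asm, and an assumed little-endian field layout:

```c
#include <stdint.h>

/* Simplified stand-ins for the two adjacent 32-bit fields that get_page()
 * updates with a single 8-byte cmpxchg: the reference count and the
 * (pickled) owner.  Layout and alignment are assumptions for this sketch. */
struct fake_page {
    uint32_t count_info;
    uint32_t owner;
} __attribute__((aligned(8)));

/* Take a reference only while the page is allocated and owned by 'owner'.
 * The count bump and the owner check commit together via one 64-bit
 * compare-and-swap, so a concurrent owner change or free cannot slip in
 * between the check and the increment.  (Little-endian layout assumed:
 * count_info occupies the low 32 bits of the combined word.) */
static int get_ref(struct fake_page *pg, uint32_t owner)
{
    uint64_t *both = (uint64_t *)pg;     /* aliasing shortcut, sketch only */

    for ( ; ; )
    {
        uint64_t old = *both;
        uint32_t count = (uint32_t)old;
        uint32_t cur_owner = (uint32_t)(old >> 32);

        if ( count == 0 || count + 1 == 0 || cur_owner != owner )
            return 0;                    /* freed, overflow, or wrong owner */

        uint64_t new = ((uint64_t)cur_owner << 32) | (count + 1);
        if ( __sync_bool_compare_and_swap(both, old, new) )
            return 1;
    }
}
```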
20.1 --- a/xen/arch/x86/tboot.c Fri Nov 30 08:54:33 2007 -0700 20.2 +++ b/xen/arch/x86/tboot.c Tue Dec 04 11:06:05 2007 +0000 20.3 @@ -43,16 +43,43 @@ void __init tboot_probe(void) 20.4 printk(" s3_tb_wakeup_entry: 0x%08x\n", tboot_shared->s3_tb_wakeup_entry); 20.5 printk(" s3_k_wakeup_entry: 0x%08x\n", tboot_shared->s3_k_wakeup_entry); 20.6 printk(" &acpi_sinfo: 0x%p\n", &tboot_shared->acpi_sinfo); 20.7 + if ( tboot_shared->version >= 0x02 ) 20.8 + { 20.9 + printk(" tboot_base: 0x%08x\n", tboot_shared->tboot_base); 20.10 + printk(" tboot_size: 0x%x\n", tboot_shared->tboot_size); 20.11 + } 20.12 } 20.13 20.14 void tboot_shutdown(uint32_t shutdown_type) 20.15 { 20.16 + uint32_t map_base, map_size; 20.17 + int err; 20.18 + 20.19 g_tboot_shared->shutdown_type = shutdown_type; 20.20 20.21 local_irq_disable(); 20.22 20.23 - /* Create identity map for 0-640k to include tboot code. */ 20.24 - map_pages_to_xen(0, 0, PFN_UP(0xa0000), __PAGE_HYPERVISOR); 20.25 + /* Create identity map for tboot shutdown code. */ 20.26 + if ( g_tboot_shared->version >= 0x02 ) 20.27 + { 20.28 + map_base = PFN_DOWN(g_tboot_shared->tboot_base); 20.29 + map_size = PFN_UP(g_tboot_shared->tboot_size); 20.30 + } 20.31 + else 20.32 + { 20.33 + map_base = 0; 20.34 + map_size = PFN_UP(0xa0000); 20.35 + } 20.36 + 20.37 + err = map_pages_to_xen(map_base << PAGE_SHIFT, map_base, map_size, 20.38 + __PAGE_HYPERVISOR); 20.39 + if ( err != 0 ) 20.40 + { 20.41 + printk("error (0x%x) mapping tboot pages (mfns) @ 0x%x, 0x%x\n", err, 20.42 + map_base, map_size); 20.43 + return; 20.44 + } 20.45 + 20.46 write_ptbase(idle_vcpu[0]); 20.47 20.48 #ifdef __x86_64__ 20.49 @@ -68,3 +95,13 @@ int tboot_in_measured_env(void) 20.50 { 20.51 return (g_tboot_shared != NULL); 20.52 } 20.53 + 20.54 +/* 20.55 + * Local variables: 20.56 + * mode: C 20.57 + * c-set-style: "BSD" 20.58 + * c-basic-offset: 4 20.59 + * tab-width: 4 20.60 + * indent-tabs-mode: nil 20.61 + * End: 20.62 + */
21.1 --- a/xen/common/domctl.c Fri Nov 30 08:54:33 2007 -0700 21.2 +++ b/xen/common/domctl.c Tue Dec 04 11:06:05 2007 +0000 21.3 @@ -187,6 +187,9 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc 21.4 21.5 spin_lock(&domctl_lock); 21.6 21.7 + if ( xsm_domctl(op) ) 21.8 + goto domctl_out; 21.9 + 21.10 switch ( op->cmd ) 21.11 { 21.12 21.13 @@ -201,10 +204,6 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc 21.14 if ( d == NULL ) 21.15 break; 21.16 21.17 - ret = xsm_setvcpucontext(d); 21.18 - if ( ret ) 21.19 - goto svc_out; 21.20 - 21.21 ret = -EINVAL; 21.22 if ( (vcpu >= MAX_VIRT_CPUS) || ((v = d->vcpu[vcpu]) == NULL) ) 21.23 goto svc_out; 21.24 @@ -252,17 +251,12 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc 21.25 ret = -ESRCH; 21.26 if ( d != NULL ) 21.27 { 21.28 - ret = xsm_pausedomain(d); 21.29 - if ( ret ) 21.30 - goto pausedomain_out; 21.31 - 21.32 ret = -EINVAL; 21.33 if ( d != current->domain ) 21.34 { 21.35 domain_pause_by_systemcontroller(d); 21.36 ret = 0; 21.37 } 21.38 - pausedomain_out: 21.39 rcu_unlock_domain(d); 21.40 } 21.41 } 21.42 @@ -275,14 +269,6 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc 21.43 ret = -ESRCH; 21.44 if ( d == NULL ) 21.45 break; 21.46 - 21.47 - ret = xsm_unpausedomain(d); 21.48 - if ( ret ) 21.49 - { 21.50 - rcu_unlock_domain(d); 21.51 - break; 21.52 - } 21.53 - 21.54 domain_unpause_by_systemcontroller(d); 21.55 rcu_unlock_domain(d); 21.56 ret = 0; 21.57 @@ -297,13 +283,6 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc 21.58 if ( d == NULL ) 21.59 break; 21.60 21.61 - ret = xsm_resumedomain(d); 21.62 - if ( ret ) 21.63 - { 21.64 - rcu_unlock_domain(d); 21.65 - break; 21.66 - } 21.67 - 21.68 domain_resume(d); 21.69 rcu_unlock_domain(d); 21.70 ret = 0; 21.71 @@ -379,13 +358,6 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc 21.72 if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL ) 21.73 break; 21.74 21.75 - ret = xsm_max_vcpus(d); 21.76 - if ( ret ) 21.77 - { 21.78 - rcu_unlock_domain(d); 21.79 - break; 21.80 - } 21.81 - 21.82 /* Needed, for example, to ensure writable p.t. state is synced. */ 21.83 domain_pause(d); 21.84 21.85 @@ -422,7 +394,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc 21.86 ret = -ESRCH; 21.87 if ( d != NULL ) 21.88 { 21.89 - ret = xsm_destroydomain(d) ? 
: domain_kill(d); 21.90 + domain_kill(d); 21.91 rcu_unlock_domain(d); 21.92 } 21.93 } 21.94 @@ -440,10 +412,6 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc 21.95 if ( d == NULL ) 21.96 break; 21.97 21.98 - ret = xsm_vcpuaffinity(op->cmd, d); 21.99 - if ( ret ) 21.100 - goto vcpuaffinity_out; 21.101 - 21.102 ret = -EINVAL; 21.103 if ( op->u.vcpuaffinity.vcpu >= MAX_VIRT_CPUS ) 21.104 goto vcpuaffinity_out; 21.105 @@ -478,15 +446,10 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc 21.106 if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL ) 21.107 break; 21.108 21.109 - ret = xsm_scheduler(d); 21.110 - if ( ret ) 21.111 - goto scheduler_op_out; 21.112 - 21.113 ret = sched_adjust(d, &op->u.scheduler_op); 21.114 if ( copy_to_guest(u_domctl, op, 1) ) 21.115 ret = -EFAULT; 21.116 21.117 - scheduler_op_out: 21.118 rcu_unlock_domain(d); 21.119 } 21.120 break; 21.121 @@ -509,17 +472,12 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc 21.122 break; 21.123 } 21.124 21.125 - ret = xsm_getdomaininfo(d); 21.126 - if ( ret ) 21.127 - goto getdomaininfo_out; 21.128 - 21.129 getdomaininfo(d, &op->u.getdomaininfo); 21.130 21.131 op->domain = op->u.getdomaininfo.domain; 21.132 if ( copy_to_guest(u_domctl, op, 1) ) 21.133 ret = -EFAULT; 21.134 21.135 - getdomaininfo_out: 21.136 rcu_read_unlock(&domlist_read_lock); 21.137 } 21.138 break; 21.139 @@ -534,10 +492,6 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc 21.140 if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL ) 21.141 break; 21.142 21.143 - ret = xsm_getvcpucontext(d); 21.144 - if ( ret ) 21.145 - goto getvcpucontext_out; 21.146 - 21.147 ret = -EINVAL; 21.148 if ( op->u.vcpucontext.vcpu >= MAX_VIRT_CPUS ) 21.149 goto getvcpucontext_out; 21.150 @@ -594,10 +548,6 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc 21.151 if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL ) 21.152 break; 21.153 21.154 - ret = xsm_getvcpuinfo(d); 21.155 - if ( ret ) 21.156 - goto getvcpuinfo_out; 21.157 - 21.158 ret = -EINVAL; 21.159 if ( op->u.getvcpuinfo.vcpu >= MAX_VIRT_CPUS ) 21.160 goto getvcpuinfo_out; 21.161 @@ -633,10 +583,6 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc 21.162 if ( d == NULL ) 21.163 break; 21.164 21.165 - ret = xsm_setdomainmaxmem(d); 21.166 - if ( ret ) 21.167 - goto max_mem_out; 21.168 - 21.169 ret = -EINVAL; 21.170 new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT-10); 21.171 21.172 @@ -651,7 +597,6 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc 21.173 } 21.174 spin_unlock(&d->page_alloc_lock); 21.175 21.176 - max_mem_out: 21.177 rcu_unlock_domain(d); 21.178 } 21.179 break; 21.180 @@ -665,13 +610,6 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc 21.181 if ( d == NULL ) 21.182 break; 21.183 21.184 - ret = xsm_setdomainhandle(d); 21.185 - if ( ret ) 21.186 - { 21.187 - rcu_unlock_domain(d); 21.188 - break; 21.189 - } 21.190 - 21.191 memcpy(d->handle, op->u.setdomainhandle.handle, 21.192 sizeof(xen_domain_handle_t)); 21.193 rcu_unlock_domain(d); 21.194 @@ -688,13 +626,6 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc 21.195 if ( d == NULL ) 21.196 break; 21.197 21.198 - ret = xsm_setdebugging(d); 21.199 - if ( ret ) 21.200 - { 21.201 - rcu_unlock_domain(d); 21.202 - break; 21.203 - } 21.204 - 21.205 domain_pause(d); 21.206 d->debugger_attached = !!op->u.setdebugging.enable; 21.207 domain_unpause(d); /* causes guest to latch new status */ 21.208 @@ -717,16 +648,11 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc 21.209 if ( d == NULL ) 21.210 break; 21.211 21.212 - ret = xsm_irq_permission(d, pirq, op->u.irq_permission.allow_access); 21.213 - if ( ret ) 21.214 - goto irq_permission_out; 
21.215 - 21.216 if ( op->u.irq_permission.allow_access ) 21.217 ret = irq_permit_access(d, pirq); 21.218 else 21.219 ret = irq_deny_access(d, pirq); 21.220 21.221 - irq_permission_out: 21.222 rcu_unlock_domain(d); 21.223 } 21.224 break; 21.225 @@ -746,16 +672,11 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc 21.226 if ( d == NULL ) 21.227 break; 21.228 21.229 - ret = xsm_iomem_permission(d, mfn, op->u.iomem_permission.allow_access); 21.230 - if ( ret ) 21.231 - goto iomem_permission_out; 21.232 - 21.233 if ( op->u.iomem_permission.allow_access ) 21.234 ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1); 21.235 else 21.236 ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1); 21.237 21.238 - iomem_permission_out: 21.239 rcu_unlock_domain(d); 21.240 } 21.241 break; 21.242 @@ -768,13 +689,6 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc 21.243 d = rcu_lock_domain_by_id(op->domain); 21.244 if ( d != NULL ) 21.245 { 21.246 - ret = xsm_domain_settime(d); 21.247 - if ( ret ) 21.248 - { 21.249 - rcu_unlock_domain(d); 21.250 - break; 21.251 - } 21.252 - 21.253 d->time_offset_seconds = op->u.settimeoffset.time_offset_seconds; 21.254 rcu_unlock_domain(d); 21.255 ret = 0; 21.256 @@ -787,6 +701,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc 21.257 break; 21.258 } 21.259 21.260 +domctl_out: 21.261 spin_unlock(&domctl_lock); 21.262 21.263 return ret;
22.1 --- a/xen/include/asm-x86/mm.h Fri Nov 30 08:54:33 2007 -0700 22.2 +++ b/xen/include/asm-x86/mm.h Tue Dec 04 11:06:05 2007 +0000 22.3 @@ -149,60 +149,10 @@ int _shadow_mode_refcounts(struct domain 22.4 22.5 void cleanup_page_cacheattr(struct page_info *page); 22.6 22.7 -static inline void put_page(struct page_info *page) 22.8 -{ 22.9 - u32 nx, x, y = page->count_info; 22.10 - 22.11 - do { 22.12 - x = y; 22.13 - nx = x - 1; 22.14 - } 22.15 - while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) ); 22.16 - 22.17 - if ( unlikely((nx & PGC_count_mask) == 0) ) 22.18 - { 22.19 - cleanup_page_cacheattr(page); 22.20 - free_domheap_page(page); 22.21 - } 22.22 -} 22.23 - 22.24 - 22.25 -static inline int get_page(struct page_info *page, 22.26 - struct domain *domain) 22.27 -{ 22.28 - u32 x, nx, y = page->count_info; 22.29 - u32 d, nd = page->u.inuse._domain; 22.30 - u32 _domain = pickle_domptr(domain); 22.31 - 22.32 - do { 22.33 - x = y; 22.34 - nx = x + 1; 22.35 - d = nd; 22.36 - if ( unlikely((x & PGC_count_mask) == 0) || /* Not allocated? */ 22.37 - unlikely((nx & PGC_count_mask) == 0) || /* Count overflow? */ 22.38 - unlikely(d != _domain) ) /* Wrong owner? */ 22.39 - { 22.40 - if ( !_shadow_mode_refcounts(domain) ) 22.41 - gdprintk(XENLOG_INFO, 22.42 - "Error pfn %lx: rd=%p, od=%p, caf=%08x, taf=%" 22.43 - PRtype_info "\n", 22.44 - page_to_mfn(page), domain, unpickle_domptr(d), 22.45 - x, page->u.inuse.type_info); 22.46 - return 0; 22.47 - } 22.48 - __asm__ __volatile__( 22.49 - LOCK_PREFIX "cmpxchg8b %3" 22.50 - : "=d" (nd), "=a" (y), "=c" (d), 22.51 - "=m" (*(volatile u64 *)(&page->count_info)) 22.52 - : "0" (d), "1" (x), "c" (d), "b" (nx) ); 22.53 - } 22.54 - while ( unlikely(nd != d) || unlikely(y != x) ); 22.55 - 22.56 - return 1; 22.57 -} 22.58 - 22.59 int is_iomem_page(unsigned long mfn); 22.60 22.61 +void put_page(struct page_info *page); 22.62 +int get_page(struct page_info *page, struct domain *domain); 22.63 void put_page_type(struct page_info *page); 22.64 int get_page_type(struct page_info *page, unsigned long type); 22.65 int get_page_from_l1e(l1_pgentry_t l1e, struct domain *d);
23.1 --- a/xen/include/asm-x86/tboot.h Fri Nov 30 08:54:33 2007 -0700 23.2 +++ b/xen/include/asm-x86/tboot.h Tue Dec 04 11:06:05 2007 +0000 23.3 @@ -49,8 +49,9 @@ typedef struct __attribute__ ((__packed_ 23.4 #define MAX_TB_ACPI_SINFO_SIZE 64 23.5 23.6 typedef struct __attribute__ ((__packed__)) { 23.7 + /* version 0x01+ fields: */ 23.8 uuid_t uuid; /* {663C8DFF-E8B3-4b82-AABF-19EA4D057A08} */ 23.9 - uint32_t version; /* 0x01 */ 23.10 + uint32_t version; /* Version number: 0x01, 0x02, ... */ 23.11 uint32_t log_addr; /* physical addr of tb_log_t log */ 23.12 uint32_t shutdown_entry32; /* entry point for tboot shutdown from 32b */ 23.13 uint32_t shutdown_entry64; /* entry point for tboot shutdown from 64b */ 23.14 @@ -59,6 +60,9 @@ typedef struct __attribute__ ((__packed_ 23.15 uint32_t s3_k_wakeup_entry; /* entry point for xen s3 wake up */ 23.16 uint8_t acpi_sinfo[MAX_TB_ACPI_SINFO_SIZE]; 23.17 /* where kernel put acpi sleep info in Sx */ 23.18 + /* version 0x02+ fields: */ 23.19 + uint32_t tboot_base; /* starting addr for tboot */ 23.20 + uint32_t tboot_size; /* size of tboot */ 23.21 } tboot_shared_t; 23.22 23.23 #define TB_SHUTDOWN_REBOOT 0
24.1 --- a/xen/include/public/io/blkif.h Fri Nov 30 08:54:33 2007 -0700 24.2 +++ b/xen/include/public/io/blkif.h Tue Dec 04 11:06:05 2007 +0000 24.3 @@ -54,7 +54,7 @@ 24.4 #define BLKIF_OP_WRITE 1 24.5 /* 24.6 * Recognised only if "feature-barrier" is present in backend xenbus info. 24.7 - * The "feature_barrier" node contains a boolean indicating whether barrier 24.8 + * The "feature-barrier" node contains a boolean indicating whether barrier 24.9 * requests are likely to succeed or fail. Either way, a barrier request 24.10 * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by 24.11 * the underlying block-device hardware. The boolean simply indicates whether 24.12 @@ -63,6 +63,19 @@ 24.13 * create the "feature-barrier" node! 24.14 */ 24.15 #define BLKIF_OP_WRITE_BARRIER 2 24.16 +/* 24.17 + * Recognised if "feature-flush-cache" is present in backend xenbus 24.18 + * info. A flush will ask the underlying storage hardware to flush its 24.19 + * non-volatile caches as appropriate. The "feature-flush-cache" node 24.20 + * contains a boolean indicating whether flush requests are likely to 24.21 + * succeed or fail. Either way, a flush request may fail at any time 24.22 + * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying 24.23 + * block-device hardware. The boolean simply indicates whether or not it 24.24 + * is worthwhile for the frontend to attempt flushes. If a backend does 24.25 + * not recognise BLKIF_OP_WRITE_FLUSH_CACHE, it should *not* create the 24.26 + * "feature-flush-cache" node! 24.27 + */ 24.28 +#define BLKIF_OP_FLUSH_DISKCACHE 3 24.29 24.30 /* 24.31 * Maximum scatter/gather segments per request.
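Per the new blkif.h comment, a frontend should issue BLKIF_OP_FLUSH_DISKCACHE only when the backend advertises "feature-flush-cache" in its xenbus info, and must still tolerate BLKIF_RSP_EOPNOTSUPP on individual requests. A minimal sketch of that negotiation; backend_read_int() is a hypothetical xenstore-reading helper, not part of the interface defined here:

```c
/* Hypothetical helper: read an integer node from the backend's xenstore
 * directory, returning 0 if the node is absent or unreadable. */
extern int backend_read_int(const char *node);

enum flush_mode { FLUSH_NONE, FLUSH_BARRIER, FLUSH_DISKCACHE };

/* Prefer a real cache flush when the backend advertises it, fall back to
 * write barriers, else do nothing special.  Either feature node only says
 * the request is worth attempting; a request may still complete with
 * BLKIF_RSP_EOPNOTSUPP, after which the frontend should stop using it. */
static enum flush_mode choose_flush_mode(void)
{
    if ( backend_read_int("feature-flush-cache") )
        return FLUSH_DISKCACHE;          /* use BLKIF_OP_FLUSH_DISKCACHE */
    if ( backend_read_int("feature-barrier") )
        return FLUSH_BARRIER;            /* use BLKIF_OP_WRITE_BARRIER */
    return FLUSH_NONE;
}
```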
25.1 --- a/xen/include/xsm/xsm.h Fri Nov 30 08:54:33 2007 -0700 25.2 +++ b/xen/include/xsm/xsm.h Tue Dec 04 11:06:05 2007 +0000 25.3 @@ -50,34 +50,18 @@ extern xsm_initcall_t __xsm_initcall_sta 25.4 25.5 struct xsm_operations { 25.6 void (*security_domaininfo) (struct domain *d, 25.7 - struct xen_domctl_getdomaininfo *info); 25.8 - int (*setvcpucontext) (struct domain *d); 25.9 - int (*pausedomain) (struct domain *d); 25.10 - int (*unpausedomain) (struct domain *d); 25.11 - int (*resumedomain) (struct domain *d); 25.12 + struct xen_domctl_getdomaininfo *info); 25.13 + int (*domctl) (struct xen_domctl *domctl); 25.14 int (*domain_create) (struct domain *d, u32 ssidref); 25.15 - int (*max_vcpus) (struct domain *d); 25.16 - int (*destroydomain) (struct domain *d); 25.17 - int (*vcpuaffinity) (int cmd, struct domain *d); 25.18 - int (*scheduler) (struct domain *d); 25.19 int (*getdomaininfo) (struct domain *d); 25.20 - int (*getvcpucontext) (struct domain *d); 25.21 - int (*getvcpuinfo) (struct domain *d); 25.22 - int (*domain_settime) (struct domain *d); 25.23 int (*tbufcontrol) (void); 25.24 int (*readconsole) (uint32_t clear); 25.25 int (*sched_id) (void); 25.26 - int (*setdomainmaxmem) (struct domain *d); 25.27 - int (*setdomainhandle) (struct domain *d); 25.28 - int (*setdebugging) (struct domain *d); 25.29 - int (*irq_permission) (struct domain *d, uint8_t pirq, uint8_t access); 25.30 - int (*iomem_permission) (struct domain *d, unsigned long mfn, 25.31 - uint8_t access); 25.32 int (*perfcontrol) (void); 25.33 25.34 int (*evtchn_unbound) (struct domain *d, struct evtchn *chn, domid_t id2); 25.35 int (*evtchn_interdomain) (struct domain *d1, struct evtchn *chn1, 25.36 - struct domain *d2, struct evtchn *chn2); 25.37 + struct domain *d2, struct evtchn *chn2); 25.38 void (*evtchn_close_post) (struct evtchn *chn); 25.39 int (*evtchn_send) (struct domain *d, struct evtchn *chn); 25.40 int (*evtchn_status) (struct domain *d, struct evtchn *chn); 25.41 @@ -149,69 +133,19 @@ static inline void xsm_security_domainin 25.42 xsm_call(security_domaininfo(d, info)); 25.43 } 25.44 25.45 -static inline int xsm_setvcpucontext(struct domain *d) 25.46 +static inline int xsm_domctl(struct xen_domctl *domctl) 25.47 { 25.48 - return xsm_call(setvcpucontext(d)); 25.49 -} 25.50 - 25.51 -static inline int xsm_pausedomain (struct domain *d) 25.52 -{ 25.53 - return xsm_call(pausedomain(d)); 25.54 + return xsm_call(domctl(domctl)); 25.55 } 25.56 25.57 -static inline int xsm_unpausedomain (struct domain *d) 25.58 -{ 25.59 - return xsm_call(unpausedomain(d)); 25.60 -} 25.61 - 25.62 -static inline int xsm_resumedomain (struct domain *d) 25.63 -{ 25.64 - return xsm_call(resumedomain(d)); 25.65 -} 25.66 - 25.67 -static inline int xsm_domain_create (struct domain *d, u32 ssidref) 25.68 +static inline int xsm_domain_create(struct domain *d, u32 ssidref) 25.69 { 25.70 return xsm_call(domain_create(d, ssidref)); 25.71 } 25.72 25.73 -static inline int xsm_max_vcpus(struct domain *d) 25.74 -{ 25.75 - return xsm_call(max_vcpus(d)); 25.76 -} 25.77 - 25.78 -static inline int xsm_destroydomain (struct domain *d) 25.79 -{ 25.80 - return xsm_call(destroydomain(d)); 25.81 -} 25.82 - 25.83 -static inline int xsm_vcpuaffinity (int cmd, struct domain *d) 25.84 -{ 25.85 - return xsm_call(vcpuaffinity(cmd, d)); 25.86 -} 25.87 - 25.88 -static inline int xsm_scheduler (struct domain *d) 25.89 +static inline int xsm_getdomaininfo(struct domain *d) 25.90 { 25.91 - return xsm_call(scheduler(d)); 25.92 -} 25.93 - 25.94 -static inline int 
xsm_getdomaininfo (struct domain *d) 25.95 -{ 25.96 - return xsm_call(getdomaininfo(d)); 25.97 -} 25.98 - 25.99 -static inline int xsm_getvcpucontext (struct domain *d) 25.100 -{ 25.101 - return xsm_call(getvcpucontext(d)); 25.102 -} 25.103 - 25.104 -static inline int xsm_getvcpuinfo (struct domain *d) 25.105 -{ 25.106 - return xsm_call(getvcpuinfo(d)); 25.107 -} 25.108 - 25.109 -static inline int xsm_domain_settime (struct domain *d) 25.110 -{ 25.111 - return xsm_call(domain_settime(d)); 25.112 + return xsm_call(domain_getdomaininfo(d)); 25.113 } 25.114 25.115 static inline int xsm_tbufcontrol (void) 25.116 @@ -229,33 +163,6 @@ static inline int xsm_sched_id (void) 25.117 return xsm_call(sched_id()); 25.118 } 25.119 25.120 -static inline int xsm_setdomainmaxmem (struct domain *d) 25.121 -{ 25.122 - return xsm_call(setdomainmaxmem(d)); 25.123 -} 25.124 - 25.125 -static inline int xsm_setdomainhandle (struct domain *d) 25.126 -{ 25.127 - return xsm_call(setdomainhandle(d)); 25.128 -} 25.129 - 25.130 -static inline int xsm_setdebugging (struct domain *d) 25.131 -{ 25.132 - return xsm_call(setdebugging(d)); 25.133 -} 25.134 - 25.135 -static inline int xsm_irq_permission (struct domain *d, uint8_t pirq, 25.136 - uint8_t access) 25.137 -{ 25.138 - return xsm_call(irq_permission(d, pirq, access)); 25.139 -} 25.140 - 25.141 -static inline int xsm_iomem_permission (struct domain *d, unsigned long mfn, 25.142 - uint8_t access) 25.143 -{ 25.144 - return xsm_call(iomem_permission(d, mfn, access)); 25.145 -} 25.146 - 25.147 static inline int xsm_perfcontrol (void) 25.148 { 25.149 return xsm_call(perfcontrol());