debuggers.hg
changeset 14642:81fec499a983: Merge
author:   Tim Deegan <Tim.Deegan@xensource.com>
date:     Wed Mar 28 08:44:08 2007 +0000 (2007-03-28)
parents:  bc2811bf7771 a138ae831515
children: 8a3b9908fd75
--- a/Config.mk	Wed Mar 28 08:40:42 2007 +0000
+++ b/Config.mk	Wed Mar 28 08:44:08 2007 +0000
@@ -31,17 +31,27 @@ EXTRA_INCLUDES += $(EXTRA_PREFIX)/includ
 EXTRA_LIB += $(EXTRA_PREFIX)/$(LIBDIR)
 endif
 
-# cc-option
+# cc-option: Check if compiler supports first option, else fall back to second.
 # Usage: cflags-y += $(call cc-option,$(CC),-march=winchip-c6,-march=i586)
 cc-option = $(shell if test -z "`$(1) $(2) -S -o /dev/null -xc \
               /dev/null 2>&1`"; then echo "$(2)"; else echo "$(3)"; fi ;)
 
-# cc-ver
+# cc-ver: Check compiler is at least specified version. Return boolean 'y'/'n'.
 # Usage: ifeq ($(call cc-ver,$(CC),0x030400),y)
 cc-ver = $(shell if [ $$((`$(1) -dumpversion | awk -F. \
            '{ printf "0x%02x%02x%02x", $$1, $$2, $$3}'`)) -ge $$(($(2))) ]; \
            then echo y; else echo n; fi ;)
 
+# cc-ver-check: Check compiler is at least specified version, else fail.
+# Usage: $(call cc-ver-check,CC,0x030400,"Require at least gcc-3.4")
+cc-ver-check = $(eval $(call cc-ver-check-closure,$(1),$(2),$(3)))
+define cc-ver-check-closure
+    ifeq ($$(call cc-ver,$$($(1)),$(2)),n)
+        override $(1) = echo "*** FATAL BUILD ERROR: "$(3) >&2; exit 1;
+        cc-option := n
+    endif
+endef
+
 ifneq ($(debug),y)
 CFLAGS += -DNDEBUG
 else
--- a/linux-2.6-xen-sparse/arch/x86_64/kernel/entry-xen.S	Wed Mar 28 08:40:42 2007 +0000
+++ b/linux-2.6-xen-sparse/arch/x86_64/kernel/entry-xen.S	Wed Mar 28 08:44:08 2007 +0000
@@ -148,11 +148,11 @@ NMI_MASK = 0x80000000
 	.endm
 
 /*
- * Must be consistent with the definition in arch-x86_64.h:
+ * Must be consistent with the definition in arch-x86/xen-x86_64.h:
  *	struct iret_context {
  *	u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
  *	};
- * #define VGCF_IN_SYSCALL (1<<8)
+ * with rax, r11, and rcx being taken care of in the hypercall stub.
  */
 .macro HYPERVISOR_IRET flag
 	testb $3,1*8(%rsp)
@@ -164,22 +164,16 @@ NMI_MASK = 0x80000000
 	jnz   1f
 
 	/* Direct iret to kernel space. Correct CS and SS. */
-	orb   $3,1*8(%rsp)
-	orb   $3,4*8(%rsp)
+	orl   $3,1*8(%rsp)
+	orl   $3,4*8(%rsp)
 1:	iretq
 
 2:	/* Slow iret via hypervisor. */
-	andl  $~NMI_MASK, 16(%rsp)
+	andl  $~NMI_MASK, 2*8(%rsp)
 	pushq $\flag
 	jmp  hypercall_page + (__HYPERVISOR_iret * 32)
 .endm
 
-	.macro SWITCH_TO_KERNEL ssoff,adjust=0
-	jc  1f
-	orb  $1,\ssoff-\adjust+4(%rsp)
-1:
-	.endm
-
 /*
  * A newly forked process directly context switches into this.
  */
--- a/linux-2.6-xen-sparse/drivers/xen/core/machine_reboot.c	Wed Mar 28 08:40:42 2007 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/core/machine_reboot.c	Wed Mar 28 08:44:08 2007 +0000
@@ -123,7 +123,7 @@ static void post_suspend(int suspend_can
 static int take_machine_down(void *p_fast_suspend)
 {
 	int fast_suspend = *(int *)p_fast_suspend;
-	int suspend_cancelled, err, cpu;
+	int suspend_cancelled, err;
 	extern void time_resume(void);
 
 	if (fast_suspend) {
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c	Wed Mar 28 08:40:42 2007 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c	Wed Mar 28 08:44:08 2007 +0000
@@ -236,7 +236,10 @@ static ssize_t xenbus_dev_write(struct f
 		break;
 
 	case XS_WATCH:
-	case XS_UNWATCH:
+	case XS_UNWATCH: {
+		static const char *XS_RESP = "OK";
+		struct xsd_sockmsg hdr;
+
 		path = u->u.buffer + sizeof(u->u.msg);
 		token = memchr(path, 0, u->u.msg.len);
 		if (token == NULL) {
@@ -246,9 +249,6 @@ static ssize_t xenbus_dev_write(struct f
 		token++;
 
 		if (msg_type == XS_WATCH) {
-			static const char * XS_WATCH_RESP = "OK";
-			struct xsd_sockmsg hdr;
-
 			watch = kmalloc(sizeof(*watch), GFP_KERNEL);
 			watch->watch.node = kmalloc(strlen(path)+1,
 			                            GFP_KERNEL);
@@ -266,11 +266,6 @@ static ssize_t xenbus_dev_write(struct f
 			}
 
 			list_add(&watch->list, &u->watches);
-
-			hdr.type = XS_WATCH;
-			hdr.len = strlen(XS_WATCH_RESP) + 1;
-			queue_reply(u, (char *)&hdr, sizeof(hdr));
-			queue_reply(u, (char *)XS_WATCH_RESP, hdr.len);
 		} else {
 			list_for_each_entry_safe(watch, tmp_watch,
 			                         &u->watches, list) {
@@ -285,7 +280,12 @@ static ssize_t xenbus_dev_write(struct f
 			}
 		}
 
+		hdr.type = msg_type;
+		hdr.len = strlen(XS_RESP) + 1;
+		queue_reply(u, (char *)&hdr, sizeof(hdr));
+		queue_reply(u, (char *)XS_RESP, hdr.len);
 		break;
+	}
 
 	default:
 		rc = -EINVAL;
--- a/tools/Rules.mk	Wed Mar 28 08:40:42 2007 +0000
+++ b/tools/Rules.mk	Wed Mar 28 08:44:08 2007 +0000
@@ -24,9 +24,9 @@ CFLAGS-$(CONFIG_X86_32) += $(call cc-opt
 CFLAGS += $(CFLAGS-y)
 
 # Require GCC v3.4+ (to avoid issues with alignment constraints in Xen headers)
-ifeq ($(CONFIG_X86)$(call cc-ver,$(CC),0x030400),yn)
-$(error Xen tools require at least gcc-3.4)
-endif
+check-$(CONFIG_X86) = $(call cc-ver-check,CC,0x030400,\
+                        "Xen requires at least gcc-3.4")
+$(eval $(check-y))
 
 %.opic: %.c
 	$(CC) $(CPPFLAGS) -DPIC $(CFLAGS) -fPIC -c -o $@ $<
--- a/tools/examples/xend-config.sxp	Wed Mar 28 08:40:42 2007 +0000
+++ b/tools/examples/xend-config.sxp	Wed Mar 28 08:44:08 2007 +0000
@@ -46,6 +46,11 @@
 #  (xen-api-server ((9363 pam '^localhost$ example\\.com$')
 #                   (unix none)))
 #
+# Optionally, the TCP Xen-API server can use SSL by specifying the private
+# key and certificate location:
+#
+#    (9367 pam '' /etc/xen/xen-api.key /etc/xen/xen-api.crt)
+#
 # Default:
 #   (xen-api-server ((unix)))
 
@@ -59,11 +64,18 @@
 
 #(xend-unix-path /var/lib/xend/xend-socket)
 
-# Address and port xend should use for the TCP XMLRPC interface,
+
+# Address and port xend should use for the legacy TCP XMLRPC interface,
 # if xen-tcp-xmlrpc-server is set.
 #(xen-tcp-xmlrpc-server-address 'localhost')
 #(xen-tcp-xmlrpc-server-port 8006)
 
+# SSL key and certificate to use for the legacy TCP XMLRPC interface.
+# Setting these will mean that this port serves only SSL connections as
+# opposed to plaintext ones.
+#(xend-tcp-xmlrpc-server-ssl-key-file  /etc/xen/xmlrpc.key)
+#(xend-tcp-xmlrpc-server-ssl-cert-file /etc/xen/xmlrpc.crt)
+
 
 # Port xend should use for the HTTP interface, if xend-http-server is set.
 #(xend-port 8000)
--- a/tools/ioemu/Makefile.target	Wed Mar 28 08:40:42 2007 +0000
+++ b/tools/ioemu/Makefile.target	Wed Mar 28 08:44:08 2007 +0000
@@ -193,6 +193,10 @@ ifdef CONFIG_SOLARIS
 LIBS+=-lsocket -lnsl -lresolv
 endif
 
+ifeq ($(debug),y)
+CFLAGS += -DQEMU_VNC_MONITOR_EXPORT
+endif
+
 # profiling code
 ifdef TARGET_GPROF
 LDFLAGS+=-p
--- a/tools/ioemu/vnc.c	Wed Mar 28 08:40:42 2007 +0000
+++ b/tools/ioemu/vnc.c	Wed Mar 28 08:44:08 2007 +0000
@@ -113,8 +113,10 @@ struct VncState
     int visible_w;
     int visible_h;
 
+#ifdef QEMU_VNC_MONITOR_EXPORT
     int ctl_keys;               /* Ctrl+Alt starts calibration */
    int shift_keys;             /* Shift / CapsLock keys */
+#endif
    int numlock;
 };
 
@@ -895,6 +897,7 @@ static void do_key_event(VncState *vs, i
             kbd_put_keycode(keycode & 0x7f);
         else
             kbd_put_keycode(keycode | 0x80);
+#ifdef QEMU_VNC_MONITOR_EXPORT
     } else if (down) {
         int qemu_keysym = 0;
 
@@ -922,8 +925,10 @@ static void do_key_event(VncState *vs, i
         }
         if (qemu_keysym != 0)
             kbd_put_keysym(qemu_keysym);
+#endif
     }
 
+#ifdef QEMU_VNC_MONITOR_EXPORT
     if (down) {
         switch (sym) {
         case XK_Control_L:
@@ -976,6 +981,10 @@ static void do_key_event(VncState *vs, i
             break;
         }
     }
+#else
+    if (!down && sym == XK_Num_Lock)
+        vs->numlock = !vs->numlock;
+#endif
 }
 
 static void key_event(VncState *vs, int down, uint32_t sym)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/python/xen/util/xmlrpcclient.py	Wed Mar 28 08:44:08 2007 +0000
@@ -0,0 +1,122 @@
+#============================================================================
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
+# License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+#============================================================================
+# Copyright (C) 2006 Anthony Liguori <aliguori@us.ibm.com>
+# Copyright (C) 2007 XenSource Inc.
+#============================================================================
+
+
+from httplib import FakeSocket, HTTPConnection, HTTP
+import socket
+import string
+import xmlrpclib
+from types import StringTypes
+
+
+try:
+    import SSHTransport
+    ssh_enabled = True
+except ImportError:
+    # SSHTransport is disabled on Python <2.4, because it uses the subprocess
+    # package.
+    ssh_enabled = False
+
+
+# A new ServerProxy that also supports httpu urls.  An http URL comes in the
+# form:
+#
+# httpu:///absolute/path/to/socket.sock
+#
+# It assumes that the RPC handler is /RPC2.  This probably needs to be improved
+
+class HTTPUnixConnection(HTTPConnection):
+    def connect(self):
+        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        self.sock.connect(self.host)
+
+class HTTPUnix(HTTP):
+    _connection_class = HTTPUnixConnection
+
+class UnixTransport(xmlrpclib.Transport):
+    def request(self, host, handler, request_body, verbose=0):
+        self.__handler = handler
+        return xmlrpclib.Transport.request(self, host, '/RPC2',
+                                           request_body, verbose)
+    def make_connection(self, host):
+        return HTTPUnix(self.__handler)
+
+
+# We need our own transport for HTTPS, because xmlrpclib.SafeTransport is
+# broken -- it does not handle ERROR_ZERO_RETURN properly.
+class HTTPSTransport(xmlrpclib.SafeTransport):
+    def _parse_response(self, file, sock):
+        p, u = self.getparser()
+        while 1:
+            try:
+                if sock:
+                    response = sock.recv(1024)
+                else:
+                    response = file.read(1024)
+            except socket.sslerror, exn:
+                if exn[0] == socket.SSL_ERROR_ZERO_RETURN:
+                    break
+                raise
+
+            if not response:
+                break
+            if self.verbose:
+                print 'body:', repr(response)
+            p.feed(response)
+
+        file.close()
+        p.close()
+        return u.close()
+
+
+# See xmlrpclib2.TCPXMLRPCServer._marshalled_dispatch.
+def conv_string(x):
+    if isinstance(x, StringTypes):
+        s = string.replace(x, "'", r"\047")
+        exec "s = '" + s + "'"
+        return s
+    else:
+        return x
+
+
+class ServerProxy(xmlrpclib.ServerProxy):
+    def __init__(self, uri, transport=None, encoding=None, verbose=0,
+                 allow_none=1):
+        if transport == None:
+            (protocol, rest) = uri.split(':', 1)
+            if protocol == 'httpu':
+                uri = 'http:' + rest
+                transport = UnixTransport()
+            elif protocol == 'https':
+                transport = HTTPSTransport()
+            elif protocol == 'ssh':
+                global ssh_enabled
+                if ssh_enabled:
+                    (transport, uri) = SSHTransport.getHTTPURI(uri)
+                else:
+                    raise ValueError(
+                        "SSH transport not supported on Python <2.4.")
+        xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding,
+                                       verbose, allow_none)
+
+    def __request(self, methodname, params):
+        response = xmlrpclib.ServerProxy.__request(self, methodname, params)
+
+        if isinstance(response, tuple):
+            return tuple([conv_string(x) for x in response])
+        else:
+            return conv_string(response)
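
Usage sketch (not part of this changeset): the ServerProxy above dispatches on
the URL scheme, so a caller only picks a URL. The socket path and port below
are illustrative assumptions.

    from xen.util.xmlrpcclient import ServerProxy

    # 'httpu' selects UnixTransport: XML-RPC over a Unix domain socket.
    xend = ServerProxy('httpu:///var/run/xend/xmlrpc.sock')

    # 'https' selects HTTPSTransport, whose _parse_response tolerates
    # SSL_ERROR_ZERO_RETURN when the peer shuts the connection down.
    xend_ssl = ServerProxy('https://localhost:8006/')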
--- a/tools/python/xen/util/xmlrpclib2.py	Wed Mar 28 08:40:42 2007 +0000
+++ b/tools/python/xen/util/xmlrpclib2.py	Wed Mar 28 08:44:08 2007 +0000
@@ -26,7 +26,6 @@ import fcntl
 from types import *
 
 
-from httplib import HTTPConnection, HTTP
 from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
 import SocketServer
 import xmlrpclib, socket, os, stat
@@ -36,14 +35,6 @@ import mkdir
 from xen.web import connection
 from xen.xend.XendLogging import log
 
-try:
-    import SSHTransport
-    ssh_enabled = True
-except ImportError:
-    # SSHTransport is disabled on Python <2.4, because it uses the subprocess
-    # package.
-    ssh_enabled = False
-
 #
 # Convert all integers to strings as described in the Xen API
 #
@@ -64,13 +55,6 @@ def stringify(value):
         return value
 
 
-# A new ServerProxy that also supports httpu urls.  An http URL comes in the
-# form:
-#
-# httpu:///absolute/path/to/socket.sock
-#
-# It assumes that the RPC handler is /RPC2.  This probably needs to be improved
-
 # We're forced to subclass the RequestHandler class so that we can work around
 # some bugs in Keep-Alive handling and also enabled it by default
 class XMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
@@ -106,60 +90,6 @@ class XMLRPCRequestHandler(SimpleXMLRPCR
         if self.close_connection == 1:
             self.connection.shutdown(1)
 
-class HTTPUnixConnection(HTTPConnection):
-    def connect(self):
-        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-        self.sock.connect(self.host)
-
-class HTTPUnix(HTTP):
-    _connection_class = HTTPUnixConnection
-
-class UnixTransport(xmlrpclib.Transport):
-    def request(self, host, handler, request_body, verbose=0):
-        self.__handler = handler
-        return xmlrpclib.Transport.request(self, host, '/RPC2',
-                                           request_body, verbose)
-    def make_connection(self, host):
-        return HTTPUnix(self.__handler)
-
-
-# See _marshalled_dispatch below.
-def conv_string(x):
-    if isinstance(x, StringTypes):
-        s = string.replace(x, "'", r"\047")
-        exec "s = '" + s + "'"
-        return s
-    else:
-        return x
-
-
-class ServerProxy(xmlrpclib.ServerProxy):
-    def __init__(self, uri, transport=None, encoding=None, verbose=0,
-                 allow_none=1):
-        if transport == None:
-            (protocol, rest) = uri.split(':', 1)
-            if protocol == 'httpu':
-                uri = 'http:' + rest
-                transport = UnixTransport()
-            elif protocol == 'ssh':
-                global ssh_enabled
-                if ssh_enabled:
-                    (transport, uri) = SSHTransport.getHTTPURI(uri)
-                else:
-                    raise ValueError(
-                        "SSH transport not supported on Python <2.4.")
-        xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding,
-                                       verbose, allow_none)
-
-    def __request(self, methodname, params):
-        response = xmlrpclib.ServerProxy.__request(self, methodname, params)
-
-        if isinstance(response, tuple):
-            return tuple([conv_string(x) for x in response])
-        else:
-            return conv_string(response)
-
-
 # This is a base XML-RPC server for TCP.  It sets allow_reuse_address to
 # true, and has an improved marshaller that logs and serializes exceptions.
--- a/tools/python/xen/xend/XendAPI.py	Wed Mar 28 08:40:42 2007 +0000
+++ b/tools/python/xen/xend/XendAPI.py	Wed Mar 28 08:44:08 2007 +0000
@@ -982,10 +982,10 @@ class XendAPI(object):
     # Xen API: Class PIF
     # ----------------------------------------------------------------
 
-    PIF_attr_ro = ['metrics']
+    PIF_attr_ro = ['network',
+                   'host',
+                   'metrics']
     PIF_attr_rw = ['device',
-                   'network',
-                   'host',
                    'MAC',
                    'MTU',
                    'VLAN']
@@ -1647,14 +1647,15 @@ class XendAPI(object):
 
     def VM_send_sysrq(self, _, vm_ref, req):
         xeninfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
-        if xeninfo.state != XEN_API_VM_POWER_STATE_RUNNING:
+        if xeninfo.state == XEN_API_VM_POWER_STATE_RUNNING \
+               or xeninfo.state == XEN_API_VM_POWER_STATE_PAUSED:
+            xeninfo.send_sysrq(req)
+            return xen_api_success_void()
+        else:
             return xen_api_error(
                 ['VM_BAD_POWER_STATE', vm_ref,
                  XendDomain.POWER_STATE_NAMES[XEN_API_VM_POWER_STATE_RUNNING],
                  XendDomain.POWER_STATE_NAMES[xeninfo.state]])
-        xeninfo.send_sysrq(req)
-        return xen_api_success_void()
-
 
     def VM_send_trigger(self, _, vm_ref, trigger, vcpu):
         xendom = XendDomain.instance()
@@ -1689,6 +1690,9 @@ class XendAPI(object):
     VM_metrics_attr_rw = []
     VM_metrics_methods = []
 
+    def VIF_metrics_get_all(self, session):
+        return self.VIF_get_all(session)
+
     def _VM_metrics_get(self, _, ref):
         return XendVMMetrics.get_by_uuid(ref)
 
@@ -1699,11 +1703,11 @@ class XendAPI(object):
     # Xen API: Class VBD
     # ----------------------------------------------------------------
 
-    VBD_attr_ro = ['metrics',
+    VBD_attr_ro = ['VM',
+                   'VDI',
+                   'metrics',
                    'runtime_properties']
-    VBD_attr_rw = ['VM',
-                   'VDI',
-                   'device',
+    VBD_attr_rw = ['device',
                    'bootable',
                    'mode',
                    'type']
@@ -1852,7 +1856,10 @@ class XendAPI(object):
                            'io_write_kbs',
                            'last_updated']
     VBD_metrics_attr_rw = []
-    VBD_methods = []
+    VBD_metrics_methods = []
+
+    def VBD_metrics_get_all(self, session):
+        return self.VBD_get_all(session)
 
     def VBD_metrics_get_record(self, _, ref):
         vm = XendDomain.instance().get_vm_with_dev_uuid('vbd', ref)
@@ -1877,11 +1884,11 @@ class XendAPI(object):
     # Xen API: Class VIF
     # ----------------------------------------------------------------
 
-    VIF_attr_ro = ['metrics',
+    VIF_attr_ro = ['network',
+                   'VM',
+                   'metrics',
                    'runtime_properties']
     VIF_attr_rw = ['device',
-                   'network',
-                   'VM',
                    'MAC',
                    'MTU']
 
@@ -1945,10 +1952,10 @@ class XendAPI(object):
         return xen_api_success(vif_ref)
 
     def VIF_get_VM(self, session, vif_ref):
-        xendom = XendDomain.instance()
-        vm = xendom.get_vm_with_dev_uuid('vif', vif_ref)
+        xendom = XendDomain.instance()
+        vm = xendom.get_vm_with_dev_uuid('vif', vif_ref)
         return xen_api_success(vm.get_uuid())
-
+
     def VIF_get_MTU(self, session, vif_ref):
         return self._VIF_get(vif_ref, 'MTU')
 
@@ -1993,7 +2000,7 @@ class XendAPI(object):
                            'io_write_kbs',
                            'last_updated']
     VIF_metrics_attr_rw = []
-    VIF_methods = []
+    VIF_metrics_methods = []
 
     def VIF_metrics_get_record(self, _, ref):
         vm = XendDomain.instance().get_vm_with_dev_uuid('vif', ref)
--- a/tools/python/xen/xend/XendClient.py	Wed Mar 28 08:40:42 2007 +0000
+++ b/tools/python/xen/xend/XendClient.py	Wed Mar 28 08:44:08 2007 +0000
@@ -17,7 +17,7 @@
 # Copyright (C) 2006 Anthony Liguori <aliguori@us.ibm.com>
 #============================================================================
 
-from xen.util.xmlrpclib2 import ServerProxy
+from xen.util.xmlrpcclient import ServerProxy
 import os
 import sys
--- a/tools/python/xen/xend/XendConfig.py	Wed Mar 28 08:40:42 2007 +0000
+++ b/tools/python/xen/xend/XendConfig.py	Wed Mar 28 08:44:08 2007 +0000
@@ -298,7 +298,7 @@ class XendConfig(dict):
             'actions_after_reboot': 'restart',
             'actions_after_crash': 'restart',
             'actions_after_suspend': '',
-            'is_template': False,
+            'is_a_template': False,
             'is_control_domain': False,
             'features': '',
             'PV_bootloader': '',
@@ -452,7 +452,10 @@ class XendConfig(dict):
         for key, typ in XENAPI_CFG_TYPES.items():
             val = sxp.child_value(sxp_cfg, key)
             if val is not None:
-                cfg[key] = typ(val)
+                try:
+                    cfg[key] = typ(val)
+                except (ValueError, TypeError), e:
+                    log.warn('Unable to convert type value for key: %s' % key)
 
         # Convert deprecated options to current equivalents.
 
@@ -845,6 +848,8 @@ class XendConfig(dict):
                     sxpr.append([name, s])
 
         for xenapi, legacy in XENAPI_CFG_TO_LEGACY_CFG.items():
+            if legacy in ('cpus',): # skip this
+                continue
             if self.has_key(xenapi) and self[xenapi] not in (None, []):
                 if type(self[xenapi]) == bool:
                     # convert booleans to ints before making an sxp item
@@ -858,7 +863,7 @@ class XendConfig(dict):
             sxpr.append(["memory", int(self["memory_dynamic_max"])/MiB])
 
         for legacy in LEGACY_UNSUPPORTED_BY_XENAPI_CFG:
-            if legacy in ('domid', 'uuid'): # skip these
+            if legacy in ('domid', 'uuid', 'cpus'): # skip these
                 continue
             if self.has_key(legacy) and self[legacy] not in (None, []):
                 sxpr.append([legacy, self[legacy]])
--- a/tools/python/xen/xend/XendDomain.py	Wed Mar 28 08:40:42 2007 +0000
+++ b/tools/python/xen/xend/XendDomain.py	Wed Mar 28 08:44:08 2007 +0000
@@ -569,6 +569,26 @@ class XendDomain:
         finally:
             self.domains_lock.release()
 
+    def autostart_domains(self):
+        """ Autostart managed domains that are marked as such. """
+
+        need_starting = []
+
+        self.domains_lock.acquire()
+        try:
+            for dom_uuid, dom in self.managed_domains.items():
+                if dom and dom.state == DOM_STATE_HALTED:
+                    on_xend_start = dom.info.get('on_xend_start', 'ignore')
+                    auto_power_on = dom.info.get('auto_power_on', False)
+                    should_start = (on_xend_start == 'start') or auto_power_on
+                    if should_start:
+                        need_starting.append(dom_uuid)
+        finally:
+            self.domains_lock.release()
+
+        for dom_uuid in need_starting:
+            self.domain_start(dom_uuid, False)
+
     def cleanup_domains(self):
         """Clean up domains that are marked as autostop.
         Should be called when Xend goes down.  This is currently
--- a/tools/python/xen/xend/XendDomainInfo.py	Wed Mar 28 08:40:42 2007 +0000
+++ b/tools/python/xen/xend/XendDomainInfo.py	Wed Mar 28 08:44:08 2007 +0000
@@ -152,8 +152,9 @@ def recreate(info, priv):
     try:
         vmpath = xstransact.Read(dompath, "vm")
         if not vmpath:
-            log.warn('/local/domain/%d/vm is missing. recreate is '
-                     'confused, trying our best to recover' % domid)
+            if not priv:
+                log.warn('/local/domain/%d/vm is missing. recreate is '
+                         'confused, trying our best to recover' % domid)
             needs_reinitialising = True
             raise XendError('reinit')
 
--- a/tools/python/xen/xend/XendLogging.py	Wed Mar 28 08:40:42 2007 +0000
+++ b/tools/python/xen/xend/XendLogging.py	Wed Mar 28 08:44:08 2007 +0000
@@ -62,6 +62,7 @@ if 'TRACE' not in logging.__dict__:
     # Work around a bug in Python's inspect module: findsource is supposed to
     # raise IOError if it fails, with other functions in that module coping
     # with that, but some people are seeing IndexError raised from there.
+    # This is Python bug 1628987.  http://python.org/sf/1628987.
     if hasattr(inspect, 'findsource'):
         real_findsource = getattr(inspect, 'findsource')
         def findsource(*args, **kwargs):
--- a/tools/python/xen/xend/XendMonitor.py	Wed Mar 28 08:40:42 2007 +0000
+++ b/tools/python/xen/xend/XendMonitor.py	Wed Mar 28 08:44:08 2007 +0000
@@ -24,8 +24,8 @@ import re
 """Monitoring thread to keep track of Xend statistics. """
 
 VBD_SYSFS_PATH = '/sys/devices/xen-backend/'
-VBD_WR_PATH = VBD_SYSFS_PATH + '%s/statistics/wr_req'
-VBD_RD_PATH = VBD_SYSFS_PATH + '%s/statistics/rd_req'
+VBD_WR_PATH = VBD_SYSFS_PATH + '%s/statistics/wr_sect'
+VBD_RD_PATH = VBD_SYSFS_PATH + '%s/statistics/rd_sect'
 VBD_DOMAIN_RE = r'vbd-(?P<domid>\d+)-(?P<devid>\d+)$'
 
 NET_PROCFS_PATH = '/proc/net/dev'
@@ -51,14 +51,9 @@ VIF_DOMAIN_RE = re.compile(r'vif(?P<domi
                            PROC_NET_DEV_RE)
 PIF_RE = re.compile(r'^\s*(?P<iface>peth\d+):\s*' + PROC_NET_DEV_RE)
 
-# The VBD transfer figures are in "requests" where we don't
-# really know how many bytes per requests. For now we make
-# up a number roughly could be.
-VBD_ROUGH_BYTES_PER_REQUEST = 1024 * 8 * 4
-
 # Interval to poll xc, sysfs and proc
 POLL_INTERVAL = 2.0
-
+SECTOR_SIZE = 512
 class XendMonitor(threading.Thread):
     """Monitors VCPU, VBD, VIF and PIF statistics for Xen API.
 
@@ -186,9 +181,8 @@ class XendMonitor(threading.Thread):
             usage_at = time.time()
             rd_stat = int(open(rd_stat_path).readline().strip())
             wr_stat = int(open(wr_stat_path).readline().strip())
-            rd_stat *= VBD_ROUGH_BYTES_PER_REQUEST
-            wr_stat *= VBD_ROUGH_BYTES_PER_REQUEST
-
+            rd_stat *= SECTOR_SIZE
+            wr_stat *= SECTOR_SIZE
             if domid not in stats:
                 stats[domid] = {}
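
A quick check of the new arithmetic (illustrative numbers, not from the
changeset):

    SECTOR_SIZE = 512                 # bytes per sector, as defined above
    rd_sect = 2048                    # hypothetical rd_sect reading
    rd_bytes = rd_sect * SECTOR_SIZE  # 1048576 bytes, exact; the old code
                                      # would have guessed 2048 requests *
                                      # 32768 (1024 * 8 * 4) bytes instead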
--- a/tools/python/xen/xend/XendOptions.py	Wed Mar 28 08:40:42 2007 +0000
+++ b/tools/python/xen/xend/XendOptions.py	Wed Mar 28 08:44:08 2007 +0000
@@ -165,7 +165,13 @@ class XendOptions:
 
     def get_xend_tcp_xmlrpc_server_address(self):
         return self.get_config_string("xend-tcp-xmlrpc-server-address",
-                                      self.xend_tcp_xmlrpc_server_address_default)
+                                      self.xend_tcp_xmlrpc_server_address_default)
+
+    def get_xend_tcp_xmlrpc_server_ssl_key_file(self):
+        return self.get_config_string("xend-tcp-xmlrpc-server-ssl-key-file")
+
+    def get_xend_tcp_xmlrpc_server_ssl_cert_file(self):
+        return self.get_config_string("xend-tcp-xmlrpc-server-ssl-cert-file")
 
     def get_xend_unix_xmlrpc_server(self):
         return self.get_config_bool("xend-unix-xmlrpc-server",
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/python/xen/xend/server/SSLXMLRPCServer.py	Wed Mar 28 08:44:08 2007 +0000
@@ -0,0 +1,103 @@
+#============================================================================
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
+# License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+#============================================================================
+# Copyright (C) 2007 XenSource Inc.
+#============================================================================
+
+
+"""
+HTTPS wrapper for an XML-RPC server interface.  Requires PyOpenSSL (Debian
+package python-pyopenssl).
+"""
+
+import socket
+
+from OpenSSL import SSL
+
+from xen.util.xmlrpclib2 import XMLRPCRequestHandler, TCPXMLRPCServer
+
+
+class SSLXMLRPCRequestHandler(XMLRPCRequestHandler):
+    def setup(self):
+        self.connection = self.request
+        self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
+        self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
+
+#
+# Taken from pyOpenSSL-0.6 examples (public-domain)
+#
+
+class SSLWrapper:
+    """
+    """
+    def __init__(self, conn):
+        """
+        Connection is not yet a new-style class,
+        so I'm making a proxy instead of subclassing.
+        """
+        self.__dict__["conn"] = conn
+    def __getattr__(self, name):
+        return getattr(self.__dict__["conn"], name)
+    def __setattr__(self, name, value):
+        setattr(self.__dict__["conn"], name, value)
+
+    def close(self):
+        self.shutdown()
+        return self.__dict__["conn"].close()
+
+    def shutdown(self, how=1):
+        """
+        SimpleXMLRpcServer.doPOST calls shutdown(1),
+        and Connection.shutdown() doesn't take
+        an argument. So we just discard the argument.
+        """
+        # Block until the shutdown is complete
+        self.__dict__["conn"].shutdown()
+        self.__dict__["conn"].shutdown()
+
+    def accept(self):
+        """
+        This is the other part of the shutdown() workaround.
+        Since servers create new sockets, we have to infect
+        them with our magic. :)
+        """
+        c, a = self.__dict__["conn"].accept()
+        return (SSLWrapper(c), a)
+
+#
+# End of pyOpenSSL-0.6 example code.
+#
+
+class SSLXMLRPCServer(TCPXMLRPCServer):
+    def __init__(self, addr, allowed, xenapi, logRequests = 1,
+                 ssl_key_file = None, ssl_cert_file = None):
+
+        TCPXMLRPCServer.__init__(self, addr, allowed, xenapi,
+                                 SSLXMLRPCRequestHandler, logRequests)
+
+        if not ssl_key_file or not ssl_cert_file:
+            raise ValueError("SSLXMLRPCServer requires ssl_key_file "
+                             "and ssl_cert_file to be set.")
+
+        # make a SSL socket
+        ctx = SSL.Context(SSL.SSLv23_METHOD)
+        ctx.set_options(SSL.OP_NO_SSLv2)
+        ctx.use_privatekey_file (ssl_key_file)
+        ctx.use_certificate_file(ssl_cert_file)
+        self.socket = SSLWrapper(SSL.Connection(ctx,
+                                 socket.socket(self.address_family,
+                                               self.socket_type)))
+        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        self.server_bind()
+        self.server_activate()
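
A minimal sketch of constructing the new server directly (the key/cert paths
and port are placeholder assumptions; pyOpenSSL must be installed):

    from xen.xend.server.SSLXMLRPCServer import SSLXMLRPCServer

    server = SSLXMLRPCServer(('localhost', 9367),   # addr
                             None,                  # allowed: no host filter
                             True,                  # xenapi dispatch enabled
                             ssl_key_file = '/etc/xen/xen-api.key',
                             ssl_cert_file = '/etc/xen/xen-api.crt')
    server.serve_forever()                          # standard SocketServer loop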
--- a/tools/python/xen/xend/server/SrvServer.py	Wed Mar 28 08:40:42 2007 +0000
+++ b/tools/python/xen/xend/server/SrvServer.py	Wed Mar 28 08:44:08 2007 +0000
@@ -52,6 +52,7 @@ from xen.xend import XendNode, XendOptio
 from xen.xend import Vifctl
 from xen.xend.XendLogging import log
 from xen.xend.XendClient import XEN_API_SOCKET
+from xen.xend.XendDomain import instance as xenddomain
 from xen.web.SrvDir import SrvDir
 
 from SrvRoot import SrvRoot
@@ -72,7 +73,7 @@ class XendServers:
     def add(self, server):
         self.servers.append(server)
 
-    def cleanup(self, signum = 0, frame = None):
+    def cleanup(self, signum = 0, frame = None, reloading = False):
         log.debug("SrvServer.cleanup()")
         self.cleaningUp = True
         for server in self.servers:
@@ -80,12 +81,18 @@ class XendServers:
                 server.shutdown()
             except:
                 pass
+
+        # clean up domains for those that have on_xend_stop
+        if not reloading:
+            xenddomain().cleanup_domains()
+
         self.running = False
+
 
     def reloadConfig(self, signum = 0, frame = None):
         log.debug("SrvServer.reloadConfig()")
         self.reloadingConfig = True
-        self.cleanup(signum, frame)
+        self.cleanup(signum, frame, reloading = True)
 
     def start(self, status):
         # Running the network script will spawn another process, which takes
@@ -144,6 +151,12 @@ class XendServers:
             status.close()
             status = None
 
+        # Reaching this point means we can auto start domains
+        try:
+            xenddomain().autostart_domains()
+        except Exception, e:
+            log.exception("Failed while autostarting domains")
+
         # loop to keep main thread alive until it receives a SIGTERM
         self.running = True
         while self.running:
@@ -172,33 +185,49 @@ def _loadConfig(servers, root, reload):
     api_cfg = xoptions.get_xen_api_server()
     if api_cfg:
         try:
-            addrs = [(str(x[0]).split(':'),
-                      len(x) > 1 and x[1] or XendAPI.AUTH_PAM,
-                      len(x) > 2 and x[2] and map(re.compile, x[2].split(" "))
-                      or None)
-                     for x in api_cfg]
-            for addrport, auth, allowed in addrs:
-                if auth not in [XendAPI.AUTH_PAM, XendAPI.AUTH_NONE]:
-                    log.error('Xen-API server configuration %s is invalid, ' +
-                              'as %s is not a valid authentication type.',
-                              api_cfg, auth)
-                    break
+            for server_cfg in api_cfg:
+                # Parse the xen-api-server config
+
+                host = 'localhost'
+                port = 0
+                use_tcp = False
+                ssl_key_file = None
+                ssl_cert_file = None
+                auth_method = XendAPI.AUTH_NONE
+                hosts_allowed = None
+
+                host_addr = server_cfg[0].split(':', 1)
+                if len(host_addr) == 1 and host_addr[0].lower() == 'unix':
+                    use_tcp = False
+                elif len(host_addr) == 1:
+                    use_tcp = True
+                    port = int(host_addr[0])
+                elif len(host_addr) == 2:
+                    use_tcp = True
+                    host = str(host_addr[0])
+                    port = int(host_addr[1])
 
-                if len(addrport) == 1:
-                    if addrport[0] == 'unix':
-                        servers.add(XMLRPCServer(auth, True,
-                                                 path = XEN_API_SOCKET,
-                                                 hosts_allowed = allowed))
-                    else:
-                        servers.add(
-                            XMLRPCServer(auth, True, True, '',
-                                         int(addrport[0]),
-                                         hosts_allowed = allowed))
-                else:
-                    addr, port = addrport
-                    servers.add(XMLRPCServer(auth, True, True, addr,
-                                             int(port),
-                                             hosts_allowed = allowed))
+                if len(server_cfg) > 1:
+                    if server_cfg[1] in [XendAPI.AUTH_PAM, XendAPI.AUTH_NONE]:
+                        auth_method = server_cfg[1]
+
+                if len(server_cfg) > 2:
+                    hosts_allowed = server_cfg[2] or None
+
+                if len(server_cfg) > 4:
+                    # SSL key and cert file
+                    ssl_key_file = server_cfg[3]
+                    ssl_cert_file = server_cfg[4]
+
+                servers.add(XMLRPCServer(auth_method, True, use_tcp = use_tcp,
+                                         ssl_key_file = ssl_key_file,
+                                         ssl_cert_file = ssl_cert_file,
+                                         host = host, port = port,
+                                         path = XEN_API_SOCKET,
+                                         hosts_allowed = hosts_allowed))
+
         except (ValueError, TypeError), exn:
            log.exception('Xen API Server init failed')
            log.error('Xen-API server configuration %s is invalid.', api_cfg)
@@ -206,8 +235,17 @@ def _loadConfig(servers, root, reload):
     if xoptions.get_xend_tcp_xmlrpc_server():
         addr = xoptions.get_xend_tcp_xmlrpc_server_address()
         port = xoptions.get_xend_tcp_xmlrpc_server_port()
-        servers.add(XMLRPCServer(XendAPI.AUTH_PAM, False, use_tcp = True,
-                                 host = addr, port = port))
+        ssl_key_file = xoptions.get_xend_tcp_xmlrpc_server_ssl_key_file()
+        ssl_cert_file = xoptions.get_xend_tcp_xmlrpc_server_ssl_cert_file()
+
+        if ssl_key_file and ssl_cert_file:
+            servers.add(XMLRPCServer(XendAPI.AUTH_PAM, False, use_tcp = True,
+                                     ssl_key_file = ssl_key_file,
+                                     ssl_cert_file = ssl_cert_file,
+                                     host = addr, port = port))
+        else:
+            servers.add(XMLRPCServer(XendAPI.AUTH_PAM, False, use_tcp = True,
+                                     host = addr, port = port))
 
     if xoptions.get_xend_unix_xmlrpc_server():
         servers.add(XMLRPCServer(XendAPI.AUTH_PAM, False))
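
To make the new parsing loop concrete, here is how the SSL example from
xend-config.sxp above, (9367 pam '' /etc/xen/xen-api.key /etc/xen/xen-api.crt),
would be picked apart (a sketch; values shown inline):

    server_cfg = ['9367', 'pam', '', '/etc/xen/xen-api.key',
                  '/etc/xen/xen-api.crt']

    host_addr = server_cfg[0].split(':', 1)   # ['9367'], not 'unix', so TCP
    use_tcp, port = True, int(host_addr[0])   # port 9367, host stays 'localhost'
    auth_method = server_cfg[1]               # 'pam'
    hosts_allowed = server_cfg[2] or None     # '' -> None (no host filter)
    ssl_key_file = server_cfg[3]              # len(server_cfg) > 4, so SSL on
    ssl_cert_file = server_cfg[4]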
--- a/tools/python/xen/xend/server/XMLRPCServer.py	Wed Mar 28 08:40:42 2007 +0000
+++ b/tools/python/xen/xend/server/XMLRPCServer.py	Wed Mar 28 08:44:08 2007 +0000
@@ -21,6 +21,11 @@ import socket
 import types
 import xmlrpclib
 from xen.util.xmlrpclib2 import UnixXMLRPCServer, TCPXMLRPCServer
+try:
+    from SSLXMLRPCServer import SSLXMLRPCServer
+    ssl_enabled = True
+except ImportError:
+    ssl_enabled = False
 
 from xen.xend import XendAPI, XendDomain, XendDomainInfo, XendNode
 from xen.xend import XendLogging, XendDmesg
@@ -87,14 +92,20 @@ methods = ['device_create', 'device_conf
 exclude = ['domain_create', 'domain_restore']
 
 class XMLRPCServer:
-    def __init__(self, auth, use_xenapi, use_tcp=False, host = "localhost",
-                 port = 8006, path = XML_RPC_SOCKET, hosts_allowed = None):
+    def __init__(self, auth, use_xenapi, use_tcp = False,
+                 ssl_key_file = None, ssl_cert_file = None,
+                 host = "localhost", port = 8006, path = XML_RPC_SOCKET,
+                 hosts_allowed = None):
+
         self.use_tcp = use_tcp
         self.port = port
         self.host = host
         self.path = path
         self.hosts_allowed = hosts_allowed
 
+        self.ssl_key_file = ssl_key_file
+        self.ssl_cert_file = ssl_cert_file
+
         self.ready = False
         self.running = True
         self.auth = auth
@@ -107,14 +118,33 @@ class XMLRPCServer:
 
         try:
             if self.use_tcp:
-                log.info("Opening TCP XML-RPC server on %s%d%s",
+                using_ssl = self.ssl_key_file and self.ssl_cert_file
+
+                log.info("Opening %s XML-RPC server on %s%d%s",
+                         using_ssl and 'HTTPS' or 'TCP',
                          self.host and '%s:' % self.host or
                          'all interfaces, port ',
                          self.port, authmsg)
-                self.server = TCPXMLRPCServer((self.host, self.port),
-                                              self.hosts_allowed,
-                                              self.xenapi is not None,
-                                              logRequests = False)
+
+                if using_ssl and not ssl_enabled:
+                    raise ValueError("pyOpenSSL not installed. "
+                                     "Unable to start HTTPS XML-RPC server")
+
+                if using_ssl:
+                    self.server = SSLXMLRPCServer(
+                        (self.host, self.port),
+                        self.hosts_allowed,
+                        self.xenapi is not None,
+                        logRequests = False,
+                        ssl_key_file = self.ssl_key_file,
+                        ssl_cert_file = self.ssl_cert_file)
+                else:
+                    self.server = TCPXMLRPCServer(
+                        (self.host, self.port),
+                        self.hosts_allowed,
+                        self.xenapi is not None,
+                        logRequests = False)
+
             else:
                 log.info("Opening Unix domain socket XML-RPC server on %s%s",
                          self.path, authmsg)
@@ -126,7 +156,12 @@ class XMLRPCServer:
             ready = True
             running = False
             return
-
+        except Exception, e:
+            log.exception('Cannot start server: %s!', e)
+            ready = True
+            running = False
+            return
+
         # Register Xen API Functions
         # -------------------------------------------------------------------
         # exportable functions are ones that do not begin with '_'
--- a/tools/python/xen/xm/XenAPI.py	Wed Mar 28 08:40:42 2007 +0000
+++ b/tools/python/xen/xm/XenAPI.py	Wed Mar 28 08:44:08 2007 +0000
@@ -12,7 +12,7 @@
 # License along with this library; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 #============================================================================
-# Copyright (C) 2006 XenSource Inc.
+# Copyright (C) 2006-2007 XenSource Inc.
 #============================================================================
 #
 # Parts of this file are based upon xmlrpclib.py, the XML-RPC client
@@ -47,7 +47,7 @@
 import gettext
 import xmlrpclib
 
-import xen.util.xmlrpclib2
+import xen.util.xmlrpcclient as xmlrpcclient
 
 
 translation = gettext.translation('xen-xm', fallback = True)
@@ -85,7 +85,7 @@ class Failure(Exception):
 _RECONNECT_AND_RETRY = (lambda _ : ())
 
 
-class Session(xen.util.xmlrpclib2.ServerProxy):
+class Session(xmlrpcclient.ServerProxy):
     """A server proxy and session manager for communicating with Xend using
     the Xen-API.
 
@@ -104,9 +104,8 @@ class Session(xen.util.xmlrpclib2.Server
 
     def __init__(self, uri, transport=None, encoding=None, verbose=0,
                  allow_none=1):
-        xen.util.xmlrpclib2.ServerProxy.__init__(self, uri, transport,
-                                                 encoding, verbose,
-                                                 allow_none)
+        xmlrpcclient.ServerProxy.__init__(self, uri, transport, encoding,
+                                          verbose, allow_none)
         self._session = None
         self.last_login_method = None
         self.last_login_params = None
@@ -153,7 +152,7 @@ class Session(xen.util.xmlrpclib2.Server
         elif name.startswith('login'):
             return lambda *params: self._login(name, params)
         else:
-            return xen.util.xmlrpclib2.ServerProxy.__getattr__(self, name)
+            return xmlrpcclient.ServerProxy.__getattr__(self, name)
 
 
 def _parse_result(result):
--- a/tools/python/xen/xm/create.py	Wed Mar 28 08:40:42 2007 +0000
+++ b/tools/python/xen/xm/create.py	Wed Mar 28 08:44:08 2007 +0000
@@ -107,7 +107,7 @@ gopts.opt('xmldryrun', short='x',
 gopts.opt('skipdtd', short='s',
           fn=set_true, default=0,
           use="Skip DTD checking - skips checks on XML before creating. "
-              " Experimental. Can decreate create time." )
+              " Experimental. Can decrease create time." )
 
 gopts.opt('paused', short='p',
           fn=set_true, default=0,
--- a/tools/python/xen/xm/main.py	Wed Mar 28 08:40:42 2007 +0000
+++ b/tools/python/xen/xm/main.py	Wed Mar 28 08:44:08 2007 +0000
@@ -49,7 +49,7 @@ from xen.xend.XendConstants import *
 
 from xen.xm.opts import OptionError, Opts, wrap, set_true
 from xen.xm import console
-from xen.util.xmlrpclib2 import ServerProxy
+from xen.util.xmlrpcclient import ServerProxy
 
 import XenAPI
--- a/xen/acm/acm_policy.c	Wed Mar 28 08:40:42 2007 +0000
+++ b/xen/acm/acm_policy.c	Wed Mar 28 08:44:08 2007 +0000
@@ -62,6 +62,7 @@ int
 do_acm_set_policy(void *buf, u32 buf_size)
 {
     struct acm_policy_buffer *pol = (struct acm_policy_buffer *)buf;
+    uint32_t offset, length;
     /* some sanity checking */
     if ((be32_to_cpu(pol->magic) != ACM_MAGIC) ||
         (buf_size != be32_to_cpu(pol->len)) ||
@@ -92,22 +93,27 @@ do_acm_set_policy(void *buf, u32 buf_siz
     /* get bin_policy lock and rewrite policy (release old one) */
     write_lock(&acm_bin_pol_rwlock);
 
+    offset = be32_to_cpu(pol->policy_reference_offset);
+    length = be32_to_cpu(pol->primary_buffer_offset) - offset;
+
     /* set label reference name */
-    if (acm_set_policy_reference(buf + be32_to_cpu(pol->policy_reference_offset),
-                                 be32_to_cpu(pol->primary_buffer_offset) -
-                                 be32_to_cpu(pol->policy_reference_offset)))
+    if ( (offset + length) > buf_size ||
+         acm_set_policy_reference(buf + offset, length))
         goto error_lock_free;
 
     /* set primary policy data */
-    if (acm_primary_ops->set_binary_policy(buf + be32_to_cpu(pol->primary_buffer_offset),
-                                           be32_to_cpu(pol->secondary_buffer_offset) -
-                                           be32_to_cpu(pol->primary_buffer_offset)))
+    offset = be32_to_cpu(pol->primary_buffer_offset);
+    length = be32_to_cpu(pol->secondary_buffer_offset) - offset;
+
+    if ( (offset + length) > buf_size ||
+         acm_primary_ops->set_binary_policy(buf + offset, length))
         goto error_lock_free;
 
     /* set secondary policy data */
-    if (acm_secondary_ops->set_binary_policy(buf + be32_to_cpu(pol->secondary_buffer_offset),
-                                             be32_to_cpu(pol->len) -
-                                             be32_to_cpu(pol->secondary_buffer_offset)))
+    offset = be32_to_cpu(pol->secondary_buffer_offset);
+    length = be32_to_cpu(pol->len) - offset;
+    if ( (offset + length) > buf_size ||
+         acm_secondary_ops->set_binary_policy(buf + offset, length))
         goto error_lock_free;
 
     write_unlock(&acm_bin_pol_rwlock);
--- a/xen/arch/ia64/asm-offsets.c	Wed Mar 28 08:40:42 2007 +0000
+++ b/xen/arch/ia64/asm-offsets.c	Wed Mar 28 08:44:08 2007 +0000
@@ -223,10 +223,11 @@ void foo(void)
 
 #ifdef PERF_COUNTERS
 	BLANK();
-	DEFINE(RECOVER_TO_PAGE_FAULT_PERFC_OFS, offsetof (struct perfcounter, recover_to_page_fault));
-	DEFINE(RECOVER_TO_BREAK_FAULT_PERFC_OFS, offsetof (struct perfcounter, recover_to_break_fault));
-	DEFINE(FAST_HYPERPRIVOP_PERFC_OFS, offsetof (struct perfcounter, fast_hyperprivop));
-	DEFINE(FAST_REFLECT_PERFC_OFS, offsetof (struct perfcounter, fast_reflect));
+	DEFINE(IA64_PERFC_recover_to_page_fault, PERFC_recover_to_page_fault);
+	DEFINE(IA64_PERFC_recover_to_break_fault, PERFC_recover_to_break_fault);
+	DEFINE(IA64_PERFC_fast_vhpt_translate, PERFC_fast_vhpt_translate);
+	DEFINE(IA64_PERFC_fast_hyperprivop, PERFC_fast_hyperprivop);
+	DEFINE(IA64_PERFC_fast_reflect, PERFC_fast_reflect);
 #endif
 
 	BLANK();
--- a/xen/arch/ia64/linux-xen/irq_ia64.c	Wed Mar 28 08:40:42 2007 +0000
+++ b/xen/arch/ia64/linux-xen/irq_ia64.c	Wed Mar 28 08:44:08 2007 +0000
@@ -113,7 +113,7 @@ ia64_handle_irq (ia64_vector vector, str
 	unsigned long saved_tpr;
 
 #ifdef XEN
-	perfc_incrc(irqs);
+	perfc_incr(irqs);
 #endif
 #if IRQ_DEBUG
 #ifdef XEN
--- a/xen/arch/ia64/linux-xen/mca.c	Wed Mar 28 08:40:42 2007 +0000
+++ b/xen/arch/ia64/linux-xen/mca.c	Wed Mar 28 08:44:08 2007 +0000
@@ -397,16 +397,6 @@ ia64_log_queue(int sal_info_type, int vi
 
 #ifdef XEN
 /**
- * Copy from linux/include/asm-generic/bug.h
- */
-#define WARN_ON(condition) do { \
-	if (unlikely((condition)!=0)) { \
-		printk("Badness in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \
-		dump_stack(); \
-	} \
-} while (0)
-
-/**
  * Copy from linux/kernel/irq/manage.c
  *
  * disable_irq_nosync - disable an irq without waiting
--- a/xen/arch/ia64/linux-xen/smp.c	Wed Mar 28 08:40:42 2007 +0000
+++ b/xen/arch/ia64/linux-xen/smp.c	Wed Mar 28 08:44:08 2007 +0000
@@ -148,7 +148,7 @@ handle_IPI (int irq, void *dev_id, struc
 	unsigned long ops;
 
 #ifdef XEN
-	perfc_incrc(ipis);
+	perfc_incr(ipis);
 #endif
 	mb();	/* Order interrupt and bit testing. */
 	while ((ops = xchg(pending_ipis, 0)) != 0) {
--- a/xen/arch/ia64/vmx/pal_emul.c	Wed Mar 28 08:40:42 2007 +0000
+++ b/xen/arch/ia64/vmx/pal_emul.c	Wed Mar 28 08:44:08 2007 +0000
@@ -37,7 +37,7 @@ pal_emul(struct vcpu *vcpu)
 	vcpu_get_gr_nat(vcpu, 30, &gr30);
 	vcpu_get_gr_nat(vcpu, 31, &gr31);
 
-	perfc_incrc(vmx_pal_emul);
+	perfc_incr(vmx_pal_emul);
 	result = xen_pal_emulator(gr28, gr29, gr30, gr31);
 
 	vcpu_set_gr(vcpu, 8, result.status, 0);
--- a/xen/arch/ia64/vmx/vmx_process.c	Wed Mar 28 08:40:42 2007 +0000
+++ b/xen/arch/ia64/vmx/vmx_process.c	Wed Mar 28 08:44:08 2007 +0000
@@ -151,7 +151,7 @@ vmx_ia64_handle_break (unsigned long ifa
     struct domain *d = current->domain;
     struct vcpu *v = current;
 
-    perfc_incrc(vmx_ia64_handle_break);
+    perfc_incr(vmx_ia64_handle_break);
 #ifdef CRASH_DEBUG
     if ((iim == 0 || iim == CDB_BREAK_NUM) && !user_mode(regs) &&
         IS_VMM_ADDRESS(regs->cr_iip)) {
--- a/xen/arch/ia64/vmx/vmx_virt.c	Wed Mar 28 08:40:42 2007 +0000
+++ b/xen/arch/ia64/vmx/vmx_virt.c	Wed Mar 28 08:44:08 2007 +0000
@@ -1401,159 +1401,159 @@ if ( (cause == 0xff && opcode == 0x1e000
 
     switch(cause) {
     case EVENT_RSM:
-        perfc_incrc(vmx_rsm);
+        perfc_incr(vmx_rsm);
         status=vmx_emul_rsm(vcpu, inst);
         break;
     case EVENT_SSM:
-        perfc_incrc(vmx_ssm);
+        perfc_incr(vmx_ssm);
         status=vmx_emul_ssm(vcpu, inst);
         break;
     case EVENT_MOV_TO_PSR:
-        perfc_incrc(vmx_mov_to_psr);
+        perfc_incr(vmx_mov_to_psr);
         status=vmx_emul_mov_to_psr(vcpu, inst);
         break;
    case EVENT_MOV_FROM_PSR:
-        perfc_incrc(vmx_mov_from_psr);
+        perfc_incr(vmx_mov_from_psr);
         status=vmx_emul_mov_from_psr(vcpu, inst);
         break;
     case EVENT_MOV_FROM_CR:
-        perfc_incrc(vmx_mov_from_cr);
+        perfc_incr(vmx_mov_from_cr);
         status=vmx_emul_mov_from_cr(vcpu, inst);
         break;
     case EVENT_MOV_TO_CR:
-        perfc_incrc(vmx_mov_to_cr);
+        perfc_incr(vmx_mov_to_cr);
         status=vmx_emul_mov_to_cr(vcpu, inst);
         break;
     case EVENT_BSW_0:
-        perfc_incrc(vmx_bsw0);
+        perfc_incr(vmx_bsw0);
         status=vmx_emul_bsw0(vcpu, inst);
         break;
     case EVENT_BSW_1:
-        perfc_incrc(vmx_bsw1);
+        perfc_incr(vmx_bsw1);
         status=vmx_emul_bsw1(vcpu, inst);
         break;
     case EVENT_COVER:
-        perfc_incrc(vmx_cover);
+        perfc_incr(vmx_cover);
         status=vmx_emul_cover(vcpu, inst);
         break;
     case EVENT_RFI:
-        perfc_incrc(vmx_rfi);
+        perfc_incr(vmx_rfi);
         status=vmx_emul_rfi(vcpu, inst);
         break;
     case EVENT_ITR_D:
-        perfc_incrc(vmx_itr_d);
+        perfc_incr(vmx_itr_d);
         status=vmx_emul_itr_d(vcpu, inst);
         break;
     case EVENT_ITR_I:
-        perfc_incrc(vmx_itr_i);
+        perfc_incr(vmx_itr_i);
         status=vmx_emul_itr_i(vcpu, inst);
         break;
     case EVENT_PTR_D:
-        perfc_incrc(vmx_ptr_d);
+        perfc_incr(vmx_ptr_d);
         status=vmx_emul_ptr_d(vcpu, inst);
         break;
     case EVENT_PTR_I:
-        perfc_incrc(vmx_ptr_i);
+        perfc_incr(vmx_ptr_i);
         status=vmx_emul_ptr_i(vcpu, inst);
         break;
     case EVENT_ITC_D:
-        perfc_incrc(vmx_itc_d);
+        perfc_incr(vmx_itc_d);
         status=vmx_emul_itc_d(vcpu, inst);
         break;
     case EVENT_ITC_I:
-        perfc_incrc(vmx_itc_i);
+        perfc_incr(vmx_itc_i);
         status=vmx_emul_itc_i(vcpu, inst);
         break;
     case EVENT_PTC_L:
-        perfc_incrc(vmx_ptc_l);
+        perfc_incr(vmx_ptc_l);
         status=vmx_emul_ptc_l(vcpu, inst);
         break;
     case EVENT_PTC_G:
-        perfc_incrc(vmx_ptc_g);
+        perfc_incr(vmx_ptc_g);
         status=vmx_emul_ptc_g(vcpu, inst);
         break;
     case EVENT_PTC_GA:
-        perfc_incrc(vmx_ptc_ga);
+        perfc_incr(vmx_ptc_ga);
         status=vmx_emul_ptc_ga(vcpu, inst);
         break;
     case EVENT_PTC_E:
-        perfc_incrc(vmx_ptc_e);
+        perfc_incr(vmx_ptc_e);
         status=vmx_emul_ptc_e(vcpu, inst);
         break;
     case EVENT_MOV_TO_RR:
-        perfc_incrc(vmx_mov_to_rr);
+        perfc_incr(vmx_mov_to_rr);
         status=vmx_emul_mov_to_rr(vcpu, inst);
         break;
     case EVENT_MOV_FROM_RR:
-        perfc_incrc(vmx_mov_from_rr);
+        perfc_incr(vmx_mov_from_rr);
         status=vmx_emul_mov_from_rr(vcpu, inst);
         break;
     case EVENT_THASH:
-        perfc_incrc(vmx_thash);
+        perfc_incr(vmx_thash);
         status=vmx_emul_thash(vcpu, inst);
         break;
     case EVENT_TTAG:
-        perfc_incrc(vmx_ttag);
+        perfc_incr(vmx_ttag);
         status=vmx_emul_ttag(vcpu, inst);
         break;
     case EVENT_TPA:
-        perfc_incrc(vmx_tpa);
+        perfc_incr(vmx_tpa);
         status=vmx_emul_tpa(vcpu, inst);
         break;
     case EVENT_TAK:
-        perfc_incrc(vmx_tak);
+        perfc_incr(vmx_tak);
         status=vmx_emul_tak(vcpu, inst);
         break;
     case EVENT_MOV_TO_AR_IMM:
-        perfc_incrc(vmx_mov_to_ar_imm);
+        perfc_incr(vmx_mov_to_ar_imm);
         status=vmx_emul_mov_to_ar_imm(vcpu, inst);
         break;
     case EVENT_MOV_TO_AR:
-        perfc_incrc(vmx_mov_to_ar_reg);
+        perfc_incr(vmx_mov_to_ar_reg);
         status=vmx_emul_mov_to_ar_reg(vcpu, inst);
         break;
     case EVENT_MOV_FROM_AR:
-        perfc_incrc(vmx_mov_from_ar_reg);
+        perfc_incr(vmx_mov_from_ar_reg);
         status=vmx_emul_mov_from_ar_reg(vcpu, inst);
         break;
     case EVENT_MOV_TO_DBR:
-        perfc_incrc(vmx_mov_to_dbr);
+        perfc_incr(vmx_mov_to_dbr);
         status=vmx_emul_mov_to_dbr(vcpu, inst);
         break;
     case EVENT_MOV_TO_IBR:
-        perfc_incrc(vmx_mov_to_ibr);
+        perfc_incr(vmx_mov_to_ibr);
         status=vmx_emul_mov_to_ibr(vcpu, inst);
         break;
     case EVENT_MOV_TO_PMC:
-        perfc_incrc(vmx_mov_to_pmc);
+        perfc_incr(vmx_mov_to_pmc);
         status=vmx_emul_mov_to_pmc(vcpu, inst);
         break;
     case EVENT_MOV_TO_PMD:
-        perfc_incrc(vmx_mov_to_pmd);
+        perfc_incr(vmx_mov_to_pmd);
         status=vmx_emul_mov_to_pmd(vcpu, inst);
         break;
     case EVENT_MOV_TO_PKR:
-        perfc_incrc(vmx_mov_to_pkr);
+        perfc_incr(vmx_mov_to_pkr);
         status=vmx_emul_mov_to_pkr(vcpu, inst);
         break;
     case EVENT_MOV_FROM_DBR:
-        perfc_incrc(vmx_mov_from_dbr);
+        perfc_incr(vmx_mov_from_dbr);
         status=vmx_emul_mov_from_dbr(vcpu, inst);
         break;
     case EVENT_MOV_FROM_IBR:
-        perfc_incrc(vmx_mov_from_ibr);
+        perfc_incr(vmx_mov_from_ibr);
         status=vmx_emul_mov_from_ibr(vcpu, inst);
         break;
     case EVENT_MOV_FROM_PMC:
-        perfc_incrc(vmx_mov_from_pmc);
+        perfc_incr(vmx_mov_from_pmc);
         status=vmx_emul_mov_from_pmc(vcpu, inst);
         break;
     case EVENT_MOV_FROM_PKR:
-        perfc_incrc(vmx_mov_from_pkr);
+        perfc_incr(vmx_mov_from_pkr);
         status=vmx_emul_mov_from_pkr(vcpu, inst);
         break;
     case EVENT_MOV_FROM_CPUID:
-        perfc_incrc(vmx_mov_from_cpuid);
+        perfc_incr(vmx_mov_from_cpuid);
         status=vmx_emul_mov_from_cpuid(vcpu, inst);
         break;
     case EVENT_VMSW:
--- a/xen/arch/ia64/xen/dom0_ops.c	Wed Mar 28 08:40:42 2007 +0000
+++ b/xen/arch/ia64/xen/dom0_ops.c	Wed Mar 28 08:44:08 2007 +0000
@@ -372,7 +372,7 @@ do_dom0vp_op(unsigned long cmd,
         } else {
             ret = (ret & _PFN_MASK) >> PAGE_SHIFT;//XXX pte_pfn()
         }
-        perfc_incrc(dom0vp_phystomach);
+        perfc_incr(dom0vp_phystomach);
         break;
     case IA64_DOM0VP_machtophys:
         if (!mfn_valid(arg0)) {
@@ -380,7 +380,7 @@ do_dom0vp_op(unsigned long cmd,
             break;
         }
         ret = get_gpfn_from_mfn(arg0);
-        perfc_incrc(dom0vp_machtophys);
+        perfc_incr(dom0vp_machtophys);
         break;
     case IA64_DOM0VP_zap_physmap:
         ret = dom0vp_zap_physmap(d, arg0, (unsigned int)arg1);
--- a/xen/arch/ia64/xen/domain.c	Wed Mar 28 08:40:42 2007 +0000
+++ b/xen/arch/ia64/xen/domain.c	Wed Mar 28 08:44:08 2007 +0000
@@ -131,11 +131,11 @@ static void flush_vtlb_for_context_switc
         if (vhpt_is_flushed || NEED_FLUSH(__get_cpu_var(tlbflush_time),
                                           last_tlbflush_timestamp)) {
             local_flush_tlb_all();
-            perfc_incrc(tlbflush_clock_cswitch_purge);
+            perfc_incr(tlbflush_clock_cswitch_purge);
         } else {
-            perfc_incrc(tlbflush_clock_cswitch_skip);
+            perfc_incr(tlbflush_clock_cswitch_skip);
         }
-        perfc_incrc(flush_vtlb_for_context_switch);
+        perfc_incr(flush_vtlb_for_context_switch);
     }
 }
--- a/xen/arch/ia64/xen/faults.c	Wed Mar 28 08:40:42 2007 +0000
+++ b/xen/arch/ia64/xen/faults.c	Wed Mar 28 08:44:08 2007 +0000
@@ -187,7 +187,7 @@ static int handle_lazy_cover(struct vcpu
 	if (!PSCB(v, interrupt_collection_enabled)) {
 		PSCB(v, ifs) = regs->cr_ifs;
 		regs->cr_ifs = 0;
-		perfc_incrc(lazy_cover);
+		perfc_incr(lazy_cover);
 		return 1;	// retry same instruction with cr.ifs off
 	}
 	return 0;
--- a/xen/arch/ia64/xen/hypercall.c	Wed Mar 28 08:40:42 2007 +0000
+++ b/xen/arch/ia64/xen/hypercall.c	Wed Mar 28 08:44:08 2007 +0000
@@ -161,7 +161,7 @@ ia64_hypercall(struct pt_regs *regs)
 		if (regs->r28 == PAL_HALT_LIGHT) {
 			if (vcpu_deliverable_interrupts(v) ||
 			    event_pending(v)) {
-				perfc_incrc(idle_when_pending);
+				perfc_incr(idle_when_pending);
 				vcpu_pend_unspecified_interrupt(v);
 				//printk("idle w/int#%d pending!\n",pi);
 				//this shouldn't happen, but it apparently does quite a bit!  so don't
@@ -170,7 +170,7 @@ ia64_hypercall(struct pt_regs *regs)
 				//as deliver_pending_interrupt is called on the way out and will deliver it
 			}
 			else {
-				perfc_incrc(pal_halt_light);
+				perfc_incr(pal_halt_light);
 				migrate_timer(&v->arch.hlt_timer,
 				              v->processor);
 				set_timer(&v->arch.hlt_timer,
37.1 --- a/xen/arch/ia64/xen/hyperprivop.S Wed Mar 28 08:40:42 2007 +0000 37.2 +++ b/xen/arch/ia64/xen/hyperprivop.S Wed Mar 28 08:44:08 2007 +0000 37.3 @@ -26,8 +26,7 @@ 37.4 # define FAST_HYPERPRIVOPS 37.5 # ifdef PERF_COUNTERS 37.6 # define FAST_HYPERPRIVOP_CNT 37.7 -# define FAST_HYPERPRIVOP_PERFC(N) \ 37.8 - (perfcounters + FAST_HYPERPRIVOP_PERFC_OFS + (4 * N)) 37.9 +# define FAST_HYPERPRIVOP_PERFC(N) PERFC(fast_hyperprivop + N) 37.10 # define FAST_REFLECT_CNT 37.11 # endif 37.12 37.13 @@ -364,7 +363,7 @@ GLOBAL_ENTRY(fast_tick_reflect) 37.14 mov rp=r29;; 37.15 mov cr.itm=r26;; // ensure next tick 37.16 #ifdef FAST_REFLECT_CNT 37.17 - movl r20=perfcounters+FAST_REFLECT_PERFC_OFS+((0x3000>>8)*4);; 37.18 + movl r20=PERFC(fast_reflect + (0x3000>>8));; 37.19 ld4 r21=[r20];; 37.20 adds r21=1,r21;; 37.21 st4 [r20]=r21;; 37.22 @@ -597,7 +596,7 @@ END(fast_break_reflect) 37.23 // r31 == pr 37.24 ENTRY(fast_reflect) 37.25 #ifdef FAST_REFLECT_CNT 37.26 - movl r22=perfcounters+FAST_REFLECT_PERFC_OFS; 37.27 + movl r22=PERFC(fast_reflect); 37.28 shr r23=r20,8-2;; 37.29 add r22=r22,r23;; 37.30 ld4 r21=[r22];; 37.31 @@ -938,7 +937,7 @@ 1: // check the guest VHPT 37.32 (p7) br.cond.spnt.few page_not_present;; 37.33 37.34 #ifdef FAST_REFLECT_CNT 37.35 - movl r21=perfcounter+FAST_VHPT_TRANSLATE_PERFC_OFS;; 37.36 + movl r21=PERFC(fast_vhpt_translate);; 37.37 ld4 r22=[r21];; 37.38 adds r22=1,r22;; 37.39 st4 [r21]=r22;; 37.40 @@ -968,7 +967,7 @@ END(fast_tlb_miss_reflect) 37.41 // we get here if fast_insert fails (e.g. due to metaphysical lookup) 37.42 ENTRY(recover_and_page_fault) 37.43 #ifdef PERF_COUNTERS 37.44 - movl r21=perfcounters + RECOVER_TO_PAGE_FAULT_PERFC_OFS;; 37.45 + movl r21=PERFC(recover_to_page_fault);; 37.46 ld4 r22=[r21];; 37.47 adds r22=1,r22;; 37.48 st4 [r21]=r22;; 37.49 @@ -1832,7 +1831,7 @@ END(hyper_ptc_ga) 37.50 // recovery block for hyper_itc metaphysical memory lookup 37.51 ENTRY(recover_and_dispatch_break_fault) 37.52 #ifdef PERF_COUNTERS 37.53 - movl r21=perfcounters + RECOVER_TO_BREAK_FAULT_PERFC_OFS;; 37.54 + movl r21=PERFC(recover_to_break_fault);; 37.55 ld4 r22=[r21];; 37.56 adds r22=1,r22;; 37.57 st4 [r21]=r22;;
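The assembly-level counters get the same treatment: instead of hand-maintained byte offsets (FAST_REFLECT_PERFC_OFS and friends, with an explicit * 4 scaling), the counter address now comes from one PERFC() helper. Its definition lives in an ia64 header outside this diff, so the expansion below is a guess at the mechanism, not the real macro; the point is that ## pastes onto the first token only, which is what lets PERFC(fast_reflect + (0x3000>>8)) work:

    /* Hypothetical expansion: PERFC_ ## n pastes onto the leading token, so
     * PERFC(fast_reflect + (0x3000>>8)) becomes
     *   perfcounters + (PERFC_fast_reflect + (0x3000>>8)) * 4
     * i.e. an index into the 4-byte perfc_t slots read/written by the
     * ld4/st4 instructions above. */
    #define PERFC(n) (perfcounters + (PERFC_ ## n) * 4)

Either way, the offsets are now derived from the same enum the C side uses, so the assembly can no longer drift out of sync with perfc_defn.h.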
38.1 --- a/xen/arch/ia64/xen/mm.c Wed Mar 28 08:40:42 2007 +0000 38.2 +++ b/xen/arch/ia64/xen/mm.c Wed Mar 28 08:44:08 2007 +0000 38.3 @@ -1139,7 +1139,7 @@ assign_domain_page_replace(struct domain 38.4 domain_put_page(d, mpaddr, pte, old_pte, 1); 38.5 } 38.6 } 38.7 - perfc_incrc(assign_domain_page_replace); 38.8 + perfc_incr(assign_domain_page_replace); 38.9 } 38.10 38.11 // caller must get_page(new_page) before 38.12 @@ -1202,7 +1202,7 @@ assign_domain_page_cmpxchg_rel(struct do 38.13 set_gpfn_from_mfn(old_mfn, INVALID_M2P_ENTRY); 38.14 38.15 domain_page_flush_and_put(d, mpaddr, pte, old_pte, old_page); 38.16 - perfc_incrc(assign_domain_pge_cmpxchg_rel); 38.17 + perfc_incr(assign_domain_pge_cmpxchg_rel); 38.18 return 0; 38.19 } 38.20 38.21 @@ -1264,7 +1264,7 @@ zap_domain_page_one(struct domain *d, un 38.22 // guest_physmap_remove_page() 38.23 // zap_domain_page_one() 38.24 domain_put_page(d, mpaddr, pte, old_pte, (page_get_owner(page) != NULL)); 38.25 - perfc_incrc(zap_dcomain_page_one); 38.26 + perfc_incr(zap_dcomain_page_one); 38.27 } 38.28 38.29 unsigned long 38.30 @@ -1277,7 +1277,7 @@ dom0vp_zap_physmap(struct domain *d, uns 38.31 } 38.32 38.33 zap_domain_page_one(d, gpfn << PAGE_SHIFT, INVALID_MFN); 38.34 - perfc_incrc(dom0vp_zap_physmap); 38.35 + perfc_incr(dom0vp_zap_physmap); 38.36 return 0; 38.37 } 38.38 38.39 @@ -1331,7 +1331,7 @@ static unsigned long 38.40 get_gpfn_from_mfn(mfn) != INVALID_M2P_ENTRY); 38.41 assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, flags); 38.42 //don't update p2m table because this page belongs to rd, not d. 38.43 - perfc_incrc(dom0vp_add_physmap); 38.44 + perfc_incr(dom0vp_add_physmap); 38.45 out1: 38.46 put_domain(rd); 38.47 return error; 38.48 @@ -1501,7 +1501,7 @@ create_grant_host_mapping(unsigned long 38.49 #endif 38.50 ((flags & GNTMAP_readonly) ? 
38.51 ASSIGN_readonly : ASSIGN_writable)); 38.52 - perfc_incrc(create_grant_host_mapping); 38.53 + perfc_incr(create_grant_host_mapping); 38.54 return GNTST_okay; 38.55 } 38.56 38.57 @@ -1565,7 +1565,7 @@ destroy_grant_host_mapping(unsigned long 38.58 get_gpfn_from_mfn(mfn) == gpfn); 38.59 domain_page_flush_and_put(d, gpaddr, pte, old_pte, page); 38.60 38.61 - perfc_incrc(destroy_grant_host_mapping); 38.62 + perfc_incr(destroy_grant_host_mapping); 38.63 return GNTST_okay; 38.64 } 38.65 38.66 @@ -1629,7 +1629,7 @@ steal_page(struct domain *d, struct page 38.67 free_domheap_page(new); 38.68 return -1; 38.69 } 38.70 - perfc_incrc(steal_page_refcount); 38.71 + perfc_incr(steal_page_refcount); 38.72 } 38.73 38.74 spin_lock(&d->page_alloc_lock); 38.75 @@ -1703,7 +1703,7 @@ steal_page(struct domain *d, struct page 38.76 list_del(&page->list); 38.77 38.78 spin_unlock(&d->page_alloc_lock); 38.79 - perfc_incrc(steal_page); 38.80 + perfc_incr(steal_page); 38.81 return 0; 38.82 } 38.83 38.84 @@ -1723,7 +1723,7 @@ guest_physmap_add_page(struct domain *d, 38.85 38.86 //BUG_ON(mfn != ((lookup_domain_mpa(d, gpfn << PAGE_SHIFT) & _PFN_MASK) >> PAGE_SHIFT)); 38.87 38.88 - perfc_incrc(guest_physmap_add_page); 38.89 + perfc_incr(guest_physmap_add_page); 38.90 } 38.91 38.92 void 38.93 @@ -1732,7 +1732,7 @@ guest_physmap_remove_page(struct domain 38.94 { 38.95 BUG_ON(mfn == 0);//XXX 38.96 zap_domain_page_one(d, gpfn << PAGE_SHIFT, mfn); 38.97 - perfc_incrc(guest_physmap_remove_page); 38.98 + perfc_incr(guest_physmap_remove_page); 38.99 } 38.100 38.101 static void 38.102 @@ -1812,7 +1812,7 @@ domain_page_flush_and_put(struct domain* 38.103 break; 38.104 } 38.105 #endif 38.106 - perfc_incrc(domain_page_flush_and_put); 38.107 + perfc_incr(domain_page_flush_and_put); 38.108 } 38.109 38.110 int 38.111 @@ -2009,7 +2009,7 @@ int get_page_type(struct page_info *page 38.112 38.113 if ( unlikely(!cpus_empty(mask)) ) 38.114 { 38.115 - perfc_incrc(need_flush_tlb_flush); 38.116 + perfc_incr(need_flush_tlb_flush); 38.117 flush_tlb_mask(mask); 38.118 } 38.119
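Note that the rename is purely textual: counter identifiers, including the historically misspelled zap_dcomain_page_one and assign_domain_pge_cmpxchg_rel above, are kept as-is, since each must keep matching its declaration. Those declarations follow the two-argument PERFCOUNTER(var, name) form visible in the privop_stat.c hunk below; the display strings in this illustration are assumptions:

    /* In asm/perfc_defn.h (strings invented for illustration): */
    PERFCOUNTER(zap_dcomain_page_one,          "zap_domain_page_one")
    PERFCOUNTER(assign_domain_pge_cmpxchg_rel, "assign_domain_page_cmpxchg_rel")

Fixing the spelling would mean touching both sites at once, which this changeset, presumably to stay mechanical, does not attempt.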
39.1 --- a/xen/arch/ia64/xen/privop.c Wed Mar 28 08:40:42 2007 +0000 39.2 +++ b/xen/arch/ia64/xen/privop.c Wed Mar 28 08:44:08 2007 +0000 39.3 @@ -641,15 +641,15 @@ static IA64FAULT priv_handle_op(VCPU * v 39.4 if (inst.M29.x3 != 0) 39.5 break; 39.6 if (inst.M30.x4 == 8 && inst.M30.x2 == 2) { 39.7 - perfc_incrc(mov_to_ar_imm); 39.8 + perfc_incr(mov_to_ar_imm); 39.9 return priv_mov_to_ar_imm(vcpu, inst); 39.10 } 39.11 if (inst.M44.x4 == 6) { 39.12 - perfc_incrc(ssm); 39.13 + perfc_incr(ssm); 39.14 return priv_ssm(vcpu, inst); 39.15 } 39.16 if (inst.M44.x4 == 7) { 39.17 - perfc_incrc(rsm); 39.18 + perfc_incr(rsm); 39.19 return priv_rsm(vcpu, inst); 39.20 } 39.21 break; 39.22 @@ -658,9 +658,9 @@ static IA64FAULT priv_handle_op(VCPU * v 39.23 x6 = inst.M29.x6; 39.24 if (x6 == 0x2a) { 39.25 if (privify_en && inst.M29.r2 > 63 && inst.M29.ar3 < 8) 39.26 - perfc_incrc(mov_from_ar); // privified mov from kr 39.27 + perfc_incr(mov_from_ar); // privified mov from kr 39.28 else 39.29 - perfc_incrc(mov_to_ar_reg); 39.30 + perfc_incr(mov_to_ar_reg); 39.31 return priv_mov_to_ar_reg(vcpu, inst); 39.32 } 39.33 if (inst.M29.x3 != 0) 39.34 @@ -676,9 +676,9 @@ static IA64FAULT priv_handle_op(VCPU * v 39.35 } 39.36 } 39.37 if (privify_en && x6 == 52 && inst.M28.r3 > 63) 39.38 - perfc_incrc(fc); 39.39 + perfc_incr(fc); 39.40 else if (privify_en && x6 == 16 && inst.M43.r3 > 63) 39.41 - perfc_incrc(cpuid); 39.42 + perfc_incr(cpuid); 39.43 else 39.44 perfc_incra(misc_privop, x6); 39.45 return (*pfunc) (vcpu, inst); 39.46 @@ -688,23 +688,23 @@ static IA64FAULT priv_handle_op(VCPU * v 39.47 break; 39.48 if (inst.B8.x6 == 0x08) { 39.49 IA64FAULT fault; 39.50 - perfc_incrc(rfi); 39.51 + perfc_incr(rfi); 39.52 fault = priv_rfi(vcpu, inst); 39.53 if (fault == IA64_NO_FAULT) 39.54 fault = IA64_RFI_IN_PROGRESS; 39.55 return fault; 39.56 } 39.57 if (inst.B8.x6 == 0x0c) { 39.58 - perfc_incrc(bsw0); 39.59 + perfc_incr(bsw0); 39.60 return priv_bsw0(vcpu, inst); 39.61 } 39.62 if (inst.B8.x6 == 0x0d) { 39.63 - perfc_incrc(bsw1); 39.64 + perfc_incr(bsw1); 39.65 return priv_bsw1(vcpu, inst); 39.66 } 39.67 if (inst.B8.x6 == 0x0) { 39.68 // break instr for privified cover 39.69 - perfc_incrc(cover); 39.70 + perfc_incr(cover); 39.71 return priv_cover(vcpu, inst); 39.72 } 39.73 break; 39.74 @@ -713,7 +713,7 @@ static IA64FAULT priv_handle_op(VCPU * v 39.75 break; 39.76 #if 0 39.77 if (inst.I26.x6 == 0 && inst.I26.x3 == 0) { 39.78 - perfc_incrc(cover); 39.79 + perfc_incr(cover); 39.80 return priv_cover(vcpu, inst); 39.81 } 39.82 #endif 39.83 @@ -721,13 +721,13 @@ static IA64FAULT priv_handle_op(VCPU * v 39.84 break; // I26.x3 == I27.x3 39.85 if (inst.I26.x6 == 0x2a) { 39.86 if (privify_en && inst.I26.r2 > 63 && inst.I26.ar3 < 8) 39.87 - perfc_incrc(mov_from_ar); // privified mov from kr 39.88 + perfc_incr(mov_from_ar); // privified mov from kr 39.89 else 39.90 - perfc_incrc(mov_to_ar_reg); 39.91 + perfc_incr(mov_to_ar_reg); 39.92 return priv_mov_to_ar_reg(vcpu, inst); 39.93 } 39.94 if (inst.I27.x6 == 0x0a) { 39.95 - perfc_incrc(mov_to_ar_imm); 39.96 + perfc_incr(mov_to_ar_imm); 39.97 return priv_mov_to_ar_imm(vcpu, inst); 39.98 } 39.99 break;
40.1 --- a/xen/arch/ia64/xen/privop_stat.c Wed Mar 28 08:40:42 2007 +0000 40.2 +++ b/xen/arch/ia64/xen/privop_stat.c Wed Mar 28 08:44:08 2007 +0000 40.3 @@ -10,48 +10,39 @@ struct privop_addr_count { 40.4 unsigned long addr[PRIVOP_COUNT_NADDRS]; 40.5 unsigned int count[PRIVOP_COUNT_NADDRS]; 40.6 unsigned int overflow; 40.7 - atomic_t *perfc_addr; 40.8 - atomic_t *perfc_count; 40.9 - atomic_t *perfc_overflow; 40.10 }; 40.11 40.12 -#undef PERFCOUNTER 40.13 -#define PERFCOUNTER(var, name) 40.14 +struct privop_addr_info { 40.15 + enum perfcounter perfc_addr; 40.16 + enum perfcounter perfc_count; 40.17 + enum perfcounter perfc_overflow; 40.18 +}; 40.19 40.20 -#undef PERFCOUNTER_CPU 40.21 -#define PERFCOUNTER_CPU(var, name) 40.22 - 40.23 -#undef PERFCOUNTER_ARRAY 40.24 +#define PERFCOUNTER(var, name) 40.25 #define PERFCOUNTER_ARRAY(var, name, size) 40.26 40.27 -#undef PERFSTATUS 40.28 #define PERFSTATUS(var, name) 40.29 - 40.30 -#undef PERFSTATUS_CPU 40.31 -#define PERFSTATUS_CPU(var, name) 40.32 - 40.33 -#undef PERFSTATUS_ARRAY 40.34 #define PERFSTATUS_ARRAY(var, name, size) 40.35 40.36 -#undef PERFPRIVOPADDR 40.37 #define PERFPRIVOPADDR(name) \ 40.38 { \ 40.39 - { 0 }, { 0 }, 0, \ 40.40 - perfcounters.privop_addr_##name##_addr, \ 40.41 - perfcounters.privop_addr_##name##_count, \ 40.42 - perfcounters.privop_addr_##name##_overflow \ 40.43 + PERFC_privop_addr_##name##_addr, \ 40.44 + PERFC_privop_addr_##name##_count, \ 40.45 + PERFC_privop_addr_##name##_overflow \ 40.46 }, 40.47 40.48 -static struct privop_addr_count privop_addr_counter[] = { 40.49 +static const struct privop_addr_info privop_addr_info[] = { 40.50 #include <asm/perfc_defn.h> 40.51 }; 40.52 40.53 #define PRIVOP_COUNT_NINSTS \ 40.54 - (sizeof(privop_addr_counter) / sizeof(privop_addr_counter[0])) 40.55 + (sizeof(privop_addr_info) / sizeof(privop_addr_info[0])) 40.56 + 40.57 +static DEFINE_PER_CPU(struct privop_addr_count[PRIVOP_COUNT_NINSTS], privop_addr_counter); 40.58 40.59 void privop_count_addr(unsigned long iip, enum privop_inst inst) 40.60 { 40.61 - struct privop_addr_count *v = &privop_addr_counter[inst]; 40.62 + struct privop_addr_count *v = this_cpu(privop_addr_counter) + inst; 40.63 int i; 40.64 40.65 if (inst >= PRIVOP_COUNT_NINSTS) 40.66 @@ -72,31 +63,44 @@ void privop_count_addr(unsigned long iip 40.67 40.68 void gather_privop_addrs(void) 40.69 { 40.70 - int i, j; 40.71 - atomic_t *v; 40.72 - for (i = 0; i < PRIVOP_COUNT_NINSTS; i++) { 40.73 - /* Note: addresses are truncated! */ 40.74 - v = privop_addr_counter[i].perfc_addr; 40.75 - for (j = 0; j < PRIVOP_COUNT_NADDRS; j++) 40.76 - atomic_set(&v[j], privop_addr_counter[i].addr[j]); 40.77 + unsigned int cpu; 40.78 + 40.79 + for_each_cpu ( cpu ) { 40.80 + perfc_t *perfcounters = per_cpu(perfcounters, cpu); 40.81 + struct privop_addr_count *s = per_cpu(privop_addr_counter, cpu); 40.82 + int i, j; 40.83 + 40.84 + for (i = 0; i < PRIVOP_COUNT_NINSTS; i++, s++) { 40.85 + perfc_t *d; 40.86 40.87 - v = privop_addr_counter[i].perfc_count; 40.88 - for (j = 0; j < PRIVOP_COUNT_NADDRS; j++) 40.89 - atomic_set(&v[j], privop_addr_counter[i].count[j]); 40.90 + /* Note: addresses are truncated! 
*/ 40.91 + d = perfcounters + privop_addr_info[i].perfc_addr; 40.92 + for (j = 0; j < PRIVOP_COUNT_NADDRS; j++) 40.93 + d[j] = s->addr[j]; 40.94 + 40.95 + d = perfcounters + privop_addr_info[i].perfc_count; 40.96 + for (j = 0; j < PRIVOP_COUNT_NADDRS; j++) 40.97 + d[j] = s->count[j]; 40.98 40.99 - atomic_set(privop_addr_counter[i].perfc_overflow, 40.100 - privop_addr_counter[i].overflow); 40.101 + perfcounters[privop_addr_info[i].perfc_overflow] = 40.102 + s->overflow; 40.103 + } 40.104 } 40.105 } 40.106 40.107 void reset_privop_addrs(void) 40.108 { 40.109 - int i, j; 40.110 - for (i = 0; i < PRIVOP_COUNT_NINSTS; i++) { 40.111 - struct privop_addr_count *v = &privop_addr_counter[i]; 40.112 - for (j = 0; j < PRIVOP_COUNT_NADDRS; j++) 40.113 - v->addr[j] = v->count[j] = 0; 40.114 - v->overflow = 0; 40.115 + unsigned int cpu; 40.116 + 40.117 + for_each_cpu ( cpu ) { 40.118 + struct privop_addr_count *v = per_cpu(privop_addr_counter, cpu); 40.119 + int i, j; 40.120 + 40.121 + for (i = 0; i < PRIVOP_COUNT_NINSTS; i++, v++) { 40.122 + for (j = 0; j < PRIVOP_COUNT_NADDRS; j++) 40.123 + v->addr[j] = v->count[j] = 0; 40.124 + v->overflow = 0; 40.125 + } 40.126 } 40.127 } 40.128 #endif
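Beyond the rename, privop_stat.c shows the point of the whole exercise: the sampling buffers move into a DEFINE_PER_CPU array, so the hot path in privop_count_addr() touches only CPU-local memory with no atomics, while the constant mapping to PERFC_* slots is split out into a read-only table. The slow paths then iterate over every CPU. Reduced to its essentials, with illustrative names, the idiom is:

    static DEFINE_PER_CPU(unsigned int, samples);

    void hot_path(void)                 /* per-CPU, lock-free */
    {
        this_cpu(samples)++;
    }

    unsigned int gather(void)           /* slow path; racy reads are
                                         * acceptable for statistics */
    {
        unsigned int cpu, sum = 0;

        for_each_cpu ( cpu )
            sum += per_cpu(samples, cpu);
        return sum;
    }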
41.1 --- a/xen/arch/ia64/xen/tlb_track.c Wed Mar 28 08:40:42 2007 +0000 41.2 +++ b/xen/arch/ia64/xen/tlb_track.c Wed Mar 28 08:44:08 2007 +0000 41.3 @@ -216,14 +216,14 @@ tlb_track_insert_or_dirty(struct tlb_tra 41.4 TLB_TRACK_RET_T ret = TLB_TRACK_NOT_FOUND; 41.5 41.6 #if 0 /* this is done at vcpu_tlb_track_insert_or_dirty() */ 41.7 - perfc_incrc(tlb_track_iod); 41.8 + perfc_incr(tlb_track_iod); 41.9 if (!pte_tlb_tracking(old_pte)) { 41.10 - perfc_incrc(tlb_track_iod_not_tracked); 41.11 + perfc_incr(tlb_track_iod_not_tracked); 41.12 return TLB_TRACK_NOT_TRACKED; 41.13 } 41.14 #endif 41.15 if (pte_tlb_inserted_many(old_pte)) { 41.16 - perfc_incrc(tlb_track_iod_tracked_many); 41.17 + perfc_incr(tlb_track_iod_tracked_many); 41.18 return TLB_TRACK_MANY; 41.19 } 41.20 41.21 @@ -260,7 +260,7 @@ tlb_track_insert_or_dirty(struct tlb_tra 41.22 if (entry->vaddr == vaddr && entry->rid == rid) { 41.23 // tlb_track_printd("TLB_TRACK_FOUND\n"); 41.24 ret = TLB_TRACK_FOUND; 41.25 - perfc_incrc(tlb_track_iod_found); 41.26 + perfc_incr(tlb_track_iod_found); 41.27 #ifdef CONFIG_TLB_TRACK_CNT 41.28 entry->cnt++; 41.29 if (entry->cnt > TLB_TRACK_CNT_FORCE_MANY) { 41.30 @@ -276,7 +276,7 @@ tlb_track_insert_or_dirty(struct tlb_tra 41.31 */ 41.32 // tlb_track_entry_printf(entry); 41.33 // tlb_track_printd("cnt = %ld\n", entry->cnt); 41.34 - perfc_incrc(tlb_track_iod_force_many); 41.35 + perfc_incr(tlb_track_iod_force_many); 41.36 goto force_many; 41.37 } 41.38 #endif 41.39 @@ -294,14 +294,14 @@ tlb_track_insert_or_dirty(struct tlb_tra 41.40 if (pte_val(ret_pte) != pte_val(old_pte)) { 41.41 // tlb_track_printd("TLB_TRACK_AGAIN\n"); 41.42 ret = TLB_TRACK_AGAIN; 41.43 - perfc_incrc(tlb_track_iod_again); 41.44 + perfc_incr(tlb_track_iod_again); 41.45 } else { 41.46 // tlb_track_printd("TLB_TRACK_MANY del entry 0x%p\n", 41.47 // entry); 41.48 ret = TLB_TRACK_MANY; 41.49 list_del(&entry->list); 41.50 // tlb_track_entry_printf(entry); 41.51 - perfc_incrc(tlb_track_iod_tracked_many_del); 41.52 + perfc_incr(tlb_track_iod_tracked_many_del); 41.53 } 41.54 goto out; 41.55 } 41.56 @@ -314,7 +314,7 @@ tlb_track_insert_or_dirty(struct tlb_tra 41.57 */ 41.58 // tlb_track_printd("TLB_TRACK_AGAIN\n"); 41.59 ret = TLB_TRACK_AGAIN; 41.60 - perfc_incrc(tlb_track_iod_again); 41.61 + perfc_incr(tlb_track_iod_again); 41.62 goto out; 41.63 } 41.64 41.65 @@ -323,7 +323,7 @@ tlb_track_insert_or_dirty(struct tlb_tra 41.66 /* Other thread else removed the tlb_track_entry after we got old_pte 41.67 before we got spin lock. */ 41.68 ret = TLB_TRACK_AGAIN; 41.69 - perfc_incrc(tlb_track_iod_again); 41.70 + perfc_incr(tlb_track_iod_again); 41.71 goto out; 41.72 } 41.73 if (new_entry == NULL && bit_to_be_set == _PAGE_TLB_INSERTED) { 41.74 @@ -334,10 +334,10 @@ tlb_track_insert_or_dirty(struct tlb_tra 41.75 /* entry can't be allocated. 41.76 fall down into full flush mode. 
*/ 41.77 bit_to_be_set |= _PAGE_TLB_INSERTED_MANY; 41.78 - perfc_incrc(tlb_track_iod_new_failed); 41.79 + perfc_incr(tlb_track_iod_new_failed); 41.80 } 41.81 // tlb_track_printd("new_entry 0x%p\n", new_entry); 41.82 - perfc_incrc(tlb_track_iod_new_entry); 41.83 + perfc_incr(tlb_track_iod_new_entry); 41.84 goto again; 41.85 } 41.86 41.87 @@ -348,7 +348,7 @@ tlb_track_insert_or_dirty(struct tlb_tra 41.88 if (tlb_track_pte_zapped(old_pte, ret_pte)) { 41.89 // tlb_track_printd("zapped TLB_TRACK_AGAIN\n"); 41.90 ret = TLB_TRACK_AGAIN; 41.91 - perfc_incrc(tlb_track_iod_again); 41.92 + perfc_incr(tlb_track_iod_again); 41.93 goto out; 41.94 } 41.95 41.96 @@ -359,7 +359,7 @@ tlb_track_insert_or_dirty(struct tlb_tra 41.97 // tlb_track_printd("iserted TLB_TRACK_MANY\n"); 41.98 BUG_ON(!pte_tlb_inserted(ret_pte)); 41.99 ret = TLB_TRACK_MANY; 41.100 - perfc_incrc(tlb_track_iod_new_many); 41.101 + perfc_incr(tlb_track_iod_new_many); 41.102 goto out; 41.103 } 41.104 BUG_ON(pte_tlb_inserted(ret_pte)); 41.105 @@ -381,7 +381,7 @@ tlb_track_insert_or_dirty(struct tlb_tra 41.106 #ifdef CONFIG_TLB_TRACK_CNT 41.107 entry->cnt = 0; 41.108 #endif 41.109 - perfc_incrc(tlb_track_iod_insert); 41.110 + perfc_incr(tlb_track_iod_insert); 41.111 // tlb_track_entry_printf(entry); 41.112 } else { 41.113 goto out; 41.114 @@ -392,7 +392,7 @@ tlb_track_insert_or_dirty(struct tlb_tra 41.115 cpu_set(v->processor, entry->pcpu_dirty_mask); 41.116 BUG_ON(v->vcpu_id >= NR_CPUS); 41.117 vcpu_set(v->vcpu_id, entry->vcpu_dirty_mask); 41.118 - perfc_incrc(tlb_track_iod_dirtied); 41.119 + perfc_incr(tlb_track_iod_dirtied); 41.120 41.121 out: 41.122 spin_unlock(&tlb_track->hash_lock); 41.123 @@ -432,19 +432,19 @@ tlb_track_search_and_remove(struct tlb_t 41.124 struct list_head* head = tlb_track_hash_head(tlb_track, ptep); 41.125 struct tlb_track_entry* entry; 41.126 41.127 - perfc_incrc(tlb_track_sar); 41.128 + perfc_incr(tlb_track_sar); 41.129 if (!pte_tlb_tracking(old_pte)) { 41.130 - perfc_incrc(tlb_track_sar_not_tracked); 41.131 + perfc_incr(tlb_track_sar_not_tracked); 41.132 return TLB_TRACK_NOT_TRACKED; 41.133 } 41.134 if (!pte_tlb_inserted(old_pte)) { 41.135 BUG_ON(pte_tlb_inserted_many(old_pte)); 41.136 - perfc_incrc(tlb_track_sar_not_found); 41.137 + perfc_incr(tlb_track_sar_not_found); 41.138 return TLB_TRACK_NOT_FOUND; 41.139 } 41.140 if (pte_tlb_inserted_many(old_pte)) { 41.141 BUG_ON(!pte_tlb_inserted(old_pte)); 41.142 - perfc_incrc(tlb_track_sar_many); 41.143 + perfc_incr(tlb_track_sar_many); 41.144 return TLB_TRACK_MANY; 41.145 } 41.146 41.147 @@ -475,14 +475,14 @@ tlb_track_search_and_remove(struct tlb_t 41.148 pte_tlb_inserted(current_pte))) { 41.149 BUG_ON(pte_tlb_inserted_many(current_pte)); 41.150 spin_unlock(&tlb_track->hash_lock); 41.151 - perfc_incrc(tlb_track_sar_many); 41.152 + perfc_incr(tlb_track_sar_many); 41.153 return TLB_TRACK_MANY; 41.154 } 41.155 41.156 list_del(&entry->list); 41.157 spin_unlock(&tlb_track->hash_lock); 41.158 *entryp = entry; 41.159 - perfc_incrc(tlb_track_sar_found); 41.160 + perfc_incr(tlb_track_sar_found); 41.161 // tlb_track_entry_printf(entry); 41.162 #ifdef CONFIG_TLB_TRACK_CNT 41.163 // tlb_track_printd("cnt = %ld\n", entry->cnt);
42.1 --- a/xen/arch/ia64/xen/vcpu.c Wed Mar 28 08:40:42 2007 +0000 42.2 +++ b/xen/arch/ia64/xen/vcpu.c Wed Mar 28 08:44:08 2007 +0000 42.3 @@ -1616,7 +1616,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6 42.4 *pteval = (address & _PAGE_PPN_MASK) | 42.5 __DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX; 42.6 *itir = PAGE_SHIFT << 2; 42.7 - perfc_incrc(phys_translate); 42.8 + perfc_incr(phys_translate); 42.9 return IA64_NO_FAULT; 42.10 } 42.11 } else if (!region && warn_region0_address) { 42.12 @@ -1637,7 +1637,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6 42.13 if (trp != NULL) { 42.14 *pteval = trp->pte.val; 42.15 *itir = trp->itir; 42.16 - perfc_incrc(tr_translate); 42.17 + perfc_incr(tr_translate); 42.18 return IA64_NO_FAULT; 42.19 } 42.20 } 42.21 @@ -1647,7 +1647,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6 42.22 if (trp != NULL) { 42.23 *pteval = trp->pte.val; 42.24 *itir = trp->itir; 42.25 - perfc_incrc(tr_translate); 42.26 + perfc_incr(tr_translate); 42.27 return IA64_NO_FAULT; 42.28 } 42.29 } 42.30 @@ -1660,7 +1660,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6 42.31 && vcpu_match_tr_entry_no_p(trp, address, rid)) { 42.32 *pteval = pte.val; 42.33 *itir = trp->itir; 42.34 - perfc_incrc(dtlb_translate); 42.35 + perfc_incr(dtlb_translate); 42.36 return IA64_USE_TLB; 42.37 } 42.38 42.39 @@ -1709,7 +1709,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6 42.40 out: 42.41 *itir = rr & RR_PS_MASK; 42.42 *pteval = pte.val; 42.43 - perfc_incrc(vhpt_translate); 42.44 + perfc_incr(vhpt_translate); 42.45 return IA64_NO_FAULT; 42.46 } 42.47
43.1 --- a/xen/arch/ia64/xen/vhpt.c Wed Mar 28 08:40:42 2007 +0000 43.2 +++ b/xen/arch/ia64/xen/vhpt.c Wed Mar 28 08:44:08 2007 +0000 43.3 @@ -48,14 +48,14 @@ local_vhpt_flush(void) 43.4 /* this must be after flush */ 43.5 tlbflush_update_time(&__get_cpu_var(vhpt_tlbflush_timestamp), 43.6 flush_time); 43.7 - perfc_incrc(local_vhpt_flush); 43.8 + perfc_incr(local_vhpt_flush); 43.9 } 43.10 43.11 void 43.12 vcpu_vhpt_flush(struct vcpu* v) 43.13 { 43.14 __vhpt_flush(vcpu_vhpt_maddr(v)); 43.15 - perfc_incrc(vcpu_vhpt_flush); 43.16 + perfc_incr(vcpu_vhpt_flush); 43.17 } 43.18 43.19 static void 43.20 @@ -248,7 +248,7 @@ void vcpu_flush_vtlb_all(struct vcpu *v) 43.21 not running on this processor. There is currently no easy way to 43.22 check this. */ 43.23 43.24 - perfc_incrc(vcpu_flush_vtlb_all); 43.25 + perfc_incr(vcpu_flush_vtlb_all); 43.26 } 43.27 43.28 static void __vcpu_flush_vtlb_all(void *vcpu) 43.29 @@ -280,7 +280,7 @@ void domain_flush_vtlb_all(struct domain 43.30 __vcpu_flush_vtlb_all, 43.31 v, 1, 1); 43.32 } 43.33 - perfc_incrc(domain_flush_vtlb_all); 43.34 + perfc_incr(domain_flush_vtlb_all); 43.35 } 43.36 43.37 // Callers may need to call smp_mb() before/after calling this. 43.38 @@ -322,7 +322,7 @@ void vcpu_flush_tlb_vhpt_range (u64 vadr 43.39 vadr, 1UL << log_range); 43.40 ia64_ptcl(vadr, log_range << 2); 43.41 ia64_srlz_i(); 43.42 - perfc_incrc(vcpu_flush_tlb_vhpt_range); 43.43 + perfc_incr(vcpu_flush_tlb_vhpt_range); 43.44 } 43.45 43.46 void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range) 43.47 @@ -361,7 +361,7 @@ void domain_flush_vtlb_range (struct dom 43.48 43.49 /* ptc.ga */ 43.50 platform_global_tlb_purge(vadr, vadr + addr_range, PAGE_SHIFT); 43.51 - perfc_incrc(domain_flush_vtlb_range); 43.52 + perfc_incr(domain_flush_vtlb_range); 43.53 } 43.54 43.55 #ifdef CONFIG_XEN_IA64_TLB_TRACK 43.56 @@ -391,11 +391,11 @@ void 43.57 */ 43.58 vcpu_get_rr(current, VRN7 << VRN_SHIFT, &rr7_rid); 43.59 if (likely(rr7_rid == entry->rid)) { 43.60 - perfc_incrc(tlb_track_use_rr7); 43.61 + perfc_incr(tlb_track_use_rr7); 43.62 } else { 43.63 swap_rr0 = 1; 43.64 vaddr = (vaddr << 3) >> 3;// force vrn0 43.65 - perfc_incrc(tlb_track_swap_rr0); 43.66 + perfc_incr(tlb_track_swap_rr0); 43.67 } 43.68 43.69 // tlb_track_entry_printf(entry); 43.70 @@ -435,18 +435,18 @@ void 43.71 /* ptc.ga */ 43.72 if (local_purge) { 43.73 ia64_ptcl(vaddr, PAGE_SHIFT << 2); 43.74 - perfc_incrc(domain_flush_vtlb_local); 43.75 + perfc_incr(domain_flush_vtlb_local); 43.76 } else { 43.77 /* ptc.ga has release semantics. */ 43.78 platform_global_tlb_purge(vaddr, vaddr + PAGE_SIZE, 43.79 PAGE_SHIFT); 43.80 - perfc_incrc(domain_flush_vtlb_global); 43.81 + perfc_incr(domain_flush_vtlb_global); 43.82 } 43.83 43.84 if (swap_rr0) { 43.85 vcpu_set_rr(current, 0, old_rid); 43.86 } 43.87 - perfc_incrc(domain_flush_vtlb_track_entry); 43.88 + perfc_incr(domain_flush_vtlb_track_entry); 43.89 } 43.90 43.91 void 43.92 @@ -512,7 +512,7 @@ void gather_vhpt_stats(void) 43.93 for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++) 43.94 if (!(v->ti_tag & INVALID_TI_TAG)) 43.95 vhpt_valid++; 43.96 - perfc_seta(vhpt_valid_entries, cpu, vhpt_valid); 43.97 + per_cpu(perfcounters, cpu)[PERFC_vhpt_valid_entries] = vhpt_valid; 43.98 } 43.99 } 43.100 #endif
44.1 --- a/xen/arch/powerpc/backtrace.c Wed Mar 28 08:40:42 2007 +0000 44.2 +++ b/xen/arch/powerpc/backtrace.c Wed Mar 28 08:44:08 2007 +0000 44.3 @@ -205,21 +205,6 @@ void show_backtrace_regs(struct cpu_user 44.4 console_end_sync(); 44.5 } 44.6 44.7 -void __warn(char *file, int line) 44.8 -{ 44.9 - ulong sp; 44.10 - ulong lr; 44.11 - 44.12 - console_start_sync(); 44.13 - printk("WARN at %s:%d\n", file, line); 44.14 - 44.15 - sp = (ulong)__builtin_frame_address(0); 44.16 - lr = (ulong)__builtin_return_address(0); 44.17 - backtrace(sp, lr, lr); 44.18 - 44.19 - console_end_sync(); 44.20 -} 44.21 - 44.22 void dump_execution_state(void) 44.23 { 44.24 struct cpu_user_regs *regs = guest_cpu_user_regs();
45.1 --- a/xen/arch/powerpc/mm.c Wed Mar 28 08:40:42 2007 +0000 45.2 +++ b/xen/arch/powerpc/mm.c Wed Mar 28 08:44:08 2007 +0000 45.3 @@ -261,7 +261,7 @@ int get_page_type(struct page_info *page 45.4 45.5 if ( unlikely(!cpus_empty(mask)) ) 45.6 { 45.7 - perfc_incrc(need_flush_tlb_flush); 45.8 + perfc_incr(need_flush_tlb_flush); 45.9 flush_tlb_mask(mask); 45.10 } 45.11
46.1 --- a/xen/arch/x86/Rules.mk Wed Mar 28 08:40:42 2007 +0000 46.2 +++ b/xen/arch/x86/Rules.mk Wed Mar 28 08:44:08 2007 +0000 46.3 @@ -59,6 +59,4 @@ HDRS += $(wildcard $(BASEDIR)/include/as 46.4 HDRS += $(wildcard $(BASEDIR)/include/asm-x86/hvm/vmx/*.h) 46.5 46.6 # Require GCC v3.4+ (to avoid issues with alignment constraints in Xen headers) 46.7 -ifneq ($(call cc-ver,$(CC),0x030400),y) 46.8 -$(error Xen requires at least gcc-3.4) 46.9 -endif 46.10 +$(call cc-ver-check,CC,0x030400,"Xen requires at least gcc-3.4")
47.1 --- a/xen/arch/x86/apic.c Wed Mar 28 08:40:42 2007 +0000 47.2 +++ b/xen/arch/x86/apic.c Wed Mar 28 08:44:08 2007 +0000 47.3 @@ -1076,7 +1076,7 @@ int reprogram_timer(s_time_t timeout) 47.4 fastcall void smp_apic_timer_interrupt(struct cpu_user_regs * regs) 47.5 { 47.6 ack_APIC_irq(); 47.7 - perfc_incrc(apic_timer); 47.8 + perfc_incr(apic_timer); 47.9 raise_softirq(TIMER_SOFTIRQ); 47.10 } 47.11
48.1 --- a/xen/arch/x86/extable.c Wed Mar 28 08:40:42 2007 +0000 48.2 +++ b/xen/arch/x86/extable.c Wed Mar 28 08:44:08 2007 +0000 48.3 @@ -72,7 +72,7 @@ search_pre_exception_table(struct cpu_us 48.4 if ( fixup ) 48.5 { 48.6 dprintk(XENLOG_INFO, "Pre-exception: %p -> %p\n", _p(addr), _p(fixup)); 48.7 - perfc_incrc(exception_fixed); 48.8 + perfc_incr(exception_fixed); 48.9 } 48.10 return fixup; 48.11 }
49.1 --- a/xen/arch/x86/hvm/io.c Wed Mar 28 08:40:42 2007 +0000 49.2 +++ b/xen/arch/x86/hvm/io.c Wed Mar 28 08:44:08 2007 +0000 49.3 @@ -292,7 +292,11 @@ extern long get_reg_value(int size, int 49.4 static inline void set_eflags_CF(int size, unsigned long v1, 49.5 unsigned long v2, struct cpu_user_regs *regs) 49.6 { 49.7 - unsigned long mask = (1 << (8 * size)) - 1; 49.8 + unsigned long mask; 49.9 + 49.10 + ASSERT((size <= sizeof(mask)) && (size > 0)); 49.11 + 49.12 + mask = ~0UL >> (8 * (sizeof(mask) - size)); 49.13 49.14 if ((v1 & mask) > (v2 & mask)) 49.15 regs->eflags |= X86_EFLAGS_CF; 49.16 @@ -303,7 +307,13 @@ static inline void set_eflags_CF(int siz 49.17 static inline void set_eflags_OF(int size, unsigned long v1, 49.18 unsigned long v2, unsigned long v3, struct cpu_user_regs *regs) 49.19 { 49.20 - if ((v3 ^ v2) & (v3 ^ v1) & (1 << ((8 * size) - 1))) 49.21 + unsigned long mask; 49.22 + 49.23 + ASSERT((size <= sizeof(mask)) && (size > 0)); 49.24 + 49.25 + mask = ~0UL >> (8 * (sizeof(mask) - size)); 49.26 + 49.27 + if ((v3 ^ v2) & (v3 ^ v1) & mask) 49.28 regs->eflags |= X86_EFLAGS_OF; 49.29 } 49.30 49.31 @@ -317,7 +327,11 @@ static inline void set_eflags_AF(int siz 49.32 static inline void set_eflags_ZF(int size, unsigned long v1, 49.33 struct cpu_user_regs *regs) 49.34 { 49.35 - unsigned long mask = (1 << (8 * size)) - 1; 49.36 + unsigned long mask; 49.37 + 49.38 + ASSERT((size <= sizeof(mask)) && (size > 0)); 49.39 + 49.40 + mask = ~0UL >> (8 * (sizeof(mask) - size)); 49.41 49.42 if ((v1 & mask) == 0) 49.43 regs->eflags |= X86_EFLAGS_ZF; 49.44 @@ -326,7 +340,13 @@ static inline void set_eflags_ZF(int siz 49.45 static inline void set_eflags_SF(int size, unsigned long v1, 49.46 struct cpu_user_regs *regs) 49.47 { 49.48 - if (v1 & (1 << ((8 * size) - 1))) 49.49 + unsigned long mask; 49.50 + 49.51 + ASSERT((size <= sizeof(mask)) && (size > 0)); 49.52 + 49.53 + mask = ~0UL >> (8 * (sizeof(mask) - size)); 49.54 + 49.55 + if (v1 & mask) 49.56 regs->eflags |= X86_EFLAGS_SF; 49.57 } 49.58
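The set_eflags_* changes fix mask arithmetic that was undefined for the larger operand sizes: (1 << (8 * size)) - 1 shifts a 32-bit int by 32 bits when size is 4, and by 64 when size is 8. Building the mask in unsigned long from the top end is well-defined for every size the new ASSERT admits; the table of results below is plain arithmetic, not an assumption:

    unsigned long mask;

    ASSERT((size <= sizeof(mask)) && (size > 0));
    mask = ~0UL >> (8 * (sizeof(mask) - size));
    /* On x86-64, where sizeof(mask) == 8:
     *   size == 1  ->  0x00000000000000ff
     *   size == 2  ->  0x000000000000ffff
     *   size == 4  ->  0x00000000ffffffff   (old code: 1 << 32, undefined)
     *   size == 8  ->  0xffffffffffffffff   (old code: 1 << 64, undefined) */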
50.1 --- a/xen/arch/x86/hvm/svm/intr.c Wed Mar 28 08:40:42 2007 +0000 50.2 +++ b/xen/arch/x86/hvm/svm/intr.c Wed Mar 28 08:44:08 2007 +0000 50.3 @@ -64,87 +64,75 @@ asmlinkage void svm_intr_assist(void) 50.4 { 50.5 struct vcpu *v = current; 50.6 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; 50.7 - struct periodic_time *pt; 50.8 int intr_type = APIC_DM_EXTINT; 50.9 int intr_vector = -1; 50.10 - int re_injecting = 0; 50.11 50.12 - /* Check if an Injection is active */ 50.13 - /* Previous Interrupt delivery caused this Intercept? */ 50.14 + /* 50.15 + * Do not deliver a virtual interrupt (vintr) if an exception is pending. 50.16 + * This is because the delivery of the exception can arbitrarily delay 50.17 + * the injection of the vintr (for example, if the exception is handled 50.18 + * via an interrupt gate, hence zeroing RFLAGS.IF). In the meantime the 50.19 + * vTPR can be modified upwards and we can end up delivering the vintr 50.20 + * when it is not in fact valid to do so (because we do not re-check the 50.21 + * vTPR value). Moreover, the guest will be able to see the updated 50.22 + * APIC/PIC state (as if the interrupt had been acknowledged) yet will not 50.23 + * have actually received the interrupt. This could confuse the guest! 50.24 + */ 50.25 + if ( vmcb->eventinj.fields.v ) 50.26 + return; 50.27 + 50.28 + /* 50.29 + * Previous Interrupt delivery caused this intercept? 50.30 + * This will happen if the injection is latched by the processor (hence 50.31 + * clearing vintr.fields.irq) but then subsequently a fault occurs (e.g., 50.32 + * due to lack of shadow mapping of guest IDT or guest-kernel stack). 50.33 + * 50.34 + * NB. Exceptions that fault during delivery are lost. This needs to be 50.35 + * fixed but we'll usually get away with it since faults are usually 50.36 + * idempotent. But this isn't the case for e.g. software interrupts! 50.37 + */ 50.38 if ( vmcb->exitintinfo.fields.v && (vmcb->exitintinfo.fields.type == 0) ) 50.39 { 50.40 - v->arch.hvm_svm.saved_irq_vector = vmcb->exitintinfo.fields.vector; 50.41 + intr_vector = vmcb->exitintinfo.fields.vector; 50.42 vmcb->exitintinfo.bytes = 0; 50.43 - re_injecting = 1; 50.44 + HVMTRACE_1D(REINJ_VIRQ, v, intr_vector); 50.45 + svm_inject_extint(v, intr_vector); 50.46 + return; 50.47 } 50.48 50.49 - /* Previous interrupt still pending? */ 50.50 + /* 50.51 + * Previous interrupt still pending? This occurs if we return from VMRUN 50.52 + * very early in the entry-to-guest process. Usually this is because an 50.53 + * external physical interrupt was pending when we executed VMRUN. 50.54 + */ 50.55 if ( vmcb->vintr.fields.irq ) 50.56 - { 50.57 - intr_vector = vmcb->vintr.fields.vector; 50.58 - vmcb->vintr.bytes = 0; 50.59 - re_injecting = 1; 50.60 - } 50.61 - /* Pending IRQ saved at last VMExit? */ 50.62 - else if ( v->arch.hvm_svm.saved_irq_vector >= 0 ) 50.63 - { 50.64 - intr_vector = v->arch.hvm_svm.saved_irq_vector; 50.65 - v->arch.hvm_svm.saved_irq_vector = -1; 50.66 - re_injecting = 1; 50.67 - } 50.68 - /* Now let's check for newer interrrupts */ 50.69 - else 50.70 - { 50.71 - pt_update_irq(v); 50.72 + return; 50.73 50.74 - hvm_set_callback_irq_level(); 50.75 + /* Crank the handle on interrupt state and check for new interrrupts. 
*/ 50.76 + pt_update_irq(v); 50.77 + hvm_set_callback_irq_level(); 50.78 + if ( !cpu_has_pending_irq(v) ) 50.79 + return; 50.80 50.81 - if ( cpu_has_pending_irq(v) ) 50.82 - { 50.83 - /* 50.84 - * Create a 'fake' virtual interrupt on to intercept as soon 50.85 - * as the guest _can_ take interrupts. Do not obtain the next 50.86 - * interrupt from the vlapic/pic if unable to inject. 50.87 - */ 50.88 - if ( irq_masked(vmcb->rflags) || vmcb->interrupt_shadow ) 50.89 - { 50.90 - vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR; 50.91 - HVMTRACE_2D(INJ_VIRQ, v, 0x0, /*fake=*/ 1); 50.92 - svm_inject_extint(v, 0x0); /* actual vector doesn't really matter */ 50.93 - return; 50.94 - } 50.95 - intr_vector = cpu_get_interrupt(v, &intr_type); 50.96 - } 50.97 + /* 50.98 + * Create a 'fake' virtual interrupt on to intercept as soon as the 50.99 + * guest _can_ take interrupts. Do not obtain the next interrupt from 50.100 + * the vlapic/pic if unable to inject. 50.101 + */ 50.102 + if ( irq_masked(vmcb->rflags) || vmcb->interrupt_shadow ) 50.103 + { 50.104 + vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR; 50.105 + HVMTRACE_2D(INJ_VIRQ, v, 0x0, /*fake=*/ 1); 50.106 + svm_inject_extint(v, 0x0); /* actual vector doesn't matter */ 50.107 + return; 50.108 } 50.109 50.110 - /* have we got an interrupt to inject? */ 50.111 - if ( intr_vector < 0 ) 50.112 - return; 50.113 + /* Okay, we can deliver the interrupt: grab it and update PIC state. */ 50.114 + intr_vector = cpu_get_interrupt(v, &intr_type); 50.115 + BUG_ON(intr_vector < 0); 50.116 50.117 - switch ( intr_type ) 50.118 - { 50.119 - case APIC_DM_EXTINT: 50.120 - case APIC_DM_FIXED: 50.121 - case APIC_DM_LOWEST: 50.122 - /* Re-injecting a PIT interruptt? */ 50.123 - if ( re_injecting && (pt = is_pt_irq(v, intr_vector, intr_type)) ) 50.124 - ++pt->pending_intr_nr; 50.125 - /* let's inject this interrupt */ 50.126 - if (re_injecting) 50.127 - HVMTRACE_1D(REINJ_VIRQ, v, intr_vector); 50.128 - else 50.129 - HVMTRACE_2D(INJ_VIRQ, v, intr_vector, /*fake=*/ 0); 50.130 - svm_inject_extint(v, intr_vector); 50.131 - break; 50.132 - case APIC_DM_SMI: 50.133 - case APIC_DM_NMI: 50.134 - case APIC_DM_INIT: 50.135 - case APIC_DM_STARTUP: 50.136 - default: 50.137 - printk("Unsupported interrupt type: %d\n", intr_type); 50.138 - BUG(); 50.139 - break; 50.140 - } 50.141 + HVMTRACE_2D(INJ_VIRQ, v, intr_vector, /*fake=*/ 0); 50.142 + svm_inject_extint(v, intr_vector); 50.143 50.144 pt_intr_post(v, intr_vector, intr_type); 50.145 }
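One non-obvious trick survives the svm_intr_assist() rewrite: when the guest cannot take an interrupt (RFLAGS.IF clear, or an interrupt shadow in effect), nothing is dequeued from the vlapic/PIC. Instead a VINTR intercept is armed with a throwaway vector, purely so the hypervisor regains control at the first instant injection becomes legal. The fragment below is lifted from the hunk above with extra commentary, not new logic:

    /* Window closed: do NOT consume the pending vector.  Arm a VINTR
     * intercept instead; the 0x0 injected here is never delivered to the
     * guest, it only forces a VMEXIT once interrupts open up, at which
     * point this function runs again and injects for real. */
    if ( irq_masked(vmcb->rflags) || vmcb->interrupt_shadow )
    {
        vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR;
        svm_inject_extint(v, 0x0);   /* actual vector doesn't matter */
        return;
    }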
51.1 --- a/xen/arch/x86/hvm/svm/svm.c Wed Mar 28 08:40:42 2007 +0000 51.2 +++ b/xen/arch/x86/hvm/svm/svm.c Wed Mar 28 08:44:08 2007 +0000 51.3 @@ -64,8 +64,8 @@ extern void svm_dump_inst(unsigned long 51.4 extern int svm_dbg_on; 51.5 void svm_dump_regs(const char *from, struct cpu_user_regs *regs); 51.6 51.7 -static int svm_do_vmmcall_reset_to_realmode(struct vcpu *v, 51.8 - struct cpu_user_regs *regs); 51.9 +static int svm_reset_to_realmode(struct vcpu *v, 51.10 + struct cpu_user_regs *regs); 51.11 51.12 /* va of hardware host save area */ 51.13 static void *hsa[NR_CPUS] __read_mostly; 51.14 @@ -749,19 +749,21 @@ static void svm_init_ap_context( 51.15 struct vcpu_guest_context *ctxt, int vcpuid, int trampoline_vector) 51.16 { 51.17 struct vcpu *v; 51.18 + struct vmcb_struct *vmcb; 51.19 cpu_user_regs_t *regs; 51.20 u16 cs_sel; 51.21 51.22 /* We know this is safe because hvm_bringup_ap() does it */ 51.23 v = current->domain->vcpu[vcpuid]; 51.24 + vmcb = v->arch.hvm_svm.vmcb; 51.25 regs = &v->arch.guest_context.user_regs; 51.26 51.27 memset(ctxt, 0, sizeof(*ctxt)); 51.28 51.29 /* 51.30 * We execute the trampoline code in real mode. The trampoline vector 51.31 - * passed to us is page alligned and is the physicall frame number for 51.32 - * the code. We will execute this code in real mode. 51.33 + * passed to us is page alligned and is the physical frame number for 51.34 + * the code. We will execute this code in real mode. 51.35 */ 51.36 cs_sel = trampoline_vector << 8; 51.37 ctxt->user_regs.eip = 0x0; 51.38 @@ -771,11 +773,11 @@ static void svm_init_ap_context( 51.39 * This is the launch of an AP; set state so that we begin executing 51.40 * the trampoline code in real-mode. 51.41 */ 51.42 - svm_do_vmmcall_reset_to_realmode(v, regs); 51.43 + svm_reset_to_realmode(v, regs); 51.44 /* Adjust the vmcb's hidden register state. */ 51.45 - v->arch.hvm_svm.vmcb->rip = 0; 51.46 - v->arch.hvm_svm.vmcb->cs.sel = cs_sel; 51.47 - v->arch.hvm_svm.vmcb->cs.base = (cs_sel << 4); 51.48 + vmcb->rip = 0; 51.49 + vmcb->cs.sel = cs_sel; 51.50 + vmcb->cs.base = (cs_sel << 4); 51.51 } 51.52 51.53 static void svm_init_hypercall_page(struct domain *d, void *hypercall_page) 51.54 @@ -961,8 +963,6 @@ static int svm_vcpu_initialise(struct vc 51.55 v->arch.ctxt_switch_from = svm_ctxt_switch_from; 51.56 v->arch.ctxt_switch_to = svm_ctxt_switch_to; 51.57 51.58 - v->arch.hvm_svm.saved_irq_vector = -1; 51.59 - 51.60 v->arch.hvm_svm.launch_core = -1; 51.61 51.62 if ( (rc = svm_create_vmcb(v)) != 0 ) 51.63 @@ -2494,8 +2494,8 @@ void svm_handle_invlpg(const short invlp 51.64 * 51.65 * returns 0 on success, non-zero otherwise 51.66 */ 51.67 -static int svm_do_vmmcall_reset_to_realmode(struct vcpu *v, 51.68 - struct cpu_user_regs *regs) 51.69 +static int svm_reset_to_realmode(struct vcpu *v, 51.70 + struct cpu_user_regs *regs) 51.71 { 51.72 struct vmcb_struct *vmcb; 51.73
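In svm_init_ap_context() the trampoline arithmetic deserves a line of explanation: the "vector" handed in is the physical frame number of a page-aligned real-mode trampoline, and the selector is chosen so that CS:IP = sel:0000 lands exactly on that page. A worked example with a hypothetical frame number:

    /* Real-mode linear address = (selector << 4) + offset. */
    u16 cs_sel = 0x9f << 8;      /* pfn 0x9f  ->  selector 0x9f00     */
    u32 base   = cs_sel << 4;    /* 0x9f000 == 0x9f * 4096: the page  */
    /* With eip == 0, the AP starts executing at the trampoline page. */

Shifting the pfn left by 8 and then by 4 is just pfn << 12, i.e. pfn * PAGE_SIZE, which is why the page-alignment requirement in the comment matters.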
52.1 --- a/xen/arch/x86/hvm/svm/vmcb.c Wed Mar 28 08:40:42 2007 +0000 52.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c Wed Mar 28 08:44:08 2007 +0000 52.3 @@ -203,6 +203,7 @@ static int construct_vmcb(struct vcpu *v 52.4 vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */ 52.5 vmcb->exception_intercepts &= ~EXCEPTION_BITMAP_PG; 52.6 vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table); 52.7 + vmcb->cr4 = arch_svm->cpu_shadow_cr4 = 0; 52.8 } 52.9 52.10 return 0;
53.1 --- a/xen/arch/x86/hvm/vmx/intr.c Wed Mar 28 08:40:42 2007 +0000 53.2 +++ b/xen/arch/x86/hvm/vmx/intr.c Wed Mar 28 08:44:08 2007 +0000 53.3 @@ -89,7 +89,7 @@ static void update_tpr_threshold(struct 53.4 asmlinkage void vmx_intr_assist(void) 53.5 { 53.6 int intr_type = 0; 53.7 - int highest_vector; 53.8 + int intr_vector; 53.9 unsigned long eflags; 53.10 struct vcpu *v = current; 53.11 unsigned int idtv_info_field; 53.12 @@ -106,8 +106,9 @@ asmlinkage void vmx_intr_assist(void) 53.13 53.14 if ( unlikely(v->arch.hvm_vmx.vector_injected) ) 53.15 { 53.16 - v->arch.hvm_vmx.vector_injected=0; 53.17 - if (unlikely(has_ext_irq)) enable_irq_window(v); 53.18 + v->arch.hvm_vmx.vector_injected = 0; 53.19 + if ( unlikely(has_ext_irq) ) 53.20 + enable_irq_window(v); 53.21 return; 53.22 } 53.23 53.24 @@ -132,7 +133,6 @@ asmlinkage void vmx_intr_assist(void) 53.25 enable_irq_window(v); 53.26 53.27 HVM_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x", idtv_info_field); 53.28 - 53.29 return; 53.30 } 53.31 53.32 @@ -154,30 +154,13 @@ asmlinkage void vmx_intr_assist(void) 53.33 return; 53.34 } 53.35 53.36 - highest_vector = cpu_get_interrupt(v, &intr_type); 53.37 - if ( highest_vector < 0 ) 53.38 - return; 53.39 + intr_vector = cpu_get_interrupt(v, &intr_type); 53.40 + BUG_ON(intr_vector < 0); 53.41 53.42 - switch ( intr_type ) 53.43 - { 53.44 - case APIC_DM_EXTINT: 53.45 - case APIC_DM_FIXED: 53.46 - case APIC_DM_LOWEST: 53.47 - HVMTRACE_2D(INJ_VIRQ, v, highest_vector, /*fake=*/ 0); 53.48 - vmx_inject_extint(v, highest_vector, VMX_DELIVER_NO_ERROR_CODE); 53.49 - break; 53.50 + HVMTRACE_2D(INJ_VIRQ, v, intr_vector, /*fake=*/ 0); 53.51 + vmx_inject_extint(v, intr_vector, VMX_DELIVER_NO_ERROR_CODE); 53.52 53.53 - case APIC_DM_SMI: 53.54 - case APIC_DM_NMI: 53.55 - case APIC_DM_INIT: 53.56 - case APIC_DM_STARTUP: 53.57 - default: 53.58 - printk("Unsupported interrupt type\n"); 53.59 - BUG(); 53.60 - break; 53.61 - } 53.62 - 53.63 - pt_intr_post(v, highest_vector, intr_type); 53.64 + pt_intr_post(v, intr_vector, intr_type); 53.65 } 53.66 53.67 /*
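vmx_intr_assist() receives the same restructuring as the SVM path. By the time cpu_get_interrupt() runs, the surrounding code has already established that an external interrupt is pending (the has_ext_irq checks above the hunk), so a negative vector is impossible and the defensive switch over APIC delivery modes collapses into a BUG_ON. The SMI/NMI/INIT/STARTUP arms could go, presumably because this path only ever sees ExtINT/Fixed/Lowest vectors; both vendors' fast paths now end identically:

    intr_vector = cpu_get_interrupt(v, &intr_type);
    BUG_ON(intr_vector < 0);    /* a pending IRQ was established above */

    /* ... vendor-specific injection of intr_vector ... */
    pt_intr_post(v, intr_vector, intr_type);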
54.1 --- a/xen/arch/x86/irq.c Wed Mar 28 08:40:42 2007 +0000 54.2 +++ b/xen/arch/x86/irq.c Wed Mar 28 08:44:08 2007 +0000 54.3 @@ -56,7 +56,7 @@ asmlinkage void do_IRQ(struct cpu_user_r 54.4 irq_desc_t *desc = &irq_desc[vector]; 54.5 struct irqaction *action; 54.6 54.7 - perfc_incrc(irqs); 54.8 + perfc_incr(irqs); 54.9 54.10 spin_lock(&desc->lock); 54.11 desc->handler->ack(vector);
55.1 --- a/xen/arch/x86/mm.c Wed Mar 28 08:40:42 2007 +0000 55.2 +++ b/xen/arch/x86/mm.c Wed Mar 28 08:44:08 2007 +0000 55.3 @@ -1726,7 +1726,7 @@ int get_page_type(struct page_info *page 55.4 (!shadow_mode_enabled(page_get_owner(page)) || 55.5 ((nx & PGT_type_mask) == PGT_writable_page)) ) 55.6 { 55.7 - perfc_incrc(need_flush_tlb_flush); 55.8 + perfc_incr(need_flush_tlb_flush); 55.9 flush_tlb_mask(mask); 55.10 } 55.11 55.12 @@ -1969,6 +1969,8 @@ int do_mmuext_op( 55.13 if ( unlikely(!guest_handle_is_null(pdone)) ) 55.14 (void)copy_from_guest(&done, pdone, 1); 55.15 } 55.16 + else 55.17 + perfc_incr(calls_to_mmuext_op); 55.18 55.19 if ( unlikely(!guest_handle_okay(uops, count)) ) 55.20 { 55.21 @@ -2223,6 +2225,8 @@ int do_mmuext_op( 55.22 55.23 UNLOCK_BIGLOCK(d); 55.24 55.25 + perfc_add(num_mmuext_ops, i); 55.26 + 55.27 out: 55.28 /* Add incremental work we have done to the @done output parameter. */ 55.29 if ( unlikely(!guest_handle_is_null(pdone)) ) 55.30 @@ -2257,6 +2261,8 @@ int do_mmu_update( 55.31 if ( unlikely(!guest_handle_is_null(pdone)) ) 55.32 (void)copy_from_guest(&done, pdone, 1); 55.33 } 55.34 + else 55.35 + perfc_incr(calls_to_mmu_update); 55.36 55.37 if ( unlikely(!guest_handle_okay(ureqs, count)) ) 55.38 { 55.39 @@ -2273,9 +2279,6 @@ int do_mmu_update( 55.40 domain_mmap_cache_init(&mapcache); 55.41 domain_mmap_cache_init(&sh_mapcache); 55.42 55.43 - perfc_incrc(calls_to_mmu_update); 55.44 - perfc_addc(num_page_updates, count); 55.45 - 55.46 LOCK_BIGLOCK(d); 55.47 55.48 for ( i = 0; i < count; i++ ) 55.49 @@ -2431,13 +2434,15 @@ int do_mmu_update( 55.50 guest_handle_add_offset(ureqs, 1); 55.51 } 55.52 55.53 - domain_mmap_cache_destroy(&mapcache); 55.54 - domain_mmap_cache_destroy(&sh_mapcache); 55.55 - 55.56 process_deferred_ops(); 55.57 55.58 UNLOCK_BIGLOCK(d); 55.59 55.60 + domain_mmap_cache_destroy(&mapcache); 55.61 + domain_mmap_cache_destroy(&sh_mapcache); 55.62 + 55.63 + perfc_add(num_page_updates, i); 55.64 + 55.65 out: 55.66 /* Add incremental work we have done to the @done output parameter. */ 55.67 if ( unlikely(!guest_handle_is_null(pdone)) ) 55.68 @@ -2724,7 +2729,7 @@ int do_update_va_mapping(unsigned long v 55.69 cpumask_t pmask; 55.70 int rc = 0; 55.71 55.72 - perfc_incrc(calls_to_update_va); 55.73 + perfc_incr(calls_to_update_va); 55.74 55.75 if ( unlikely(!__addr_ok(va) && !paging_mode_external(d)) ) 55.76 return -EINVAL; 55.77 @@ -2740,6 +2745,10 @@ int do_update_va_mapping(unsigned long v 55.78 guest_unmap_l1e(v, pl1e); 55.79 pl1e = NULL; 55.80 55.81 + process_deferred_ops(); 55.82 + 55.83 + UNLOCK_BIGLOCK(d); 55.84 + 55.85 switch ( flags & UVMF_FLUSHTYPE_MASK ) 55.86 { 55.87 case UVMF_TLB_FLUSH: 55.88 @@ -2785,10 +2794,6 @@ int do_update_va_mapping(unsigned long v 55.89 break; 55.90 } 55.91 55.92 - process_deferred_ops(); 55.93 - 55.94 - UNLOCK_BIGLOCK(d); 55.95 - 55.96 return rc; 55.97 } 55.98 55.99 @@ -2806,6 +2811,9 @@ int do_update_va_mapping_otherdomain(uns 55.100 55.101 rc = do_update_va_mapping(va, val64, flags); 55.102 55.103 + BUG_ON(this_cpu(percpu_mm_info).deferred_ops); 55.104 + process_deferred_ops(); /* only to clear foreigndom */ 55.105 + 55.106 return rc; 55.107 } 55.108 55.109 @@ -3378,7 +3386,7 @@ int ptwr_do_page_fault(struct vcpu *v, u 55.110 goto bail; 55.111 55.112 UNLOCK_BIGLOCK(d); 55.113 - perfc_incrc(ptwr_emulations); 55.114 + perfc_incr(ptwr_emulations); 55.115 return EXCRET_fault_fixed; 55.116 55.117 bail:
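The accounting changes in do_mmu_update() and do_mmuext_op() make the counters continuation-aware. Previously a preempted-and-resumed hypercall bumped calls_to_mmu_update once per resumption and added the full request size to num_page_updates before any work was done; now the call counter fires only on a fresh entry (the new else arm of the MMU_UPDATE_PREEMPTED test) and the work counter is added after the loop, from the iteration count actually reached. Condensed from the hunks above:

    if ( unlikely(count & MMU_UPDATE_PREEMPTED) )
        count &= ~MMU_UPDATE_PREEMPTED;    /* resumed: call already counted */
    else
        perfc_incr(calls_to_mmu_update);   /* fresh hypercall */

    for ( i = 0; i < count; i++ )
        /* ... work; the loop may stop early on preemption ... */;

    perfc_add(num_page_updates, i);        /* only the work actually done */

The same hunk also reorders do_update_va_mapping() so that process_deferred_ops() runs and the domain big lock drops before the TLB-flush switch, and has do_update_va_mapping_otherdomain() call process_deferred_ops() solely to clear FOREIGNDOM.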
56.1 --- a/xen/arch/x86/mm/shadow/common.c Wed Mar 28 08:40:42 2007 +0000 56.2 +++ b/xen/arch/x86/mm/shadow/common.c Wed Mar 28 08:44:08 2007 +0000 56.3 @@ -276,7 +276,7 @@ hvm_emulate_write(enum x86_segment seg, 56.4 56.5 /* How many emulations could we save if we unshadowed on stack writes? */ 56.6 if ( seg == x86_seg_ss ) 56.7 - perfc_incrc(shadow_fault_emulate_stack); 56.8 + perfc_incr(shadow_fault_emulate_stack); 56.9 56.10 rc = hvm_translate_linear_addr( 56.11 seg, offset, bytes, hvm_access_write, sh_ctxt, &addr); 56.12 @@ -804,7 +804,7 @@ void shadow_prealloc(struct domain *d, u 56.13 ASSERT(v != NULL); /* Shouldn't have enabled shadows if we've no vcpus */ 56.14 56.15 /* Stage one: walk the list of pinned pages, unpinning them */ 56.16 - perfc_incrc(shadow_prealloc_1); 56.17 + perfc_incr(shadow_prealloc_1); 56.18 list_for_each_backwards_safe(l, t, &d->arch.paging.shadow.pinned_shadows) 56.19 { 56.20 sp = list_entry(l, struct shadow_page_info, list); 56.21 @@ -820,7 +820,7 @@ void shadow_prealloc(struct domain *d, u 56.22 /* Stage two: all shadow pages are in use in hierarchies that are 56.23 * loaded in cr3 on some vcpu. Walk them, unhooking the non-Xen 56.24 * mappings. */ 56.25 - perfc_incrc(shadow_prealloc_2); 56.26 + perfc_incr(shadow_prealloc_2); 56.27 56.28 for_each_vcpu(d, v2) 56.29 for ( i = 0 ; i < 4 ; i++ ) 56.30 @@ -929,7 +929,7 @@ mfn_t shadow_alloc(struct domain *d, 56.31 ASSERT(shadow_locked_by_me(d)); 56.32 ASSERT(order <= SHADOW_MAX_ORDER); 56.33 ASSERT(shadow_type != SH_type_none); 56.34 - perfc_incrc(shadow_alloc); 56.35 + perfc_incr(shadow_alloc); 56.36 56.37 /* Find smallest order which can satisfy the request. */ 56.38 for ( i = order; i <= SHADOW_MAX_ORDER; i++ ) 56.39 @@ -967,7 +967,7 @@ mfn_t shadow_alloc(struct domain *d, 56.40 tlbflush_filter(mask, sp[i].tlbflush_timestamp); 56.41 if ( unlikely(!cpus_empty(mask)) ) 56.42 { 56.43 - perfc_incrc(shadow_alloc_tlbflush); 56.44 + perfc_incr(shadow_alloc_tlbflush); 56.45 flush_tlb_mask(mask); 56.46 } 56.47 /* Now safe to clear the page for reuse */ 56.48 @@ -997,7 +997,7 @@ void shadow_free(struct domain *d, mfn_t 56.49 int i; 56.50 56.51 ASSERT(shadow_locked_by_me(d)); 56.52 - perfc_incrc(shadow_free); 56.53 + perfc_incr(shadow_free); 56.54 56.55 shadow_type = sp->type; 56.56 ASSERT(shadow_type != SH_type_none); 56.57 @@ -1406,7 +1406,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v, 56.58 56.59 sh_hash_audit(d); 56.60 56.61 - perfc_incrc(shadow_hash_lookups); 56.62 + perfc_incr(shadow_hash_lookups); 56.63 key = sh_hash(n, t); 56.64 sh_hash_audit_bucket(d, key); 56.65 56.66 @@ -1434,7 +1434,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v, 56.67 } 56.68 else 56.69 { 56.70 - perfc_incrc(shadow_hash_lookup_head); 56.71 + perfc_incr(shadow_hash_lookup_head); 56.72 } 56.73 return shadow_page_to_mfn(sp); 56.74 } 56.75 @@ -1442,7 +1442,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v, 56.76 sp = sp->next_shadow; 56.77 } 56.78 56.79 - perfc_incrc(shadow_hash_lookup_miss); 56.80 + perfc_incr(shadow_hash_lookup_miss); 56.81 return _mfn(INVALID_MFN); 56.82 } 56.83 56.84 @@ -1460,7 +1460,7 @@ void shadow_hash_insert(struct vcpu *v, 56.85 56.86 sh_hash_audit(d); 56.87 56.88 - perfc_incrc(shadow_hash_inserts); 56.89 + perfc_incr(shadow_hash_inserts); 56.90 key = sh_hash(n, t); 56.91 sh_hash_audit_bucket(d, key); 56.92 56.93 @@ -1486,7 +1486,7 @@ void shadow_hash_delete(struct vcpu *v, 56.94 56.95 sh_hash_audit(d); 56.96 56.97 - perfc_incrc(shadow_hash_deletes); 56.98 + perfc_incr(shadow_hash_deletes); 56.99 key = sh_hash(n, t); 56.100 
sh_hash_audit_bucket(d, key); 56.101 56.102 @@ -1713,7 +1713,7 @@ int sh_remove_write_access(struct vcpu * 56.103 || (pg->u.inuse.type_info & PGT_count_mask) == 0 ) 56.104 return 0; 56.105 56.106 - perfc_incrc(shadow_writeable); 56.107 + perfc_incr(shadow_writeable); 56.108 56.109 /* If this isn't a "normal" writeable page, the domain is trying to 56.110 * put pagetables in special memory of some kind. We can't allow that. */ 56.111 @@ -1735,7 +1735,7 @@ int sh_remove_write_access(struct vcpu * 56.112 56.113 #define GUESS(_a, _h) do { \ 56.114 if ( v->arch.paging.mode->shadow.guess_wrmap(v, (_a), gmfn) ) \ 56.115 - perfc_incrc(shadow_writeable_h_ ## _h); \ 56.116 + perfc_incr(shadow_writeable_h_ ## _h); \ 56.117 if ( (pg->u.inuse.type_info & PGT_count_mask) == 0 ) \ 56.118 return 1; \ 56.119 } while (0) 56.120 @@ -1808,7 +1808,7 @@ int sh_remove_write_access(struct vcpu * 56.121 callbacks[shtype](v, last_smfn, gmfn); 56.122 56.123 if ( (pg->u.inuse.type_info & PGT_count_mask) != old_count ) 56.124 - perfc_incrc(shadow_writeable_h_5); 56.125 + perfc_incr(shadow_writeable_h_5); 56.126 } 56.127 56.128 if ( (pg->u.inuse.type_info & PGT_count_mask) == 0 ) 56.129 @@ -1817,7 +1817,7 @@ int sh_remove_write_access(struct vcpu * 56.130 #endif /* SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC */ 56.131 56.132 /* Brute-force search of all the shadows, by walking the hash */ 56.133 - perfc_incrc(shadow_writeable_bf); 56.134 + perfc_incr(shadow_writeable_bf); 56.135 hash_foreach(v, callback_mask, callbacks, gmfn); 56.136 56.137 /* If that didn't catch the mapping, something is very wrong */ 56.138 @@ -1888,7 +1888,7 @@ int sh_remove_all_mappings(struct vcpu * 56.139 | 1 << SH_type_fl1_64_shadow 56.140 ; 56.141 56.142 - perfc_incrc(shadow_mappings); 56.143 + perfc_incr(shadow_mappings); 56.144 if ( (page->count_info & PGC_count_mask) == 0 ) 56.145 return 0; 56.146 56.147 @@ -1903,7 +1903,7 @@ int sh_remove_all_mappings(struct vcpu * 56.148 * Heuristics for finding the (probably) single mapping of this gmfn */ 56.149 56.150 /* Brute-force search of all the shadows, by walking the hash */ 56.151 - perfc_incrc(shadow_mappings_bf); 56.152 + perfc_incr(shadow_mappings_bf); 56.153 hash_foreach(v, callback_mask, callbacks, gmfn); 56.154 56.155 /* If that didn't catch the mapping, something is very wrong */ 56.156 @@ -1992,9 +1992,9 @@ static int sh_remove_shadow_via_pointer( 56.157 56.158 sh_unmap_domain_page(vaddr); 56.159 if ( rc ) 56.160 - perfc_incrc(shadow_up_pointer); 56.161 + perfc_incr(shadow_up_pointer); 56.162 else 56.163 - perfc_incrc(shadow_unshadow_bf); 56.164 + perfc_incr(shadow_unshadow_bf); 56.165 56.166 return rc; 56.167 } 56.168 @@ -2093,7 +2093,7 @@ void sh_remove_shadows(struct vcpu *v, m 56.169 } 56.170 56.171 /* Search for this shadow in all appropriate shadows */ 56.172 - perfc_incrc(shadow_unshadow); 56.173 + perfc_incr(shadow_unshadow); 56.174 sh_flags = pg->shadow_flags; 56.175 56.176 /* Lower-level shadows need to be excised from upper-level shadows.
57.1 --- a/xen/arch/x86/mm/shadow/multi.c Wed Mar 28 08:40:42 2007 +0000 57.2 +++ b/xen/arch/x86/mm/shadow/multi.c Wed Mar 28 08:44:08 2007 +0000 57.3 @@ -109,7 +109,7 @@ get_shadow_status(struct vcpu *v, mfn_t 57.4 /* Look for shadows in the hash table */ 57.5 { 57.6 mfn_t smfn = shadow_hash_lookup(v, mfn_x(gmfn), shadow_type); 57.7 - perfc_incrc(shadow_get_shadow_status); 57.8 + perfc_incr(shadow_get_shadow_status); 57.9 return smfn; 57.10 } 57.11 57.12 @@ -209,7 +209,7 @@ guest_walk_tables(struct vcpu *v, unsign 57.13 { 57.14 ASSERT(!guest_op || shadow_locked_by_me(v->domain)); 57.15 57.16 - perfc_incrc(shadow_guest_walk); 57.17 + perfc_incr(shadow_guest_walk); 57.18 memset(gw, 0, sizeof(*gw)); 57.19 gw->va = va; 57.20 57.21 @@ -448,14 +448,14 @@ static u32 guest_set_ad_bits(struct vcpu 57.22 == (_PAGE_DIRTY | _PAGE_ACCESSED) ) 57.23 return flags; /* Guest already has A and D bits set */ 57.24 flags |= _PAGE_DIRTY | _PAGE_ACCESSED; 57.25 - perfc_incrc(shadow_ad_update); 57.26 + perfc_incr(shadow_ad_update); 57.27 } 57.28 else 57.29 { 57.30 if ( flags & _PAGE_ACCESSED ) 57.31 return flags; /* Guest already has A bit set */ 57.32 flags |= _PAGE_ACCESSED; 57.33 - perfc_incrc(shadow_a_update); 57.34 + perfc_incr(shadow_a_update); 57.35 } 57.36 57.37 /* Set the bit(s) */ 57.38 @@ -863,7 +863,7 @@ shadow_write_entries(void *d, void *s, i 57.39 * using map_domain_page() to get a writeable mapping if we need to. */ 57.40 if ( __copy_to_user(d, d, sizeof (unsigned long)) != 0 ) 57.41 { 57.42 - perfc_incrc(shadow_linear_map_failed); 57.43 + perfc_incr(shadow_linear_map_failed); 57.44 map = sh_map_domain_page(mfn); 57.45 ASSERT(map != NULL); 57.46 dst = map + ((unsigned long)dst & (PAGE_SIZE - 1)); 57.47 @@ -925,7 +925,7 @@ shadow_get_page_from_l1e(shadow_l1e_t sl 57.48 57.49 if ( unlikely(!res) ) 57.50 { 57.51 - perfc_incrc(shadow_get_page_fail); 57.52 + perfc_incr(shadow_get_page_fail); 57.53 SHADOW_PRINTK("failed: l1e=" SH_PRI_pte "\n"); 57.54 } 57.55 57.56 @@ -2198,7 +2198,7 @@ static int validate_gl4e(struct vcpu *v, 57.57 mfn_t sl3mfn = _mfn(INVALID_MFN); 57.58 int result = 0; 57.59 57.60 - perfc_incrc(shadow_validate_gl4e_calls); 57.61 + perfc_incr(shadow_validate_gl4e_calls); 57.62 57.63 if ( guest_l4e_get_flags(*new_gl4e) & _PAGE_PRESENT ) 57.64 { 57.65 @@ -2250,7 +2250,7 @@ static int validate_gl3e(struct vcpu *v, 57.66 mfn_t sl2mfn = _mfn(INVALID_MFN); 57.67 int result = 0; 57.68 57.69 - perfc_incrc(shadow_validate_gl3e_calls); 57.70 + perfc_incr(shadow_validate_gl3e_calls); 57.71 57.72 if ( guest_l3e_get_flags(*new_gl3e) & _PAGE_PRESENT ) 57.73 { 57.74 @@ -2277,7 +2277,7 @@ static int validate_gl2e(struct vcpu *v, 57.75 mfn_t sl1mfn = _mfn(INVALID_MFN); 57.76 int result = 0; 57.77 57.78 - perfc_incrc(shadow_validate_gl2e_calls); 57.79 + perfc_incr(shadow_validate_gl2e_calls); 57.80 57.81 if ( guest_l2e_get_flags(*new_gl2e) & _PAGE_PRESENT ) 57.82 { 57.83 @@ -2363,7 +2363,7 @@ static int validate_gl1e(struct vcpu *v, 57.84 mfn_t gmfn; 57.85 int result = 0, mmio; 57.86 57.87 - perfc_incrc(shadow_validate_gl1e_calls); 57.88 + perfc_incr(shadow_validate_gl1e_calls); 57.89 57.90 gfn = guest_l1e_get_gfn(*new_gl1e); 57.91 gmfn = vcpu_gfn_to_mfn(v, gfn); 57.92 @@ -2523,7 +2523,7 @@ static inline void check_for_early_unsha 57.93 u32 flags = mfn_to_page(gmfn)->shadow_flags; 57.94 if ( !(flags & (SHF_L2_32|SHF_L2_PAE|SHF_L2H_PAE|SHF_L4_64)) ) 57.95 { 57.96 - perfc_incrc(shadow_early_unshadow); 57.97 + perfc_incr(shadow_early_unshadow); 57.98 sh_remove_shadows(v, gmfn, 0, 0 /* Slow, can fail to 
unshadow */ ); 57.99 } 57.100 } 57.101 @@ -2642,7 +2642,7 @@ static int sh_page_fault(struct vcpu *v, 57.102 SHADOW_PRINTK("d:v=%u:%u va=%#lx err=%u\n", 57.103 v->domain->domain_id, v->vcpu_id, va, regs->error_code); 57.104 57.105 - perfc_incrc(shadow_fault); 57.106 + perfc_incr(shadow_fault); 57.107 // 57.108 // XXX: Need to think about eventually mapping superpages directly in the 57.109 // shadow (when possible), as opposed to splintering them into a 57.110 @@ -2670,7 +2670,7 @@ static int sh_page_fault(struct vcpu *v, 57.111 ASSERT(regs->error_code & PFEC_page_present); 57.112 regs->error_code ^= (PFEC_reserved_bit|PFEC_page_present); 57.113 reset_early_unshadow(v); 57.114 - perfc_incrc(shadow_fault_fast_gnp); 57.115 + perfc_incr(shadow_fault_fast_gnp); 57.116 SHADOW_PRINTK("fast path not-present\n"); 57.117 return 0; 57.118 } 57.119 @@ -2688,7 +2688,7 @@ static int sh_page_fault(struct vcpu *v, 57.120 << PAGE_SHIFT) 57.121 | (va & ~PAGE_MASK); 57.122 } 57.123 - perfc_incrc(shadow_fault_fast_mmio); 57.124 + perfc_incr(shadow_fault_fast_mmio); 57.125 SHADOW_PRINTK("fast path mmio %#"PRIpaddr"\n", gpa); 57.126 reset_early_unshadow(v); 57.127 handle_mmio(gpa); 57.128 @@ -2699,7 +2699,7 @@ static int sh_page_fault(struct vcpu *v, 57.129 /* This should be exceptionally rare: another vcpu has fixed 57.130 * the tables between the fault and our reading the l1e. 57.131 * Retry and let the hardware give us the right fault next time. */ 57.132 - perfc_incrc(shadow_fault_fast_fail); 57.133 + perfc_incr(shadow_fault_fast_fail); 57.134 SHADOW_PRINTK("fast path false alarm!\n"); 57.135 return EXCRET_fault_fixed; 57.136 } 57.137 @@ -2746,7 +2746,7 @@ static int sh_page_fault(struct vcpu *v, 57.138 goto mmio; 57.139 } 57.140 57.141 - perfc_incrc(shadow_fault_bail_not_present); 57.142 + perfc_incr(shadow_fault_bail_not_present); 57.143 goto not_a_shadow_fault; 57.144 } 57.145 57.146 @@ -2761,7 +2761,7 @@ static int sh_page_fault(struct vcpu *v, 57.147 !(accumulated_gflags & _PAGE_USER) ) 57.148 { 57.149 /* illegal user-mode access to supervisor-only page */ 57.150 - perfc_incrc(shadow_fault_bail_user_supervisor); 57.151 + perfc_incr(shadow_fault_bail_user_supervisor); 57.152 goto not_a_shadow_fault; 57.153 } 57.154 57.155 @@ -2772,7 +2772,7 @@ static int sh_page_fault(struct vcpu *v, 57.156 { 57.157 if ( unlikely(!(accumulated_gflags & _PAGE_RW)) ) 57.158 { 57.159 - perfc_incrc(shadow_fault_bail_ro_mapping); 57.160 + perfc_incr(shadow_fault_bail_ro_mapping); 57.161 goto not_a_shadow_fault; 57.162 } 57.163 } 57.164 @@ -2787,7 +2787,7 @@ static int sh_page_fault(struct vcpu *v, 57.165 if ( accumulated_gflags & _PAGE_NX_BIT ) 57.166 { 57.167 /* NX prevented this code fetch */ 57.168 - perfc_incrc(shadow_fault_bail_nx); 57.169 + perfc_incr(shadow_fault_bail_nx); 57.170 goto not_a_shadow_fault; 57.171 } 57.172 } 57.173 @@ -2802,7 +2802,7 @@ static int sh_page_fault(struct vcpu *v, 57.174 57.175 if ( !mmio && !mfn_valid(gmfn) ) 57.176 { 57.177 - perfc_incrc(shadow_fault_bail_bad_gfn); 57.178 + perfc_incr(shadow_fault_bail_bad_gfn); 57.179 SHADOW_PRINTK("BAD gfn=%"SH_PRI_gfn" gmfn=%"PRI_mfn"\n", 57.180 gfn_x(gfn), mfn_x(gmfn)); 57.181 goto not_a_shadow_fault; 57.182 @@ -2844,12 +2844,12 @@ static int sh_page_fault(struct vcpu *v, 57.183 { 57.184 if ( ft == ft_demand_write ) 57.185 { 57.186 - perfc_incrc(shadow_fault_emulate_write); 57.187 + perfc_incr(shadow_fault_emulate_write); 57.188 goto emulate; 57.189 } 57.190 else if ( shadow_mode_trap_reads(d) && ft == ft_demand_read ) 57.191 { 57.192 - 
perfc_incrc(shadow_fault_emulate_read); 57.193 + perfc_incr(shadow_fault_emulate_read); 57.194 goto emulate; 57.195 } 57.196 } 57.197 @@ -2860,7 +2860,7 @@ static int sh_page_fault(struct vcpu *v, 57.198 goto mmio; 57.199 } 57.200 57.201 - perfc_incrc(shadow_fault_fixed); 57.202 + perfc_incr(shadow_fault_fixed); 57.203 d->arch.paging.shadow.fault_count++; 57.204 reset_early_unshadow(v); 57.205 57.206 @@ -2920,7 +2920,7 @@ static int sh_page_fault(struct vcpu *v, 57.207 { 57.208 SHADOW_PRINTK("emulator failure, unshadowing mfn %#lx\n", 57.209 mfn_x(gmfn)); 57.210 - perfc_incrc(shadow_fault_emulate_failed); 57.211 + perfc_incr(shadow_fault_emulate_failed); 57.212 /* If this is actually a page table, then we have a bug, and need 57.213 * to support more operations in the emulator. More likely, 57.214 * though, this is a hint that this page should not be shadowed. */ 57.215 @@ -2935,7 +2935,7 @@ static int sh_page_fault(struct vcpu *v, 57.216 mmio: 57.217 if ( !guest_mode(regs) ) 57.218 goto not_a_shadow_fault; 57.219 - perfc_incrc(shadow_fault_mmio); 57.220 + perfc_incr(shadow_fault_mmio); 57.221 sh_audit_gw(v, &gw); 57.222 unmap_walk(v, &gw); 57.223 SHADOW_PRINTK("mmio %#"PRIpaddr"\n", gpa); 57.224 @@ -2964,7 +2964,7 @@ sh_invlpg(struct vcpu *v, unsigned long 57.225 { 57.226 shadow_l2e_t sl2e; 57.227 57.228 - perfc_incrc(shadow_invlpg); 57.229 + perfc_incr(shadow_invlpg); 57.230 57.231 /* First check that we can safely read the shadow l2e. SMP/PAE linux can 57.232 * run as high as 6% of invlpg calls where we haven't shadowed the l2 57.233 @@ -2983,7 +2983,7 @@ sh_invlpg(struct vcpu *v, unsigned long 57.234 + shadow_l3_linear_offset(va)), 57.235 sizeof (sl3e)) != 0 ) 57.236 { 57.237 - perfc_incrc(shadow_invlpg_fault); 57.238 + perfc_incr(shadow_invlpg_fault); 57.239 return 0; 57.240 } 57.241 if ( (!shadow_l3e_get_flags(sl3e) & _PAGE_PRESENT) ) 57.242 @@ -3002,7 +3002,7 @@ sh_invlpg(struct vcpu *v, unsigned long 57.243 sh_linear_l2_table(v) + shadow_l2_linear_offset(va), 57.244 sizeof (sl2e)) != 0 ) 57.245 { 57.246 - perfc_incrc(shadow_invlpg_fault); 57.247 + perfc_incr(shadow_invlpg_fault); 57.248 return 0; 57.249 } 57.250
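Note: the bulk of the multi.c hunks above is a mechanical rename, perfc_incrc() -> perfc_incr(), reflecting the switch (see xen/common/perfc.c further down) from one global counter structure to a per-CPU counter array, so a plain increment no longer needs to be atomic. A minimal self-contained sketch of that scheme; NR_CPUS, smp_processor_id() and the counter names are illustrative stand-ins, not Xen's real definitions:

    #include <stdio.h>

    /* Illustrative stand-ins for Xen's real per-CPU machinery. */
    #define NR_CPUS 4
    static unsigned int current_cpu;           /* stub for smp_processor_id() */
    #define smp_processor_id() (current_cpu)

    typedef unsigned int perfc_t;
    enum { PERFC_shadow_fault, PERFC_shadow_guest_walk, NUM_PERFCOUNTERS };

    /* One private counter block per CPU: increments need no atomics. */
    static perfc_t perfcounters[NR_CPUS][NUM_PERFCOUNTERS];

    #define perfc_incr(x)   (perfcounters[smp_processor_id()][PERFC_##x]++)
    #define perfc_add(x, n) (perfcounters[smp_processor_id()][PERFC_##x] += (n))

    int main(void)
    {
        current_cpu = 1;
        perfc_incr(shadow_fault);
        perfc_add(shadow_fault, 2);
        printf("cpu1 shadow_fault = %u\n",
               perfcounters[1][PERFC_shadow_fault]);   /* prints 3 */
        return 0;
    }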
58.1 --- a/xen/arch/x86/smp.c Wed Mar 28 08:40:42 2007 +0000 58.2 +++ b/xen/arch/x86/smp.c Wed Mar 28 08:44:08 2007 +0000 58.3 @@ -169,7 +169,7 @@ static unsigned long flush_va; 58.4 fastcall void smp_invalidate_interrupt(void) 58.5 { 58.6 ack_APIC_irq(); 58.7 - perfc_incrc(ipis); 58.8 + perfc_incr(ipis); 58.9 irq_enter(); 58.10 if ( !__sync_lazy_execstate() ) 58.11 { 58.12 @@ -329,7 +329,7 @@ void smp_send_stop(void) 58.13 fastcall void smp_event_check_interrupt(struct cpu_user_regs *regs) 58.14 { 58.15 ack_APIC_irq(); 58.16 - perfc_incrc(ipis); 58.17 + perfc_incr(ipis); 58.18 } 58.19 58.20 fastcall void smp_call_function_interrupt(struct cpu_user_regs *regs) 58.21 @@ -338,7 +338,7 @@ fastcall void smp_call_function_interrup 58.22 void *info = call_data->info; 58.23 58.24 ack_APIC_irq(); 58.25 - perfc_incrc(ipis); 58.26 + perfc_incr(ipis); 58.27 58.28 if ( !cpu_isset(smp_processor_id(), call_data->selected) ) 58.29 return;
59.1 --- a/xen/arch/x86/time.c Wed Mar 28 08:40:42 2007 +0000 59.2 +++ b/xen/arch/x86/time.c Wed Mar 28 08:44:08 2007 +0000 59.3 @@ -670,14 +670,20 @@ static inline void version_update_end(u3 59.4 (*version)++; 59.5 } 59.6 59.7 -static inline void __update_vcpu_system_time(struct vcpu *v) 59.8 +void update_vcpu_system_time(struct vcpu *v) 59.9 { 59.10 struct cpu_time *t; 59.11 struct vcpu_time_info *u; 59.12 59.13 + if ( v->vcpu_info == NULL ) 59.14 + return; 59.15 + 59.16 t = &this_cpu(cpu_time); 59.17 u = &vcpu_info(v, time); 59.18 59.19 + if ( u->tsc_timestamp == t->local_tsc_stamp ) 59.20 + return; 59.21 + 59.22 version_update_begin(&u->version); 59.23 59.24 u->tsc_timestamp = t->local_tsc_stamp; 59.25 @@ -688,13 +694,6 @@ static inline void __update_vcpu_system_ 59.26 version_update_end(&u->version); 59.27 } 59.28 59.29 -void update_vcpu_system_time(struct vcpu *v) 59.30 -{ 59.31 - if ( vcpu_info(v, time.tsc_timestamp) != 59.32 - this_cpu(cpu_time).local_tsc_stamp ) 59.33 - __update_vcpu_system_time(v); 59.34 -} 59.35 - 59.36 void update_domain_wallclock_time(struct domain *d) 59.37 { 59.38 spin_lock(&wc_lock); 59.39 @@ -771,9 +770,10 @@ static void local_time_calibration(void 59.40 local_irq_enable(); 59.41 59.42 #if 0 59.43 - printk("PRE%d: tsc=%lld stime=%lld master=%lld\n", 59.44 + printk("PRE%d: tsc=%"PRIu64" stime=%"PRIu64" master=%"PRIu64"\n", 59.45 smp_processor_id(), prev_tsc, prev_local_stime, prev_master_stime); 59.46 - printk("CUR%d: tsc=%lld stime=%lld master=%lld -> %lld\n", 59.47 + printk("CUR%d: tsc=%"PRIu64" stime=%"PRIu64" master=%"PRIu64 59.48 + " -> %"PRId64"\n", 59.49 smp_processor_id(), curr_tsc, curr_local_stime, curr_master_stime, 59.50 curr_master_stime - curr_local_stime); 59.51 #endif 59.52 @@ -855,6 +855,8 @@ static void local_time_calibration(void 59.53 t->stime_local_stamp = curr_local_stime; 59.54 t->stime_master_stamp = curr_master_stime; 59.55 59.56 + update_vcpu_system_time(current); 59.57 + 59.58 out: 59.59 set_timer(&t->calibration_timer, NOW() + EPOCH); 59.60
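Note: update_vcpu_system_time() above now bails out when v->vcpu_info is NULL (idle vcpus no longer get one; see the xen/common/domain.c hunk below) and skips the update when the TSC stamp is already current. The version_update_begin/end pair around the write is a seqlock-style protocol; a hedged sketch of the writer side, simplified and without the memory barriers the real code needs:

    #include <stdint.h>
    #include <stdio.h>

    /* Odd version = update in progress. A reader samples the version
     * before and after reading the payload and retries unless both
     * samples are equal and even. (Illustrative only.) */
    struct vcpu_time_info { uint32_t version; uint64_t tsc_timestamp; };

    static void version_update_begin(uint32_t *version) { (*version)++; }
    static void version_update_end(uint32_t *version)   { (*version)++; }

    static void update(struct vcpu_time_info *u, uint64_t tsc)
    {
        if (u->tsc_timestamp == tsc)
            return;                    /* nothing changed: skip the bump */
        version_update_begin(&u->version);
        u->tsc_timestamp = tsc;
        version_update_end(&u->version);
    }

    int main(void)
    {
        struct vcpu_time_info u = { 0, 0 };
        update(&u, 1234);
        printf("version=%u tsc=%llu\n", u.version,
               (unsigned long long)u.tsc_timestamp);  /* version=2 tsc=1234 */
        return 0;
    }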
60.1 --- a/xen/arch/x86/traps.c Wed Mar 28 08:40:42 2007 +0000 60.2 +++ b/xen/arch/x86/traps.c Wed Mar 28 08:44:08 2007 +0000 60.3 @@ -637,29 +637,35 @@ asmlinkage int do_invalid_op(struct cpu_ 60.4 memcmp(bug.ud2, "\xf\xb", sizeof(bug.ud2)) || 60.5 (bug.ret != 0xc2) ) 60.6 goto die; 60.7 + eip += sizeof(bug); 60.8 60.9 id = bug.id & 3; 60.10 - if ( id == BUGFRAME_rsvd ) 60.11 - goto die; 60.12 60.13 if ( id == BUGFRAME_dump ) 60.14 { 60.15 show_execution_state(regs); 60.16 - regs->eip += sizeof(bug); 60.17 + regs->eip = (unsigned long)eip; 60.18 return EXCRET_fault_fixed; 60.19 } 60.20 60.21 - /* BUG() or ASSERT(): decode the filename pointer and line number. */ 60.22 - ASSERT((id == BUGFRAME_bug) || (id == BUGFRAME_assert)); 60.23 - eip += sizeof(bug); 60.24 + /* WARN, BUG or ASSERT: decode the filename pointer and line number. */ 60.25 if ( !is_kernel(eip) || 60.26 __copy_from_user(&bug_str, eip, sizeof(bug_str)) || 60.27 memcmp(bug_str.mov, BUG_MOV_STR, sizeof(bug_str.mov)) ) 60.28 goto die; 60.29 + eip += sizeof(bug_str); 60.30 60.31 filename = is_kernel(bug_str.str) ? (char *)bug_str.str : "<unknown>"; 60.32 lineno = bug.id >> 2; 60.33 60.34 + if ( id == BUGFRAME_warn ) 60.35 + { 60.36 + printk("Xen WARN at %.50s:%d\n", filename, lineno); 60.37 + show_execution_state(regs); 60.38 + regs->eip = (unsigned long)eip; 60.39 + return EXCRET_fault_fixed; 60.40 + } 60.41 + 60.42 if ( id == BUGFRAME_bug ) 60.43 { 60.44 printk("Xen BUG at %.50s:%d\n", filename, lineno); 60.45 @@ -668,13 +674,13 @@ asmlinkage int do_invalid_op(struct cpu_ 60.46 panic("Xen BUG at %.50s:%d\n", filename, lineno); 60.47 } 60.48 60.49 - /* ASSERT(): decode the predicate string pointer. */ 60.50 + /* ASSERT: decode the predicate string pointer. */ 60.51 ASSERT(id == BUGFRAME_assert); 60.52 - eip += sizeof(bug_str); 60.53 if ( !is_kernel(eip) || 60.54 __copy_from_user(&bug_str, eip, sizeof(bug_str)) || 60.55 memcmp(bug_str.mov, BUG_MOV_STR, sizeof(bug_str.mov)) ) 60.56 goto die; 60.57 + eip += sizeof(bug_str); 60.58 60.59 predicate = is_kernel(bug_str.str) ? (char *)bug_str.str : "<unknown>"; 60.60 printk("Assertion '%s' failed at %.50s:%d\n", 60.61 @@ -950,7 +956,7 @@ asmlinkage int do_page_fault(struct cpu_ 60.62 60.63 DEBUGGER_trap_entry(TRAP_page_fault, regs); 60.64 60.65 - perfc_incrc(page_faults); 60.66 + perfc_incr(page_faults); 60.67 60.68 if ( unlikely((rc = fixup_page_fault(addr, regs)) != 0) ) 60.69 return rc; 60.70 @@ -962,7 +968,7 @@ asmlinkage int do_page_fault(struct cpu_ 60.71 60.72 if ( likely((fixup = search_exception_table(regs->eip)) != 0) ) 60.73 { 60.74 - perfc_incrc(copy_user_faults); 60.75 + perfc_incr(copy_user_faults); 60.76 regs->eip = fixup; 60.77 return 0; 60.78 }
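Note: do_invalid_op() above decodes Xen's bug frames: the low two bits of bug.id select the frame type (dump/warn/bug/assert, per the asm-x86/bug.h renumbering below) and the remaining bits carry the line number. A small sketch of the encode/decode arithmetic, with an illustrative line number:

    #include <stdio.h>

    /* Frame types, matching the renumbered asm-x86/bug.h below. */
    #define BUGFRAME_dump   0
    #define BUGFRAME_warn   1
    #define BUGFRAME_bug    2
    #define BUGFRAME_assert 3

    /* id packs (line << 2) | type, so the decoder in do_invalid_op()
     * can do 'id = bug.id & 3; lineno = bug.id >> 2;'. */
    static unsigned int encode(unsigned int line, unsigned int type)
    {
        return (line << 2) | (type & 3);
    }

    int main(void)
    {
        unsigned int id = encode(637, BUGFRAME_warn);
        printf("type=%u line=%u\n", id & 3, id >> 2);  /* type=1 line=637 */
        return 0;
    }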
61.1 --- a/xen/arch/x86/x86_32/asm-offsets.c Wed Mar 28 08:40:42 2007 +0000 61.2 +++ b/xen/arch/x86/x86_32/asm-offsets.c Wed Mar 28 08:44:08 2007 +0000 61.3 @@ -107,21 +107,11 @@ void __dummy__(void) 61.4 BLANK(); 61.5 61.6 #if PERF_COUNTERS 61.7 - OFFSET(PERFC_hypercalls, struct perfcounter, hypercalls); 61.8 - OFFSET(PERFC_exceptions, struct perfcounter, exceptions); 61.9 + DEFINE(PERFC_hypercalls, PERFC_hypercalls); 61.10 + DEFINE(PERFC_exceptions, PERFC_exceptions); 61.11 BLANK(); 61.12 #endif 61.13 61.14 - OFFSET(MULTICALL_op, struct multicall_entry, op); 61.15 - OFFSET(MULTICALL_arg0, struct multicall_entry, args[0]); 61.16 - OFFSET(MULTICALL_arg1, struct multicall_entry, args[1]); 61.17 - OFFSET(MULTICALL_arg2, struct multicall_entry, args[2]); 61.18 - OFFSET(MULTICALL_arg3, struct multicall_entry, args[3]); 61.19 - OFFSET(MULTICALL_arg4, struct multicall_entry, args[4]); 61.20 - OFFSET(MULTICALL_arg5, struct multicall_entry, args[5]); 61.21 - OFFSET(MULTICALL_result, struct multicall_entry, result); 61.22 - BLANK(); 61.23 - 61.24 DEFINE(FIXMAP_apic_base, fix_to_virt(FIX_APIC_BASE)); 61.25 BLANK(); 61.26
62.1 --- a/xen/arch/x86/x86_32/domain_page.c Wed Mar 28 08:40:42 2007 +0000 62.2 +++ b/xen/arch/x86/x86_32/domain_page.c Wed Mar 28 08:44:08 2007 +0000 62.3 @@ -50,7 +50,7 @@ void *map_domain_page(unsigned long mfn) 62.4 62.5 ASSERT(!in_irq()); 62.6 62.7 - perfc_incrc(map_domain_page_count); 62.8 + perfc_incr(map_domain_page_count); 62.9 62.10 v = mapcache_current_vcpu(); 62.11 62.12 @@ -76,7 +76,7 @@ void *map_domain_page(unsigned long mfn) 62.13 cache->shadow_epoch[vcpu] = cache->epoch; 62.14 if ( NEED_FLUSH(this_cpu(tlbflush_time), cache->tlbflush_timestamp) ) 62.15 { 62.16 - perfc_incrc(domain_page_tlb_flush); 62.17 + perfc_incr(domain_page_tlb_flush); 62.18 local_flush_tlb(); 62.19 } 62.20 } 62.21 @@ -92,7 +92,7 @@ void *map_domain_page(unsigned long mfn) 62.22 } 62.23 62.24 /* /Second/, flush TLBs. */ 62.25 - perfc_incrc(domain_page_tlb_flush); 62.26 + perfc_incr(domain_page_tlb_flush); 62.27 local_flush_tlb(); 62.28 cache->shadow_epoch[vcpu] = ++cache->epoch; 62.29 cache->tlbflush_timestamp = tlbflush_current_time();
63.1 --- a/xen/arch/x86/x86_32/entry.S Wed Mar 28 08:40:42 2007 +0000 63.2 +++ b/xen/arch/x86/x86_32/entry.S Wed Mar 28 08:44:08 2007 +0000 63.3 @@ -173,7 +173,7 @@ ENTRY(hypercall) 63.4 GET_CURRENT(%ebx) 63.5 cmpl $NR_hypercalls,%eax 63.6 jae bad_hypercall 63.7 - PERFC_INCR(PERFC_hypercalls, %eax) 63.8 + PERFC_INCR(PERFC_hypercalls, %eax, %ebx) 63.9 #ifndef NDEBUG 63.10 /* Create shadow parameters and corrupt those not used by this call. */ 63.11 pushl %eax 63.12 @@ -429,7 +429,7 @@ 1: xorl %eax,%eax 63.13 movl %esp,%edx 63.14 pushl %edx # push the cpu_user_regs pointer 63.15 GET_CURRENT(%ebx) 63.16 - PERFC_INCR(PERFC_exceptions, %eax) 63.17 + PERFC_INCR(PERFC_exceptions, %eax, %ebx) 63.18 call *exception_table(,%eax,4) 63.19 addl $4,%esp 63.20 movl UREGS_eflags(%esp),%eax
64.1 --- a/xen/arch/x86/x86_32/seg_fixup.c Wed Mar 28 08:40:42 2007 +0000 64.2 +++ b/xen/arch/x86/x86_32/seg_fixup.c Wed Mar 28 08:44:08 2007 +0000 64.3 @@ -434,7 +434,7 @@ int gpf_emulate_4gb(struct cpu_user_regs 64.4 goto fail; 64.5 64.6 /* Success! */ 64.7 - perfc_incrc(seg_fixups); 64.8 + perfc_incr(seg_fixups); 64.9 64.10 /* If requested, give a callback on otherwise unused vector 15. */ 64.11 if ( VM_ASSIST(d->domain, VMASST_TYPE_4gb_segments_notify) )
65.1 --- a/xen/arch/x86/x86_64/asm-offsets.c Wed Mar 28 08:40:42 2007 +0000 65.2 +++ b/xen/arch/x86/x86_64/asm-offsets.c Wed Mar 28 08:44:08 2007 +0000 65.3 @@ -121,30 +121,8 @@ void __dummy__(void) 65.4 BLANK(); 65.5 65.6 #if PERF_COUNTERS 65.7 - OFFSET(PERFC_hypercalls, struct perfcounter, hypercalls); 65.8 - OFFSET(PERFC_exceptions, struct perfcounter, exceptions); 65.9 - BLANK(); 65.10 -#endif 65.11 - 65.12 - OFFSET(MULTICALL_op, struct multicall_entry, op); 65.13 - OFFSET(MULTICALL_arg0, struct multicall_entry, args[0]); 65.14 - OFFSET(MULTICALL_arg1, struct multicall_entry, args[1]); 65.15 - OFFSET(MULTICALL_arg2, struct multicall_entry, args[2]); 65.16 - OFFSET(MULTICALL_arg3, struct multicall_entry, args[3]); 65.17 - OFFSET(MULTICALL_arg4, struct multicall_entry, args[4]); 65.18 - OFFSET(MULTICALL_arg5, struct multicall_entry, args[5]); 65.19 - OFFSET(MULTICALL_result, struct multicall_entry, result); 65.20 - BLANK(); 65.21 - 65.22 -#ifdef CONFIG_COMPAT 65.23 - OFFSET(COMPAT_MULTICALL_op, struct compat_multicall_entry, op); 65.24 - OFFSET(COMPAT_MULTICALL_arg0, struct compat_multicall_entry, args[0]); 65.25 - OFFSET(COMPAT_MULTICALL_arg1, struct compat_multicall_entry, args[1]); 65.26 - OFFSET(COMPAT_MULTICALL_arg2, struct compat_multicall_entry, args[2]); 65.27 - OFFSET(COMPAT_MULTICALL_arg3, struct compat_multicall_entry, args[3]); 65.28 - OFFSET(COMPAT_MULTICALL_arg4, struct compat_multicall_entry, args[4]); 65.29 - OFFSET(COMPAT_MULTICALL_arg5, struct compat_multicall_entry, args[5]); 65.30 - OFFSET(COMPAT_MULTICALL_result, struct compat_multicall_entry, result); 65.31 + DEFINE(PERFC_hypercalls, PERFC_hypercalls); 65.32 + DEFINE(PERFC_exceptions, PERFC_exceptions); 65.33 BLANK(); 65.34 #endif 65.35
66.1 --- a/xen/arch/x86/x86_64/compat/entry.S Wed Mar 28 08:40:42 2007 +0000 66.2 +++ b/xen/arch/x86/x86_64/compat/entry.S Wed Mar 28 08:44:08 2007 +0000 66.3 @@ -57,7 +57,7 @@ ENTRY(compat_hypercall) 66.4 movl UREGS_rbx(%rsp),%edi /* Arg 1 */ 66.5 #endif 66.6 leaq compat_hypercall_table(%rip),%r10 66.7 - PERFC_INCR(PERFC_hypercalls, %rax) 66.8 + PERFC_INCR(PERFC_hypercalls, %rax, %rbx) 66.9 callq *(%r10,%rax,8) 66.10 #ifndef NDEBUG 66.11 /* Deliberately corrupt parameter regs used by this hypercall. */
67.1 --- a/xen/arch/x86/x86_64/entry.S Wed Mar 28 08:40:42 2007 +0000 67.2 +++ b/xen/arch/x86/x86_64/entry.S Wed Mar 28 08:44:08 2007 +0000 67.3 @@ -147,7 +147,7 @@ ENTRY(syscall_enter) 67.4 pushq UREGS_rip+8(%rsp) 67.5 #endif 67.6 leaq hypercall_table(%rip),%r10 67.7 - PERFC_INCR(PERFC_hypercalls, %rax) 67.8 + PERFC_INCR(PERFC_hypercalls, %rax, %rbx) 67.9 callq *(%r10,%rax,8) 67.10 #ifndef NDEBUG 67.11 /* Deliberately corrupt parameter regs used by this hypercall. */ 67.12 @@ -396,7 +396,7 @@ 1: movq %rsp,%rdi 67.13 movl UREGS_entry_vector(%rsp),%eax 67.14 leaq exception_table(%rip),%rdx 67.15 GET_CURRENT(%rbx) 67.16 - PERFC_INCR(PERFC_exceptions, %rax) 67.17 + PERFC_INCR(PERFC_exceptions, %rax, %rbx) 67.18 callq *(%rdx,%rax,8) 67.19 testb $3,UREGS_cs(%rsp) 67.20 jz restore_all_xen
68.1 --- a/xen/arch/x86/x86_emulate.c Wed Mar 28 08:40:42 2007 +0000 68.2 +++ b/xen/arch/x86/x86_emulate.c Wed Mar 28 08:44:08 2007 +0000 68.3 @@ -1565,8 +1565,10 @@ x86_emulate( 68.4 if ( ((op_bytes = dst.bytes) != 8) && mode_64bit() ) 68.5 { 68.6 dst.bytes = op_bytes = 8; 68.7 - if ( (rc = ops->read(dst.mem.seg, dst.mem.off, 68.8 - &dst.val, 8, ctxt)) != 0 ) 68.9 + if ( dst.type == OP_REG ) 68.10 + dst.val = *dst.reg; 68.11 + else if ( (rc = ops->read(dst.mem.seg, dst.mem.off, 68.12 + &dst.val, 8, ctxt)) != 0 ) 68.13 goto done; 68.14 } 68.15 src.val = _regs.eip; 68.16 @@ -1579,8 +1581,10 @@ x86_emulate( 68.17 if ( mode_64bit() && (dst.bytes == 4) ) 68.18 { 68.19 dst.bytes = 8; 68.20 - if ( (rc = ops->read(dst.mem.seg, dst.mem.off, 68.21 - &dst.val, 8, ctxt)) != 0 ) 68.22 + if ( dst.type == OP_REG ) 68.23 + dst.val = *dst.reg; 68.24 + else if ( (rc = ops->read(dst.mem.seg, dst.mem.off, 68.25 + &dst.val, 8, ctxt)) != 0 ) 68.26 goto done; 68.27 } 68.28 if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes),
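Note: the x86_emulate.c fix above concerns a 64-bit push of a 32-bit operand: the operand is widened to 8 bytes, but for a register operand the value must come from the register itself; the old code unconditionally issued a memory read, which is wrong (and can fault) when dst is OP_REG. A sketch of the corrected flow, with the emulator's read callback stubbed and mode_64bit() assumed true:

    #include <stdint.h>
    #include <stdio.h>

    enum op_type { OP_REG, OP_MEM };
    struct operand { enum op_type type; int bytes;
                     uint64_t val, *reg, mem_off; };

    /* Stub for the emulator's ops->read() memory callback. */
    static int read_mem(uint64_t off, uint64_t *val, int bytes)
    { (void)off; (void)bytes; *val = 0xdead; return 0; }

    static int widen_for_push(struct operand *dst)
    {
        int rc = 0;
        if (dst->bytes == 4) {            /* assume mode_64bit() here */
            dst->bytes = 8;
            if (dst->type == OP_REG)
                dst->val = *dst->reg;     /* read the register directly */
            else                          /* memory operand: re-read wide */
                rc = read_mem(dst->mem_off, &dst->val, 8);
        }
        return rc;
    }

    int main(void)
    {
        uint64_t rbx = 42;
        struct operand dst = { OP_REG, 4, 0, &rbx, 0 };
        widen_for_push(&dst);
        printf("bytes=%d val=%llu\n", dst.bytes,
               (unsigned long long)dst.val);          /* bytes=8 val=42 */
        return 0;
    }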
69.1 --- a/xen/common/domain.c Wed Mar 28 08:40:42 2007 +0000 69.2 +++ b/xen/common/domain.c Wed Mar 28 08:44:08 2007 +0000 69.3 @@ -96,14 +96,16 @@ struct vcpu *alloc_vcpu( 69.4 69.5 v->domain = d; 69.6 v->vcpu_id = vcpu_id; 69.7 - v->vcpu_info = shared_info_addr(d, vcpu_info[vcpu_id]); 69.8 spin_lock_init(&v->pause_lock); 69.9 69.10 v->runstate.state = is_idle_vcpu(v) ? RUNSTATE_running : RUNSTATE_offline; 69.11 v->runstate.state_entry_time = NOW(); 69.12 69.13 if ( !is_idle_domain(d) ) 69.14 + { 69.15 set_bit(_VCPUF_down, &v->vcpu_flags); 69.16 + v->vcpu_info = shared_info_addr(d, vcpu_info[vcpu_id]); 69.17 + } 69.18 69.19 if ( sched_init_vcpu(v, cpu_id) != 0 ) 69.20 {
70.1 --- a/xen/common/multicall.c Wed Mar 28 08:40:42 2007 +0000 70.2 +++ b/xen/common/multicall.c Wed Mar 28 08:44:08 2007 +0000 70.3 @@ -10,6 +10,7 @@ 70.4 #include <xen/event.h> 70.5 #include <xen/multicall.h> 70.6 #include <xen/guest_access.h> 70.7 +#include <xen/perfc.h> 70.8 #include <asm/current.h> 70.9 #include <asm/hardirq.h> 70.10 70.11 @@ -69,14 +70,18 @@ do_multicall( 70.12 guest_handle_add_offset(call_list, 1); 70.13 } 70.14 70.15 + perfc_incr(calls_to_multicall); 70.16 + perfc_add(calls_from_multicall, nr_calls); 70.17 mcs->flags = 0; 70.18 return 0; 70.19 70.20 fault: 70.21 + perfc_incr(calls_to_multicall); 70.22 mcs->flags = 0; 70.23 return -EFAULT; 70.24 70.25 preempted: 70.26 + perfc_add(calls_from_multicall, i); 70.27 mcs->flags = 0; 70.28 return hypercall_create_continuation( 70.29 __HYPERVISOR_multicall, "hi", call_list, nr_calls-i);
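Note: the multicall.c hunks count one calls_to_multicall event per completed (or faulted) do_multicall() invocation and add the number of sub-calls actually executed to calls_from_multicall. On the preemption path only the partial sub-call count is added: the continuation re-enters do_multicall(), so the invocation itself is counted exactly once on the final pass. A hedged outline of the three exit paths; preempt_pending() and do_one() are placeholders and the counters are reduced to plain globals:

    #include <stdio.h>

    struct multicall_entry { int op; };
    static unsigned int calls_to_multicall, calls_from_multicall;
    #define perfc_incr(x)   ((x)++)
    #define perfc_add(x, n) ((x) += (n))
    static int preempt_pending(void) { return 0; }
    static int do_one(struct multicall_entry *e) { (void)e; return 0; }

    long do_multicall(struct multicall_entry *calls, unsigned int nr_calls)
    {
        unsigned int i;
        for (i = 0; i < nr_calls; i++) {
            if (preempt_pending()) {
                /* Partial batch: count sub-calls done so far only. */
                perfc_add(calls_from_multicall, i);
                return 1;   /* would return a hypercall continuation */
            }
            if (do_one(&calls[i]) < 0) {
                perfc_incr(calls_to_multicall);   /* faulted batch */
                return -14; /* -EFAULT */
            }
        }
        perfc_incr(calls_to_multicall);
        perfc_add(calls_from_multicall, nr_calls);
        return 0;
    }

    int main(void)
    {
        struct multicall_entry batch[3] = { {0}, {1}, {2} };
        do_multicall(batch, 3);
        printf("to=%u from=%u\n", calls_to_multicall,
               calls_from_multicall);               /* to=1 from=3 */
        return 0;
    }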
71.1 --- a/xen/common/page_alloc.c Wed Mar 28 08:40:42 2007 +0000 71.2 +++ b/xen/common/page_alloc.c Wed Mar 28 08:44:08 2007 +0000 71.3 @@ -423,7 +423,7 @@ static struct page_info *alloc_heap_page 71.4 71.5 if ( unlikely(!cpus_empty(mask)) ) 71.6 { 71.7 - perfc_incrc(need_flush_tlb_flush); 71.8 + perfc_incr(need_flush_tlb_flush); 71.9 flush_tlb_mask(mask); 71.10 } 71.11
72.1 --- a/xen/common/perfc.c Wed Mar 28 08:40:42 2007 +0000 72.2 +++ b/xen/common/perfc.c Wed Mar 28 08:44:08 2007 +0000 72.3 @@ -10,81 +10,98 @@ 72.4 #include <public/sysctl.h> 72.5 #include <asm/perfc.h> 72.6 72.7 -#undef PERFCOUNTER 72.8 -#undef PERFCOUNTER_CPU 72.9 -#undef PERFCOUNTER_ARRAY 72.10 -#undef PERFSTATUS 72.11 -#undef PERFSTATUS_CPU 72.12 -#undef PERFSTATUS_ARRAY 72.13 #define PERFCOUNTER( var, name ) { name, TYPE_SINGLE, 0 }, 72.14 -#define PERFCOUNTER_CPU( var, name ) { name, TYPE_CPU, 0 }, 72.15 #define PERFCOUNTER_ARRAY( var, name, size ) { name, TYPE_ARRAY, size }, 72.16 #define PERFSTATUS( var, name ) { name, TYPE_S_SINGLE, 0 }, 72.17 -#define PERFSTATUS_CPU( var, name ) { name, TYPE_S_CPU, 0 }, 72.18 #define PERFSTATUS_ARRAY( var, name, size ) { name, TYPE_S_ARRAY, size }, 72.19 -static struct { 72.20 - char *name; 72.21 - enum { TYPE_SINGLE, TYPE_CPU, TYPE_ARRAY, 72.22 - TYPE_S_SINGLE, TYPE_S_CPU, TYPE_S_ARRAY 72.23 +static const struct { 72.24 + const char *name; 72.25 + enum { TYPE_SINGLE, TYPE_ARRAY, 72.26 + TYPE_S_SINGLE, TYPE_S_ARRAY 72.27 } type; 72.28 - int nr_elements; 72.29 + unsigned int nr_elements; 72.30 } perfc_info[] = { 72.31 #include <xen/perfc_defn.h> 72.32 }; 72.33 72.34 #define NR_PERFCTRS (sizeof(perfc_info) / sizeof(perfc_info[0])) 72.35 72.36 -struct perfcounter perfcounters; 72.37 +DEFINE_PER_CPU(perfc_t[NUM_PERFCOUNTERS], perfcounters); 72.38 72.39 void perfc_printall(unsigned char key) 72.40 { 72.41 - unsigned int i, j, sum; 72.42 + unsigned int i, j; 72.43 s_time_t now = NOW(); 72.44 - atomic_t *counters = (atomic_t *)&perfcounters; 72.45 72.46 printk("Xen performance counters SHOW (now = 0x%08X:%08X)\n", 72.47 (u32)(now>>32), (u32)now); 72.48 72.49 - for ( i = 0; i < NR_PERFCTRS; i++ ) 72.50 + for ( i = j = 0; i < NR_PERFCTRS; i++ ) 72.51 { 72.52 + unsigned int k, cpu; 72.53 + unsigned long long sum = 0; 72.54 + 72.55 printk("%-32s ", perfc_info[i].name); 72.56 switch ( perfc_info[i].type ) 72.57 { 72.58 case TYPE_SINGLE: 72.59 case TYPE_S_SINGLE: 72.60 - printk("TOTAL[%10d]", atomic_read(&counters[0])); 72.61 - counters += 1; 72.62 - break; 72.63 - case TYPE_CPU: 72.64 - case TYPE_S_CPU: 72.65 - sum = 0; 72.66 - for_each_online_cpu ( j ) 72.67 - sum += atomic_read(&counters[j]); 72.68 - printk("TOTAL[%10u]", sum); 72.69 - if (sum) 72.70 + for_each_online_cpu ( cpu ) 72.71 + sum += per_cpu(perfcounters, cpu)[j]; 72.72 + printk("TOTAL[%12Lu]", sum); 72.73 + if ( sum ) 72.74 { 72.75 - for_each_online_cpu ( j ) 72.76 - printk(" CPU%02d[%10d]", j, atomic_read(&counters[j])); 72.77 + k = 0; 72.78 + for_each_online_cpu ( cpu ) 72.79 + { 72.80 + if ( k > 0 && (k % 4) == 0 ) 72.81 + printk("\n%46s", ""); 72.82 + printk(" CPU%02u[%10"PRIperfc"u]", cpu, per_cpu(perfcounters, cpu)[j]); 72.83 + ++k; 72.84 + } 72.85 } 72.86 - counters += NR_CPUS; 72.87 + ++j; 72.88 break; 72.89 case TYPE_ARRAY: 72.90 case TYPE_S_ARRAY: 72.91 - for ( j = sum = 0; j < perfc_info[i].nr_elements; j++ ) 72.92 - sum += atomic_read(&counters[j]); 72.93 - printk("TOTAL[%10u]", sum); 72.94 -#ifdef PERF_ARRAYS 72.95 + for_each_online_cpu ( cpu ) 72.96 + { 72.97 + perfc_t *counters = per_cpu(perfcounters, cpu) + j; 72.98 + 72.99 + for ( k = 0; k < perfc_info[i].nr_elements; k++ ) 72.100 + sum += counters[k]; 72.101 + } 72.102 + printk("TOTAL[%12Lu]", sum); 72.103 if (sum) 72.104 { 72.105 - for ( j = 0; j < perfc_info[i].nr_elements; j++ ) 72.106 +#ifdef PERF_ARRAYS 72.107 + for ( k = 0; k < perfc_info[i].nr_elements; k++ ) 72.108 + { 72.109 + sum = 0; 72.110 + for_each_online_cpu 
( cpu ) 72.111 + sum += per_cpu(perfcounters, cpu)[j + k]; 72.112 + if ( (k % 4) == 0 ) 72.113 + printk("\n%16s", ""); 72.114 + printk(" ARR%02u[%10Lu]", k, sum); 72.115 + } 72.116 +#else 72.117 + k = 0; 72.118 + for_each_online_cpu ( cpu ) 72.119 { 72.120 - if ( (j % 4) == 0 ) 72.121 - printk("\n "); 72.122 - printk(" ARR%02d[%10d]", j, atomic_read(&counters[j])); 72.123 + perfc_t *counters = per_cpu(perfcounters, cpu) + j; 72.124 + unsigned int n; 72.125 + 72.126 + sum = 0; 72.127 + for ( n = 0; n < perfc_info[i].nr_elements; n++ ) 72.128 + sum += counters[n]; 72.129 + if ( k > 0 && (k % 4) == 0 ) 72.130 + printk("\n%46s", ""); 72.131 + printk(" CPU%02u[%10Lu]", cpu, sum); 72.132 + ++k; 72.133 } 72.134 +#endif 72.135 } 72.136 -#endif 72.137 - counters += j; 72.138 + j += perfc_info[i].nr_elements; 72.139 break; 72.140 } 72.141 printk("\n"); 72.142 @@ -97,7 +114,6 @@ void perfc_reset(unsigned char key) 72.143 { 72.144 unsigned int i, j; 72.145 s_time_t now = NOW(); 72.146 - atomic_t *counters = (atomic_t *)&perfcounters; 72.147 72.148 if ( key != '\0' ) 72.149 printk("Xen performance counters RESET (now = 0x%08X:%08X)\n", 72.150 @@ -105,43 +121,39 @@ void perfc_reset(unsigned char key) 72.151 72.152 /* leave STATUS counters alone -- don't reset */ 72.153 72.154 - for ( i = 0; i < NR_PERFCTRS; i++ ) 72.155 + for ( i = j = 0; i < NR_PERFCTRS; i++ ) 72.156 { 72.157 + unsigned int cpu; 72.158 + 72.159 switch ( perfc_info[i].type ) 72.160 { 72.161 case TYPE_SINGLE: 72.162 - atomic_set(&counters[0],0); 72.163 + for_each_cpu ( cpu ) 72.164 + per_cpu(perfcounters, cpu)[j] = 0; 72.165 case TYPE_S_SINGLE: 72.166 - counters += 1; 72.167 - break; 72.168 - case TYPE_CPU: 72.169 - for ( j = 0; j < NR_CPUS; j++ ) 72.170 - atomic_set(&counters[j],0); 72.171 - case TYPE_S_CPU: 72.172 - counters += NR_CPUS; 72.173 + ++j; 72.174 break; 72.175 case TYPE_ARRAY: 72.176 - for ( j = 0; j < perfc_info[i].nr_elements; j++ ) 72.177 - atomic_set(&counters[j],0); 72.178 + for_each_cpu ( cpu ) 72.179 + memset(per_cpu(perfcounters, cpu) + j, 0, 72.180 + perfc_info[i].nr_elements * sizeof(perfc_t)); 72.181 case TYPE_S_ARRAY: 72.182 - counters += perfc_info[i].nr_elements; 72.183 + j += perfc_info[i].nr_elements; 72.184 break; 72.185 } 72.186 } 72.187 72.188 - arch_perfc_reset (); 72.189 + arch_perfc_reset(); 72.190 } 72.191 72.192 static xen_sysctl_perfc_desc_t perfc_d[NR_PERFCTRS]; 72.193 static xen_sysctl_perfc_val_t *perfc_vals; 72.194 -static int perfc_nbr_vals; 72.195 +static unsigned int perfc_nbr_vals; 72.196 static int perfc_init = 0; 72.197 static int perfc_copy_info(XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc, 72.198 XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val) 72.199 { 72.200 - unsigned int i, j; 72.201 - unsigned int v = 0; 72.202 - atomic_t *counters = (atomic_t *)&perfcounters; 72.203 + unsigned int i, j, v; 72.204 72.205 /* We only copy the name and array-size information once. */ 72.206 if ( !perfc_init ) 72.207 @@ -154,11 +166,7 @@ static int perfc_copy_info(XEN_GUEST_HAN 72.208 { 72.209 case TYPE_SINGLE: 72.210 case TYPE_S_SINGLE: 72.211 - perfc_d[i].nr_vals = 1; 72.212 - break; 72.213 - case TYPE_CPU: 72.214 - case TYPE_S_CPU: 72.215 - perfc_d[i].nr_vals = num_online_cpus(); 72.216 + perfc_d[i].nr_vals = num_possible_cpus(); 72.217 break; 72.218 case TYPE_ARRAY: 72.219 case TYPE_S_ARRAY: 72.220 @@ -181,26 +189,31 @@ static int perfc_copy_info(XEN_GUEST_HAN 72.221 arch_perfc_gather(); 72.222 72.223 /* We gather the counts together every time. 
*/ 72.224 - for ( i = 0; i < NR_PERFCTRS; i++ ) 72.225 + for ( i = j = v = 0; i < NR_PERFCTRS; i++ ) 72.226 { 72.227 + unsigned int cpu; 72.228 + 72.229 switch ( perfc_info[i].type ) 72.230 { 72.231 case TYPE_SINGLE: 72.232 case TYPE_S_SINGLE: 72.233 - perfc_vals[v++] = atomic_read(&counters[0]); 72.234 - counters += 1; 72.235 - break; 72.236 - case TYPE_CPU: 72.237 - case TYPE_S_CPU: 72.238 - for ( j = 0; j < perfc_d[i].nr_vals; j++ ) 72.239 - perfc_vals[v++] = atomic_read(&counters[j]); 72.240 - counters += NR_CPUS; 72.241 + for_each_cpu ( cpu ) 72.242 + perfc_vals[v++] = per_cpu(perfcounters, cpu)[j]; 72.243 + ++j; 72.244 break; 72.245 case TYPE_ARRAY: 72.246 case TYPE_S_ARRAY: 72.247 - for ( j = 0; j < perfc_d[i].nr_vals; j++ ) 72.248 - perfc_vals[v++] = atomic_read(&counters[j]); 72.249 - counters += perfc_info[i].nr_elements; 72.250 + memset(perfc_vals + v, 0, perfc_d[i].nr_vals * sizeof(*perfc_vals)); 72.251 + for_each_cpu ( cpu ) 72.252 + { 72.253 + perfc_t *counters = per_cpu(perfcounters, cpu) + j; 72.254 + unsigned int k; 72.255 + 72.256 + for ( k = 0; k < perfc_d[i].nr_vals; k++ ) 72.257 + perfc_vals[v + k] += counters[k]; 72.258 + } 72.259 + v += perfc_d[i].nr_vals; 72.260 + j += perfc_info[i].nr_elements; 72.261 break; 72.262 } 72.263 } 72.264 @@ -224,14 +237,12 @@ int perfc_control(xen_sysctl_perfc_op_t 72.265 switch ( pc->cmd ) 72.266 { 72.267 case XEN_SYSCTL_PERFCOP_reset: 72.268 - perfc_copy_info(pc->desc, pc->val); 72.269 + rc = perfc_copy_info(pc->desc, pc->val); 72.270 perfc_reset(0); 72.271 - rc = 0; 72.272 break; 72.273 72.274 case XEN_SYSCTL_PERFCOP_query: 72.275 - perfc_copy_info(pc->desc, pc->val); 72.276 - rc = 0; 72.277 + rc = perfc_copy_info(pc->desc, pc->val); 72.278 break; 72.279 72.280 default:
73.1 --- a/xen/common/schedule.c Wed Mar 28 08:40:42 2007 +0000 73.2 +++ b/xen/common/schedule.c Wed Mar 28 08:44:08 2007 +0000 73.3 @@ -606,7 +606,7 @@ static void schedule(void) 73.4 ASSERT(!in_irq()); 73.5 ASSERT(this_cpu(mc_state).flags == 0); 73.6 73.7 - perfc_incrc(sched_run); 73.8 + perfc_incr(sched_run); 73.9 73.10 sd = &this_cpu(schedule_data); 73.11 73.12 @@ -654,16 +654,13 @@ static void schedule(void) 73.13 73.14 spin_unlock_irq(&sd->schedule_lock); 73.15 73.16 - perfc_incrc(sched_ctx); 73.17 + perfc_incr(sched_ctx); 73.18 73.19 stop_timer(&prev->periodic_timer); 73.20 73.21 /* Ensure that the domain has an up-to-date time base. */ 73.22 - if ( !is_idle_vcpu(next) ) 73.23 - { 73.24 - update_vcpu_system_time(next); 73.25 - vcpu_periodic_timer_work(next); 73.26 - } 73.27 + update_vcpu_system_time(next); 73.28 + vcpu_periodic_timer_work(next); 73.29 73.30 TRACE_4D(TRC_SCHED_SWITCH, 73.31 prev->domain->domain_id, prev->vcpu_id, 73.32 @@ -684,7 +681,7 @@ void context_saved(struct vcpu *prev) 73.33 static void s_timer_fn(void *unused) 73.34 { 73.35 raise_softirq(SCHEDULE_SOFTIRQ); 73.36 - perfc_incrc(sched_irq); 73.37 + perfc_incr(sched_irq); 73.38 } 73.39 73.40 /* Per-VCPU periodic timer function: sends a virtual timer interrupt. */
74.1 --- a/xen/drivers/char/console.c Wed Mar 28 08:40:42 2007 +0000 74.2 +++ b/xen/drivers/char/console.c Wed Mar 28 08:44:08 2007 +0000 74.3 @@ -900,12 +900,18 @@ void panic(const char *fmt, ...) 74.4 void __bug(char *file, int line) 74.5 { 74.6 console_start_sync(); 74.7 - printk("BUG at %s:%d\n", file, line); 74.8 + printk("Xen BUG at %s:%d\n", file, line); 74.9 dump_execution_state(); 74.10 - panic("BUG at %s:%d\n", file, line); 74.11 + panic("Xen BUG at %s:%d\n", file, line); 74.12 for ( ; ; ) ; 74.13 } 74.14 74.15 +void __warn(char *file, int line) 74.16 +{ 74.17 + printk("Xen WARN at %s:%d\n", file, line); 74.18 + dump_execution_state(); 74.19 +} 74.20 + 74.21 /* 74.22 * Local variables: 74.23 * mode: C
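Note: the new generic __warn() above backs the WARN() macros that the ia64 and powerpc bug.h hunks below add (x86 instead gets the richer bug-frame path in traps.c). A standalone illustration of the macro pattern; dump_execution_state() is omitted, and WARN_ON() is shown as the usual wrapper, assumed to live in a common header:

    #include <stdio.h>

    void __warn(char *file, int line)
    {
        printf("Xen WARN at %s:%d\n", file, line);
        /* dump_execution_state() follows here in the hypervisor */
    }

    #define WARN()      __warn(__FILE__, __LINE__)
    #define WARN_ON(p)  do { if (p) WARN(); } while (0)

    int main(void)
    {
        WARN();            /* unconditional */
        WARN_ON(1 > 0);    /* fires */
        WARN_ON(0);        /* silent */
        return 0;
    }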
75.1 --- a/xen/include/asm-ia64/bug.h Wed Mar 28 08:40:42 2007 +0000 75.2 +++ b/xen/include/asm-ia64/bug.h Wed Mar 28 08:44:08 2007 +0000 75.3 @@ -2,5 +2,6 @@ 75.4 #define __IA64_BUG_H__ 75.5 75.6 #define BUG() __bug(__FILE__, __LINE__) 75.7 +#define WARN() __warn(__FILE__, __LINE__) 75.8 75.9 #endif /* __IA64_BUG_H__ */
76.1 --- a/xen/include/asm-ia64/linux-xen/asm/asmmacro.h Wed Mar 28 08:40:42 2007 +0000 76.2 +++ b/xen/include/asm-ia64/linux-xen/asm/asmmacro.h Wed Mar 28 08:44:08 2007 +0000 76.3 @@ -116,4 +116,8 @@ 2:{ .mib; \ 76.4 # define dv_serialize_instruction 76.5 #endif 76.6 76.7 +#ifdef PERF_COUNTERS 76.8 +#define PERFC(n) (THIS_CPU(perfcounters) + (IA64_PERFC_ ## n) * 4) 76.9 +#endif 76.10 + 76.11 #endif /* _ASM_IA64_ASMMACRO_H */
77.1 --- a/xen/include/asm-ia64/linux-xen/asm/iosapic.h Wed Mar 28 08:40:42 2007 +0000 77.2 +++ b/xen/include/asm-ia64/linux-xen/asm/iosapic.h Wed Mar 28 08:44:08 2007 +0000 77.3 @@ -123,13 +123,6 @@ static inline void list_move(struct list 77.4 77.5 #define move_irq(x) 77.6 77.7 -#define WARN_ON(condition) do { \ 77.8 - if (unlikely((condition)!=0)) { \ 77.9 - printk("Badness in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \ 77.10 - dump_stack(); \ 77.11 - } \ 77.12 -} while (0) 77.13 - 77.14 #ifdef nop 77.15 #undef nop 77.16 #endif
78.1 --- a/xen/include/asm-ia64/perfc_defn.h Wed Mar 28 08:40:42 2007 +0000 78.2 +++ b/xen/include/asm-ia64/perfc_defn.h Wed Mar 28 08:44:08 2007 +0000 78.3 @@ -1,34 +1,34 @@ 78.4 /* This file is legitimately included multiple times. */ 78.5 78.6 -PERFCOUNTER_CPU(dtlb_translate, "dtlb hit") 78.7 +PERFCOUNTER(dtlb_translate, "dtlb hit") 78.8 78.9 -PERFCOUNTER_CPU(tr_translate, "TR hit") 78.10 +PERFCOUNTER(tr_translate, "TR hit") 78.11 78.12 -PERFCOUNTER_CPU(vhpt_translate, "virtual vhpt translation") 78.13 -PERFCOUNTER_CPU(fast_vhpt_translate, "virtual vhpt fast translation") 78.14 +PERFCOUNTER(vhpt_translate, "virtual vhpt translation") 78.15 +PERFCOUNTER(fast_vhpt_translate, "virtual vhpt fast translation") 78.16 78.17 PERFCOUNTER(recover_to_page_fault, "recoveries to page fault") 78.18 PERFCOUNTER(recover_to_break_fault, "recoveries to break fault") 78.19 78.20 -PERFCOUNTER_CPU(phys_translate, "metaphysical translation") 78.21 +PERFCOUNTER(phys_translate, "metaphysical translation") 78.22 78.23 -PERFCOUNTER_CPU(idle_when_pending, "vcpu idle at event") 78.24 +PERFCOUNTER(idle_when_pending, "vcpu idle at event") 78.25 78.26 -PERFCOUNTER_CPU(pal_halt_light, "calls to pal_halt_light") 78.27 +PERFCOUNTER(pal_halt_light, "calls to pal_halt_light") 78.28 78.29 -PERFCOUNTER_CPU(lazy_cover, "lazy cover") 78.30 +PERFCOUNTER(lazy_cover, "lazy cover") 78.31 78.32 -PERFCOUNTER_CPU(mov_to_ar_imm, "privop mov_to_ar_imm") 78.33 -PERFCOUNTER_CPU(mov_to_ar_reg, "privop mov_to_ar_reg") 78.34 -PERFCOUNTER_CPU(mov_from_ar, "privop privified-mov_from_ar") 78.35 -PERFCOUNTER_CPU(ssm, "privop ssm") 78.36 -PERFCOUNTER_CPU(rsm, "privop rsm") 78.37 -PERFCOUNTER_CPU(rfi, "privop rfi") 78.38 -PERFCOUNTER_CPU(bsw0, "privop bsw0") 78.39 -PERFCOUNTER_CPU(bsw1, "privop bsw1") 78.40 -PERFCOUNTER_CPU(cover, "privop cover") 78.41 -PERFCOUNTER_CPU(fc, "privop privified-fc") 78.42 -PERFCOUNTER_CPU(cpuid, "privop privified-cpuid") 78.43 +PERFCOUNTER(mov_to_ar_imm, "privop mov_to_ar_imm") 78.44 +PERFCOUNTER(mov_to_ar_reg, "privop mov_to_ar_reg") 78.45 +PERFCOUNTER(mov_from_ar, "privop privified-mov_from_ar") 78.46 +PERFCOUNTER(ssm, "privop ssm") 78.47 +PERFCOUNTER(rsm, "privop rsm") 78.48 +PERFCOUNTER(rfi, "privop rfi") 78.49 +PERFCOUNTER(bsw0, "privop bsw0") 78.50 +PERFCOUNTER(bsw1, "privop bsw1") 78.51 +PERFCOUNTER(cover, "privop cover") 78.52 +PERFCOUNTER(fc, "privop privified-fc") 78.53 +PERFCOUNTER(cpuid, "privop privified-cpuid") 78.54 78.55 PERFCOUNTER_ARRAY(mov_to_cr, "privop mov to cr", 128) 78.56 PERFCOUNTER_ARRAY(mov_from_cr, "privop mov from cr", 128) 78.57 @@ -36,45 +36,45 @@ PERFCOUNTER_ARRAY(mov_from_cr, "p 78.58 PERFCOUNTER_ARRAY(misc_privop, "privop misc", 64) 78.59 78.60 // privileged instructions to fall into vmx_entry 78.61 -PERFCOUNTER_CPU(vmx_rsm, "vmx privop rsm") 78.62 -PERFCOUNTER_CPU(vmx_ssm, "vmx privop ssm") 78.63 -PERFCOUNTER_CPU(vmx_mov_to_psr, "vmx privop mov_to_psr") 78.64 -PERFCOUNTER_CPU(vmx_mov_from_psr, "vmx privop mov_from_psr") 78.65 -PERFCOUNTER_CPU(vmx_mov_from_cr, "vmx privop mov_from_cr") 78.66 -PERFCOUNTER_CPU(vmx_mov_to_cr, "vmx privop mov_to_cr") 78.67 -PERFCOUNTER_CPU(vmx_bsw0, "vmx privop bsw0") 78.68 -PERFCOUNTER_CPU(vmx_bsw1, "vmx privop bsw1") 78.69 -PERFCOUNTER_CPU(vmx_cover, "vmx privop cover") 78.70 -PERFCOUNTER_CPU(vmx_rfi, "vmx privop rfi") 78.71 -PERFCOUNTER_CPU(vmx_itr_d, "vmx privop itr_d") 78.72 -PERFCOUNTER_CPU(vmx_itr_i, "vmx privop itr_i") 78.73 -PERFCOUNTER_CPU(vmx_ptr_d, "vmx privop ptr_d") 78.74 -PERFCOUNTER_CPU(vmx_ptr_i, "vmx privop ptr_i") 78.75 
-PERFCOUNTER_CPU(vmx_itc_d, "vmx privop itc_d") 78.76 -PERFCOUNTER_CPU(vmx_itc_i, "vmx privop itc_i") 78.77 -PERFCOUNTER_CPU(vmx_ptc_l, "vmx privop ptc_l") 78.78 -PERFCOUNTER_CPU(vmx_ptc_g, "vmx privop ptc_g") 78.79 -PERFCOUNTER_CPU(vmx_ptc_ga, "vmx privop ptc_ga") 78.80 -PERFCOUNTER_CPU(vmx_ptc_e, "vmx privop ptc_e") 78.81 -PERFCOUNTER_CPU(vmx_mov_to_rr, "vmx privop mov_to_rr") 78.82 -PERFCOUNTER_CPU(vmx_mov_from_rr, "vmx privop mov_from_rr") 78.83 -PERFCOUNTER_CPU(vmx_thash, "vmx privop thash") 78.84 -PERFCOUNTER_CPU(vmx_ttag, "vmx privop ttag") 78.85 -PERFCOUNTER_CPU(vmx_tpa, "vmx privop tpa") 78.86 -PERFCOUNTER_CPU(vmx_tak, "vmx privop tak") 78.87 -PERFCOUNTER_CPU(vmx_mov_to_ar_imm, "vmx privop mov_to_ar_imm") 78.88 -PERFCOUNTER_CPU(vmx_mov_to_ar_reg, "vmx privop mov_to_ar_reg") 78.89 -PERFCOUNTER_CPU(vmx_mov_from_ar_reg, "vmx privop mov_from_ar_reg") 78.90 -PERFCOUNTER_CPU(vmx_mov_to_dbr, "vmx privop mov_to_dbr") 78.91 -PERFCOUNTER_CPU(vmx_mov_to_ibr, "vmx privop mov_to_ibr") 78.92 -PERFCOUNTER_CPU(vmx_mov_to_pmc, "vmx privop mov_to_pmc") 78.93 -PERFCOUNTER_CPU(vmx_mov_to_pmd, "vmx privop mov_to_pmd") 78.94 -PERFCOUNTER_CPU(vmx_mov_to_pkr, "vmx privop mov_to_pkr") 78.95 -PERFCOUNTER_CPU(vmx_mov_from_dbr, "vmx privop mov_from_dbr") 78.96 -PERFCOUNTER_CPU(vmx_mov_from_ibr, "vmx privop mov_from_ibr") 78.97 -PERFCOUNTER_CPU(vmx_mov_from_pmc, "vmx privop mov_from_pmc") 78.98 -PERFCOUNTER_CPU(vmx_mov_from_pkr, "vmx privop mov_from_pkr") 78.99 -PERFCOUNTER_CPU(vmx_mov_from_cpuid, "vmx privop mov_from_cpuid") 78.100 +PERFCOUNTER(vmx_rsm, "vmx privop rsm") 78.101 +PERFCOUNTER(vmx_ssm, "vmx privop ssm") 78.102 +PERFCOUNTER(vmx_mov_to_psr, "vmx privop mov_to_psr") 78.103 +PERFCOUNTER(vmx_mov_from_psr, "vmx privop mov_from_psr") 78.104 +PERFCOUNTER(vmx_mov_from_cr, "vmx privop mov_from_cr") 78.105 +PERFCOUNTER(vmx_mov_to_cr, "vmx privop mov_to_cr") 78.106 +PERFCOUNTER(vmx_bsw0, "vmx privop bsw0") 78.107 +PERFCOUNTER(vmx_bsw1, "vmx privop bsw1") 78.108 +PERFCOUNTER(vmx_cover, "vmx privop cover") 78.109 +PERFCOUNTER(vmx_rfi, "vmx privop rfi") 78.110 +PERFCOUNTER(vmx_itr_d, "vmx privop itr_d") 78.111 +PERFCOUNTER(vmx_itr_i, "vmx privop itr_i") 78.112 +PERFCOUNTER(vmx_ptr_d, "vmx privop ptr_d") 78.113 +PERFCOUNTER(vmx_ptr_i, "vmx privop ptr_i") 78.114 +PERFCOUNTER(vmx_itc_d, "vmx privop itc_d") 78.115 +PERFCOUNTER(vmx_itc_i, "vmx privop itc_i") 78.116 +PERFCOUNTER(vmx_ptc_l, "vmx privop ptc_l") 78.117 +PERFCOUNTER(vmx_ptc_g, "vmx privop ptc_g") 78.118 +PERFCOUNTER(vmx_ptc_ga, "vmx privop ptc_ga") 78.119 +PERFCOUNTER(vmx_ptc_e, "vmx privop ptc_e") 78.120 +PERFCOUNTER(vmx_mov_to_rr, "vmx privop mov_to_rr") 78.121 +PERFCOUNTER(vmx_mov_from_rr, "vmx privop mov_from_rr") 78.122 +PERFCOUNTER(vmx_thash, "vmx privop thash") 78.123 +PERFCOUNTER(vmx_ttag, "vmx privop ttag") 78.124 +PERFCOUNTER(vmx_tpa, "vmx privop tpa") 78.125 +PERFCOUNTER(vmx_tak, "vmx privop tak") 78.126 +PERFCOUNTER(vmx_mov_to_ar_imm, "vmx privop mov_to_ar_imm") 78.127 +PERFCOUNTER(vmx_mov_to_ar_reg, "vmx privop mov_to_ar_reg") 78.128 +PERFCOUNTER(vmx_mov_from_ar_reg, "vmx privop mov_from_ar_reg") 78.129 +PERFCOUNTER(vmx_mov_to_dbr, "vmx privop mov_to_dbr") 78.130 +PERFCOUNTER(vmx_mov_to_ibr, "vmx privop mov_to_ibr") 78.131 +PERFCOUNTER(vmx_mov_to_pmc, "vmx privop mov_to_pmc") 78.132 +PERFCOUNTER(vmx_mov_to_pmd, "vmx privop mov_to_pmd") 78.133 +PERFCOUNTER(vmx_mov_to_pkr, "vmx privop mov_to_pkr") 78.134 +PERFCOUNTER(vmx_mov_from_dbr, "vmx privop mov_from_dbr") 78.135 +PERFCOUNTER(vmx_mov_from_ibr, "vmx privop mov_from_ibr") 78.136 
+PERFCOUNTER(vmx_mov_from_pmc, "vmx privop mov_from_pmc") 78.137 +PERFCOUNTER(vmx_mov_from_pkr, "vmx privop mov_from_pkr") 78.138 +PERFCOUNTER(vmx_mov_from_cpuid, "vmx privop mov_from_cpuid") 78.139 78.140 78.141 PERFCOUNTER_ARRAY(slow_hyperprivop, "slow hyperprivops", HYPERPRIVOP_MAX + 1) 78.142 @@ -84,12 +84,12 @@ PERFCOUNTER_ARRAY(slow_reflect, "s 78.143 PERFCOUNTER_ARRAY(fast_reflect, "fast reflection", 0x80) 78.144 78.145 PERFSTATUS(vhpt_nbr_entries, "nbr of entries per VHPT") 78.146 -PERFSTATUS_CPU(vhpt_valid_entries, "nbr of valid entries in VHPT") 78.147 +PERFSTATUS(vhpt_valid_entries, "nbr of valid entries in VHPT") 78.148 78.149 PERFCOUNTER_ARRAY(vmx_mmio_access, "vmx_mmio_access", 8) 78.150 -PERFCOUNTER_CPU(vmx_pal_emul, "vmx_pal_emul") 78.151 +PERFCOUNTER(vmx_pal_emul, "vmx_pal_emul") 78.152 PERFCOUNTER_ARRAY(vmx_switch_mm_mode, "vmx_switch_mm_mode", 8) 78.153 -PERFCOUNTER_CPU(vmx_ia64_handle_break,"vmx_ia64_handle_break") 78.154 +PERFCOUNTER(vmx_ia64_handle_break,"vmx_ia64_handle_break") 78.155 PERFCOUNTER_ARRAY(vmx_inject_guest_interruption, 78.156 "vmx_inject_guest_interruption", 0x80) 78.157 PERFCOUNTER_ARRAY(fw_hypercall, "fw_hypercall", 0x20) 78.158 @@ -106,69 +106,71 @@ PERFSTATUS(privop_addr_##name##_overflow 78.159 78.160 PERFPRIVOPADDR(get_ifa) 78.161 PERFPRIVOPADDR(thash) 78.162 + 78.163 +#undef PERFPRIVOPADDR 78.164 #endif 78.165 78.166 // vhpt.c 78.167 -PERFCOUNTER_CPU(local_vhpt_flush, "local_vhpt_flush") 78.168 -PERFCOUNTER_CPU(vcpu_vhpt_flush, "vcpu_vhpt_flush") 78.169 -PERFCOUNTER_CPU(vcpu_flush_vtlb_all, "vcpu_flush_vtlb_all") 78.170 -PERFCOUNTER_CPU(domain_flush_vtlb_all, "domain_flush_vtlb_all") 78.171 -PERFCOUNTER_CPU(vcpu_flush_tlb_vhpt_range, "vcpu_flush_tlb_vhpt_range") 78.172 -PERFCOUNTER_CPU(domain_flush_vtlb_track_entry, "domain_flush_vtlb_track_entry") 78.173 -PERFCOUNTER_CPU(domain_flush_vtlb_local, "domain_flush_vtlb_local") 78.174 -PERFCOUNTER_CPU(domain_flush_vtlb_global, "domain_flush_vtlb_global") 78.175 -PERFCOUNTER_CPU(domain_flush_vtlb_range, "domain_flush_vtlb_range") 78.176 +PERFCOUNTER(local_vhpt_flush, "local_vhpt_flush") 78.177 +PERFCOUNTER(vcpu_vhpt_flush, "vcpu_vhpt_flush") 78.178 +PERFCOUNTER(vcpu_flush_vtlb_all, "vcpu_flush_vtlb_all") 78.179 +PERFCOUNTER(domain_flush_vtlb_all, "domain_flush_vtlb_all") 78.180 +PERFCOUNTER(vcpu_flush_tlb_vhpt_range, "vcpu_flush_tlb_vhpt_range") 78.181 +PERFCOUNTER(domain_flush_vtlb_track_entry, "domain_flush_vtlb_track_entry") 78.182 +PERFCOUNTER(domain_flush_vtlb_local, "domain_flush_vtlb_local") 78.183 +PERFCOUNTER(domain_flush_vtlb_global, "domain_flush_vtlb_global") 78.184 +PERFCOUNTER(domain_flush_vtlb_range, "domain_flush_vtlb_range") 78.185 78.186 // domain.c 78.187 -PERFCOUNTER_CPU(flush_vtlb_for_context_switch, "flush_vtlb_for_context_switch") 78.188 +PERFCOUNTER(flush_vtlb_for_context_switch, "flush_vtlb_for_context_switch") 78.189 78.190 // mm.c 78.191 -PERFCOUNTER_CPU(assign_domain_page_replace, "assign_domain_page_replace") 78.192 -PERFCOUNTER_CPU(assign_domain_pge_cmpxchg_rel, "assign_domain_pge_cmpxchg_rel") 78.193 -PERFCOUNTER_CPU(zap_dcomain_page_one, "zap_dcomain_page_one") 78.194 -PERFCOUNTER_CPU(dom0vp_zap_physmap, "dom0vp_zap_physmap") 78.195 -PERFCOUNTER_CPU(dom0vp_add_physmap, "dom0vp_add_physmap") 78.196 -PERFCOUNTER_CPU(create_grant_host_mapping, "create_grant_host_mapping") 78.197 -PERFCOUNTER_CPU(destroy_grant_host_mapping, "destroy_grant_host_mapping") 78.198 -PERFCOUNTER_CPU(steal_page_refcount, "steal_page_refcount") 78.199 -PERFCOUNTER_CPU(steal_page, "steal_page") 
78.200 -PERFCOUNTER_CPU(guest_physmap_add_page, "guest_physmap_add_page") 78.201 -PERFCOUNTER_CPU(guest_physmap_remove_page, "guest_physmap_remove_page") 78.202 -PERFCOUNTER_CPU(domain_page_flush_and_put, "domain_page_flush_and_put") 78.203 +PERFCOUNTER(assign_domain_page_replace, "assign_domain_page_replace") 78.204 +PERFCOUNTER(assign_domain_pge_cmpxchg_rel, "assign_domain_pge_cmpxchg_rel") 78.205 +PERFCOUNTER(zap_dcomain_page_one, "zap_dcomain_page_one") 78.206 +PERFCOUNTER(dom0vp_zap_physmap, "dom0vp_zap_physmap") 78.207 +PERFCOUNTER(dom0vp_add_physmap, "dom0vp_add_physmap") 78.208 +PERFCOUNTER(create_grant_host_mapping, "create_grant_host_mapping") 78.209 +PERFCOUNTER(destroy_grant_host_mapping, "destroy_grant_host_mapping") 78.210 +PERFCOUNTER(steal_page_refcount, "steal_page_refcount") 78.211 +PERFCOUNTER(steal_page, "steal_page") 78.212 +PERFCOUNTER(guest_physmap_add_page, "guest_physmap_add_page") 78.213 +PERFCOUNTER(guest_physmap_remove_page, "guest_physmap_remove_page") 78.214 +PERFCOUNTER(domain_page_flush_and_put, "domain_page_flush_and_put") 78.215 78.216 // dom0vp 78.217 -PERFCOUNTER_CPU(dom0vp_phystomach, "dom0vp_phystomach") 78.218 -PERFCOUNTER_CPU(dom0vp_machtophys, "dom0vp_machtophys") 78.219 +PERFCOUNTER(dom0vp_phystomach, "dom0vp_phystomach") 78.220 +PERFCOUNTER(dom0vp_machtophys, "dom0vp_machtophys") 78.221 78.222 #ifdef CONFIG_XEN_IA64_TLB_TRACK 78.223 // insert or dirty 78.224 -PERFCOUNTER_CPU(tlb_track_iod, "tlb_track_iod") 78.225 -PERFCOUNTER_CPU(tlb_track_iod_again, "tlb_track_iod_again") 78.226 -PERFCOUNTER_CPU(tlb_track_iod_not_tracked, "tlb_track_iod_not_tracked") 78.227 -PERFCOUNTER_CPU(tlb_track_iod_force_many, "tlb_track_iod_force_many") 78.228 -PERFCOUNTER_CPU(tlb_track_iod_tracked_many, "tlb_track_iod_tracked_many") 78.229 -PERFCOUNTER_CPU(tlb_track_iod_tracked_many_del, "tlb_track_iod_tracked_many_del") 78.230 -PERFCOUNTER_CPU(tlb_track_iod_found, "tlb_track_iod_found") 78.231 -PERFCOUNTER_CPU(tlb_track_iod_new_entry, "tlb_track_iod_new_entry") 78.232 -PERFCOUNTER_CPU(tlb_track_iod_new_failed, "tlb_track_iod_new_failed") 78.233 -PERFCOUNTER_CPU(tlb_track_iod_new_many, "tlb_track_iod_new_many") 78.234 -PERFCOUNTER_CPU(tlb_track_iod_insert, "tlb_track_iod_insert") 78.235 -PERFCOUNTER_CPU(tlb_track_iod_dirtied, "tlb_track_iod_dirtied") 78.236 +PERFCOUNTER(tlb_track_iod, "tlb_track_iod") 78.237 +PERFCOUNTER(tlb_track_iod_again, "tlb_track_iod_again") 78.238 +PERFCOUNTER(tlb_track_iod_not_tracked, "tlb_track_iod_not_tracked") 78.239 +PERFCOUNTER(tlb_track_iod_force_many, "tlb_track_iod_force_many") 78.240 +PERFCOUNTER(tlb_track_iod_tracked_many, "tlb_track_iod_tracked_many") 78.241 +PERFCOUNTER(tlb_track_iod_tracked_many_del, "tlb_track_iod_tracked_many_del") 78.242 +PERFCOUNTER(tlb_track_iod_found, "tlb_track_iod_found") 78.243 +PERFCOUNTER(tlb_track_iod_new_entry, "tlb_track_iod_new_entry") 78.244 +PERFCOUNTER(tlb_track_iod_new_failed, "tlb_track_iod_new_failed") 78.245 +PERFCOUNTER(tlb_track_iod_new_many, "tlb_track_iod_new_many") 78.246 +PERFCOUNTER(tlb_track_iod_insert, "tlb_track_iod_insert") 78.247 +PERFCOUNTER(tlb_track_iod_dirtied, "tlb_track_iod_dirtied") 78.248 78.249 // search and remove 78.250 -PERFCOUNTER_CPU(tlb_track_sar, "tlb_track_sar") 78.251 -PERFCOUNTER_CPU(tlb_track_sar_not_tracked, "tlb_track_sar_not_tracked") 78.252 -PERFCOUNTER_CPU(tlb_track_sar_not_found, "tlb_track_sar_not_found") 78.253 -PERFCOUNTER_CPU(tlb_track_sar_found, "tlb_track_sar_found") 78.254 -PERFCOUNTER_CPU(tlb_track_sar_many, "tlb_track_sar_many") 78.255 
+PERFCOUNTER(tlb_track_sar, "tlb_track_sar") 78.256 +PERFCOUNTER(tlb_track_sar_not_tracked, "tlb_track_sar_not_tracked") 78.257 +PERFCOUNTER(tlb_track_sar_not_found, "tlb_track_sar_not_found") 78.258 +PERFCOUNTER(tlb_track_sar_found, "tlb_track_sar_found") 78.259 +PERFCOUNTER(tlb_track_sar_many, "tlb_track_sar_many") 78.260 78.261 // flush 78.262 -PERFCOUNTER_CPU(tlb_track_use_rr7, "tlb_track_use_rr7") 78.263 -PERFCOUNTER_CPU(tlb_track_swap_rr0, "tlb_track_swap_rr0") 78.264 +PERFCOUNTER(tlb_track_use_rr7, "tlb_track_use_rr7") 78.265 +PERFCOUNTER(tlb_track_swap_rr0, "tlb_track_swap_rr0") 78.266 #endif 78.267 78.268 // tlb flush clock 78.269 #ifdef CONFIG_XEN_IA64_TLBFLUSH_CLOCK 78.270 -PERFCOUNTER_CPU(tlbflush_clock_cswitch_purge, "tlbflush_clock_cswitch_purge") 78.271 -PERFCOUNTER_CPU(tlbflush_clock_cswitch_skip, "tlbflush_clock_cswitch_skip") 78.272 +PERFCOUNTER(tlbflush_clock_cswitch_purge, "tlbflush_clock_cswitch_purge") 78.273 +PERFCOUNTER(tlbflush_clock_cswitch_skip, "tlbflush_clock_cswitch_skip") 78.274 #endif
79.1 --- a/xen/include/asm-ia64/privop_stat.h Wed Mar 28 08:40:42 2007 +0000 79.2 +++ b/xen/include/asm-ia64/privop_stat.h Wed Mar 28 08:44:08 2007 +0000 79.3 @@ -1,5 +1,5 @@ 79.4 -#ifndef _XEN_UA64_PRIVOP_STAT_H 79.5 -#define _XEN_UA64_PRIVOP_STAT_H 79.6 +#ifndef _XEN_IA64_PRIVOP_STAT_H 79.7 +#define _XEN_IA64_PRIVOP_STAT_H 79.8 #include <asm/config.h> 79.9 #include <xen/types.h> 79.10 #include <public/xen.h> 79.11 @@ -9,31 +9,24 @@ 79.12 extern void gather_privop_addrs(void); 79.13 extern void reset_privop_addrs(void); 79.14 79.15 -#undef PERFCOUNTER 79.16 #define PERFCOUNTER(var, name) 79.17 - 79.18 -#undef PERFCOUNTER_CPU 79.19 -#define PERFCOUNTER_CPU(var, name) 79.20 - 79.21 -#undef PERFCOUNTER_ARRAY 79.22 #define PERFCOUNTER_ARRAY(var, name, size) 79.23 79.24 -#undef PERFSTATUS 79.25 #define PERFSTATUS(var, name) 79.26 - 79.27 -#undef PERFSTATUS_CPU 79.28 -#define PERFSTATUS_CPU(var, name) 79.29 - 79.30 -#undef PERFSTATUS_ARRAY 79.31 #define PERFSTATUS_ARRAY(var, name, size) 79.32 79.33 -#undef PERFPRIVOPADDR 79.34 #define PERFPRIVOPADDR(name) privop_inst_##name, 79.35 79.36 enum privop_inst { 79.37 #include <asm/perfc_defn.h> 79.38 }; 79.39 79.40 +#undef PERFCOUNTER 79.41 +#undef PERFCOUNTER_ARRAY 79.42 + 79.43 +#undef PERFSTATUS 79.44 +#undef PERFSTATUS_ARRAY 79.45 + 79.46 #undef PERFPRIVOPADDR 79.47 79.48 #define PRIVOP_COUNT_ADDR(regs,inst) privop_count_addr(regs->cr_iip,inst) 79.49 @@ -45,4 +38,4 @@ extern void privop_count_addr(unsigned l 79.50 #define reset_privop_addrs() do {} while (0) 79.51 #endif 79.52 79.53 -#endif /* _XEN_UA64_PRIVOP_STAT_H */ 79.54 +#endif /* _XEN_IA64_PRIVOP_STAT_H */
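Note: privop_stat.h above (besides fixing the _XEN_UA64_ include-guard typo) shows the x-macro idiom that perfc_defn.h relies on: every PERF* macro is stubbed to expand to nothing except PERFPRIVOPADDR, so including the shared definition list yields an enum of privop instruction indices, after which the macros are #undef'd for the next includer. A condensed, compilable illustration with the include inlined:

    #include <stdio.h>

    /* Stub out everything except the one generator we care about... */
    #define PERFCOUNTER(var, name)
    #define PERFPRIVOPADDR(name) privop_inst_##name,

    enum privop_inst {
        /* ...then expand the shared definition list: */
        PERFCOUNTER(dtlb_translate, "dtlb hit")
        PERFPRIVOPADDR(get_ifa)
        PERFPRIVOPADDR(thash)
    };

    #undef PERFCOUNTER
    #undef PERFPRIVOPADDR

    int main(void)
    {
        printf("%d %d\n", privop_inst_get_ifa, privop_inst_thash); /* 0 1 */
        return 0;
    }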
80.1 --- a/xen/include/asm-ia64/tlb_track.h Wed Mar 28 08:40:42 2007 +0000 80.2 +++ b/xen/include/asm-ia64/tlb_track.h Wed Mar 28 08:44:08 2007 +0000 80.3 @@ -97,9 +97,9 @@ vcpu_tlb_track_insert_or_dirty(struct vc 80.4 { 80.5 /* optimization. 80.6 non-tracking pte is most common. */ 80.7 - perfc_incrc(tlb_track_iod); 80.8 + perfc_incr(tlb_track_iod); 80.9 if (!pte_tlb_tracking(entry->used)) { 80.10 - perfc_incrc(tlb_track_iod_not_tracked); 80.11 + perfc_incr(tlb_track_iod_not_tracked); 80.12 return; 80.13 } 80.14
81.1 --- a/xen/include/asm-powerpc/bug.h Wed Mar 28 08:40:42 2007 +0000 81.2 +++ b/xen/include/asm-powerpc/bug.h Wed Mar 28 08:44:08 2007 +0000 81.3 @@ -2,5 +2,6 @@ 81.4 #define __POWERPC_BUG_H__ 81.5 81.6 #define BUG() __bug(__FILE__, __LINE__) 81.7 +#define WARN() __warn(__FILE__, __LINE__) 81.8 81.9 #endif /* __POWERPC_BUG_H__ */
82.1 --- a/xen/include/asm-powerpc/debugger.h Wed Mar 28 08:40:42 2007 +0000 82.2 +++ b/xen/include/asm-powerpc/debugger.h Wed Mar 28 08:44:08 2007 +0000 82.3 @@ -67,10 +67,6 @@ static inline void unimplemented(void) 82.4 #endif 82.5 } 82.6 82.7 -extern void __warn(char *file, int line); 82.8 -#define WARN() __warn(__FILE__, __LINE__) 82.9 -#define WARN_ON(_p) do { if (_p) WARN(); } while ( 0 ) 82.10 - 82.11 extern void __attn(void); 82.12 #define ATTN() __attn(); 82.13
83.1 --- a/xen/include/asm-x86/bug.h Wed Mar 28 08:40:42 2007 +0000 83.2 +++ b/xen/include/asm-x86/bug.h Wed Mar 28 08:44:08 2007 +0000 83.3 @@ -14,8 +14,8 @@ struct bug_frame { 83.4 } __attribute__((packed)); 83.5 83.6 #define BUGFRAME_dump 0 83.7 -#define BUGFRAME_bug 1 83.8 -#define BUGFRAME_assert 2 83.9 -#define BUGFRAME_rsvd 3 83.10 +#define BUGFRAME_warn 1 83.11 +#define BUGFRAME_bug 2 83.12 +#define BUGFRAME_assert 3 83.13 83.14 #endif /* __X86_BUG_H__ */
84.1 --- a/xen/include/asm-x86/hvm/svm/vmcb.h Wed Mar 28 08:40:42 2007 +0000 84.2 +++ b/xen/include/asm-x86/hvm/svm/vmcb.h Wed Mar 28 08:44:08 2007 +0000 84.3 @@ -446,7 +446,6 @@ struct arch_svm_struct { 84.4 u64 vmcb_pa; 84.5 u32 *msrpm; 84.6 u64 vmexit_tsc; /* tsc read at #VMEXIT. for TSC_OFFSET */ 84.7 - int saved_irq_vector; 84.8 int launch_core; 84.9 84.10 unsigned long flags; /* VMCB flags */
85.1 --- a/xen/include/asm-x86/multicall.h Wed Mar 28 08:40:42 2007 +0000 85.2 +++ b/xen/include/asm-x86/multicall.h Wed Mar 28 08:44:08 2007 +0000 85.3 @@ -6,84 +6,94 @@ 85.4 #define __ASM_X86_MULTICALL_H__ 85.5 85.6 #include <xen/errno.h> 85.7 -#include <asm/asm_defns.h> 85.8 85.9 #ifdef __x86_64__ 85.10 85.11 #define do_multicall_call(_call) \ 85.12 do { \ 85.13 __asm__ __volatile__ ( \ 85.14 - " movq "STR(MULTICALL_op)"(%0),%%rax; " \ 85.15 + " movq %c1(%0),%%rax; " \ 85.16 + " leaq hypercall_table(%%rip),%%rdi; " \ 85.17 " cmpq $("STR(NR_hypercalls)"),%%rax; " \ 85.18 " jae 2f; " \ 85.19 - " leaq hypercall_table(%%rip),%%rdi; " \ 85.20 - " leaq (%%rdi,%%rax,8),%%rax; " \ 85.21 - " movq "STR(MULTICALL_arg0)"(%0),%%rdi; " \ 85.22 - " movq "STR(MULTICALL_arg1)"(%0),%%rsi; " \ 85.23 - " movq "STR(MULTICALL_arg2)"(%0),%%rdx; " \ 85.24 - " movq "STR(MULTICALL_arg3)"(%0),%%rcx; " \ 85.25 - " movq "STR(MULTICALL_arg4)"(%0),%%r8; " \ 85.26 - " callq *(%%rax); " \ 85.27 - "1: movq %%rax,"STR(MULTICALL_result)"(%0)\n" \ 85.28 + " movq (%%rdi,%%rax,8),%%rax; " \ 85.29 + " movq %c2+0*%c3(%0),%%rdi; " \ 85.30 + " movq %c2+1*%c3(%0),%%rsi; " \ 85.31 + " movq %c2+2*%c3(%0),%%rdx; " \ 85.32 + " movq %c2+3*%c3(%0),%%rcx; " \ 85.33 + " movq %c2+4*%c3(%0),%%r8; " \ 85.34 + " callq *%%rax; " \ 85.35 + "1: movq %%rax,%c4(%0)\n" \ 85.36 ".section .fixup,\"ax\"\n" \ 85.37 "2: movq $-"STR(ENOSYS)",%%rax\n" \ 85.38 " jmp 1b\n" \ 85.39 ".previous\n" \ 85.40 - : : "b" (_call) \ 85.41 + : \ 85.42 + : "b" (_call), \ 85.43 + "i" (offsetof(__typeof__(*_call), op)), \ 85.44 + "i" (offsetof(__typeof__(*_call), args)), \ 85.45 + "i" (sizeof(*(_call)->args)), \ 85.46 + "i" (offsetof(__typeof__(*_call), result)) \ 85.47 /* all the caller-saves registers */ \ 85.48 : "rax", "rcx", "rdx", "rsi", "rdi", \ 85.49 "r8", "r9", "r10", "r11" ); \ 85.50 } while ( 0 ) 85.51 85.52 -#define compat_multicall_call(_call) \ 85.53 - do { \ 85.54 - __asm__ __volatile__ ( \ 85.55 - " movl "STR(COMPAT_MULTICALL_op)"(%0),%%eax; " \ 85.56 - " leaq compat_hypercall_table(%%rip),%%rdi; " \ 85.57 - " cmpl $("STR(NR_hypercalls)"),%%eax; " \ 85.58 - " jae 2f; " \ 85.59 - " movq (%%rdi,%%rax,8),%%rax; " \ 85.60 - " movl "STR(COMPAT_MULTICALL_arg0)"(%0),%%edi; " \ 85.61 - " movl "STR(COMPAT_MULTICALL_arg1)"(%0),%%esi; " \ 85.62 - " movl "STR(COMPAT_MULTICALL_arg2)"(%0),%%edx; " \ 85.63 - " movl "STR(COMPAT_MULTICALL_arg3)"(%0),%%ecx; " \ 85.64 - " movl "STR(COMPAT_MULTICALL_arg4)"(%0),%%r8d; " \ 85.65 - " callq *%%rax; " \ 85.66 - "1: movl %%eax,"STR(COMPAT_MULTICALL_result)"(%0)\n"\ 85.67 - ".section .fixup,\"ax\"\n" \ 85.68 - "2: movl $-"STR(ENOSYS)",%%eax\n" \ 85.69 - " jmp 1b\n" \ 85.70 - ".previous\n" \ 85.71 - : : "b" (_call) \ 85.72 - /* all the caller-saves registers */ \ 85.73 - : "rax", "rcx", "rdx", "rsi", "rdi", \ 85.74 - "r8", "r9", "r10", "r11" ); \ 85.75 - } while ( 0 ) 85.76 +#define compat_multicall_call(_call) \ 85.77 + __asm__ __volatile__ ( \ 85.78 + " movl %c1(%0),%%eax; " \ 85.79 + " leaq compat_hypercall_table(%%rip),%%rdi; "\ 85.80 + " cmpl $("STR(NR_hypercalls)"),%%eax; " \ 85.81 + " jae 2f; " \ 85.82 + " movq (%%rdi,%%rax,8),%%rax; " \ 85.83 + " movl %c2+0*%c3(%0),%%edi; " \ 85.84 + " movl %c2+1*%c3(%0),%%esi; " \ 85.85 + " movl %c2+2*%c3(%0),%%edx; " \ 85.86 + " movl %c2+3*%c3(%0),%%ecx; " \ 85.87 + " movl %c2+4*%c3(%0),%%r8d; " \ 85.88 + " callq *%%rax; " \ 85.89 + "1: movl %%eax,%c4(%0)\n" \ 85.90 + ".section .fixup,\"ax\"\n" \ 85.91 + "2: movl $-"STR(ENOSYS)",%%eax\n" \ 85.92 + " jmp 1b\n" \ 85.93 + ".previous\n" \ 
85.94 + : \ 85.95 + : "b" (_call), \ 85.96 + "i" (offsetof(__typeof__(*_call), op)), \ 85.97 + "i" (offsetof(__typeof__(*_call), args)), \ 85.98 + "i" (sizeof(*(_call)->args)), \ 85.99 + "i" (offsetof(__typeof__(*_call), result)) \ 85.100 + /* all the caller-saves registers */ \ 85.101 + : "rax", "rcx", "rdx", "rsi", "rdi", \ 85.102 + "r8", "r9", "r10", "r11" ) \ 85.103 85.104 #else 85.105 85.106 #define do_multicall_call(_call) \ 85.107 - do { \ 85.108 __asm__ __volatile__ ( \ 85.109 - " pushl "STR(MULTICALL_arg4)"(%0); " \ 85.110 - " pushl "STR(MULTICALL_arg3)"(%0); " \ 85.111 - " pushl "STR(MULTICALL_arg2)"(%0); " \ 85.112 - " pushl "STR(MULTICALL_arg1)"(%0); " \ 85.113 - " pushl "STR(MULTICALL_arg0)"(%0); " \ 85.114 - " movl "STR(MULTICALL_op)"(%0),%%eax; " \ 85.115 + " movl %c1(%0),%%eax; " \ 85.116 + " pushl %c2+4*%c3(%0); " \ 85.117 + " pushl %c2+3*%c3(%0); " \ 85.118 + " pushl %c2+2*%c3(%0); " \ 85.119 + " pushl %c2+1*%c3(%0); " \ 85.120 + " pushl %c2+0*%c3(%0); " \ 85.121 " cmpl $("STR(NR_hypercalls)"),%%eax; " \ 85.122 " jae 2f; " \ 85.123 " call *hypercall_table(,%%eax,4); " \ 85.124 - "1: movl %%eax,"STR(MULTICALL_result)"(%0); " \ 85.125 + "1: movl %%eax,%c4(%0); " \ 85.126 " addl $20,%%esp\n" \ 85.127 ".section .fixup,\"ax\"\n" \ 85.128 "2: movl $-"STR(ENOSYS)",%%eax\n" \ 85.129 " jmp 1b\n" \ 85.130 ".previous\n" \ 85.131 - : : "b" (_call) \ 85.132 + : \ 85.133 + : "bSD" (_call), \ 85.134 + "i" (offsetof(__typeof__(*_call), op)), \ 85.135 + "i" (offsetof(__typeof__(*_call), args)), \ 85.136 + "i" (sizeof(*(_call)->args)), \ 85.137 + "i" (offsetof(__typeof__(*_call), result)) \ 85.138 /* all the caller-saves registers */ \ 85.139 - : "eax", "ecx", "edx" ); \ 85.140 - } while ( 0 ) 85.141 + : "eax", "ecx", "edx" ) \ 85.142 85.143 #endif 85.144
86.1 --- a/xen/include/asm-x86/perfc_defn.h Wed Mar 28 08:40:42 2007 +0000 86.2 +++ b/xen/include/asm-x86/perfc_defn.h Wed Mar 28 08:44:08 2007 +0000 86.3 @@ -12,81 +12,83 @@ PERFCOUNTER_ARRAY(cause_vector, 86.4 #define SVM_PERF_EXIT_REASON_SIZE (1+136) 86.5 PERFCOUNTER_ARRAY(svmexits, "SVMexits", SVM_PERF_EXIT_REASON_SIZE) 86.6 86.7 -PERFCOUNTER_CPU(seg_fixups, "segmentation fixups") 86.8 +PERFCOUNTER(seg_fixups, "segmentation fixups") 86.9 86.10 -PERFCOUNTER_CPU(apic_timer, "apic timer interrupts") 86.11 +PERFCOUNTER(apic_timer, "apic timer interrupts") 86.12 86.13 -PERFCOUNTER_CPU(domain_page_tlb_flush, "domain page tlb flushes") 86.14 +PERFCOUNTER(domain_page_tlb_flush, "domain page tlb flushes") 86.15 86.16 -PERFCOUNTER_CPU(calls_to_mmu_update, "calls_to_mmu_update") 86.17 -PERFCOUNTER_CPU(num_page_updates, "num_page_updates") 86.18 -PERFCOUNTER_CPU(calls_to_update_va, "calls_to_update_va_map") 86.19 -PERFCOUNTER_CPU(page_faults, "page faults") 86.20 -PERFCOUNTER_CPU(copy_user_faults, "copy_user faults") 86.21 +PERFCOUNTER(calls_to_mmuext_op, "calls to mmuext_op") 86.22 +PERFCOUNTER(num_mmuext_ops, "mmuext ops") 86.23 +PERFCOUNTER(calls_to_mmu_update, "calls to mmu_update") 86.24 +PERFCOUNTER(num_page_updates, "page updates") 86.25 +PERFCOUNTER(calls_to_update_va, "calls to update_va_map") 86.26 +PERFCOUNTER(page_faults, "page faults") 86.27 +PERFCOUNTER(copy_user_faults, "copy_user faults") 86.28 86.29 -PERFCOUNTER_CPU(map_domain_page_count, "map_domain_page count") 86.30 -PERFCOUNTER_CPU(ptwr_emulations, "writable pt emulations") 86.31 +PERFCOUNTER(map_domain_page_count, "map_domain_page count") 86.32 +PERFCOUNTER(ptwr_emulations, "writable pt emulations") 86.33 86.34 -PERFCOUNTER_CPU(exception_fixed, "pre-exception fixed") 86.35 +PERFCOUNTER(exception_fixed, "pre-exception fixed") 86.36 86.37 86.38 /* Shadow counters */ 86.39 -PERFCOUNTER_CPU(shadow_alloc, "calls to shadow_alloc") 86.40 -PERFCOUNTER_CPU(shadow_alloc_tlbflush, "shadow_alloc flushed TLBs") 86.41 +PERFCOUNTER(shadow_alloc, "calls to shadow_alloc") 86.42 +PERFCOUNTER(shadow_alloc_tlbflush, "shadow_alloc flushed TLBs") 86.43 86.44 /* STATUS counters do not reset when 'P' is hit */ 86.45 PERFSTATUS(shadow_alloc_count, "number of shadow pages in use") 86.46 -PERFCOUNTER_CPU(shadow_free, "calls to shadow_free") 86.47 -PERFCOUNTER_CPU(shadow_prealloc_1, "shadow recycles old shadows") 86.48 -PERFCOUNTER_CPU(shadow_prealloc_2, "shadow recycles in-use shadows") 86.49 -PERFCOUNTER_CPU(shadow_linear_map_failed, "shadow hit read-only linear map") 86.50 -PERFCOUNTER_CPU(shadow_a_update, "shadow A bit update") 86.51 -PERFCOUNTER_CPU(shadow_ad_update, "shadow A&D bit update") 86.52 -PERFCOUNTER_CPU(shadow_fault, "calls to shadow_fault") 86.53 -PERFCOUNTER_CPU(shadow_fault_fast_gnp, "shadow_fault fast path n/p") 86.54 -PERFCOUNTER_CPU(shadow_fault_fast_mmio, "shadow_fault fast path mmio") 86.55 -PERFCOUNTER_CPU(shadow_fault_fast_fail, "shadow_fault fast path error") 86.56 -PERFCOUNTER_CPU(shadow_fault_bail_bad_gfn, "shadow_fault guest bad gfn") 86.57 -PERFCOUNTER_CPU(shadow_fault_bail_not_present, 86.58 +PERFCOUNTER(shadow_free, "calls to shadow_free") 86.59 +PERFCOUNTER(shadow_prealloc_1, "shadow recycles old shadows") 86.60 +PERFCOUNTER(shadow_prealloc_2, "shadow recycles in-use shadows") 86.61 +PERFCOUNTER(shadow_linear_map_failed, "shadow hit read-only linear map") 86.62 +PERFCOUNTER(shadow_a_update, "shadow A bit update") 86.63 +PERFCOUNTER(shadow_ad_update, "shadow A&D bit update") 86.64 +PERFCOUNTER(shadow_fault, "calls to shadow_fault")
86.65 +PERFCOUNTER(shadow_fault_fast_gnp, "shadow_fault fast path n/p") 86.66 +PERFCOUNTER(shadow_fault_fast_mmio, "shadow_fault fast path mmio") 86.67 +PERFCOUNTER(shadow_fault_fast_fail, "shadow_fault fast path error") 86.68 +PERFCOUNTER(shadow_fault_bail_bad_gfn, "shadow_fault guest bad gfn") 86.69 +PERFCOUNTER(shadow_fault_bail_not_present, 86.70 "shadow_fault guest not-present") 86.71 -PERFCOUNTER_CPU(shadow_fault_bail_nx, "shadow_fault guest NX fault") 86.72 -PERFCOUNTER_CPU(shadow_fault_bail_ro_mapping, "shadow_fault guest R/W fault") 86.73 -PERFCOUNTER_CPU(shadow_fault_bail_user_supervisor, 86.74 +PERFCOUNTER(shadow_fault_bail_nx, "shadow_fault guest NX fault") 86.75 +PERFCOUNTER(shadow_fault_bail_ro_mapping, "shadow_fault guest R/W fault") 86.76 +PERFCOUNTER(shadow_fault_bail_user_supervisor, 86.77 "shadow_fault guest U/S fault") 86.78 -PERFCOUNTER_CPU(shadow_fault_emulate_read, "shadow_fault emulates a read") 86.79 -PERFCOUNTER_CPU(shadow_fault_emulate_write, "shadow_fault emulates a write") 86.80 -PERFCOUNTER_CPU(shadow_fault_emulate_failed, "shadow_fault emulator fails") 86.81 -PERFCOUNTER_CPU(shadow_fault_emulate_stack, "shadow_fault emulate stack write") 86.82 -PERFCOUNTER_CPU(shadow_fault_mmio, "shadow_fault handled as mmio") 86.83 -PERFCOUNTER_CPU(shadow_fault_fixed, "shadow_fault fixed fault") 86.84 -PERFCOUNTER_CPU(shadow_ptwr_emulate, "shadow causes ptwr to emulate") 86.85 -PERFCOUNTER_CPU(shadow_validate_gl1e_calls, "calls to shadow_validate_gl1e") 86.86 -PERFCOUNTER_CPU(shadow_validate_gl2e_calls, "calls to shadow_validate_gl2e") 86.87 -PERFCOUNTER_CPU(shadow_validate_gl3e_calls, "calls to shadow_validate_gl3e") 86.88 -PERFCOUNTER_CPU(shadow_validate_gl4e_calls, "calls to shadow_validate_gl4e") 86.89 -PERFCOUNTER_CPU(shadow_hash_lookups, "calls to shadow_hash_lookup") 86.90 -PERFCOUNTER_CPU(shadow_hash_lookup_head, "shadow hash hit in bucket head") 86.91 -PERFCOUNTER_CPU(shadow_hash_lookup_miss, "shadow hash misses") 86.92 -PERFCOUNTER_CPU(shadow_get_shadow_status, "calls to get_shadow_status") 86.93 -PERFCOUNTER_CPU(shadow_hash_inserts, "calls to shadow_hash_insert") 86.94 -PERFCOUNTER_CPU(shadow_hash_deletes, "calls to shadow_hash_delete") 86.95 -PERFCOUNTER_CPU(shadow_writeable, "shadow removes write access") 86.96 -PERFCOUNTER_CPU(shadow_writeable_h_1, "shadow writeable: 32b w2k3") 86.97 -PERFCOUNTER_CPU(shadow_writeable_h_2, "shadow writeable: 32pae w2k3") 86.98 -PERFCOUNTER_CPU(shadow_writeable_h_3, "shadow writeable: 64b w2k3") 86.99 -PERFCOUNTER_CPU(shadow_writeable_h_4, "shadow writeable: 32b linux low") 86.100 -PERFCOUNTER_CPU(shadow_writeable_h_5, "shadow writeable: 32b linux high") 86.101 -PERFCOUNTER_CPU(shadow_writeable_bf, "shadow writeable brute-force") 86.102 -PERFCOUNTER_CPU(shadow_mappings, "shadow removes all mappings") 86.103 -PERFCOUNTER_CPU(shadow_mappings_bf, "shadow rm-mappings brute-force") 86.104 -PERFCOUNTER_CPU(shadow_early_unshadow, "shadow unshadows for fork/exit") 86.105 -PERFCOUNTER_CPU(shadow_unshadow, "shadow unshadows a page") 86.106 -PERFCOUNTER_CPU(shadow_up_pointer, "shadow unshadow by up-pointer") 86.107 -PERFCOUNTER_CPU(shadow_unshadow_bf, "shadow unshadow brute-force") 86.108 -PERFCOUNTER_CPU(shadow_get_page_fail, "shadow_get_page_from_l1e failed") 86.109 -PERFCOUNTER_CPU(shadow_guest_walk, "shadow walks guest tables") 86.110 -PERFCOUNTER_CPU(shadow_invlpg, "shadow emulates invlpg") 86.111 -PERFCOUNTER_CPU(shadow_invlpg_fault, "shadow invlpg faults") 86.112 +PERFCOUNTER(shadow_fault_emulate_read, "shadow_fault emulates a read")
86.113 +PERFCOUNTER(shadow_fault_emulate_write, "shadow_fault emulates a write") 86.114 +PERFCOUNTER(shadow_fault_emulate_failed, "shadow_fault emulator fails") 86.115 +PERFCOUNTER(shadow_fault_emulate_stack, "shadow_fault emulate stack write") 86.116 +PERFCOUNTER(shadow_fault_mmio, "shadow_fault handled as mmio") 86.117 +PERFCOUNTER(shadow_fault_fixed, "shadow_fault fixed fault") 86.118 +PERFCOUNTER(shadow_ptwr_emulate, "shadow causes ptwr to emulate") 86.119 +PERFCOUNTER(shadow_validate_gl1e_calls, "calls to shadow_validate_gl1e") 86.120 +PERFCOUNTER(shadow_validate_gl2e_calls, "calls to shadow_validate_gl2e") 86.121 +PERFCOUNTER(shadow_validate_gl3e_calls, "calls to shadow_validate_gl3e") 86.122 +PERFCOUNTER(shadow_validate_gl4e_calls, "calls to shadow_validate_gl4e") 86.123 +PERFCOUNTER(shadow_hash_lookups, "calls to shadow_hash_lookup") 86.124 +PERFCOUNTER(shadow_hash_lookup_head, "shadow hash hit in bucket head") 86.125 +PERFCOUNTER(shadow_hash_lookup_miss, "shadow hash misses") 86.126 +PERFCOUNTER(shadow_get_shadow_status, "calls to get_shadow_status") 86.127 +PERFCOUNTER(shadow_hash_inserts, "calls to shadow_hash_insert") 86.128 +PERFCOUNTER(shadow_hash_deletes, "calls to shadow_hash_delete") 86.129 +PERFCOUNTER(shadow_writeable, "shadow removes write access") 86.130 +PERFCOUNTER(shadow_writeable_h_1, "shadow writeable: 32b w2k3") 86.131 +PERFCOUNTER(shadow_writeable_h_2, "shadow writeable: 32pae w2k3") 86.132 +PERFCOUNTER(shadow_writeable_h_3, "shadow writeable: 64b w2k3") 86.133 +PERFCOUNTER(shadow_writeable_h_4, "shadow writeable: 32b linux low") 86.134 +PERFCOUNTER(shadow_writeable_h_5, "shadow writeable: 32b linux high") 86.135 +PERFCOUNTER(shadow_writeable_bf, "shadow writeable brute-force") 86.136 +PERFCOUNTER(shadow_mappings, "shadow removes all mappings") 86.137 +PERFCOUNTER(shadow_mappings_bf, "shadow rm-mappings brute-force") 86.138 +PERFCOUNTER(shadow_early_unshadow, "shadow unshadows for fork/exit") 86.139 +PERFCOUNTER(shadow_unshadow, "shadow unshadows a page") 86.140 +PERFCOUNTER(shadow_up_pointer, "shadow unshadow by up-pointer") 86.141 +PERFCOUNTER(shadow_unshadow_bf, "shadow unshadow brute-force") 86.142 +PERFCOUNTER(shadow_get_page_fail, "shadow_get_page_from_l1e failed") 86.143 +PERFCOUNTER(shadow_guest_walk, "shadow walks guest tables") 86.144 +PERFCOUNTER(shadow_invlpg, "shadow emulates invlpg") 86.145 +PERFCOUNTER(shadow_invlpg_fault, "shadow invlpg faults") 86.146 86.147 86.148 /*#endif*/ /* __XEN_PERFC_DEFN_H__ */
87.1 --- a/xen/include/asm-x86/x86_32/asm_defns.h Wed Mar 28 08:40:42 2007 +0000 87.2 +++ b/xen/include/asm-x86/x86_32/asm_defns.h Wed Mar 28 08:44:08 2007 +0000 87.3 @@ -1,6 +1,8 @@ 87.4 #ifndef __X86_32_ASM_DEFNS_H__ 87.5 #define __X86_32_ASM_DEFNS_H__ 87.6 87.7 +#include <asm/percpu.h> 87.8 + 87.9 #ifndef NDEBUG 87.10 /* Indicate special exception stack frame by inverting the frame pointer. */ 87.11 #define SETUP_EXCEPTION_FRAME_POINTER \ 87.12 @@ -47,10 +49,14 @@ 87.13 1: 87.14 87.15 #ifdef PERF_COUNTERS 87.16 -#define PERFC_INCR(_name,_idx) \ 87.17 - lock incl perfcounters+_name(,_idx,4) 87.18 +#define PERFC_INCR(_name,_idx,_cur) \ 87.19 + pushl _cur; \ 87.20 + movl VCPU_processor(_cur),_cur; \ 87.21 + shll $PERCPU_SHIFT,_cur; \ 87.22 + incl per_cpu__perfcounters+_name*4(_cur,_idx,4);\ 87.23 + popl _cur 87.24 #else 87.25 -#define PERFC_INCR(_name,_idx) 87.26 +#define PERFC_INCR(_name,_idx,_cur) 87.27 #endif 87.28 87.29 #ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
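With counters now living in a per-CPU array, the 32-bit PERFC_INCR assembly must first locate the current CPU's block: it saves _cur to free it as a scratch register, loads the vCPU's processor number, shifts it by PERCPU_SHIFT to form the per-CPU offset, and then does a plain incl; the lock prefix of the old version is gone because no other CPU touches these counters. A rough C rendering, assuming (as the asm does) that per-CPU areas sit at a fixed (1 << PERCPU_SHIFT) stride from the per_cpu__perfcounters symbol:

    /* Rough C equivalent of the new 32-bit PERFC_INCR; the value of
     * PERCPU_SHIFT below is illustrative only. */
    #define PERCPU_SHIFT 12
    extern unsigned int per_cpu__perfcounters[];   /* CPU0's block */

    static inline void perfc_incr_like(unsigned int cpu,
                                       unsigned int name, unsigned int idx)
    {
        unsigned int *base = (unsigned int *)
            ((char *)per_cpu__perfcounters +
             ((unsigned long)cpu << PERCPU_SHIFT));
        /* matches "incl per_cpu__perfcounters+_name*4(_cur,_idx,4)" */
        base[name + idx]++;
    }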
88.1 --- a/xen/include/asm-x86/x86_32/bug.h Wed Mar 28 08:40:42 2007 +0000 88.2 +++ b/xen/include/asm-x86/x86_32/bug.h Wed Mar 28 08:44:08 2007 +0000 88.3 @@ -12,6 +12,12 @@ struct bug_frame_str { 88.4 "ud2 ; ret $%c0" \ 88.5 : : "i" (BUGFRAME_dump) ) 88.6 88.7 +#define WARN() \ 88.8 + asm volatile ( \ 88.9 + "ud2 ; ret $%c0 ; .byte 0xbc ; .long %c1" \ 88.10 + : : "i" (BUGFRAME_warn | (__LINE__<<2)), \ 88.11 + "i" (__FILE__) ) 88.12 + 88.13 #define BUG() \ 88.14 asm volatile ( \ 88.15 "ud2 ; ret $%c0 ; .byte 0xbc ; .long %c1" \
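The new WARN() reuses the BUG() encoding one definition below it: ud2 raises an invalid-opcode trap, the never-executed "ret $imm16" carries the frame type in its immediate together with __LINE__ shifted left by two, and the 0xbc byte (the opcode of a mov into %esp that is likewise never executed) flags a following .long holding the __FILE__ pointer. A hypothetical decoder sketch for this 32-bit layout, with made-up names and under the assumption that the BUGFRAME_* code occupies the two low bits, as the <<2 suggests:

    #include <stdint.h>

    struct decoded_bug { const char *file; unsigned int line, type; };

    /* eip points at the ud2 (0f 0b); then c2 ww ww (ret $imm16),
     * then bc, then the 4-byte __FILE__ pointer. */
    static struct decoded_bug decode_bug_frame_32(const uint8_t *eip)
    {
        struct decoded_bug d;
        uint16_t imm = (uint16_t)(eip[3] | ((uint16_t)eip[4] << 8));
        d.type = imm & 3;                        /* BUGFRAME_* code */
        d.line = imm >> 2;                       /* undo __LINE__ << 2 */
        d.file = *(const char *const *)(eip + 6);
        return d;
    }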
89.1 --- a/xen/include/asm-x86/x86_64/asm_defns.h Wed Mar 28 08:40:42 2007 +0000 89.2 +++ b/xen/include/asm-x86/x86_64/asm_defns.h Wed Mar 28 08:44:08 2007 +0000 89.3 @@ -1,6 +1,8 @@ 89.4 #ifndef __X86_64_ASM_DEFNS_H__ 89.5 #define __X86_64_ASM_DEFNS_H__ 89.6 89.7 +#include <asm/percpu.h> 89.8 + 89.9 #ifndef NDEBUG 89.10 /* Indicate special exception stack frame by inverting the frame pointer. */ 89.11 #define SETUP_EXCEPTION_FRAME_POINTER \ 89.12 @@ -47,13 +49,18 @@ 89.13 popq %rdi; 89.14 89.15 #ifdef PERF_COUNTERS 89.16 -#define PERFC_INCR(_name,_idx) \ 89.17 - pushq %rdx; \ 89.18 - leaq perfcounters+_name(%rip),%rdx; \ 89.19 - lock incl (%rdx,_idx,4); \ 89.20 - popq %rdx; 89.21 +#define PERFC_INCR(_name,_idx,_cur) \ 89.22 + pushq _cur; \ 89.23 + movslq VCPU_processor(_cur),_cur; \ 89.24 + pushq %rdx; \ 89.25 + leaq per_cpu__perfcounters(%rip),%rdx; \ 89.26 + shlq $PERCPU_SHIFT,_cur; \ 89.27 + addq %rdx,_cur; \ 89.28 + popq %rdx; \ 89.29 + incl _name*4(_cur,_idx,4); \ 89.30 + popq _cur 89.31 #else 89.32 -#define PERFC_INCR(_name,_idx) 89.33 +#define PERFC_INCR(_name,_idx,_cur) 89.34 #endif 89.35 89.36 /* Work around AMD erratum #88 */
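The 64-bit PERFC_INCR performs the same per-CPU computation, with two twists forced by the architecture: VCPU_processor is loaded with movslq (sign-extending the 32-bit field into a 64-bit register), and the per_cpu__perfcounters address has to be materialized RIP-relatively via leaq through a temporarily saved %rdx before being added to the scaled CPU offset. As on 32-bit, the old lock prefix is dropped since the counters are now CPU-local.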
90.1 --- a/xen/include/asm-x86/x86_64/bug.h Wed Mar 28 08:40:42 2007 +0000 90.2 +++ b/xen/include/asm-x86/x86_64/bug.h Wed Mar 28 08:44:08 2007 +0000 90.3 @@ -12,6 +12,12 @@ struct bug_frame_str { 90.4 "ud2 ; ret $%c0" \ 90.5 : : "i" (BUGFRAME_dump) ) 90.6 90.7 +#define WARN() \ 90.8 + asm volatile ( \ 90.9 + "ud2 ; ret $%c0 ; .byte 0x48,0xbc ; .quad %c1" \ 90.10 + : : "i" (BUGFRAME_warn | (__LINE__<<2)), \ 90.11 + "i" (__FILE__) ) 90.12 + 90.13 #define BUG() \ 90.14 asm volatile ( \ 90.15 "ud2 ; ret $%c0 ; .byte 0x48,0xbc ; .quad %c1" \
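Same WARN() frame as in the 32-bit header, except that the marker is the REX.W-prefixed form ".byte 0x48,0xbc" and the file-name pointer is emitted with .quad, since pointers are 64 bits wide here.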
91.1 --- a/xen/include/public/foreign/Makefile Wed Mar 28 08:40:42 2007 +0000 91.2 +++ b/xen/include/public/foreign/Makefile Wed Mar 28 08:44:08 2007 +0000 91.3 @@ -1,5 +1,5 @@ 91.4 -XEN_ROOT := ../../../.. 91.5 -include $(XEN_ROOT)/tools/Rules.mk 91.6 +XEN_ROOT=../../../.. 91.7 +include $(XEN_ROOT)/Config.mk 91.8 91.9 architectures := x86_32 x86_64 ia64 91.10 headers := $(patsubst %, %.h, $(architectures))
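This Makefile now takes its definitions from the top-level Config.mk rather than tools/Rules.mk, presumably so the public foreign-interface headers can be generated without pulling in the tools build environment; the switch from := to = (recursive rather than immediate expansion) makes no practical difference for a literal path like this one.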
92.1 --- a/xen/include/xen/lib.h Wed Mar 28 08:40:42 2007 +0000 92.2 +++ b/xen/include/xen/lib.h Wed Mar 28 08:44:08 2007 +0000 92.3 @@ -10,8 +10,10 @@ 92.4 #include <asm/bug.h> 92.5 92.6 void __bug(char *file, int line) __attribute__((noreturn)); 92.7 +void __warn(char *file, int line); 92.8 92.9 -#define BUG_ON(_p) do { if (_p) BUG(); } while ( 0 ) 92.10 +#define BUG_ON(p) do { if (p) BUG(); } while (0) 92.11 +#define WARN_ON(p) do { if (p) WARN(); } while (0) 92.12 92.13 /* Force a compilation error if condition is true */ 92.14 #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2 * !!(condition)]))
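WARN_ON() gives callers a non-fatal counterpart to BUG_ON(): it reports the file and line via __warn() and execution continues, whereas BUG_ON() never returns. A hypothetical call site, illustrative only and not taken from this changeset:

    #include <xen/lib.h>

    static void frob(int *p)
    {
        WARN_ON(p == NULL);     /* report file:line, then carry on */
        if ( p == NULL )
            return;
        BUG_ON(*p < 0);         /* fatal: __bug() does not return */
    }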
93.1 --- a/xen/include/xen/perfc.h Wed Mar 28 08:40:42 2007 +0000 93.2 +++ b/xen/include/xen/perfc.h Wed Mar 28 08:44:08 2007 +0000 93.3 @@ -1,4 +1,3 @@ 93.4 - 93.5 #ifndef __XEN_PERFC_H__ 93.6 #define __XEN_PERFC_H__ 93.7 93.8 @@ -6,102 +5,92 @@ 93.9 93.10 #include <xen/lib.h> 93.11 #include <xen/smp.h> 93.12 -#include <asm/atomic.h> 93.13 +#include <xen/percpu.h> 93.14 93.15 -/* 93.16 +/* 93.17 * NOTE: new counters must be defined in perfc_defn.h 93.18 * 93.19 + * Counter declarations: 93.20 * PERFCOUNTER (counter, string) define a new performance counter 93.21 - * PERFCOUNTER_CPU (counter, string, size) define a counter per CPU 93.22 - * PERFCOUNTER_ARRY (counter, string, size) define an array of counters 93.23 + * PERFCOUNTER_ARRAY (counter, string, size) define an array of counters 93.24 * 93.25 - * unlike "COUNTERS", "STATUS" variables DO NOT RESET 93.26 + * Unlike counters, status variables do not reset: 93.27 * PERFSTATUS (counter, string) define a new performance stauts 93.28 - * PERFSTATUS_CPU (counter, string, size) define a status var per CPU 93.29 - * PERFSTATUS_ARRY (counter, string, size) define an array of status vars 93.30 + * PERFSTATUS_ARRAY (counter, string, size) define an array of status vars 93.31 * 93.32 * unsigned long perfc_value (counter) get value of a counter 93.33 - * unsigned long perfc_valuec (counter) get value of a per CPU counter 93.34 * unsigned long perfc_valuea (counter, index) get value of an array counter 93.35 * unsigned long perfc_set (counter, val) set value of a counter 93.36 - * unsigned long perfc_setc (counter, val) set value of a per CPU counter 93.37 * unsigned long perfc_seta (counter, index, val) set value of an array counter 93.38 * void perfc_incr (counter) increment a counter 93.39 - * void perfc_incrc (counter, index) increment a per CPU counter 93.40 + * void perfc_decr (counter) decrement a status 93.41 * void perfc_incra (counter, index) increment an array counter 93.42 * void perfc_add (counter, value) add a value to a counter 93.43 - * void perfc_addc (counter, value) add a value to a per CPU counter 93.44 * void perfc_adda (counter, index, value) add a value to array counter 93.45 * void perfc_print (counter) print out the counter 93.46 */ 93.47 93.48 -#define PERFCOUNTER( var, name ) \ 93.49 - atomic_t var[1]; 93.50 -#define PERFCOUNTER_CPU( var, name ) \ 93.51 - atomic_t var[NR_CPUS]; 93.52 -#define PERFCOUNTER_ARRAY( var, name, size ) \ 93.53 - atomic_t var[size]; 93.54 -#define PERFSTATUS( var, name ) \ 93.55 - atomic_t var[1]; 93.56 -#define PERFSTATUS_CPU( var, name ) \ 93.57 - atomic_t var[NR_CPUS]; 93.58 -#define PERFSTATUS_ARRAY( var, name, size ) \ 93.59 - atomic_t var[size]; 93.60 +#define PERFCOUNTER( name, descr ) \ 93.61 + PERFC_##name, 93.62 +#define PERFCOUNTER_ARRAY( name, descr, size ) \ 93.63 + PERFC_##name, \ 93.64 + PERFC_LAST_##name = PERFC_ ## name + (size) - sizeof(char[2 * !!(size) - 1]), 93.65 93.66 -struct perfcounter { 93.67 +#define PERFSTATUS PERFCOUNTER 93.68 +#define PERFSTATUS_ARRAY PERFCOUNTER_ARRAY 93.69 + 93.70 +enum perfcounter { 93.71 #include <xen/perfc_defn.h> 93.72 + NUM_PERFCOUNTERS 93.73 }; 93.74 93.75 -extern struct perfcounter perfcounters; 93.76 +#undef PERFCOUNTER 93.77 +#undef PERFCOUNTER_ARRAY 93.78 +#undef PERFSTATUS 93.79 +#undef PERFSTATUS_ARRAY 93.80 93.81 -#define perfc_value(x) atomic_read(&perfcounters.x[0]) 93.82 -#define perfc_valuec(x) atomic_read(&perfcounters.x[smp_processor_id()]) 93.83 +typedef unsigned perfc_t; 93.84 +#define PRIperfc "" 93.85 +
93.86 +DECLARE_PER_CPU(perfc_t[NUM_PERFCOUNTERS], perfcounters); 93.87 + 93.88 +#define perfc_value(x) this_cpu(perfcounters)[PERFC_ ## x] 93.89 #define perfc_valuea(x,y) \ 93.90 - ( (y) < (sizeof(perfcounters.x) / sizeof(*perfcounters.x)) ? \ 93.91 - atomic_read(&perfcounters.x[y]) : 0 ) 93.92 -#define perfc_set(x,v) atomic_set(&perfcounters.x[0], v) 93.93 -#define perfc_setc(x,v) atomic_set(&perfcounters.x[smp_processor_id()], v) 93.94 + ( (y) <= PERFC_LAST_ ## x - PERFC_ ## x ? \ 93.95 + this_cpu(perfcounters)[PERFC_ ## x + (y)] : 0 ) 93.96 +#define perfc_set(x,v) (this_cpu(perfcounters)[PERFC_ ## x] = (v)) 93.97 #define perfc_seta(x,y,v) \ 93.98 - do { \ 93.99 - if ( (y) < (sizeof(perfcounters.x) / sizeof(*perfcounters.x)) ) \ 93.100 - atomic_set(&perfcounters.x[y], v); \ 93.101 - } while ( 0 ) 93.102 -#define perfc_incr(x) atomic_inc(&perfcounters.x[0]) 93.103 -#define perfc_decr(x) atomic_dec(&perfcounters.x[0]) 93.104 -#define perfc_incrc(x) atomic_inc(&perfcounters.x[smp_processor_id()]) 93.105 -#define perfc_decrc(x) atomic_dec(&perfcounters.x[smp_processor_id()]) 93.106 + ( (y) <= PERFC_LAST_ ## x - PERFC_ ## x ? \ 93.107 + this_cpu(perfcounters)[PERFC_ ## x + (y)] = (v) : (v) ) 93.108 +#define perfc_incr(x) (++this_cpu(perfcounters)[PERFC_ ## x]) 93.109 +#define perfc_decr(x) (--this_cpu(perfcounters)[PERFC_ ## x]) 93.110 #define perfc_incra(x,y) \ 93.111 - do { \ 93.112 - if ( (y) < (sizeof(perfcounters.x) / sizeof(*perfcounters.x)) ) \ 93.113 - atomic_inc(&perfcounters.x[y]); \ 93.114 - } while ( 0 ) 93.115 -#define perfc_add(x,y) atomic_add((y), &perfcounters.x[0]) 93.116 -#define perfc_addc(x,y) atomic_add((y), &perfcounters.x[smp_processor_id()]) 93.117 -#define perfc_adda(x,y,z) \ 93.118 - do { \ 93.119 - if ( (y) < (sizeof(perfcounters.x) / sizeof(*perfcounters.x)) ) \ 93.120 - atomic_add((z), &perfcounters.x[y]); \ 93.121 - } while ( 0 ) 93.122 + ( (y) <= PERFC_LAST_ ## x - PERFC_ ## x ? \ 93.123 + ++this_cpu(perfcounters)[PERFC_ ## x + (y)] : 0 ) 93.124 +#define perfc_add(x,v) (this_cpu(perfcounters)[PERFC_ ## x] += (v)) 93.125 +#define perfc_adda(x,y,v) \ 93.126 + ( (y) <= PERFC_LAST_ ## x - PERFC_ ## x ? \ 93.127 + this_cpu(perfcounters)[PERFC_ ## x + (y)] = (v) : (v) ) 93.128 93.129 /* 93.130 * Histogram: special treatment for 0 and 1 count. After that equally spaced 93.131 * with last bucket taking the rest. 
93.132 */ 93.133 #ifdef PERF_ARRAYS 93.134 -#define perfc_incr_histo(_x,_v,_n) \ 93.135 - do { \ 93.136 - if ( (_v) == 0 ) \ 93.137 - perfc_incra(_x, 0); \ 93.138 - else if ( (_v) == 1 ) \ 93.139 - perfc_incra(_x, 1); \ 93.140 - else if ( (((_v)-2) / PERFC_ ## _n ## _BUCKET_SIZE) < \ 93.141 - (PERFC_MAX_ ## _n - 3) ) \ 93.142 - perfc_incra(_x, (((_v)-2) / PERFC_ ## _n ## _BUCKET_SIZE) + 2); \ 93.143 - else \ 93.144 - perfc_incra(_x, PERFC_MAX_ ## _n - 1); \ 93.145 +#define perfc_incr_histo(x,v) \ 93.146 + do { \ 93.147 + if ( (v) == 0 ) \ 93.148 + perfc_incra(x, 0); \ 93.149 + else if ( (v) == 1 ) \ 93.150 + perfc_incra(x, 1); \ 93.151 + else if ( (((v) - 2) / PERFC_ ## x ## _BUCKET_SIZE) < \ 93.152 + (PERFC_LAST_ ## x - PERFC_ ## x - 2) ) \ 93.153 + perfc_incra(x, (((v) - 2) / PERFC_ ## x ## _BUCKET_SIZE) + 2); \ 93.154 + else \ 93.155 + perfc_incra(x, PERFC_LAST_ ## x - PERFC_ ## x); \ 93.156 } while ( 0 ) 93.157 #else 93.158 -#define perfc_incr_histo(_x,_v,_n) ((void)0) 93.159 +#define perfc_incr_histo(x,v) ((void)0) 93.160 #endif 93.161 93.162 struct xen_sysctl_perfc_op; 93.163 @@ -110,19 +99,14 @@ int perfc_control(struct xen_sysctl_perf 93.164 #else /* PERF_COUNTERS */ 93.165 93.166 #define perfc_value(x) (0) 93.167 -#define perfc_valuec(x) (0) 93.168 #define perfc_valuea(x,y) (0) 93.169 #define perfc_set(x,v) ((void)0) 93.170 -#define perfc_setc(x,v) ((void)0) 93.171 #define perfc_seta(x,y,v) ((void)0) 93.172 #define perfc_incr(x) ((void)0) 93.173 #define perfc_decr(x) ((void)0) 93.174 -#define perfc_incrc(x) ((void)0) 93.175 -#define perfc_decrc(x) ((void)0) 93.176 #define perfc_incra(x,y) ((void)0) 93.177 #define perfc_decra(x,y) ((void)0) 93.178 #define perfc_add(x,y) ((void)0) 93.179 -#define perfc_addc(x,y) ((void)0) 93.180 #define perfc_adda(x,y,z) ((void)0) 93.181 #define perfc_incr_histo(x,y,z) ((void)0) 93.182
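This hunk is the heart of the rework: xen/perfc_defn.h is expanded with different macro bindings each time it is included (the classic "X-macro" pattern). The expansion shown above turns every declaration into a PERFC_* enumerator; arrays reserve size consecutive slots via PERFC_LAST_*, and the odd sizeof(char[2 * !!(size) - 1]) term is a compile-time guard that evaluates to 1 for any positive size but forces a negative-array-size error for size == 0. All counters then share one per-CPU array, so updates become plain ++/-- with no atomics. One caveat visible in the hunk: perfc_adda as committed stores (v) rather than adding it, exactly mirroring perfc_seta. A self-contained sketch of the enum-generation trick, with the defn-file entries inlined for the demo:

    /* The real header #includes xen/perfc_defn.h inside the enum instead
     * of writing entries out; "- 1" stands in for the sizeof() guard. */
    #define PERFCOUNTER(name, descr)        PERFC_##name,
    #define PERFCOUNTER_ARRAY(name, descr, size) \
        PERFC_##name,                             \
        PERFC_LAST_##name = PERFC_##name + (size) - 1,

    enum perfcounter {
        PERFCOUNTER(page_faults, "page faults")
        PERFCOUNTER_ARRAY(hypercalls, "hypercalls", 64)
        NUM_PERFCOUNTERS        /* 65: one scalar + 64 array slots */
    };

    #undef PERFCOUNTER
    #undef PERFCOUNTER_ARRAY

A later include of the same file with the macros rebound (for example, to build name/description tables in the C file that prints the counters) reuses the single list of definitions, which is why the #undefs follow the enum.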
94.1 --- a/xen/include/xen/perfc_defn.h Wed Mar 28 08:40:42 2007 +0000 94.2 +++ b/xen/include/xen/perfc_defn.h Wed Mar 28 08:44:08 2007 +0000 94.3 @@ -6,13 +6,16 @@ 94.4 94.5 PERFCOUNTER_ARRAY(hypercalls, "hypercalls", NR_hypercalls) 94.6 94.7 -PERFCOUNTER_CPU(irqs, "#interrupts") 94.8 -PERFCOUNTER_CPU(ipis, "#IPIs") 94.9 +PERFCOUNTER(calls_to_multicall, "calls to multicall") 94.10 +PERFCOUNTER(calls_from_multicall, "calls from multicall") 94.11 + 94.12 +PERFCOUNTER(irqs, "#interrupts") 94.13 +PERFCOUNTER(ipis, "#IPIs") 94.14 94.15 -PERFCOUNTER_CPU(sched_irq, "sched: timer") 94.16 -PERFCOUNTER_CPU(sched_run, "sched: runs through scheduler") 94.17 -PERFCOUNTER_CPU(sched_ctx, "sched: context switches") 94.18 +PERFCOUNTER(sched_irq, "sched: timer") 94.19 +PERFCOUNTER(sched_run, "sched: runs through scheduler") 94.20 +PERFCOUNTER(sched_ctx, "sched: context switches") 94.21 94.22 -PERFCOUNTER_CPU(need_flush_tlb_flush, "PG_need_flush tlb flushes") 94.23 +PERFCOUNTER(need_flush_tlb_flush, "PG_need_flush tlb flushes") 94.24 94.25 /*#endif*/ /* __XEN_PERFC_DEFN_H__ */
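The two counters added here, calls_to_multicall and calls_from_multicall, pair with the do_multicall_call() rework at the top of this changeset: presumably one counts multicall hypercalls and the other the sub-calls dispatched from them, with call sites bumping them via the usual perfc_incr(calls_to_multicall) form.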