debuggers.hg

changeset 22717:d9dca2bfe6b2

mem_access: added INT3/CRx capture

* Allows a memory event listener to register for events on changes to
CR0, CR3, and CR4, as well as on INT3 instructions, as part of the
mem_access mechanism. These events can be delivered either
synchronously or asynchronously.

* For INT3, the logic works independently of a debugger, so both can
be supported at the same time.

* The presence and type of each listener are stored in, and accessed
through, HVM params; see the first sketch after this list.

* Changed the event-mask handling to ensure that the right events are
captured, based on the registered listeners.

* Added the ability to inject HW/SW traps into a VCPU when it next
resumes, rather than trying to modify the existing IRQ-injection
code paths; see the second sketch after this list. Only one trap to
inject can be outstanding at a time.

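For context, a minimal sketch of how a privileged tool might arm these
listeners from dom0. xc_set_hvm_param() is the existing libxc wrapper; the
HVM_PARAM_MEMORY_EVENT_* and HVMPME_* constants come from the params.h hunk
below. The helper name and error handling are illustrative, not part of this
changeset.

    /* Sketch: arm synchronous CR3 and INT3 capture for domain 'domid'.
     * Must be issued from a foreign (privileged) domain: setting these
     * params on the calling domain itself returns -EPERM. */
    #include <xenctrl.h>
    #include <xen/hvm/params.h>

    static int arm_memory_events(xc_interface *xch, domid_t domid)
    {
        int rc;

        /* Synchronous CR3 events, only when the value actually changes. */
        rc = xc_set_hvm_param(xch, domid, HVM_PARAM_MEMORY_EVENT_CR3,
                              HVMPME_mode_sync | HVMPME_onchangeonly);
        if ( rc != 0 )
            return rc;

        /* INT3 capture; HVMPME_onchangeonly is rejected (-EINVAL) here. */
        return xc_set_hvm_param(xch, domid, HVM_PARAM_MEMORY_EVENT_INT3,
                                HVMPME_mode_sync);
    }
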
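HVMOP_inject_trap has no libxc wrapper in this changeset, so a caller fills
the public struct and issues the HVM op itself. A hedged sketch, where
do_hvm_op() is a hypothetical stand-in for whatever HVMOP hypercall helper
the tool uses:

    /* Sketch: queue a page fault (vector 14) with CR2 set, to be
     * injected when the target VCPU next resumes. */
    #include <xen/hvm/hvm_op.h>

    static int inject_pagefault(domid_t domid, uint32_t vcpuid,
                                uint64_t cr2, uint32_t error_code)
    {
        xen_hvm_inject_trap_t tr = {
            .domid      = domid,
            .vcpuid     = vcpuid,
            .trap       = 14,          /* TRAP_page_fault */
            .error_code = error_code,
            .cr2        = cr2,
        };

        /* Returns -EBUSY if an earlier injection is still outstanding,
         * and -EPERM if a domain tries to inject into itself. */
        return do_hvm_op(HVMOP_inject_trap, &tr);
    }
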
Signed-off-by: Joe Epstein <jepstein98@gmail.com>
Acked-by: Keir Fraser <keir@xen.org>
Acked-by: Tim Deegan <Tim.Deegan@citrix.com>
author Joe Epstein <jepstein98@gmail.com>
date Fri Jan 07 11:54:48 2011 +0000 (2011-01-07)
parents 76e07538870e
children 32ec2fab6ea4
files xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/include/asm-x86/hvm/hvm.h xen/include/asm-x86/hvm/vcpu.h xen/include/public/hvm/hvm_op.h xen/include/public/hvm/params.h xen/include/public/mem_event.h
line diff
     1.1 --- a/xen/arch/x86/hvm/hvm.c	Fri Jan 07 11:54:45 2011 +0000
     1.2 +++ b/xen/arch/x86/hvm/hvm.c	Fri Jan 07 11:54:48 2011 +0000
     1.3 @@ -309,6 +309,15 @@ void hvm_do_resume(struct vcpu *v)
     1.4              return; /* bail */
     1.5          }
     1.6      }
     1.7 +
     1.8 +    /* Inject pending hw/sw trap */
     1.9 +    if ( v->arch.hvm_vcpu.inject_trap != -1 )
    1.10 +    {
    1.11 +        hvm_inject_exception(v->arch.hvm_vcpu.inject_trap, 
    1.12 +                             v->arch.hvm_vcpu.inject_error_code, 
    1.13 +                             v->arch.hvm_vcpu.inject_cr2);
    1.14 +        v->arch.hvm_vcpu.inject_trap = -1;
    1.15 +    }
    1.16  }
    1.17  
    1.18  static void hvm_init_ioreq_page(
    1.19 @@ -949,6 +958,8 @@ int hvm_vcpu_initialise(struct vcpu *v)
    1.20      spin_lock_init(&v->arch.hvm_vcpu.tm_lock);
    1.21      INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);
    1.22  
    1.23 +    v->arch.hvm_vcpu.inject_trap = -1;
    1.24 +
    1.25  #ifdef CONFIG_COMPAT
    1.26      rc = setup_compat_arg_xlat(v);
    1.27      if ( rc != 0 )
    1.28 @@ -3236,10 +3247,45 @@ long do_hvm_op(unsigned long op, XEN_GUE
    1.29              case HVM_PARAM_ACPI_IOPORTS_LOCATION:
    1.30                  rc = pmtimer_change_ioport(d, a.value);
    1.31                  break;
    1.32 +            case HVM_PARAM_MEMORY_EVENT_CR0:
    1.33 +            case HVM_PARAM_MEMORY_EVENT_CR3:
    1.34 +            case HVM_PARAM_MEMORY_EVENT_CR4:
    1.35 +                if ( d->domain_id == current->domain->domain_id )
    1.36 +                    rc = -EPERM;
    1.37 +                break;
    1.38 +            case HVM_PARAM_MEMORY_EVENT_INT3:
    1.39 +                if ( d->domain_id == current->domain->domain_id ) 
    1.40 +                {
    1.41 +                    rc = -EPERM;
    1.42 +                    break;
    1.43 +                }
    1.44 +                if ( a.value & HVMPME_onchangeonly )
    1.45 +                    rc = -EINVAL;
    1.46 +                break;
    1.47              }
    1.48  
    1.49 -            if ( rc == 0 )
    1.50 +            if ( rc == 0 ) 
    1.51 +            {
    1.52                  d->arch.hvm_domain.params[a.index] = a.value;
    1.53 +
    1.54 +                switch ( a.index )
    1.55 +                {
    1.56 +                case HVM_PARAM_MEMORY_EVENT_INT3:
    1.57 +                {
    1.58 +                    domain_pause(d);
    1.59 +                    domain_unpause(d); /* Causes guest to latch new status */
    1.60 +                    break;
    1.61 +                }
    1.62 +                case HVM_PARAM_MEMORY_EVENT_CR3:
    1.63 +                {
    1.64 +                    for_each_vcpu ( d, v )
    1.65 +                        hvm_funcs.update_guest_cr(v, 0); /* Latches new CR3 mask through CR0 code */
    1.66 +                    break;
    1.67 +                }
    1.68 +                }
    1.69 +
    1.70 +            }
    1.71 +
    1.72          }
    1.73          else
    1.74          {
    1.75 @@ -3657,6 +3703,44 @@ long do_hvm_op(unsigned long op, XEN_GUE
    1.76          break;
    1.77      }
    1.78  
    1.79 +    case HVMOP_inject_trap: 
    1.80 +    {
    1.81 +        xen_hvm_inject_trap_t tr;
    1.82 +        struct domain *d;
    1.83 +        struct vcpu *v;
    1.84 +
    1.85 +        if ( copy_from_guest(&tr, arg, 1) )
    1.86 +            return -EFAULT;
    1.87 +
    1.88 +        if ( current->domain->domain_id == tr.domid )
    1.89 +            return -EPERM;
    1.90 +
    1.91 +        rc = rcu_lock_target_domain_by_id(tr.domid, &d);
    1.92 +        if ( rc != 0 )
    1.93 +            return rc;
    1.94 +
    1.95 +        rc = -EINVAL;
    1.96 +        if ( !is_hvm_domain(d) )
    1.97 +            goto param_fail8;
    1.98 +
    1.99 +        rc = -ENOENT;
   1.100 +        if ( tr.vcpuid >= d->max_vcpus || (v = d->vcpu[tr.vcpuid]) == NULL )
   1.101 +            goto param_fail8;
   1.102 +        
   1.103 +        if ( v->arch.hvm_vcpu.inject_trap != -1 )
   1.104 +            rc = -EBUSY;
   1.105 +        else 
   1.106 +        {
   1.107 +            v->arch.hvm_vcpu.inject_trap       = tr.trap;
   1.108 +            v->arch.hvm_vcpu.inject_error_code = tr.error_code;
   1.109 +            v->arch.hvm_vcpu.inject_cr2        = tr.cr2;
   1.110 +        }
   1.111 +
   1.112 +    param_fail8:
   1.113 +        rcu_unlock_domain(d);
   1.114 +        break;
   1.115 +    }
   1.116 +
   1.117      default:
   1.118      {
   1.119          gdprintk(XENLOG_WARNING, "Bad HVM op %ld.\n", op);
   1.120 @@ -3697,6 +3781,84 @@ int hvm_debug_op(struct vcpu *v, int32_t
   1.121      return rc;
   1.122  }
   1.123  
   1.124 +static int hvm_memory_event_traps(long p, uint32_t reason,
   1.125 +                                  unsigned long value, unsigned long old, 
   1.126 +                                  bool_t gla_valid, unsigned long gla) 
   1.127 +{
   1.128 +    struct vcpu *v = current;
   1.129 +    struct domain *d = v->domain;
   1.130 +    mem_event_request_t req;
   1.131 +    int rc;
   1.132 +
   1.133 +    if ( !(p & HVMPME_MODE_MASK) ) 
   1.134 +        return 0;
   1.135 +
   1.136 +    if ( (p & HVMPME_onchangeonly) && (value == old) )
   1.137 +        return 1;
   1.138 +    
   1.139 +    rc = mem_event_check_ring(d);
   1.140 +    if ( rc )
   1.141 +        return rc;
   1.142 +    
   1.143 +    memset(&req, 0, sizeof(req));
   1.144 +    req.type = MEM_EVENT_TYPE_ACCESS;
   1.145 +    req.reason = reason;
   1.146 +    
   1.147 +    if ( (p & HVMPME_MODE_MASK) == HVMPME_mode_sync ) 
   1.148 +    {
   1.149 +        req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;    
   1.150 +        vcpu_pause_nosync(v);   
   1.151 +    }
   1.152 +
   1.153 +    req.gfn = value;
   1.154 +    req.vcpu_id = v->vcpu_id;
   1.155 +    if ( gla_valid ) 
   1.156 +    {
   1.157 +        req.offset = gla & ((1 << PAGE_SHIFT) - 1);
   1.158 +        req.gla = gla;
   1.159 +        req.gla_valid = 1;
   1.160 +    }
   1.161 +    
   1.162 +    mem_event_put_request(d, &req);      
   1.163 +    
   1.164 +    return 1;
   1.165 +}
   1.166 +
   1.167 +void hvm_memory_event_cr0(unsigned long value, unsigned long old) 
   1.168 +{
   1.169 +    hvm_memory_event_traps(current->domain->arch.hvm_domain
   1.170 +                             .params[HVM_PARAM_MEMORY_EVENT_CR0],
   1.171 +                           MEM_EVENT_REASON_CR0,
   1.172 +                           value, old, 0, 0);
   1.173 +}
   1.174 +
   1.175 +void hvm_memory_event_cr3(unsigned long value, unsigned long old) 
   1.176 +{
   1.177 +    hvm_memory_event_traps(current->domain->arch.hvm_domain
   1.178 +                             .params[HVM_PARAM_MEMORY_EVENT_CR3],
   1.179 +                           MEM_EVENT_REASON_CR3,
   1.180 +                           value, old, 0, 0);
   1.181 +}
   1.182 +
   1.183 +void hvm_memory_event_cr4(unsigned long value, unsigned long old) 
   1.184 +{
   1.185 +    hvm_memory_event_traps(current->domain->arch.hvm_domain
   1.186 +                             .params[HVM_PARAM_MEMORY_EVENT_CR4],
   1.187 +                           MEM_EVENT_REASON_CR4,
   1.188 +                           value, old, 0, 0);
   1.189 +}
   1.190 +
   1.191 +int hvm_memory_event_int3(unsigned long gla) 
   1.192 +{
   1.193 +    uint32_t pfec = PFEC_page_present;
   1.194 +    unsigned long gfn;
   1.195 +    gfn = paging_gva_to_gfn(current, gla, &pfec);
   1.196 +
   1.197 +    return hvm_memory_event_traps(current->domain->arch.hvm_domain
   1.198 +                                    .params[HVM_PARAM_MEMORY_EVENT_INT3],
   1.199 +                                  MEM_EVENT_REASON_INT3,
   1.200 +                                  gfn, 0, 1, gla);
   1.201 +}
   1.202  
   1.203  /*
   1.204   * Local variables:
     2.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Fri Jan 07 11:54:45 2011 +0000
     2.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Fri Jan 07 11:54:48 2011 +0000
     2.3 @@ -1082,7 +1082,9 @@ void vmx_do_resume(struct vcpu *v)
     2.4          hvm_asid_flush_vcpu(v);
     2.5      }
     2.6  
     2.7 -    debug_state = v->domain->debugger_attached;
     2.8 +    debug_state = v->domain->debugger_attached 
     2.9 +                  || v->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_INT3];
    2.10 +
    2.11      if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
    2.12      {
    2.13          v->arch.hvm_vcpu.debug_state_latch = debug_state;
     3.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Fri Jan 07 11:54:45 2011 +0000
     3.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Fri Jan 07 11:54:48 2011 +0000
     3.3 @@ -1064,12 +1064,16 @@ static void vmx_update_guest_cr(struct v
     3.4  
     3.5          if ( paging_mode_hap(v->domain) )
     3.6          {
     3.7 -            /* We manage GUEST_CR3 when guest CR0.PE is zero. */
     3.8 +            /* We manage GUEST_CR3 when guest CR0.PE is zero or when CR3 memory events are enabled. */
     3.9              uint32_t cr3_ctls = (CPU_BASED_CR3_LOAD_EXITING |
    3.10                                   CPU_BASED_CR3_STORE_EXITING);
    3.11              v->arch.hvm_vmx.exec_control &= ~cr3_ctls;
    3.12              if ( !hvm_paging_enabled(v) )
    3.13                  v->arch.hvm_vmx.exec_control |= cr3_ctls;
    3.14 +
    3.15 +            if ( v->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_CR3] )
    3.16 +                v->arch.hvm_vmx.exec_control |= CPU_BASED_CR3_LOAD_EXITING;
    3.17 +
    3.18              vmx_update_cpu_exec_control(v);
    3.19  
    3.20              /* Changing CR0.PE can change some bits in real CR4. */
    3.21 @@ -1252,9 +1256,12 @@ void vmx_inject_hw_exception(int trap, i
    3.22      unsigned long intr_info = __vmread(VM_ENTRY_INTR_INFO);
    3.23      struct vcpu *curr = current;
    3.24  
    3.25 +    int type = X86_EVENTTYPE_HW_EXCEPTION;
    3.26 +
    3.27      switch ( trap )
    3.28      {
    3.29      case TRAP_debug:
    3.30 +        type = X86_EVENTTYPE_SW_EXCEPTION;
    3.31          if ( guest_cpu_user_regs()->eflags & X86_EFLAGS_TF )
    3.32          {
    3.33              __restore_debug_registers(curr);
    3.34 @@ -1269,6 +1276,9 @@ void vmx_inject_hw_exception(int trap, i
    3.35              domain_pause_for_debugger();
    3.36              return;
    3.37          }
    3.38 +
    3.39 +        type = X86_EVENTTYPE_SW_EXCEPTION;
    3.40 +        __vmwrite(VM_ENTRY_INSTRUCTION_LEN, 1); /* int3 */
    3.41      }
    3.42  
    3.43      if ( unlikely(intr_info & INTR_INFO_VALID_MASK) &&
    3.44 @@ -1279,7 +1289,7 @@ void vmx_inject_hw_exception(int trap, i
    3.45              error_code = 0;
    3.46      }
    3.47  
    3.48 -    __vmx_inject_exception(trap, X86_EVENTTYPE_HW_EXCEPTION, error_code);
    3.49 +    __vmx_inject_exception(trap, type, error_code);
    3.50  
    3.51      if ( trap == TRAP_page_fault )
    3.52          HVMTRACE_LONG_2D(PF_INJECT, error_code,
    3.53 @@ -1565,6 +1575,8 @@ static int mov_to_cr(int gp, int cr, str
    3.54      unsigned long value;
    3.55      struct vcpu *v = current;
    3.56      struct vlapic *vlapic = vcpu_vlapic(v);
    3.57 +    int rc = 0;
    3.58 +    unsigned long old;
    3.59  
    3.60      switch ( gp )
    3.61      {
    3.62 @@ -1589,13 +1601,25 @@ static int mov_to_cr(int gp, int cr, str
    3.63      switch ( cr )
    3.64      {
    3.65      case 0:
    3.66 -        return !hvm_set_cr0(value);
    3.67 +        old = v->arch.hvm_vcpu.guest_cr[0];
    3.68 +        rc = !hvm_set_cr0(value);
    3.69 +        if ( rc )
    3.70 +            hvm_memory_event_cr0(value, old);
    3.71 +        return rc;
    3.72  
    3.73      case 3:
    3.74 -        return !hvm_set_cr3(value);
    3.75 +        old = v->arch.hvm_vcpu.guest_cr[3];
    3.76 +        rc = !hvm_set_cr3(value);
    3.77 +        if ( rc )
    3.78 +            hvm_memory_event_cr3(value, old);        
    3.79 +        return rc;
    3.80  
    3.81      case 4:
    3.82 -        return !hvm_set_cr4(value);
    3.83 +        old = v->arch.hvm_vcpu.guest_cr[4];
    3.84 +        rc = !hvm_set_cr4(value);
    3.85 +        if ( rc )
    3.86 +            hvm_memory_event_cr4(value, old);
    3.87 +        return rc; 
    3.88  
    3.89      case 8:
    3.90          vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
    3.91 @@ -1676,11 +1700,17 @@ static int vmx_cr_access(unsigned long e
    3.92          cr = exit_qualification & VMX_CONTROL_REG_ACCESS_NUM;
    3.93          mov_from_cr(cr, gp, regs);
    3.94          break;
    3.95 -    case VMX_CONTROL_REG_ACCESS_TYPE_CLTS:
    3.96 +    case VMX_CONTROL_REG_ACCESS_TYPE_CLTS: 
    3.97 +    {
    3.98 +        unsigned long old = v->arch.hvm_vcpu.guest_cr[0];
    3.99          v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
   3.100          vmx_update_guest_cr(v, 0);
   3.101 +
   3.102 +        hvm_memory_event_cr0(v->arch.hvm_vcpu.guest_cr[0], old);
   3.103 +
   3.104          HVMTRACE_0D(CLTS);
   3.105          break;
   3.106 +    }
   3.107      case VMX_CONTROL_REG_ACCESS_TYPE_LMSW:
   3.108          value = v->arch.hvm_vcpu.guest_cr[0];
   3.109          /* LMSW can: (1) set bits 0-3; (2) clear bits 1-3. */
   3.110 @@ -2351,13 +2381,29 @@ asmlinkage void vmx_vmexit_handler(struc
   3.111                  goto exit_and_crash;
   3.112              domain_pause_for_debugger();
   3.113              break;
   3.114 -        case TRAP_int3:
   3.115 -            if ( !v->domain->debugger_attached )
   3.116 -                goto exit_and_crash;
   3.117 -            update_guest_eip(); /* Safe: INT3 */
   3.118 -            current->arch.gdbsx_vcpu_event = TRAP_int3;
   3.119 -            domain_pause_for_debugger();
   3.120 -            break;
   3.121 +        case TRAP_int3: 
   3.122 +        {
   3.123 +            if ( v->domain->debugger_attached )
   3.124 +            {
   3.125 +                update_guest_eip(); /* Safe: INT3 */            
   3.126 +                current->arch.gdbsx_vcpu_event = TRAP_int3;
   3.127 +                domain_pause_for_debugger();
   3.128 +                break;
   3.129 +            }
   3.130 +            else {
   3.131 +                int handled = hvm_memory_event_int3(regs->eip);
   3.132 +                
   3.133 +                if ( handled < 0 ) 
   3.134 +                {
   3.135 +                    vmx_inject_exception(TRAP_int3, HVM_DELIVER_NO_ERROR_CODE, 0);
   3.136 +                    break;
   3.137 +                }
   3.138 +                else if ( handled )
   3.139 +                    break;
   3.140 +            }
   3.141 +
   3.142 +            goto exit_and_crash;
   3.143 +        }
   3.144          case TRAP_no_device:
   3.145              vmx_fpu_dirty_intercept();
   3.146              break;
     4.1 --- a/xen/include/asm-x86/hvm/hvm.h	Fri Jan 07 11:54:45 2011 +0000
     4.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Fri Jan 07 11:54:48 2011 +0000
     4.3 @@ -372,4 +372,12 @@ bool_t hvm_hap_nested_page_fault(unsigne
     4.4  int hvm_x2apic_msr_read(struct vcpu *v, unsigned int msr, uint64_t *msr_content);
     4.5  int hvm_x2apic_msr_write(struct vcpu *v, unsigned int msr, uint64_t msr_content);
     4.6  
     4.7 +/* Called for current VCPU on crX changes by guest */
     4.8 +void hvm_memory_event_cr0(unsigned long value, unsigned long old);
     4.9 +void hvm_memory_event_cr3(unsigned long value, unsigned long old);
    4.10 +void hvm_memory_event_cr4(unsigned long value, unsigned long old);
    4.11 +
    4.12 +/* Called for current VCPU on int3: returns -1 if no listener */
    4.13 +int hvm_memory_event_int3(unsigned long gla);
    4.14 +
    4.15  #endif /* __ASM_X86_HVM_HVM_H__ */
     5.1 --- a/xen/include/asm-x86/hvm/vcpu.h	Fri Jan 07 11:54:45 2011 +0000
     5.2 +++ b/xen/include/asm-x86/hvm/vcpu.h	Fri Jan 07 11:54:48 2011 +0000
     5.3 @@ -114,6 +114,11 @@ struct hvm_vcpu {
     5.4      /* We may write up to m128 as a number of device-model transactions. */
     5.5      paddr_t mmio_large_write_pa;
     5.6      unsigned int mmio_large_write_bytes;
     5.7 +
     5.8 +    /* Pending hw/sw interrupt */
     5.9 +    int           inject_trap;       /* -1 for nothing to inject */
    5.10 +    int           inject_error_code;
    5.11 +    unsigned long inject_cr2;
    5.12  };
    5.13  
    5.14  #endif /* __ASM_X86_HVM_VCPU_H__ */
     6.1 --- a/xen/include/public/hvm/hvm_op.h	Fri Jan 07 11:54:45 2011 +0000
     6.2 +++ b/xen/include/public/hvm/hvm_op.h	Fri Jan 07 11:54:48 2011 +0000
     6.3 @@ -200,4 +200,26 @@ struct xen_hvm_get_mem_access {
     6.4  };
     6.5  typedef struct xen_hvm_get_mem_access xen_hvm_get_mem_access_t;
     6.6  DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_access_t);
     6.7 +
     6.8 +#define HVMOP_inject_trap            14
     6.9 +/* Inject a trap into a VCPU, which will be taken when the VCPU is
    6.10 + * next scheduled. Note that the caller should know enough about the
    6.11 + * state of the CPU before injecting to anticipate what the effect of
    6.12 + * the trap will be.
    6.13 + */
    6.14 +struct xen_hvm_inject_trap {
    6.15 +    /* Domain to inject the trap into. */
    6.16 +    domid_t domid;
    6.17 +    /* VCPU to inject the trap into. */
    6.18 +    uint32_t vcpuid;
    6.19 +    /* Trap number */
    6.20 +    uint32_t trap;
    6.21 +    /* Error code, or -1 to skip */
    6.22 +    uint32_t error_code;
    6.23 +    /* CR2 for page faults */
    6.24 +    uint64_t cr2;
    6.25 +};
    6.26 +typedef struct xen_hvm_inject_trap xen_hvm_inject_trap_t;
    6.27 +DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_trap_t);
    6.28 +
    6.29  #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
     7.1 --- a/xen/include/public/hvm/params.h	Fri Jan 07 11:54:45 2011 +0000
     7.2 +++ b/xen/include/public/hvm/params.h	Fri Jan 07 11:54:48 2011 +0000
     7.3 @@ -124,6 +124,19 @@
     7.4   */
     7.5  #define HVM_PARAM_ACPI_IOPORTS_LOCATION 19
     7.6  
     7.7 -#define HVM_NR_PARAMS          20
     7.8 +/* Enable blocking memory events, either async or sync (pausing the vcpu
     7.9 + * until a response); onchangeonly requests events only on a change of value. */
    7.10 +#define HVM_PARAM_MEMORY_EVENT_CR0   20
    7.11 +#define HVM_PARAM_MEMORY_EVENT_CR3   21
    7.12 +#define HVM_PARAM_MEMORY_EVENT_CR4   22
    7.13 +#define HVM_PARAM_MEMORY_EVENT_INT3  23
    7.14 +
    7.15 +#define HVMPME_MODE_MASK       (3 << 0)
    7.16 +#define HVMPME_mode_disabled   0
    7.17 +#define HVMPME_mode_async      1
    7.18 +#define HVMPME_mode_sync       2
    7.19 +#define HVMPME_onchangeonly    (1 << 2)
    7.20 +
    7.21 +#define HVM_NR_PARAMS          24
    7.22  
    7.23  #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
     8.1 --- a/xen/include/public/mem_event.h	Fri Jan 07 11:54:45 2011 +0000
     8.2 +++ b/xen/include/public/mem_event.h	Fri Jan 07 11:54:48 2011 +0000
     8.3 @@ -37,6 +37,10 @@
     8.4  /* Reasons for the memory event request */
     8.5  #define MEM_EVENT_REASON_UNKNOWN     0    /* typical reason */
     8.6  #define MEM_EVENT_REASON_VIOLATION   1    /* access violation, GFN is address */
     8.7 +#define MEM_EVENT_REASON_CR0         2    /* CR0 was hit: gfn is CR0 value */
     8.8 +#define MEM_EVENT_REASON_CR3         3    /* CR3 was hit: gfn is CR3 value */
     8.9 +#define MEM_EVENT_REASON_CR4         4    /* CR4 was hit: gfn is CR4 value */
    8.10 +#define MEM_EVENT_REASON_INT3        5    /* int3 was hit: gla/gfn are RIP */
    8.11  
    8.12  typedef struct mem_event_shared_page {
    8.13      uint32_t port;
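
On the receiving side, a listener drains the shared mem_event ring and, for
synchronous events, must post a response so the hypervisor unpauses the VCPU.
A sketch of the INT3 case, assuming the ring page has already been set up and
initialised as a mem_event back ring (BACK_RING_INIT), in the style of the
xenpaging tool of this era; event-channel setup and notification are omitted:

    /* Sketch: consume one request and resume the paused VCPU.
     * 'ring' is an already-initialised mem_event_back_ring_t mapped over
     * the shared page negotiated at mem_event setup (not shown). */
    #include <stdio.h>
    #include <string.h>
    #include <xen/mem_event.h>

    static void handle_one_event(mem_event_back_ring_t *ring)
    {
        mem_event_request_t req;
        mem_event_response_t rsp;
        RING_IDX cons = ring->req_cons;

        memcpy(&req, RING_GET_REQUEST(ring, cons), sizeof(req));
        ring->req_cons = cons + 1;

        if ( req.reason == MEM_EVENT_REASON_INT3 )
            fprintf(stderr, "int3 at rip %#llx (gfn %#llx) on vcpu %u\n",
                    (unsigned long long)req.gla,
                    (unsigned long long)req.gfn, req.vcpu_id);

        memset(&rsp, 0, sizeof(rsp));
        rsp.vcpu_id = req.vcpu_id;
        /* Echo MEM_EVENT_FLAG_VCPU_PAUSED so the VCPU is unpaused. */
        rsp.flags = req.flags;

        memcpy(RING_GET_RESPONSE(ring, ring->rsp_prod_pvt), &rsp,
               sizeof(rsp));
        ring->rsp_prod_pvt++;
        RING_PUSH_RESPONSES(ring);
        /* Kick the hypervisor via the mem_event event channel (not shown). */
    }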