debuggers.hg

changeset 22714:02efc054da7b

mem_access: mem event additions for access

* Adds an ACCESS memory event type, with RESUME as the action.

* Refactors the bits in the memory event to store whether the access
was a read, write, or execute (for access memory events only). I used
bits sparingly to keep the structure somewhat the same size.

* Modified VMX to report the needed information in its nested page fault.
SVM is not implemented in this patch series.

Signed-off-by: Joe Epstein <jepstein98@gmail.com>
Acked-by: Keir Fraser <keir@xen.org>
Acked-by: Tim Deegan <Tim.Deegan@citrix.com>
author Joe Epstein <jepstein98@gmail.com>
date Fri Jan 07 11:54:40 2011 +0000 (2011-01-07)
parents f14b296d263f
children 8af5bab1bf43
files tools/xenpaging/xenpaging.c xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/mm/Makefile xen/arch/x86/mm/mem_access.c xen/arch/x86/mm/mem_event.c xen/arch/x86/mm/p2m.c xen/include/asm-x86/hvm/hvm.h xen/include/asm-x86/mem_access.h xen/include/asm-x86/mem_event.h xen/include/asm-x86/p2m.h xen/include/public/domctl.h xen/include/public/mem_event.h
line diff
     1.1 --- a/tools/xenpaging/xenpaging.c	Fri Jan 07 11:54:36 2011 +0000
     1.2 +++ b/tools/xenpaging/xenpaging.c	Fri Jan 07 11:54:40 2011 +0000
     1.3 @@ -658,7 +658,7 @@ int main(int argc, char *argv[])
     1.4              {
     1.5                  DPRINTF("page already populated (domain = %d; vcpu = %d;"
     1.6                          " p2mt = %x;"
     1.7 -                        " gfn = %"PRIx64"; paused = %"PRId64")\n",
     1.8 +                        " gfn = %"PRIx64"; paused = %d)\n",
     1.9                          paging->mem_event.domain_id, req.vcpu_id,
    1.10                          req.p2mt,
    1.11                          req.gfn, req.flags & MEM_EVENT_FLAG_VCPU_PAUSED);
     2.1 --- a/xen/arch/x86/hvm/hvm.c	Fri Jan 07 11:54:36 2011 +0000
     2.2 +++ b/xen/arch/x86/hvm/hvm.c	Fri Jan 07 11:54:40 2011 +0000
     2.3 @@ -61,6 +61,8 @@
     2.4  #include <public/hvm/ioreq.h>
     2.5  #include <public/version.h>
     2.6  #include <public/memory.h>
     2.7 +#include <asm/mem_event.h>
     2.8 +#include <public/mem_event.h>
     2.9  
    2.10  bool_t __read_mostly hvm_enabled;
    2.11  
    2.12 @@ -1086,14 +1088,64 @@ void hvm_triple_fault(void)
    2.13      domain_shutdown(v->domain, SHUTDOWN_reboot);
    2.14  }
    2.15  
    2.16 -bool_t hvm_hap_nested_page_fault(unsigned long gfn)
    2.17 +bool_t hvm_hap_nested_page_fault(unsigned long gpa,
    2.18 +                                 bool_t gla_valid,
    2.19 +                                 unsigned long gla,
    2.20 +                                 bool_t access_valid,
    2.21 +                                 bool_t access_r,
    2.22 +                                 bool_t access_w,
    2.23 +                                 bool_t access_x)
    2.24  {
    2.25 +    unsigned long gfn = gpa >> PAGE_SHIFT;
    2.26      p2m_type_t p2mt;
    2.27 +    p2m_access_t p2ma;
    2.28      mfn_t mfn;
    2.29      struct vcpu *v = current;
    2.30      struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
    2.31  
    2.32 -    mfn = gfn_to_mfn_guest(p2m, gfn, &p2mt);
    2.33 +    mfn = gfn_to_mfn_type_current(p2m, gfn, &p2mt, &p2ma, p2m_guest);
    2.34 +
    2.35 +    /* Check access permissions first, then handle faults */
    2.36 +    if ( access_valid && (mfn_x(mfn) != INVALID_MFN) )
    2.37 +    {
    2.38 +        int violation = 0;
    2.39 +        /* If the access is against the permissions, then send to mem_event */
    2.40 +        switch (p2ma) 
    2.41 +        {
    2.42 +        case p2m_access_n:
    2.43 +        default:
    2.44 +            violation = access_r || access_w || access_x;
    2.45 +            break;
    2.46 +        case p2m_access_r:
    2.47 +            violation = access_w || access_x;
    2.48 +            break;
    2.49 +        case p2m_access_w:
    2.50 +            violation = access_r || access_x;
    2.51 +            break;
    2.52 +        case p2m_access_x:
    2.53 +            violation = access_r || access_w;
    2.54 +            break;
    2.55 +        case p2m_access_rx:
    2.56 +        case p2m_access_rx2rw:
    2.57 +            violation = access_w;
    2.58 +            break;
    2.59 +        case p2m_access_wx:
    2.60 +            violation = access_r;
    2.61 +            break;
    2.62 +        case p2m_access_rw:
    2.63 +            violation = access_x;
    2.64 +            break;
    2.65 +        case p2m_access_rwx:
    2.66 +            break;
    2.67 +        }
    2.68 +
    2.69 +        if ( violation )
    2.70 +        {
    2.71 +            p2m_mem_access_check(gpa, gla_valid, gla, access_r, access_w, access_x);
    2.72 +
    2.73 +            return 1;
    2.74 +        }
    2.75 +    }
    2.76  
    2.77      /*
    2.78       * If this GFN is emulated MMIO or marked as read-only, pass the fault
     3.1 --- a/xen/arch/x86/hvm/svm/svm.c	Fri Jan 07 11:54:36 2011 +0000
     3.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Fri Jan 07 11:54:40 2011 +0000
     3.3 @@ -979,7 +979,7 @@ static void svm_do_nested_pgfault(paddr_
     3.4          __trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
     3.5      }
     3.6  
     3.7 -    if ( hvm_hap_nested_page_fault(gfn) )
     3.8 +    if ( hvm_hap_nested_page_fault(gpa, 0, ~0ull, 0, 0, 0, 0) )
     3.9          return;
    3.10  
    3.11      /* Everything else is an error. */
     4.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Fri Jan 07 11:54:36 2011 +0000
     4.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Fri Jan 07 11:54:40 2011 +0000
     4.3 @@ -2079,7 +2079,14 @@ static void ept_handle_violation(unsigne
     4.4          __trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
     4.5      }
     4.6  
     4.7 -    if ( hvm_hap_nested_page_fault(gfn) )
     4.8 +    if ( hvm_hap_nested_page_fault(gpa,
     4.9 +                                   qualification & EPT_GLA_VALID       ? 1 : 0,
    4.10 +                                   qualification & EPT_GLA_VALID
    4.11 +                                     ? __vmread(GUEST_LINEAR_ADDRESS) : ~0ull,
    4.12 +                                   1, /* access types are as follows */
    4.13 +                                   qualification & EPT_READ_VIOLATION  ? 1 : 0,
    4.14 +                                   qualification & EPT_WRITE_VIOLATION ? 1 : 0,
    4.15 +                                   qualification & EPT_EXEC_VIOLATION  ? 1 : 0) )
    4.16          return;
    4.17  
    4.18      /* Everything else is an error. */
     5.1 --- a/xen/arch/x86/mm/Makefile	Fri Jan 07 11:54:36 2011 +0000
     5.2 +++ b/xen/arch/x86/mm/Makefile	Fri Jan 07 11:54:40 2011 +0000
     5.3 @@ -9,6 +9,7 @@ obj-$(x86_64) += guest_walk_4.o
     5.4  obj-$(x86_64) += mem_event.o
     5.5  obj-$(x86_64) += mem_paging.o
     5.6  obj-$(x86_64) += mem_sharing.o
     5.7 +obj-$(x86_64) += mem_access.o
     5.8  
     5.9  guest_walk_%.o: guest_walk.c Makefile
    5.10  	$(CC) $(CFLAGS) -DGUEST_PAGING_LEVELS=$* -c $< -o $@
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/xen/arch/x86/mm/mem_access.c	Fri Jan 07 11:54:40 2011 +0000
     6.3 @@ -0,0 +1,59 @@
     6.4 +/******************************************************************************
     6.5 + * arch/x86/mm/mem_access.c
     6.6 + *
     6.7 + * Memory access support.
     6.8 + *
     6.9 + * Copyright (c) 2011 Virtuata, Inc.
    6.10 + *
    6.11 + * This program is free software; you can redistribute it and/or modify
    6.12 + * it under the terms of the GNU General Public License as published by
    6.13 + * the Free Software Foundation; either version 2 of the License, or
    6.14 + * (at your option) any later version.
    6.15 + *
    6.16 + * This program is distributed in the hope that it will be useful,
    6.17 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
    6.18 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    6.19 + * GNU General Public License for more details.
    6.20 + *
    6.21 + * You should have received a copy of the GNU General Public License
    6.22 + * along with this program; if not, write to the Free Software
    6.23 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
    6.24 + */
    6.25 +
    6.26 +
    6.27 +#include <asm/p2m.h>
    6.28 +#include <asm/mem_event.h>
    6.29 +
    6.30 +
    6.31 +int mem_access_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
    6.32 +                      XEN_GUEST_HANDLE(void) u_domctl)
    6.33 +{
    6.34 +    int rc;
    6.35 +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
    6.36 +
    6.37 +    switch( mec->op )
    6.38 +    {
    6.39 +    case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_RESUME:
    6.40 +    {
    6.41 +        p2m_mem_access_resume(p2m);
    6.42 +        rc = 0;
    6.43 +    }
    6.44 +    break;
    6.45 +
    6.46 +    default:
    6.47 +        rc = -ENOSYS;
    6.48 +        break;
    6.49 +    }
    6.50 +
    6.51 +    return rc;
    6.52 +}
    6.53 +
    6.54 +
    6.55 +/*
    6.56 + * Local variables:
    6.57 + * mode: C
    6.58 + * c-set-style: "BSD"
    6.59 + * c-basic-offset: 4
    6.60 + * indent-tabs-mode: nil
    6.61 + * End:
    6.62 + */
     7.1 --- a/xen/arch/x86/mm/mem_event.c	Fri Jan 07 11:54:36 2011 +0000
     7.2 +++ b/xen/arch/x86/mm/mem_event.c	Fri Jan 07 11:54:40 2011 +0000
     7.3 @@ -26,6 +26,7 @@
     7.4  #include <asm/p2m.h>
     7.5  #include <asm/mem_event.h>
     7.6  #include <asm/mem_paging.h>
     7.7 +#include <asm/mem_access.h>
     7.8  
     7.9  /* for public/io/ring.h macros */
    7.10  #define xen_mb()   mb()
    7.11 @@ -67,6 +68,9 @@ static int mem_event_enable(struct domai
    7.12  
    7.13      mem_event_ring_lock_init(d);
    7.14  
    7.15 +    /* Wake any VCPUs paused for memory events */
    7.16 +    mem_event_unpause_vcpus(d);
    7.17 +
    7.18      return 0;
    7.19  
    7.20   err_shared:
    7.21 @@ -143,12 +147,21 @@ void mem_event_unpause_vcpus(struct doma
    7.22              vcpu_wake(v);
    7.23  }
    7.24  
    7.25 +void mem_event_mark_and_pause(struct vcpu *v)
    7.26 +{
    7.27 +    set_bit(_VPF_mem_event, &v->pause_flags);
    7.28 +    vcpu_sleep_nosync(v);
    7.29 +}
    7.30 +
    7.31  int mem_event_check_ring(struct domain *d)
    7.32  {
    7.33      struct vcpu *curr = current;
    7.34      int free_requests;
    7.35      int ring_full;
    7.36  
    7.37 +    if ( !d->mem_event.ring_page )
    7.38 +        return -1;
    7.39 +
    7.40      mem_event_ring_lock(d);
    7.41  
    7.42      free_requests = RING_FREE_REQUESTS(&d->mem_event.front_ring);
    7.43 @@ -157,7 +170,7 @@ int mem_event_check_ring(struct domain *
    7.44          gdprintk(XENLOG_INFO, "free request slots: %d\n", free_requests);
    7.45          WARN_ON(free_requests == 0);
    7.46      }
    7.47 -    ring_full = free_requests < MEM_EVENT_RING_THRESHOLD;
    7.48 +    ring_full = free_requests < MEM_EVENT_RING_THRESHOLD ? 1 : 0;
    7.49  
    7.50      if ( (curr->domain->domain_id == d->domain_id) && ring_full )
    7.51      {
    7.52 @@ -203,7 +216,11 @@ int mem_event_domctl(struct domain *d, x
    7.53          return rc;
    7.54  #endif
    7.55  
    7.56 -    if ( mec->mode == 0 )
    7.57 +    rc = -ENOSYS;
    7.58 +
    7.59 +    switch ( mec-> mode ) 
    7.60 +    {
    7.61 +    case 0:
    7.62      {
    7.63          switch( mec->op )
    7.64          {
    7.65 @@ -268,13 +285,18 @@ int mem_event_domctl(struct domain *d, x
    7.66              rc = -ENOSYS;
    7.67              break;
    7.68          }
    7.69 +        break;
    7.70      }
    7.71 -    else
    7.72 +    case XEN_DOMCTL_MEM_EVENT_OP_PAGING:
    7.73      {
    7.74 -        rc = -ENOSYS;
    7.75 -
    7.76 -        if ( mec->mode & XEN_DOMCTL_MEM_EVENT_OP_PAGING )
    7.77 -            rc = mem_paging_domctl(d, mec, u_domctl);
    7.78 +        rc = mem_paging_domctl(d, mec, u_domctl);
    7.79 +        break;
    7.80 +    }
    7.81 +    case XEN_DOMCTL_MEM_EVENT_OP_ACCESS: 
    7.82 +    {
    7.83 +        rc = mem_access_domctl(d, mec, u_domctl);
    7.84 +        break;
    7.85 +    }
    7.86      }
    7.87  
    7.88      return rc;
     8.1 --- a/xen/arch/x86/mm/p2m.c	Fri Jan 07 11:54:36 2011 +0000
     8.2 +++ b/xen/arch/x86/mm/p2m.c	Fri Jan 07 11:54:40 2011 +0000
     8.3 @@ -2858,6 +2858,97 @@ void p2m_mem_paging_resume(struct p2m_do
     8.4  }
     8.5  #endif /* __x86_64__ */
     8.6  
     8.7 +void p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla, 
     8.8 +                          bool_t access_r, bool_t access_w, bool_t access_x)
     8.9 +{
    8.10 +    struct vcpu *v = current;
    8.11 +    mem_event_request_t req;
    8.12 +    unsigned long gfn = gpa >> PAGE_SHIFT;
    8.13 +    struct domain *d = v->domain;    
    8.14 +    struct p2m_domain* p2m = p2m_get_hostp2m(d);
    8.15 +    int res;
    8.16 +    mfn_t mfn;
    8.17 +    p2m_type_t p2mt;
    8.18 +    p2m_access_t p2ma;
    8.19 +    
    8.20 +    /* First, handle rx2rw conversion automatically */
    8.21 +    p2m_lock(p2m);
    8.22 +    mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, p2m_query);
    8.23 +
    8.24 +    if ( access_w && p2ma == p2m_access_rx2rw ) 
    8.25 +    {
    8.26 +        p2m->set_entry(p2m, gfn, mfn, 0, p2mt, p2m_access_rw);
    8.27 +        p2m_unlock(p2m);
    8.28 +        return;
    8.29 +    }
    8.30 +    p2m_unlock(p2m);
    8.31 +
    8.32 +    /* Otherwise, check if there is a memory event listener, and send the message along */
    8.33 +    res = mem_event_check_ring(d);
    8.34 +    if ( res < 0 ) 
    8.35 +    {
    8.36 +        /* No listener */
    8.37 +        if ( p2m->access_required ) 
    8.38 +        {
    8.39 +            printk(XENLOG_INFO 
    8.40 +                   "Memory access permissions failure, no mem_event listener: pausing VCPU %d, dom %d\n",
    8.41 +                   v->vcpu_id, d->domain_id);
    8.42 +
    8.43 +            mem_event_mark_and_pause(v);
    8.44 +        }
    8.45 +        else
    8.46 +        {
    8.47 +            /* A listener is not required, so clear the access restrictions */
    8.48 +            p2m_lock(p2m);
    8.49 +            p2m->set_entry(p2m, gfn, mfn, 0, p2mt, p2m_access_rwx);
    8.50 +            p2m_unlock(p2m);
    8.51 +        }
    8.52 +
    8.53 +        return;
    8.54 +    }
    8.55 +    else if ( res > 0 )
    8.56 +        return;  /* No space in buffer; VCPU paused */
    8.57 +
    8.58 +    memset(&req, 0, sizeof(req));
    8.59 +    req.type = MEM_EVENT_TYPE_ACCESS;
    8.60 +    req.reason = MEM_EVENT_REASON_VIOLATION;
    8.61 +
    8.62 +    /* Pause the current VCPU unconditionally */
    8.63 +    vcpu_pause_nosync(v);
    8.64 +    req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;    
    8.65 +
    8.66 +    /* Send request to mem event */
    8.67 +    req.gfn = gfn;
    8.68 +    req.offset = gpa & ((1 << PAGE_SHIFT) - 1);
    8.69 +    req.gla_valid = gla_valid;
    8.70 +    req.gla = gla;
    8.71 +    req.access_r = access_r;
    8.72 +    req.access_w = access_w;
    8.73 +    req.access_x = access_x;
    8.74 +    
    8.75 +    req.vcpu_id = v->vcpu_id;
    8.76 +
    8.77 +    mem_event_put_request(d, &req);   
    8.78 +
    8.79 +    /* VCPU paused, mem event request sent */
    8.80 +}
    8.81 +
    8.82 +void p2m_mem_access_resume(struct p2m_domain *p2m)
    8.83 +{
    8.84 +    struct domain *d = p2m->domain;
    8.85 +    mem_event_response_t rsp;
    8.86 +
    8.87 +    mem_event_get_response(d, &rsp);
    8.88 +
    8.89 +    /* Unpause domain */
    8.90 +    if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
    8.91 +        vcpu_unpause(d->vcpu[rsp.vcpu_id]);
    8.92 +
    8.93 +    /* Unpause any VCPUs that were paused because the ring was full or no listener
    8.94 +     * was available */
    8.95 +    mem_event_unpause_vcpus(d);
    8.96 +}
    8.97 +
    8.98  /*
    8.99   * Local variables:
   8.100   * mode: C
     9.1 --- a/xen/include/asm-x86/hvm/hvm.h	Fri Jan 07 11:54:36 2011 +0000
     9.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Fri Jan 07 11:54:40 2011 +0000
     9.3 @@ -356,7 +356,12 @@ static inline void hvm_set_info_guest(st
     9.4  
     9.5  int hvm_debug_op(struct vcpu *v, int32_t op);
     9.6  
     9.7 -bool_t hvm_hap_nested_page_fault(unsigned long gfn);
     9.8 +bool_t hvm_hap_nested_page_fault(unsigned long gpa,
     9.9 +                                 bool_t gla_valid, unsigned long gla,
    9.10 +                                 bool_t access_valid, 
    9.11 +                                 bool_t access_r,
    9.12 +                                 bool_t access_w,
    9.13 +                                 bool_t access_x);
    9.14  
    9.15  #define hvm_msr_tsc_aux(v) ({                                               \
    9.16      struct domain *__d = (v)->domain;                                       \
    10.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.2 +++ b/xen/include/asm-x86/mem_access.h	Fri Jan 07 11:54:40 2011 +0000
    10.3 @@ -0,0 +1,35 @@
    10.4 +/******************************************************************************
    10.5 + * include/asm-x86/mem_access.h
    10.6 + *
    10.7 + * Memory access support.
    10.8 + *
    10.9 + * Copyright (c) 2011 Virtuata, Inc.
   10.10 + *
   10.11 + * This program is free software; you can redistribute it and/or modify
   10.12 + * it under the terms of the GNU General Public License as published by
   10.13 + * the Free Software Foundation; either version 2 of the License, or
   10.14 + * (at your option) any later version.
   10.15 + *
   10.16 + * This program is distributed in the hope that it will be useful,
   10.17 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
   10.18 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   10.19 + * GNU General Public License for more details.
   10.20 + *
   10.21 + * You should have received a copy of the GNU General Public License
   10.22 + * along with this program; if not, write to the Free Software
   10.23 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
   10.24 + */
   10.25 +
   10.26 +
   10.27 +int mem_access_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
   10.28 +                      XEN_GUEST_HANDLE(void) u_domctl);
   10.29 +
   10.30 +
   10.31 +/*
   10.32 + * Local variables:
   10.33 + * mode: C
   10.34 + * c-set-style: "BSD"
   10.35 + * c-basic-offset: 4
   10.36 + * indent-tabs-mode: nil
   10.37 + * End:
   10.38 + */
    11.1 --- a/xen/include/asm-x86/mem_event.h	Fri Jan 07 11:54:36 2011 +0000
    11.2 +++ b/xen/include/asm-x86/mem_event.h	Fri Jan 07 11:54:40 2011 +0000
    11.3 @@ -24,6 +24,8 @@
    11.4  #ifndef __MEM_EVENT_H__
    11.5  #define __MEM_EVENT_H__
    11.6  
    11.7 +/* Pauses VCPU while marking pause flag for mem event */
    11.8 +void mem_event_mark_and_pause(struct vcpu *v);
    11.9  int mem_event_check_ring(struct domain *d);
   11.10  void mem_event_put_request(struct domain *d, mem_event_request_t *req);
   11.11  void mem_event_get_response(struct domain *d, mem_event_response_t *rsp);
    12.1 --- a/xen/include/asm-x86/p2m.h	Fri Jan 07 11:54:36 2011 +0000
    12.2 +++ b/xen/include/asm-x86/p2m.h	Fri Jan 07 11:54:40 2011 +0000
    12.3 @@ -522,6 +522,13 @@ static inline void p2m_mem_paging_popula
    12.4  { }
    12.5  #endif
    12.6  
    12.7 +/* Send mem event based on the access (gla is -1ull if not available).  Handles
    12.8 + * the rx2rw conversion */
    12.9 +void p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla, 
   12.10 +                          bool_t access_r, bool_t access_w, bool_t access_x);
   12.11 +/* Resumes the running of the VCPU, restarting the last instruction */
   12.12 +void p2m_mem_access_resume(struct p2m_domain *p2m);
   12.13 +
   12.14  struct page_info *p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type);
   12.15  
   12.16  #endif /* _XEN_P2M_H */
    13.1 --- a/xen/include/public/domctl.h	Fri Jan 07 11:54:36 2011 +0000
    13.2 +++ b/xen/include/public/domctl.h	Fri Jan 07 11:54:40 2011 +0000
    13.3 @@ -714,7 +714,7 @@ struct xen_domctl_gdbsx_domstatus {
    13.4  /*
    13.5   * Page memory in and out. 
    13.6   */
    13.7 -#define XEN_DOMCTL_MEM_EVENT_OP_PAGING (1 << 0)
    13.8 +#define XEN_DOMCTL_MEM_EVENT_OP_PAGING            1
    13.9  
   13.10  /* Domain memory paging */
   13.11  #define XEN_DOMCTL_MEM_EVENT_OP_PAGING_NOMINATE   0
   13.12 @@ -722,6 +722,19 @@ struct xen_domctl_gdbsx_domstatus {
   13.13  #define XEN_DOMCTL_MEM_EVENT_OP_PAGING_PREP       2
   13.14  #define XEN_DOMCTL_MEM_EVENT_OP_PAGING_RESUME     3
   13.15  
   13.16 +/*
   13.17 + * Access permissions.
   13.18 + *
   13.19 + * There are HVM hypercalls to set the per-page access permissions of every
   13.20 + * page in a domain.  When one of these permissions--independent, read, 
   13.21 + * write, and execute--is violated, the VCPU is paused and a memory event 
   13.22 + * is sent with what happened.  (See public/mem_event.h)  The memory event 
   13.23 + * handler can then resume the VCPU and redo the access with an 
   13.24 + * ACCESS_RESUME mode for the following domctl.
   13.25 + */
   13.26 +#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS            2
   13.27 +#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_RESUME     0 
   13.28 +
   13.29  struct xen_domctl_mem_event_op {
   13.30      uint32_t       op;           /* XEN_DOMCTL_MEM_EVENT_OP_* */
   13.31      uint32_t       mode;         /* XEN_DOMCTL_MEM_EVENT_ENABLE_* */
    14.1 --- a/xen/include/public/mem_event.h	Fri Jan 07 11:54:36 2011 +0000
    14.2 +++ b/xen/include/public/mem_event.h	Fri Jan 07 11:54:40 2011 +0000
    14.3 @@ -26,18 +26,40 @@
    14.4  #include "xen.h"
    14.5  #include "io/ring.h"
    14.6  
    14.7 +/* Memory event type */
    14.8 +#define MEM_EVENT_TYPE_SHARED   0
    14.9 +#define MEM_EVENT_TYPE_PAGING   1
   14.10 +#define MEM_EVENT_TYPE_ACCESS   2
   14.11 +
   14.12  /* Memory event flags */
   14.13  #define MEM_EVENT_FLAG_VCPU_PAUSED  (1 << 0)
   14.14  
   14.15 +/* Reasons for the memory event request */
   14.16 +#define MEM_EVENT_REASON_UNKNOWN     0    /* typical reason */
   14.17 +#define MEM_EVENT_REASON_VIOLATION   1    /* access violation, GFN is address */
   14.18 +
   14.19  typedef struct mem_event_shared_page {
   14.20      uint32_t port;
   14.21  } mem_event_shared_page_t;
   14.22  
   14.23  typedef struct mem_event_st {
   14.24 +    uint16_t type;
   14.25 +    uint16_t flags;
   14.26 +    uint32_t vcpu_id;
   14.27 +
   14.28      uint64_t gfn;
   14.29 +    uint64_t offset;
   14.30 +    uint64_t gla; /* if gla_valid */
   14.31 +
   14.32      uint32_t p2mt;
   14.33 -    uint32_t vcpu_id;
   14.34 -    uint64_t flags;
   14.35 +
   14.36 +    uint16_t access_r:1;
   14.37 +    uint16_t access_w:1;
   14.38 +    uint16_t access_x:1;
   14.39 +    uint16_t gla_valid:1;
   14.40 +    uint16_t available:12;
   14.41 +
   14.42 +    uint16_t reason;
   14.43  } mem_event_request_t, mem_event_response_t;
   14.44  
   14.45  DEFINE_RING_TYPES(mem_event, mem_event_request_t, mem_event_response_t);