debuggers.hg

changeset 20924:6ade83cb21ca

xentrace: Trace p2m events

Add more tracing to aid in debugging ballooning / PoD:
* Nested page faults for EPT/NPT systems
* set_p2m_entry
* Decrease reservation (for ballooning)
* PoD populate, zero reclaim, superpage splinter

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Feb 03 09:35:23 2010 +0000 (2010-02-03)
parents 3312e31dcdeb
children 2a07df55c08a
files xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/mm/p2m.c xen/common/memory.c xen/include/public/trace.h
line diff
     1.1 --- a/xen/arch/x86/hvm/svm/svm.c	Wed Feb 03 09:33:12 2010 +0000
     1.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Wed Feb 03 09:35:23 2010 +0000
     1.3 @@ -893,6 +893,22 @@ static void svm_do_nested_pgfault(paddr_
     1.4      mfn_t mfn;
     1.5      p2m_type_t p2mt;
     1.6  
     1.7 +    if ( tb_init_done )
     1.8 +    {
     1.9 +        struct {
    1.10 +            uint64_t gpa;
    1.11 +            uint64_t mfn;
    1.12 +            u32 qualification;
    1.13 +            u32 p2mt;
    1.14 +        } _d;
    1.15 +
    1.16 +        _d.gpa = gpa;
    1.17 +        _d.qualification = 0;
    1.18 +        _d.mfn = mfn_x(gfn_to_mfn_query(current->domain, gfn, &_d.p2mt));
    1.19 +        
    1.20 +        __trace_var(TRC_HVM_NPF, 0, sizeof(_d), (unsigned char *)&_d);
    1.21 +    }
    1.22 +
    1.23      if ( hvm_hap_nested_page_fault(gfn) )
    1.24          return;
    1.25  
     2.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Wed Feb 03 09:33:12 2010 +0000
     2.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Feb 03 09:35:23 2010 +0000
     2.3 @@ -2100,6 +2100,22 @@ static void ept_handle_violation(unsigne
     2.4      mfn_t mfn;
     2.5      p2m_type_t p2mt;
     2.6  
     2.7 +    if ( tb_init_done )
     2.8 +    {
     2.9 +        struct {
    2.10 +            uint64_t gpa;
    2.11 +            uint64_t mfn;
    2.12 +            u32 qualification;
    2.13 +            u32 p2mt;
    2.14 +        } _d;
    2.15 +
    2.16 +        _d.gpa = gpa;
    2.17 +        _d.qualification = qualification;
    2.18 +        _d.mfn = mfn_x(gfn_to_mfn_query(current->domain, gfn, &_d.p2mt));
    2.19 +        
    2.20 +        __trace_var(TRC_HVM_NPF, 0, sizeof(_d), (unsigned char *)&_d);
    2.21 +    }
    2.22 +
    2.23      if ( (qualification & EPT_GLA_VALID) &&
    2.24           hvm_hap_nested_page_fault(gfn) )
    2.25          return;
     3.1 --- a/xen/arch/x86/mm/p2m.c	Wed Feb 03 09:33:12 2010 +0000
     3.2 +++ b/xen/arch/x86/mm/p2m.c	Wed Feb 03 09:35:23 2010 +0000
     3.3 @@ -829,6 +829,21 @@ p2m_pod_zero_check_superpage(struct doma
     3.4              goto out_reset;
     3.5      }
     3.6  
     3.7 +    if ( tb_init_done )
     3.8 +    {
     3.9 +        struct {
    3.10 +            u64 gfn, mfn;
    3.11 +            int d:16,order:16;
    3.12 +        } t;
    3.13 +
    3.14 +        t.gfn = gfn;
    3.15 +        t.mfn = mfn_x(mfn);
    3.16 +        t.d = d->domain_id;
    3.17 +        t.order = 9;
    3.18 +
    3.19 +        __trace_var(TRC_MEM_POD_ZERO_RECLAIM, 0, sizeof(t), (unsigned char *)&t);
    3.20 +    }
    3.21 +
    3.22      /* Finally!  We've passed all the checks, and can add the mfn superpage
    3.23       * back on the PoD cache, and account for the new p2m PoD entries */
    3.24      p2m_pod_cache_add(d, mfn_to_page(mfn0), 9);
    3.25 @@ -928,6 +943,21 @@ p2m_pod_zero_check(struct domain *d, uns
    3.26          }
    3.27          else
    3.28          {
    3.29 +            if ( tb_init_done )
    3.30 +            {
    3.31 +                struct {
    3.32 +                    u64 gfn, mfn;
    3.33 +                    int d:16,order:16;
    3.34 +                } t;
    3.35 +
    3.36 +                t.gfn = gfns[i];
    3.37 +                t.mfn = mfn_x(mfns[i]);
    3.38 +                t.d = d->domain_id;
    3.39 +                t.order = 0;
    3.40 +        
    3.41 +                __trace_var(TRC_MEM_POD_ZERO_RECLAIM, 0, sizeof(t), (unsigned char *)&t);
    3.42 +            }
    3.43 +
    3.44              /* Add to cache, and account for the new p2m PoD entry */
    3.45              p2m_pod_cache_add(d, mfn_to_page(mfns[i]), 0);
    3.46              d->arch.p2m->pod.entry_count++;
    3.47 @@ -1073,6 +1103,21 @@ p2m_pod_demand_populate(struct domain *d
    3.48      p2md->pod.entry_count -= (1 << order); /* Lock: p2m */
    3.49      BUG_ON(p2md->pod.entry_count < 0);
    3.50  
    3.51 +    if ( tb_init_done )
    3.52 +    {
    3.53 +        struct {
    3.54 +            u64 gfn, mfn;
    3.55 +            int d:16,order:16;
    3.56 +        } t;
    3.57 +
    3.58 +        t.gfn = gfn;
    3.59 +        t.mfn = mfn_x(mfn);
    3.60 +        t.d = d->domain_id;
    3.61 +        t.order = order;
    3.62 +        
    3.63 +        __trace_var(TRC_MEM_POD_POPULATE, 0, sizeof(t), (unsigned char *)&t);
    3.64 +    }
    3.65 +
    3.66      return 0;
    3.67  out_of_memory:
    3.68      spin_unlock(&d->page_alloc_lock);
    3.69 @@ -1091,6 +1136,18 @@ remap_and_retry:
    3.70      for(i=0; i<(1<<order); i++)
    3.71          set_p2m_entry(d, gfn_aligned+i, _mfn(POPULATE_ON_DEMAND_MFN), 0,
    3.72                        p2m_populate_on_demand);
    3.73 +    if ( tb_init_done )
    3.74 +    {
    3.75 +        struct {
    3.76 +            u64 gfn;
    3.77 +            int d:16;
    3.78 +        } t;
    3.79 +
    3.80 +        t.gfn = gfn;
    3.81 +        t.d = d->domain_id;
    3.82 +        
    3.83 +        __trace_var(TRC_MEM_POD_SUPERPAGE_SPLINTER, 0, sizeof(t), (unsigned char *)&t);
    3.84 +    }
    3.85  
    3.86      return 0;
    3.87  }
    3.88 @@ -1141,6 +1198,23 @@ p2m_set_entry(struct domain *d, unsigned
    3.89      l2_pgentry_t l2e_content;
    3.90      int rv=0;
    3.91  
    3.92 +    if ( tb_init_done )
    3.93 +    {
    3.94 +        struct {
    3.95 +            u64 gfn, mfn;
    3.96 +            int p2mt;
    3.97 +            int d:16,order:16;
    3.98 +        } t;
    3.99 +
   3.100 +        t.gfn = gfn;
   3.101 +        t.mfn = mfn_x(mfn);
   3.102 +        t.p2mt = p2mt;
   3.103 +        t.d = d->domain_id;
   3.104 +        t.order = page_order;
   3.105 +
   3.106 +        __trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), (unsigned char *)&t);
   3.107 +    }
   3.108 +
   3.109  #if CONFIG_PAGING_LEVELS >= 4
   3.110      if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
   3.111                           L4_PAGETABLE_SHIFT - PAGE_SHIFT,
   3.112 @@ -1225,7 +1299,7 @@ p2m_set_entry(struct domain *d, unsigned
   3.113      /* Success */
   3.114      rv = 1;
   3.115  
   3.116 - out:
   3.117 +out:
   3.118      unmap_domain_page(table);
   3.119      return rv;
   3.120  }
     4.1 --- a/xen/common/memory.c	Wed Feb 03 09:33:12 2010 +0000
     4.2 +++ b/xen/common/memory.c	Wed Feb 03 09:35:23 2010 +0000
     4.3 @@ -28,6 +28,7 @@
     4.4  #include <xen/numa.h>
     4.5  #include <public/memory.h>
     4.6  #include <xsm/xsm.h>
     4.7 +#include <xen/trace.h>
     4.8  
     4.9  struct memop_args {
    4.10      /* INPUT */
    4.11 @@ -222,6 +223,20 @@ static void decrease_reservation(struct 
    4.12          if ( unlikely(__copy_from_guest_offset(&gmfn, a->extent_list, i, 1)) )
    4.13              goto out;
    4.14  
    4.15 +        if ( tb_init_done )
    4.16 +        {
    4.17 +            struct {
    4.18 +                u64 gfn;
    4.19 +                int d:16,order:16;
    4.20 +            } t;
    4.21 +
    4.22 +            t.gfn = gmfn;
    4.23 +            t.d = a->domain->domain_id;
    4.24 +            t.order = a->extent_order;
    4.25 +        
    4.26 +            __trace_var(TRC_MEM_DECREASE_RESERVATION, 0, sizeof(t), (unsigned char *)&t);
    4.27 +        }
    4.28 +
    4.29          /* See if populate-on-demand wants to handle this */
    4.30          if ( is_hvm_domain(a->domain)
    4.31               && p2m_pod_decrease_reservation(a->domain, gmfn, a->extent_order) )
     5.1 --- a/xen/include/public/trace.h	Wed Feb 03 09:33:12 2010 +0000
     5.2 +++ b/xen/include/public/trace.h	Wed Feb 03 09:35:23 2010 +0000
     5.3 @@ -82,6 +82,12 @@
     5.4  #define TRC_MEM_PAGE_GRANT_MAP      (TRC_MEM + 1)
     5.5  #define TRC_MEM_PAGE_GRANT_UNMAP    (TRC_MEM + 2)
     5.6  #define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3)
     5.7 +#define TRC_MEM_SET_P2M_ENTRY       (TRC_MEM + 4)
     5.8 +#define TRC_MEM_DECREASE_RESERVATION (TRC_MEM + 5)
     5.9 +#define TRC_MEM_POD_POPULATE        (TRC_MEM + 16)
    5.10 +#define TRC_MEM_POD_ZERO_RECLAIM    (TRC_MEM + 17)
    5.11 +#define TRC_MEM_POD_SUPERPAGE_SPLINTER (TRC_MEM + 18)
    5.12 +
    5.13  
    5.14  #define TRC_PV_HYPERCALL             (TRC_PV +  1)
    5.15  #define TRC_PV_TRAP                  (TRC_PV +  3)
    5.16 @@ -149,6 +155,8 @@
    5.17  #define TRC_HVM_LMSW            (TRC_HVM_HANDLER + 0x19)
    5.18  #define TRC_HVM_LMSW64          (TRC_HVM_HANDLER + TRC_64_FLAG + 0x19)
    5.19  #define TRC_HVM_INTR_WINDOW     (TRC_HVM_HANDLER + 0x20)
    5.20 +#define TRC_HVM_NPF             (TRC_HVM_HANDLER + 0x21)
    5.21 +
    5.22  #define TRC_HVM_IOPORT_WRITE    (TRC_HVM_HANDLER + 0x216)
    5.23  #define TRC_HVM_IOMEM_WRITE     (TRC_HVM_HANDLER + 0x217)
    5.24