
changeset 22751:c5b42971234a

xenpaging: drop paged pages in guest_remove_page

Simply drop paged pages in guest_remove_page(), and notify xenpaging
to drop its reference to the gfn. If the ring is full, the page will
remain in the paged-out state in xenpaging. This is not an issue; it
just means this gfn will not be nominated again.

Signed-off-by: Olaf Hering <olaf@aepfle.de>
author Keir Fraser <keir@xen.org>
date Tue Jan 11 10:38:28 2011 +0000 (2011-01-11)
parents ca590ccc7a0b
children ca10302ac285
files tools/xenpaging/xenpaging.c xen/arch/x86/mm/p2m.c xen/common/memory.c xen/include/asm-x86/p2m.h xen/include/public/mem_event.h
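
The description above boils down to a best-effort notification: a drop
request is queued only when the mem_event ring has room; otherwise the
gfn simply stays paged out and is never nominated again. The following
self-contained sketch illustrates that pattern in isolation. The ring,
request type and helper names (sketch_*) are stand-ins invented for
illustration, not the real mem_event API; the actual implementation is
p2m_mem_paging_drop_page() in the diff below, where mem_event_check_ring()
gates mem_event_put_request().

    #include <stdio.h>
    #include <stdint.h>

    #define SKETCH_FLAG_DROP_PAGE (1 << 1)   /* mirrors MEM_EVENT_FLAG_DROP_PAGE */
    #define SKETCH_RING_SIZE      4

    struct sketch_req {
        uint64_t gfn;
        uint32_t flags;
    };

    struct sketch_ring {
        struct sketch_req slots[SKETCH_RING_SIZE];
        unsigned int prod, cons;             /* free-running producer/consumer counters */
    };

    /* Returns 0 when there is room for one more request (the convention the
     * patch relies on for mem_event_check_ring()), -1 when the ring is full. */
    static int sketch_check_ring(const struct sketch_ring *r)
    {
        return (r->prod - r->cons) < SKETCH_RING_SIZE ? 0 : -1;
    }

    static void sketch_put_request(struct sketch_ring *r, const struct sketch_req *req)
    {
        r->slots[r->prod++ % SKETCH_RING_SIZE] = *req;
    }

    /* Best-effort drop notification: if the ring is full the request is simply
     * not sent; the gfn stays paged out and will not be nominated again. */
    static void sketch_drop_page(struct sketch_ring *r, uint64_t gfn)
    {
        if ( sketch_check_ring(r) == 0 )
        {
            struct sketch_req req = { .gfn = gfn, .flags = SKETCH_FLAG_DROP_PAGE };
            sketch_put_request(r, &req);
        }
    }

    int main(void)
    {
        struct sketch_ring ring = { .prod = 0, .cons = 0 };

        sketch_drop_page(&ring, 0x1234);     /* queued: the ring has space */
        printf("requests queued: %u\n", ring.prod - ring.cons);
        return 0;
    }
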
line diff
     1.1 --- a/tools/xenpaging/xenpaging.c	Tue Jan 11 10:37:45 2011 +0000
     1.2 +++ b/tools/xenpaging/xenpaging.c	Tue Jan 11 10:38:28 2011 +0000
     1.3 @@ -638,25 +638,34 @@ int main(int argc, char *argv[])
     1.4                      goto out;
     1.5                  }
     1.6                  
     1.7 -                /* Populate the page */
     1.8 -                rc = xenpaging_populate_page(paging, &req.gfn, fd, i);
     1.9 -                if ( rc != 0 )
    1.10 +                if ( req.flags & MEM_EVENT_FLAG_DROP_PAGE )
    1.11 +                {
    1.12 +                    DPRINTF("drop_page ^ gfn %"PRIx64" pageslot %d\n", req.gfn, i);
    1.13 +                    /* Notify policy of page being dropped */
    1.14 +                    policy_notify_paged_in(req.gfn);
    1.15 +                }
    1.16 +                else
    1.17                  {
    1.18 -                    ERROR("Error populating page");
    1.19 -                    goto out;
    1.20 -                }
    1.21 +                    /* Populate the page */
    1.22 +                    rc = xenpaging_populate_page(paging, &req.gfn, fd, i);
    1.23 +                    if ( rc != 0 )
    1.24 +                    {
    1.25 +                        ERROR("Error populating page");
    1.26 +                        goto out;
    1.27 +                    }
    1.28  
    1.29 -                /* Prepare the response */
    1.30 -                rsp.gfn = req.gfn;
    1.31 -                rsp.p2mt = req.p2mt;
    1.32 -                rsp.vcpu_id = req.vcpu_id;
    1.33 -                rsp.flags = req.flags;
    1.34 +                    /* Prepare the response */
    1.35 +                    rsp.gfn = req.gfn;
    1.36 +                    rsp.p2mt = req.p2mt;
    1.37 +                    rsp.vcpu_id = req.vcpu_id;
    1.38 +                    rsp.flags = req.flags;
    1.39  
    1.40 -                rc = xenpaging_resume_page(paging, &rsp, 1);
    1.41 -                if ( rc != 0 )
    1.42 -                {
    1.43 -                    ERROR("Error resuming page");
    1.44 -                    goto out;
    1.45 +                    rc = xenpaging_resume_page(paging, &rsp, 1);
    1.46 +                    if ( rc != 0 )
    1.47 +                    {
    1.48 +                        ERROR("Error resuming page");
    1.49 +                        goto out;
    1.50 +                    }
    1.51                  }
    1.52  
    1.53                  /* Evict a new page to replace the one we just paged in */
     2.1 --- a/xen/arch/x86/mm/p2m.c	Tue Jan 11 10:37:45 2011 +0000
     2.2 +++ b/xen/arch/x86/mm/p2m.c	Tue Jan 11 10:38:28 2011 +0000
     2.3 @@ -2211,12 +2211,15 @@ p2m_remove_page(struct p2m_domain *p2m, 
     2.4  
     2.5      P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn, mfn);
     2.6  
     2.7 -    for ( i = 0; i < (1UL << page_order); i++ )
     2.8 +    if ( mfn_valid(_mfn(mfn)) )
     2.9      {
    2.10 -        mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, p2m_query);
    2.11 -        if ( !p2m_is_grant(t) )
    2.12 -            set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
    2.13 -        ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
    2.14 +        for ( i = 0; i < (1UL << page_order); i++ )
    2.15 +        {
    2.16 +            mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, p2m_query);
    2.17 +            if ( !p2m_is_grant(t) )
    2.18 +                set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
    2.19 +            ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
    2.20 +        }
    2.21      }
    2.22      set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid, p2m->default_access);
    2.23  }
    2.24 @@ -2772,6 +2775,25 @@ int p2m_mem_paging_evict(struct p2m_doma
    2.25      return 0;
    2.26  }
    2.27  
    2.28 +void p2m_mem_paging_drop_page(struct p2m_domain *p2m, unsigned long gfn)
    2.29 +{
    2.30 +    struct vcpu *v = current;
    2.31 +    mem_event_request_t req;
    2.32 +    struct domain *d = p2m->domain;
    2.33 +
    2.34 +    /* Check that there's space on the ring for this request */
    2.35 +    if ( mem_event_check_ring(d) == 0)
    2.36 +    {
    2.37 +        /* Send release notification to pager */
    2.38 +        memset(&req, 0, sizeof(req));
    2.39 +        req.flags |= MEM_EVENT_FLAG_DROP_PAGE;
    2.40 +        req.gfn = gfn;
    2.41 +        req.vcpu_id = v->vcpu_id;
    2.42 +
    2.43 +        mem_event_put_request(d, &req);
    2.44 +    }
    2.45 +}
    2.46 +
    2.47  void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn)
    2.48  {
    2.49      struct vcpu *v = current;
    2.50 @@ -2846,13 +2868,16 @@ void p2m_mem_paging_resume(struct p2m_do
    2.51      /* Pull the response off the ring */
    2.52      mem_event_get_response(d, &rsp);
    2.53  
    2.54 -    /* Fix p2m entry */
    2.55 -    mfn = gfn_to_mfn(p2m, rsp.gfn, &p2mt);
    2.56 -    p2m_lock(p2m);
    2.57 -    set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw, p2m->default_access);
    2.58 -    set_gpfn_from_mfn(mfn_x(mfn), gfn);
    2.59 -    audit_p2m(p2m, 1);
    2.60 -    p2m_unlock(p2m);
    2.61 +    /* Fix p2m entry if the page was not dropped */
    2.62 +    if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
    2.63 +    {
    2.64 +        mfn = gfn_to_mfn(p2m, rsp.gfn, &p2mt);
    2.65 +        p2m_lock(p2m);
    2.66 +        set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw, p2m->default_access);
    2.67 +        set_gpfn_from_mfn(mfn_x(mfn), rsp.gfn);
    2.68 +        audit_p2m(p2m, 1);
    2.69 +        p2m_unlock(p2m);
    2.70 +    }
    2.71  
    2.72      /* Unpause domain */
    2.73      if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
     3.1 --- a/xen/common/memory.c	Tue Jan 11 10:37:45 2011 +0000
     3.2 +++ b/xen/common/memory.c	Tue Jan 11 10:38:28 2011 +0000
     3.3 @@ -163,6 +163,12 @@ int guest_remove_page(struct domain *d, 
     3.4  
     3.5  #ifdef CONFIG_X86
     3.6      mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(d), gmfn, &p2mt)); 
     3.7 +    if ( unlikely(p2m_is_paging(p2mt)) )
     3.8 +    {
     3.9 +        guest_physmap_remove_page(d, gmfn, mfn, 0);
    3.10 +        p2m_mem_paging_drop_page(p2m_get_hostp2m(d), gmfn);
    3.11 +        return 1;
    3.12 +    }
    3.13  #else
    3.14      mfn = gmfn_to_mfn(d, gmfn);
    3.15  #endif
     4.1 --- a/xen/include/asm-x86/p2m.h	Tue Jan 11 10:37:45 2011 +0000
     4.2 +++ b/xen/include/asm-x86/p2m.h	Tue Jan 11 10:38:28 2011 +0000
     4.3 @@ -511,6 +511,8 @@ int set_shared_p2m_entry(struct p2m_doma
     4.4  int p2m_mem_paging_nominate(struct p2m_domain *p2m, unsigned long gfn);
     4.5  /* Evict a frame */
     4.6  int p2m_mem_paging_evict(struct p2m_domain *p2m, unsigned long gfn);
     4.7 +/* Tell xenpaging to drop a paged out frame */
     4.8 +void p2m_mem_paging_drop_page(struct p2m_domain *p2m, unsigned long gfn);
     4.9  /* Start populating a paged out frame */
    4.10  void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn);
    4.11  /* Prepare the p2m for paging a frame in */
    4.12 @@ -518,6 +520,8 @@ int p2m_mem_paging_prep(struct p2m_domai
    4.13  /* Resume normal operation (in case a domain was paused) */
    4.14  void p2m_mem_paging_resume(struct p2m_domain *p2m);
    4.15  #else
    4.16 +static inline void p2m_mem_paging_drop_page(struct p2m_domain *p2m, unsigned long gfn)
    4.17 +{ }
    4.18  static inline void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn)
    4.19  { }
    4.20  #endif
     5.1 --- a/xen/include/public/mem_event.h	Tue Jan 11 10:37:45 2011 +0000
     5.2 +++ b/xen/include/public/mem_event.h	Tue Jan 11 10:38:28 2011 +0000
     5.3 @@ -33,6 +33,7 @@
     5.4  
     5.5  /* Memory event flags */
     5.6  #define MEM_EVENT_FLAG_VCPU_PAUSED  (1 << 0)
     5.7 +#define MEM_EVENT_FLAG_DROP_PAGE    (1 << 1)
     5.8  
     5.9  /* Reasons for the memory event request */
    5.10  #define MEM_EVENT_REASON_UNKNOWN     0    /* typical reason */