debuggers.hg
changeset 9849:4e8a64d8bd0e
[IA64] regionreg.c: deallocate metaphysical rids
allocate_rid_range also allocates metaphysical rids.
deallocate_rid_range also deallocates mp rids.
init_rid_allocator() added.
Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
| author | awilliam@xenbuild.aw |
|---|---|
| date | Fri Apr 14 14:13:13 2006 -0600 (2006-04-14) |
| parents | 918ce6a565b7 |
| children | 96bc87dd7ca9 |
| files | xen/arch/ia64/xen/domain.c xen/arch/ia64/xen/regionreg.c xen/arch/ia64/xen/xensetup.c xen/include/asm-ia64/domain.h xen/include/asm-ia64/regionreg.h |
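For orientation (this note is not part of the changeset): the patch splits each domain's RID space into a large virtualized range and a small metaphysical range, both derived from the same RID-block index. The standalone C sketch below replays that arithmetic. Only DOMAIN_RID_BITS_DEFAULT and IA64_MAX_IMPL_RID_BITS come from the diff; the value 18 assumed for IA64_MIN_IMPL_RID_BITS and the PAL-reported rid_size of 24 are illustrative assumptions.

```c
/* Standalone sketch of the RID-block arithmetic introduced by this patch.
 * ASSUMED: IA64_MIN_IMPL_RID_BITS == 18 and a PAL rid_size of 24; the real
 * code reads rid_size from ia64_pal_vm_summary(). */
#include <stdio.h>

#define IA64_MIN_IMPL_RID_BITS   18
#define IA64_MAX_IMPL_RID_BITS   24
#define DOMAIN_RID_BITS_DEFAULT  18

int main(void)
{
    int implemented_rid_bits = 24;   /* assumed PAL rid_size, already clamped */
    int log_blocks = implemented_rid_bits - IA64_MIN_IMPL_RID_BITS;
    int mp_rid_shift = IA64_MIN_IMPL_RID_BITS - log_blocks;

    /* init_rid_allocator(): block 0 is reserved for Xen, the rest for domains. */
    printf("Maximum of simultaneous domains: %d\n", (1 << log_blocks) - 1);

    /* allocate_rid_range(d, 0): ridbits defaults to 18, i.e. one block here. */
    int n_rid_blocks = 1 << (DOMAIN_RID_BITS_DEFAULT - IA64_MIN_IMPL_RID_BITS);
    int i = 1;                       /* first free block index (block 0 is Xen's) */

    /* Big range, used to virtualize the domain's own RIDs... */
    printf("rid range:    0x%x - 0x%x\n",
           i << IA64_MIN_IMPL_RID_BITS,
           (i + n_rid_blocks) << IA64_MIN_IMPL_RID_BITS);
    /* ...and the small metaphysical range; rr0 uses mp_rid+0, rr4 uses mp_rid+1. */
    printf("mp rid range: 0x%x - 0x%x\n",
           i << mp_rid_shift, (i + 1) << mp_rid_shift);
    return 0;
}
```

With these assumed values init_rid_allocator() would report 63 simultaneous domains, and the first domain block gets RIDs 0x40000-0x7ffff plus metaphysical RIDs 0x1000-0x1fff.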
line diff
--- a/xen/arch/ia64/xen/domain.c	Thu Apr 13 14:57:13 2006 -0600
+++ b/xen/arch/ia64/xen/domain.c	Fri Apr 14 14:13:13 2006 -0600
@@ -202,6 +202,18 @@ struct vcpu *alloc_vcpu_struct(struct do
     v->arch.metaphysical_rr4 = d->arch.metaphysical_rr4;
     v->arch.metaphysical_saved_rr0 = d->arch.metaphysical_rr0;
     v->arch.metaphysical_saved_rr4 = d->arch.metaphysical_rr4;
+
+    /* Is it correct ?
+       It depends on the domain rid usage.
+
+       A domain may share rid among its processor (eg having a
+       global VHPT).  In this case, we should also share rid
+       among vcpus and the rid range should be the same.
+
+       However a domain may have per cpu rid allocation.  In
+       this case we don't want to share rid among vcpus, but we may
+       do it if two vcpus are on the same cpu... */
+
     v->arch.starting_rid = d->arch.starting_rid;
     v->arch.ending_rid = d->arch.ending_rid;
     v->arch.breakimm = d->arch.breakimm;
@@ -259,12 +271,8 @@ int arch_domain_create(struct domain *d)
      * to see guest issue uncacheable access in metaphysical mode. But
      * keep such info here may be more sane.
      */
-    if (((d->arch.metaphysical_rr0 = allocate_metaphysical_rr()) == -1UL)
-        || ((d->arch.metaphysical_rr4 = allocate_metaphysical_rr()) == -1UL))
-        BUG();
-#define DOMAIN_RID_BITS_DEFAULT 18
-    if (!allocate_rid_range(d,DOMAIN_RID_BITS_DEFAULT)) // FIXME
-        BUG();
+    if (!allocate_rid_range(d,0))
+        goto fail_nomem;
     d->arch.breakimm = 0x1000;
     d->arch.sys_pgnr = 0;

--- a/xen/arch/ia64/xen/regionreg.c	Thu Apr 13 14:57:13 2006 -0600
+++ b/xen/arch/ia64/xen/regionreg.c	Fri Apr 14 14:13:13 2006 -0600
@@ -21,6 +21,8 @@ extern void *pal_vaddr;
 /* FIXME: where these declarations should be there ? */
 extern void panic_domain(struct pt_regs *, const char *, ...);

+#define DOMAIN_RID_BITS_DEFAULT 18
+
 #define IA64_MIN_IMPL_RID_BITS  (IA64_MIN_IMPL_RID_MSB+1)
 #define IA64_MAX_IMPL_RID_BITS  24

@@ -51,26 +53,12 @@ ia64_set_rr (unsigned long rr, unsigned
 }
 #endif

-// use this to allocate a rid out of the "Xen reserved rid block"
-static unsigned long allocate_reserved_rid(void)
-{
-    static unsigned long currentrid = XEN_DEFAULT_RID+1;
-    unsigned long t = currentrid;
-
-    unsigned long max = RIDS_PER_RIDBLOCK;
-
-    if (++currentrid >= max) return(-1UL);
-    return t;
-}
-
-
-// returns -1 if none available
-unsigned long allocate_metaphysical_rr(void)
+static unsigned long allocate_metaphysical_rr(struct domain *d, int n)
 {
     ia64_rr rrv;

     rrv.rrval = 0;    // Or else may see reserved bit fault
-    rrv.rid = allocate_reserved_rid();
+    rrv.rid = d->arch.starting_mp_rid + n;
     rrv.ps = PAGE_SHIFT;
     rrv.ve = 0;
     /* Mangle metaphysical rid */
@@ -78,30 +66,37 @@ unsigned long allocate_metaphysical_rr(v
     return rrv.rrval;
 }

-int deallocate_metaphysical_rid(unsigned long rid)
-{
-    // fix this when the increment allocation mechanism is fixed.
-    return 1;
-}
-
 /*************************************
   Region Block setup/management
 *************************************/

 static int implemented_rid_bits = 0;
+static int mp_rid_shift;
 static struct domain *ridblock_owner[MAX_RID_BLOCKS] = { 0 };

-static void get_impl_rid_bits(void)
+void init_rid_allocator (void)
 {
+    int log_blocks;
     pal_vm_info_2_u_t vm_info_2;

     /* Get machine rid_size.  */
     BUG_ON (ia64_pal_vm_summary (NULL, &vm_info_2) != 0);
     implemented_rid_bits = vm_info_2.pal_vm_info_2_s.rid_size;

-    if (implemented_rid_bits <= IA64_MIN_IMPL_RID_BITS ||
-        implemented_rid_bits > IA64_MAX_IMPL_RID_BITS)
-        BUG();
+    /* We need at least a few space...  */
+    BUG_ON (implemented_rid_bits <= IA64_MIN_IMPL_RID_BITS);
+
+    /* And we can accept too much space.  */
+    if (implemented_rid_bits > IA64_MAX_IMPL_RID_BITS)
+        implemented_rid_bits = IA64_MAX_IMPL_RID_BITS;
+
+    log_blocks = (implemented_rid_bits - IA64_MIN_IMPL_RID_BITS);
+
+    printf ("Maximum of simultaneous domains: %d\n",
+            (1 << log_blocks) - 1);
+
+    mp_rid_shift = IA64_MIN_IMPL_RID_BITS - log_blocks;
+    BUG_ON (mp_rid_shift < 3);
 }


@@ -113,13 +108,14 @@ int allocate_rid_range(struct domain *d,
 {
     int i, j, n_rid_blocks;

-    if (implemented_rid_bits == 0) get_impl_rid_bits();
-
+    if (ridbits == 0)
+        ridbits = DOMAIN_RID_BITS_DEFAULT;
+
     if (ridbits >= IA64_MAX_IMPL_RID_BITS)
-    ridbits = IA64_MAX_IMPL_RID_BITS - 1;
+        ridbits = IA64_MAX_IMPL_RID_BITS - 1;

     if (ridbits < IA64_MIN_IMPL_RID_BITS)
-    ridbits = IA64_MIN_IMPL_RID_BITS;
+        ridbits = IA64_MIN_IMPL_RID_BITS;

     // convert to rid_blocks and find one
     n_rid_blocks = 1UL << (ridbits - IA64_MIN_IMPL_RID_BITS);
@@ -128,24 +124,37 @@ int allocate_rid_range(struct domain *d,
     for (i = n_rid_blocks; i < MAX_RID_BLOCKS; i += n_rid_blocks) {
         if (ridblock_owner[i] == NULL) {
             for (j = i; j < i + n_rid_blocks; ++j) {
-                if (ridblock_owner[j]) break;
+                if (ridblock_owner[j])
+                    break;
             }
-            if (ridblock_owner[j] == NULL) break;
+            if (ridblock_owner[j] == NULL)
+                break;
         }
     }

-    if (i >= MAX_RID_BLOCKS) return 0;
+    if (i >= MAX_RID_BLOCKS)
+        return 0;

     // found an unused block:
     //   (i << min_rid_bits) <= rid < ((i + n) << min_rid_bits)
     // mark this block as owned
-    for (j = i; j < i + n_rid_blocks; ++j) ridblock_owner[j] = d;
+    for (j = i; j < i + n_rid_blocks; ++j)
+        ridblock_owner[j] = d;

     // setup domain struct
     d->arch.rid_bits = ridbits;
-    d->arch.starting_rid = i << IA64_MIN_IMPL_RID_BITS; d->arch.ending_rid = (i+n_rid_blocks) << IA64_MIN_IMPL_RID_BITS;
-printf("###allocating rid_range, domain %p: starting_rid=%x, ending_rid=%x\n",
-d,d->arch.starting_rid, d->arch.ending_rid);
+    d->arch.starting_rid = i << IA64_MIN_IMPL_RID_BITS;
+    d->arch.ending_rid = (i+n_rid_blocks) << IA64_MIN_IMPL_RID_BITS;
+
+    d->arch.starting_mp_rid = i << mp_rid_shift;
+    d->arch.ending_mp_rid = (i + 1) << mp_rid_shift;
+
+    d->arch.metaphysical_rr0 = allocate_metaphysical_rr(d, 0);
+    d->arch.metaphysical_rr4 = allocate_metaphysical_rr(d, 1);
+
+    printf("###allocating rid_range, domain %p: rid=%x-%x mp_rid=%x\n",
+           d, d->arch.starting_rid, d->arch.ending_rid,
+           d->arch.starting_mp_rid);

     return 1;
 }
@@ -169,11 +178,13 @@ int deallocate_rid_range(struct domain *
 #endif

     for (i = rid_block_start; i < rid_block_end; ++i)
-    ridblock_owner[i] = NULL;
+        ridblock_owner[i] = NULL;

     d->arch.rid_bits = 0;
     d->arch.starting_rid = 0;
     d->arch.ending_rid = 0;
+    d->arch.starting_mp_rid = 0;
+    d->arch.ending_mp_rid = 0;
     return 1;
 }

@@ -259,23 +270,6 @@ int set_metaphysical_rr0(void)
     return 1;
 }

-// validates/changes region registers 0-6 in the currently executing domain
-// Note that this is the one and only SP API (other than executing a privop)
-// for a domain to use to change region registers
-static int set_all_rr(u64 rr0, u64 rr1, u64 rr2, u64 rr3,
-                      u64 rr4, u64 rr5, u64 rr6, u64 rr7)
-{
-    if (!set_one_rr(0x0000000000000000L, rr0)) return 0;
-    if (!set_one_rr(0x2000000000000000L, rr1)) return 0;
-    if (!set_one_rr(0x4000000000000000L, rr2)) return 0;
-    if (!set_one_rr(0x6000000000000000L, rr3)) return 0;
-    if (!set_one_rr(0x8000000000000000L, rr4)) return 0;
-    if (!set_one_rr(0xa000000000000000L, rr5)) return 0;
-    if (!set_one_rr(0xc000000000000000L, rr6)) return 0;
-    if (!set_one_rr(0xe000000000000000L, rr7)) return 0;
-    return 1;
-}
-
 void init_all_rr(struct vcpu *v)
 {
     ia64_rr rrv;
--- a/xen/arch/ia64/xen/xensetup.c	Thu Apr 13 14:57:13 2006 -0600
+++ b/xen/arch/ia64/xen/xensetup.c	Fri Apr 14 14:13:13 2006 -0600
@@ -364,6 +364,8 @@ printk("About to call sort_main_extable(
     sort_main_extable();


+    init_rid_allocator ();
+
     /* Create initial domain 0. */
     printk("About to call domain_create()\n");
     dom0 = domain_create(0, 0);
--- a/xen/include/asm-ia64/domain.h	Thu Apr 13 14:57:13 2006 -0600
+++ b/xen/include/asm-ia64/domain.h	Fri Apr 14 14:13:13 2006 -0600
@@ -22,10 +22,19 @@ struct arch_domain {
     struct mm_struct *mm;
     unsigned long metaphysical_rr0;
     unsigned long metaphysical_rr4;
+
+    /* There are two ranges of RID for a domain:
+       one big range, used to virtualize domain RID,
+       one small range for internal Xen use (metaphysical).  */
+    /* Big range.  */
     int starting_rid;    /* first RID assigned to domain */
     int ending_rid;      /* one beyond highest RID assigned to domain */
     int rid_bits;        /* number of virtual rid bits (default: 18) */
-    int breakimm;
+    /* Metaphysical range.  */
+    int starting_mp_rid;
+    int ending_mp_rid;
+
+    int breakimm;        /* The imm value for hypercalls.  */

     int physmap_built;   /* Whether is physmap built or not */
     int imp_va_msb;
--- a/xen/include/asm-ia64/regionreg.h	Thu Apr 13 14:57:13 2006 -0600
+++ b/xen/include/asm-ia64/regionreg.h	Fri Apr 14 14:13:13 2006 -0600
@@ -64,10 +64,12 @@ vmMangleRID(unsigned long RIDVal)
 // since vmMangleRID is symmetric, use it for unmangling also
 #define vmUnmangleRID(x)	vmMangleRID(x)

-extern unsigned long allocate_metaphysical_rr(void);
-extern int deallocate_metaphysical_rid(unsigned long rid);
+extern void init_rid_allocator (void);

 struct domain;
+
+/* Allocate RIDs range and metaphysical RIDs for domain d.
+   If ridbits is 0, a default value is used instead.  */
 extern int allocate_rid_range(struct domain *d, unsigned long ridbits);
 extern int deallocate_rid_range(struct domain *d);

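As a reading aid (again not part of the commit), here is a minimal, hypothetical sketch of the call order that results from the hunks above, using stub declarations in place of the Xen headers; the prototypes mirror regionreg.h, everything else is illustrative.

```c
/* Hypothetical caller sketch: the prototypes match regionreg.h above,
 * but the surrounding function and error handling are illustrative only. */
struct domain;                       /* opaque here; real layout is in domain.h */
extern void init_rid_allocator(void);
extern int  allocate_rid_range(struct domain *d, unsigned long ridbits);
extern int  deallocate_rid_range(struct domain *d);

static int example_domain_rid_lifecycle(struct domain *d)
{
    init_rid_allocator();            /* xensetup.c: once at boot, before domain_create() */

    if (!allocate_rid_range(d, 0))   /* 0 selects DOMAIN_RID_BITS_DEFAULT; this now also
                                        fills d->arch.metaphysical_rr0/rr4 */
        return 0;                    /* arch_domain_create() jumps to fail_nomem */

    /* ... vcpus copy the domain's RID range and metaphysical rr values ... */

    return deallocate_rid_range(d);  /* frees the rid blocks and clears the mp rid range */
}
```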