debuggers.hg

changeset 21084:6384675aa29a

Increase default console ring allocation size and reduce default verbosity

In order to have a better chance that relevant messages fit into the
ring buffer, allocate a dynamic (larger) one in more cases, and make
the default allocation size depend on both the number of CPUs and the
log level. Also free the static buffer if a dynamic one was obtained.

In order for "xm dmesg" to retrieve larger buffers, eliminate
pyxc_readconsolering()'s 32k limitation resulting from the use of a
statically allocated buffer.

Finally, suppress on x86 most per-CPU boot time messages (by default;
most of them can be re-enabled with a new command line option
"cpuinfo", and some others are now printed more than once only when
there are inconsistencies between CPUs). This reduces both boot time
(particularly when a graphical console is in use) and pressure on the
console ring and serial transmit buffers.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Mar 17 08:34:16 2010 +0000 (2010-03-17)
parents 499a11c1c25e
children 95f5a4ce8f24
files tools/python/xen/lowlevel/xc/xc.c xen/arch/ia64/xen/xen.lds.S xen/arch/x86/cpu/amd.c xen/arch/x86/cpu/common.c xen/arch/x86/cpu/intel_cacheinfo.c xen/arch/x86/cpu/mcheck/amd_f10.c xen/arch/x86/cpu/mcheck/amd_k8.c xen/arch/x86/cpu/mcheck/k7.c xen/arch/x86/cpu/mcheck/mce.c xen/arch/x86/cpu/mcheck/mce.h xen/arch/x86/cpu/mcheck/mce_intel.c xen/arch/x86/hvm/asid.c xen/arch/x86/setup.c xen/arch/x86/smpboot.c xen/drivers/char/console.c xen/include/asm-x86/processor.h
line diff
     1.1 --- a/tools/python/xen/lowlevel/xc/xc.c	Wed Mar 17 08:31:17 2010 +0000
     1.2 +++ b/tools/python/xen/lowlevel/xc/xc.c	Wed Mar 17 08:34:16 2010 +0000
     1.3 @@ -1062,14 +1062,16 @@ static PyObject *pyxc_readconsolering(Xc
     1.4                                        PyObject *kwds)
     1.5  {
     1.6      unsigned int clear = 0, index = 0, incremental = 0;
     1.7 -    char         _str[32768], *str = _str;
     1.8 -    unsigned int count = 32768;
     1.9 +    unsigned int count = 16384 + 1, size = count;
    1.10 +    char        *str = malloc(size), *ptr;
    1.11 +    PyObject    *obj;
    1.12      int          ret;
    1.13  
    1.14      static char *kwd_list[] = { "clear", "index", "incremental", NULL };
    1.15  
    1.16      if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iii", kwd_list,
    1.17 -                                      &clear, &index, &incremental) )
    1.18 +                                      &clear, &index, &incremental) ||
    1.19 +         !str )
    1.20          return NULL;
    1.21  
    1.22      ret = xc_readconsolering(self->xc_handle, &str, &count, clear,
    1.23 @@ -1077,7 +1079,30 @@ static PyObject *pyxc_readconsolering(Xc
    1.24      if ( ret < 0 )
    1.25          return pyxc_error_to_exception();
    1.26  
    1.27 -    return PyString_FromStringAndSize(str, count);
    1.28 +    while ( !incremental && count == size )
    1.29 +    {
    1.30 +        size += count - 1;
    1.31 +        if ( size < count )
    1.32 +            break;
    1.33 +
    1.34 +        ptr = realloc(str, size);
    1.35 +        if ( !ptr )
    1.36 +            break;
    1.37 +
    1.38 +        str = ptr + count;
    1.39 +        count = size - count;
    1.40 +        ret = xc_readconsolering(self->xc_handle, &str, &count, clear,
    1.41 +                                 1, &index);
    1.42 +        if ( ret < 0 )
    1.43 +            break;
    1.44 +
    1.45 +        count += str - ptr;
    1.46 +        str = ptr;
    1.47 +    }
    1.48 +
    1.49 +    obj = PyString_FromStringAndSize(str, count);
    1.50 +    free(str);
    1.51 +    return obj;
    1.52  }
    1.53  
    1.54  
     2.1 --- a/xen/arch/ia64/xen/xen.lds.S	Wed Mar 17 08:31:17 2010 +0000
     2.2 +++ b/xen/arch/ia64/xen/xen.lds.S	Wed Mar 17 08:34:16 2010 +0000
     2.3 @@ -223,7 +223,12 @@ SECTIONS
     2.4    .sbss : AT(ADDR(.sbss) - LOAD_OFFSET)
     2.5  	{ *(.sbss) *(.scommon) }
     2.6    .bss : AT(ADDR(.bss) - LOAD_OFFSET)
     2.7 -	{ *(.bss) *(COMMON) }
     2.8 +	{
     2.9 +		. = ALIGN(PAGE_SIZE);
    2.10 +		*(.bss.page_aligned)
    2.11 +		*(.bss)
    2.12 +		*(COMMON)
    2.13 +	}
    2.14  
    2.15    _end = .;
    2.16  
     3.1 --- a/xen/arch/x86/cpu/amd.c	Wed Mar 17 08:31:17 2010 +0000
     3.2 +++ b/xen/arch/x86/cpu/amd.c	Wed Mar 17 08:34:16 2010 +0000
     3.3 @@ -493,8 +493,9 @@ static void __devinit init_amd(struct cp
     3.4  		}
     3.5  		cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1);
     3.6  		phys_proc_id[cpu] >>= bits;
     3.7 -		printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
     3.8 -		       cpu, c->x86_max_cores, cpu_core_id[cpu]);
     3.9 +		if (opt_cpu_info)
    3.10 +			printk("CPU %d(%d) -> Core %d\n",
    3.11 +			       cpu, c->x86_max_cores, cpu_core_id[cpu]);
    3.12  	}
    3.13  #endif
    3.14  
     4.1 --- a/xen/arch/x86/cpu/common.c	Wed Mar 17 08:31:17 2010 +0000
     4.2 +++ b/xen/arch/x86/cpu/common.c	Wed Mar 17 08:34:16 2010 +0000
     4.3 @@ -59,6 +59,9 @@ static struct cpu_dev * this_cpu = &defa
     4.4  
     4.5  integer_param("cachesize", cachesize_override);
     4.6  
     4.7 +int __cpuinitdata opt_cpu_info;
     4.8 +boolean_param("cpuinfo", opt_cpu_info);
     4.9 +
    4.10  int __cpuinit get_model_name(struct cpuinfo_x86 *c)
    4.11  {
    4.12  	unsigned int *v;
    4.13 @@ -97,8 +100,10 @@ void __cpuinit display_cacheinfo(struct 
    4.14  
    4.15  	if (n >= 0x80000005) {
    4.16  		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
    4.17 -		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
    4.18 -			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
    4.19 +		if (opt_cpu_info)
    4.20 +			printk("CPU: L1 I cache %dK (%d bytes/line),"
    4.21 +			              " D cache %dK (%d bytes/line)\n",
    4.22 +			       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
    4.23  		c->x86_cache_size=(ecx>>24)+(edx>>24);	
    4.24  	}
    4.25  
    4.26 @@ -121,8 +126,9 @@ void __cpuinit display_cacheinfo(struct 
    4.27  
    4.28  	c->x86_cache_size = l2size;
    4.29  
    4.30 -	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
    4.31 -	       l2size, ecx & 0xFF);
    4.32 +	if (opt_cpu_info)
    4.33 +		printk("CPU: L2 Cache: %dK (%d bytes/line)\n",
    4.34 +		       l2size, ecx & 0xFF);
    4.35  }
    4.36  
    4.37  /* Naming convention should be: <Name> [(<Codename>)] */
    4.38 @@ -495,8 +501,9 @@ void __cpuinit detect_ht(struct cpuinfo_
    4.39  		index_msb = get_count_order(c->x86_num_siblings);
    4.40  		phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
    4.41  
    4.42 -		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
    4.43 -		       phys_proc_id[cpu]);
    4.44 +		if (opt_cpu_info)
    4.45 +			printk("CPU: Physical Processor ID: %d\n",
    4.46 +			       phys_proc_id[cpu]);
    4.47  
    4.48  		c->x86_num_siblings = c->x86_num_siblings / c->x86_max_cores;
    4.49  
    4.50 @@ -507,16 +514,22 @@ void __cpuinit detect_ht(struct cpuinfo_
    4.51  		cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
    4.52  					       ((1 << core_bits) - 1);
    4.53  
    4.54 -		if (c->x86_max_cores > 1)
    4.55 -			printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
    4.56 +		if (opt_cpu_info && c->x86_max_cores > 1)
    4.57 +			printk("CPU: Processor Core ID: %d\n",
    4.58  			       cpu_core_id[cpu]);
    4.59  	}
    4.60  }
    4.61  #endif
    4.62  
    4.63 -void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
    4.64 +void __cpuinit print_cpu_info(unsigned int cpu)
    4.65  {
    4.66 -	char *vendor = NULL;
    4.67 +	const struct cpuinfo_x86 *c = cpu_data + cpu;
    4.68 +	const char *vendor = NULL;
    4.69 +
    4.70 +	if (!opt_cpu_info)
    4.71 +		return;
    4.72 +
    4.73 +	printk("CPU%u: ", cpu);
    4.74  
    4.75  	if (c->x86_vendor < X86_VENDOR_NUM)
    4.76  		vendor = this_cpu->c_vendor;
    4.77 @@ -578,7 +591,8 @@ void __cpuinit cpu_init(void)
    4.78  		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
    4.79  		for (;;) local_irq_enable();
    4.80  	}
    4.81 -	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
    4.82 +	if (opt_cpu_info)
    4.83 +		printk("Initializing CPU#%d\n", cpu);
    4.84  
    4.85  	if (cpu_has_pat)
    4.86  		wrmsrl(MSR_IA32_CR_PAT, host_pat);
     5.1 --- a/xen/arch/x86/cpu/intel_cacheinfo.c	Wed Mar 17 08:31:17 2010 +0000
     5.2 +++ b/xen/arch/x86/cpu/intel_cacheinfo.c	Wed Mar 17 08:34:16 2010 +0000
     5.3 @@ -415,21 +415,23 @@ unsigned int __cpuinit init_intel_cachei
     5.4  		l3 = new_l3;
     5.5  	}
     5.6  
     5.7 -	if (trace)
     5.8 -		printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
     5.9 -	else if ( l1i )
    5.10 -		printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
    5.11 +	if (opt_cpu_info) {
    5.12 +		if (trace)
    5.13 +			printk("CPU: Trace cache: %dK uops", trace);
    5.14 +		else if ( l1i )
    5.15 +			printk("CPU: L1 I cache: %dK", l1i);
    5.16  
    5.17 -	if (l1d)
    5.18 -		printk(", L1 D cache: %dK\n", l1d);
    5.19 -	else
    5.20 -		printk("\n");
    5.21 +		if (l1d)
    5.22 +			printk(", L1 D cache: %dK\n", l1d);
    5.23 +		else
    5.24 +			printk("\n");
    5.25  
    5.26 -	if (l2)
    5.27 -		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
    5.28 +		if (l2)
    5.29 +			printk("CPU: L2 cache: %dK\n", l2);
    5.30  
    5.31 -	if (l3)
    5.32 -		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
    5.33 +		if (l3)
    5.34 +			printk("CPU: L3 cache: %dK\n", l3);
    5.35 +	}
    5.36  
    5.37  	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
    5.38  
     6.1 --- a/xen/arch/x86/cpu/mcheck/amd_f10.c	Wed Mar 17 08:31:17 2010 +0000
     6.2 +++ b/xen/arch/x86/cpu/mcheck/amd_f10.c	Wed Mar 17 08:34:16 2010 +0000
     6.3 @@ -83,15 +83,12 @@ amd_f10_handler(struct mc_info *mi, uint
     6.4  }
     6.5  
     6.6  /* AMD Family10 machine check */
     6.7 -int amd_f10_mcheck_init(struct cpuinfo_x86 *c) 
     6.8 +enum mcheck_type amd_f10_mcheck_init(struct cpuinfo_x86 *c)
     6.9  { 
    6.10 -	if (!amd_k8_mcheck_init(c))
    6.11 -		return 0;
    6.12 +	if (amd_k8_mcheck_init(c) == mcheck_none)
    6.13 +		return mcheck_none;
    6.14  
    6.15  	x86_mce_callback_register(amd_f10_handler);
    6.16  
    6.17 -	printk("CPU%i: AMD Family%xh machine check reporting enabled\n",
    6.18 -	       smp_processor_id(), c->x86);
    6.19 -
    6.20 -	return 1;
    6.21 +	return mcheck_amd_famXX;
    6.22  }
     7.1 --- a/xen/arch/x86/cpu/mcheck/amd_k8.c	Wed Mar 17 08:31:17 2010 +0000
     7.2 +++ b/xen/arch/x86/cpu/mcheck/amd_k8.c	Wed Mar 17 08:34:16 2010 +0000
     7.3 @@ -76,14 +76,14 @@ static void k8_machine_check(struct cpu_
     7.4  }
     7.5  
     7.6  /* AMD K8 machine check */
     7.7 -int amd_k8_mcheck_init(struct cpuinfo_x86 *c)
     7.8 +enum mcheck_type amd_k8_mcheck_init(struct cpuinfo_x86 *c)
     7.9  {
    7.10  	uint32_t i;
    7.11  	enum mcequirk_amd_flags quirkflag;
    7.12  
    7.13  	/* Check for PPro style MCA; our caller has confirmed MCE support. */
    7.14  	if (!cpu_has(c, X86_FEATURE_MCA))
    7.15 -		return 0;
    7.16 +		return mcheck_none;
    7.17  
    7.18  	quirkflag = mcequirk_lookup_amd_quirkdata(c);
    7.19  
    7.20 @@ -102,9 +102,6 @@ int amd_k8_mcheck_init(struct cpuinfo_x8
    7.21  	}
    7.22  
    7.23  	set_in_cr4(X86_CR4_MCE);
    7.24 -	if (c->x86 < 0x10 || c->x86 > 0x11)
    7.25 -		printk("CPU%i: AMD K8 machine check reporting enabled\n",
    7.26 -		       smp_processor_id());
    7.27  
    7.28 -	return 1;
    7.29 +	return mcheck_amd_k8;
    7.30  }
     8.1 --- a/xen/arch/x86/cpu/mcheck/k7.c	Wed Mar 17 08:31:17 2010 +0000
     8.2 +++ b/xen/arch/x86/cpu/mcheck/k7.c	Wed Mar 17 08:34:16 2010 +0000
     8.3 @@ -68,14 +68,14 @@ static fastcall void k7_machine_check(st
     8.4  
     8.5  
     8.6  /* AMD K7 machine check */
     8.7 -int amd_k7_mcheck_init(struct cpuinfo_x86 *c)
     8.8 +enum mcheck_type amd_k7_mcheck_init(struct cpuinfo_x86 *c)
     8.9  {
    8.10  	u32 l, h;
    8.11  	int i;
    8.12  
    8.13  	/* Check for PPro style MCA; our caller has confirmed MCE support. */
    8.14  	if (!cpu_has(c, X86_FEATURE_MCA))
    8.15 -		return 0;
    8.16 +		return mcheck_none;
    8.17  
    8.18  	x86_mce_vector_register(k7_machine_check);
    8.19  
    8.20 @@ -93,8 +93,6 @@ int amd_k7_mcheck_init(struct cpuinfo_x8
    8.21  	}
    8.22  
    8.23  	set_in_cr4 (X86_CR4_MCE);
    8.24 -	printk (KERN_INFO "CPU%d: AMD K7 machine check reporting enabled.\n",
    8.25 -		smp_processor_id());
    8.26  
    8.27 -	return 1;
    8.28 +	return mcheck_amd_k7;
    8.29  }
     9.1 --- a/xen/arch/x86/cpu/mcheck/mce.c	Wed Mar 17 08:31:17 2010 +0000
     9.2 +++ b/xen/arch/x86/cpu/mcheck/mce.c	Wed Mar 17 08:34:16 2010 +0000
     9.3 @@ -562,9 +562,9 @@ void mcheck_mca_clearbanks(cpu_banks_t b
     9.4  	}
     9.5  }
     9.6  
     9.7 -static int amd_mcheck_init(struct cpuinfo_x86 *ci)
     9.8 +static enum mcheck_type amd_mcheck_init(struct cpuinfo_x86 *ci)
     9.9  {
    9.10 -	int rc = 0;
    9.11 +	enum mcheck_type rc = mcheck_none;
    9.12  
    9.13  	switch (ci->x86) {
    9.14  	case 6:
    9.15 @@ -628,7 +628,9 @@ int mce_firstbank(struct cpuinfo_x86 *c)
    9.16  /* This has to be run for each processor */
    9.17  void mcheck_init(struct cpuinfo_x86 *c)
    9.18  {
    9.19 -	int inited = 0, i, broadcast;
    9.20 +	int i, broadcast;
    9.21 +	enum mcheck_type inited = mcheck_none;
    9.22 +	static enum mcheck_type g_type = mcheck_unset;
    9.23      static int broadcast_check;
    9.24  
    9.25  	if (mce_disabled == 1) {
    9.26 @@ -694,9 +696,37 @@ void mcheck_init(struct cpuinfo_x86 *c)
    9.27      if (g_mcg_cap & MCG_CTL_P)
    9.28          rdmsrl(MSR_IA32_MCG_CTL, h_mcg_ctl);
    9.29      set_poll_bankmask(c);
    9.30 -	if (!inited)
    9.31 -		printk(XENLOG_INFO "CPU%i: No machine check initialization\n",
    9.32 -		    smp_processor_id());
    9.33 +
    9.34 +	if (inited != g_type) {
    9.35 +		char prefix[20];
    9.36 +		static const char *const type_str[] = {
    9.37 +			[mcheck_amd_famXX] = "AMD",
    9.38 +			[mcheck_amd_k7] = "AMD K7",
    9.39 +			[mcheck_amd_k8] = "AMD K8",
    9.40 +			[mcheck_intel] = "Intel"
    9.41 +		};
    9.42 +
    9.43 +		snprintf(prefix, ARRAY_SIZE(prefix),
    9.44 +			 g_type != mcheck_unset ? XENLOG_WARNING "CPU%i: "
    9.45 +						: XENLOG_INFO,
    9.46 +			 smp_processor_id());
    9.47 +		BUG_ON(inited >= ARRAY_SIZE(type_str));
    9.48 +		switch (inited) {
    9.49 +		default:
    9.50 +			printk("%s%s machine check reporting enabled\n",
    9.51 +			       prefix, type_str[inited]);
    9.52 +			break;
    9.53 +		case mcheck_amd_famXX:
    9.54 +			printk("%s%s Fam%xh machine check reporting enabled\n",
    9.55 +			       prefix, type_str[inited], c->x86);
    9.56 +			break;
    9.57 +		case mcheck_none:
    9.58 +			printk("%sNo machine check initialization\n", prefix);
    9.59 +			break;
    9.60 +		}
    9.61 +
    9.62 +		g_type = inited;
    9.63 +	}
    9.64  }
    9.65  
    9.66  u64 mce_cap_init(void)
    10.1 --- a/xen/arch/x86/cpu/mcheck/mce.h	Wed Mar 17 08:31:17 2010 +0000
    10.2 +++ b/xen/arch/x86/cpu/mcheck/mce.h	Wed Mar 17 08:34:16 2010 +0000
    10.3 @@ -28,13 +28,21 @@ extern int mce_verbosity;
    10.4              printk(s, ##a);       \
    10.5          } while (0)
    10.6  
    10.7 +enum mcheck_type {
    10.8 +	mcheck_unset = -1,
    10.9 +	mcheck_none,
   10.10 +	mcheck_amd_famXX,
   10.11 +	mcheck_amd_k7,
   10.12 +	mcheck_amd_k8,
   10.13 +	mcheck_intel
   10.14 +};
   10.15  
   10.16  /* Init functions */
   10.17 -int amd_k7_mcheck_init(struct cpuinfo_x86 *c);
   10.18 -int amd_k8_mcheck_init(struct cpuinfo_x86 *c);
   10.19 -int amd_f10_mcheck_init(struct cpuinfo_x86 *c);
   10.20 +enum mcheck_type amd_k7_mcheck_init(struct cpuinfo_x86 *c);
   10.21 +enum mcheck_type amd_k8_mcheck_init(struct cpuinfo_x86 *c);
   10.22 +enum mcheck_type amd_f10_mcheck_init(struct cpuinfo_x86 *c);
   10.23  
   10.24 -int intel_mcheck_init(struct cpuinfo_x86 *c);
   10.25 +enum mcheck_type intel_mcheck_init(struct cpuinfo_x86 *c);
   10.26  
   10.27  void intel_mcheck_timer(struct cpuinfo_x86 *c);
   10.28  void mce_intel_feature_init(struct cpuinfo_x86 *c);
    11.1 --- a/xen/arch/x86/cpu/mcheck/mce_intel.c	Wed Mar 17 08:31:17 2010 +0000
    11.2 +++ b/xen/arch/x86/cpu/mcheck/mce_intel.c	Wed Mar 17 08:34:16 2010 +0000
    11.3 @@ -141,8 +141,9 @@ static void intel_init_thermal(struct cp
    11.4  
    11.5      l = apic_read (APIC_LVTTHMR);
    11.6      apic_write_around (APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
    11.7 -    printk (KERN_INFO "CPU%d: Thermal monitoring enabled (%s)\n", 
    11.8 -            cpu, tm2 ? "TM2" : "TM1");
    11.9 +    if (opt_cpu_info)
   11.10 +        printk(KERN_INFO "CPU%u: Thermal monitoring enabled (%s)\n",
   11.11 +                cpu, tm2 ? "TM2" : "TM1");
   11.12      return;
   11.13  }
   11.14  #endif /* CONFIG_X86_MCE_THERMAL */
   11.15 @@ -946,7 +947,8 @@ static void intel_init_cmci(struct cpuin
   11.16      int cpu = smp_processor_id();
   11.17  
   11.18      if (!mce_available(c) || !cmci_support) {
   11.19 -        mce_printk(MCE_QUIET, "CMCI: CPU%d has no CMCI support\n", cpu);
   11.20 +        if (opt_cpu_info)
   11.21 +            mce_printk(MCE_QUIET, "CMCI: CPU%d has no CMCI support\n", cpu);
   11.22          return;
   11.23      }
   11.24  
   11.25 @@ -1068,11 +1070,9 @@ static void mce_init(void)
   11.26  }
   11.27  
   11.28  /* p4/p6 family have similar MCA initialization process */
   11.29 -int intel_mcheck_init(struct cpuinfo_x86 *c)
   11.30 +enum mcheck_type intel_mcheck_init(struct cpuinfo_x86 *c)
   11.31  {
   11.32      _mce_cap_init(c);
   11.33 -    mce_printk(MCE_QUIET, "Intel machine check reporting enabled on CPU#%d.\n",
   11.34 -            smp_processor_id());
   11.35  
   11.36      /* machine check is available */
   11.37      x86_mce_vector_register(intel_machine_check);
   11.38 @@ -1085,7 +1085,7 @@ int intel_mcheck_init(struct cpuinfo_x86
   11.39      mce_set_owner();
   11.40  
   11.41      open_softirq(MACHINE_CHECK_SOFTIRQ, mce_softirq);
   11.42 -    return 1;
   11.43 +    return mcheck_intel;
   11.44  }
   11.45  
   11.46  int intel_mce_wrmsr(uint32_t msr, uint64_t val)
    12.1 --- a/xen/arch/x86/hvm/asid.c	Wed Mar 17 08:31:17 2010 +0000
    12.2 +++ b/xen/arch/x86/hvm/asid.c	Wed Mar 17 08:34:16 2010 +0000
    12.3 @@ -59,6 +59,7 @@ static DEFINE_PER_CPU(struct hvm_asid_da
    12.4  
    12.5  void hvm_asid_init(int nasids)
    12.6  {
    12.7 +    static s8 g_disabled = -1;
    12.8      struct hvm_asid_data *data = &this_cpu(hvm_asid_data);
    12.9  
   12.10      /*
   12.11 @@ -72,8 +73,12 @@ void hvm_asid_init(int nasids)
   12.12      data->max_asid = nasids - 1;
   12.13      data->disabled = (nasids <= 1);
   12.14  
   12.15 -    printk("HVM: ASIDs %s \n",
   12.16 -           (data->disabled ? "disabled." : "enabled."));
   12.17 +    if ( g_disabled != data->disabled )
   12.18 +    {
   12.19 +        printk("HVM: ASIDs %sabled.\n", data->disabled ? "dis" : "en");
   12.20 +        if ( g_disabled < 0 )
   12.21 +            g_disabled = data->disabled;
   12.22 +    }
   12.23  
   12.24      /* Zero indicates 'invalid generation', so we start the count at one. */
   12.25      data->core_asid_generation = 1;
    13.1 --- a/xen/arch/x86/setup.c	Wed Mar 17 08:31:17 2010 +0000
    13.2 +++ b/xen/arch/x86/setup.c	Wed Mar 17 08:34:16 2010 +0000
    13.3 @@ -269,8 +269,8 @@ void __devinit srat_detect_node(int cpu)
    13.4          node = 0;
    13.5      numa_set_node(cpu, node);
    13.6  
    13.7 -    if ( acpi_numa > 0 )
    13.8 -        printk(KERN_INFO "CPU %d APIC %d -> Node %d\n", cpu, apicid, node);
    13.9 +    if ( opt_cpu_info && acpi_numa > 0 )
   13.10 +        printk("CPU %d APIC %d -> Node %d\n", cpu, apicid, node);
   13.11  }
   13.12  
   13.13  /*
    14.1 --- a/xen/arch/x86/smpboot.c	Wed Mar 17 08:31:17 2010 +0000
    14.2 +++ b/xen/arch/x86/smpboot.c	Wed Mar 17 08:34:16 2010 +0000
    14.3 @@ -877,7 +877,9 @@ static int __devinit do_boot_cpu(int api
    14.4  	start_eip = setup_trampoline();
    14.5  
    14.6  	/* So we see what's up   */
    14.7 -	printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
    14.8 +	if (opt_cpu_info)
    14.9 +		printk("Booting processor %d/%d eip %lx\n",
   14.10 +		       cpu, apicid, start_eip);
   14.11  
   14.12  	stack_start.esp = prepare_idle_stack(cpu);
   14.13  
   14.14 @@ -960,8 +962,7 @@ static int __devinit do_boot_cpu(int api
   14.15  		if (cpu_isset(cpu, cpu_callin_map)) {
   14.16  			/* number CPUs logically, starting from 1 (BSP is 0) */
   14.17  			Dprintk("OK.\n");
   14.18 -			printk("CPU%d: ", cpu);
   14.19 -			print_cpu_info(&cpu_data[cpu]);
   14.20 +			print_cpu_info(cpu);
   14.21  			Dprintk("CPU has booted.\n");
   14.22  		} else {
   14.23  			boot_error = 1;
   14.24 @@ -1062,8 +1063,7 @@ static void __init smp_boot_cpus(unsigne
   14.25  	 * Setup boot CPU information
   14.26  	 */
   14.27  	smp_store_cpu_info(0); /* Final full version of the data */
   14.28 -	printk("CPU%d: ", 0);
   14.29 -	print_cpu_info(&cpu_data[0]);
   14.30 +	print_cpu_info(0);
   14.31  
   14.32  	boot_cpu_physical_apicid = get_apic_id();
   14.33  	x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;
    15.1 --- a/xen/drivers/char/console.c	Wed Mar 17 08:31:17 2010 +0000
    15.2 +++ b/xen/drivers/char/console.c	Wed Mar 17 08:34:16 2010 +0000
    15.3 @@ -65,7 +65,12 @@ size_param("conring_size", opt_conring_s
    15.4  
    15.5  #define _CONRING_SIZE 16384
    15.6  #define CONRING_IDX_MASK(i) ((i)&(conring_size-1))
    15.7 -static char _conring[_CONRING_SIZE], *__read_mostly conring = _conring;
    15.8 +static char
    15.9 +#if _CONRING_SIZE >= PAGE_SIZE
   15.10 +    __attribute__((__section__(".bss.page_aligned"), __aligned__(PAGE_SIZE)))
   15.11 +#endif
   15.12 +    _conring[_CONRING_SIZE];
   15.13 +static char *__read_mostly conring = _conring;
   15.14  static uint32_t __read_mostly conring_size = _CONRING_SIZE;
   15.15  static uint32_t conringc, conringp;
   15.16  
   15.17 @@ -595,6 +600,8 @@ void __init console_init_postirq(void)
   15.18  
   15.19      serial_init_postirq();
   15.20  
   15.21 +    if ( !opt_conring_size )
   15.22 +        opt_conring_size = num_present_cpus() << (9 + xenlog_lower_thresh);
   15.23      /* Round size down to a power of two. */
   15.24      while ( opt_conring_size & (opt_conring_size - 1) )
   15.25          opt_conring_size &= opt_conring_size - 1;
   15.26 @@ -618,6 +625,8 @@ void __init console_init_postirq(void)
   15.27      spin_unlock_irq(&console_lock);
   15.28  
   15.29      printk("Allocated console ring of %u KiB.\n", opt_conring_size >> 10);
   15.30 +
   15.31 +    init_xenheap_pages(__pa(_conring), __pa(_conring + _CONRING_SIZE));
   15.32  }
   15.33  
   15.34  void __init console_endboot(void)
    16.1 --- a/xen/include/asm-x86/processor.h	Wed Mar 17 08:31:17 2010 +0000
    16.2 +++ b/xen/include/asm-x86/processor.h	Wed Mar 17 08:34:16 2010 +0000
    16.3 @@ -194,10 +194,11 @@ extern struct cpuinfo_x86 cpu_data[];
    16.4  extern u64 host_pat;
    16.5  extern int phys_proc_id[NR_CPUS];
    16.6  extern int cpu_core_id[NR_CPUS];
    16.7 +extern int opt_cpu_info;
    16.8  
    16.9  extern void identify_cpu(struct cpuinfo_x86 *);
   16.10  extern void setup_clear_cpu_cap(unsigned int);
   16.11 -extern void print_cpu_info(struct cpuinfo_x86 *);
   16.12 +extern void print_cpu_info(unsigned int cpu);
   16.13  extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
   16.14  extern void dodgy_tsc(void);
   16.15