debuggers.hg

view xen/common/trace.c @ 22613:f5f3cf4e001f

xentrace: Clean up initialisation.

Allocate no memory and print no debug messages when disabled.

Signed-off-by: Keir Fraser <keir@xen.org>
author Keir Fraser <keir@xen.org>
date Thu Dec 16 20:07:03 2010 +0000 (2010-12-16)
parents 542e8cd16a6c
children
line source
/******************************************************************************
 * common/trace.c
 *
 * Xen Trace Buffer
 *
 * Copyright (C) 2004 by Intel Research Cambridge
 *
 * Authors: Mark Williamson, mark.a.williamson@intel.com
 *          Rob Gardner, rob.gardner@hp.com
 * Date:    October 2005
 *
 * Copyright (C) 2005 Bin Ren
 *
 * The trace buffer code is designed to allow debugging traces of Xen to be
 * generated on UP / SMP machines.  Each trace entry is timestamped so that
 * it's possible to reconstruct a chronological record of trace events.
 */
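
/*
 * Overview (illustrative): hypervisor code emits records through the
 * TRACE_xD() wrappers in <xen/trace.h>, which funnel into __trace_var()
 * below; the per-CPU buffers are shared with privileged guests (see
 * alloc_trace_bufs() below) so that the dom0 'xentrace' tool can map and
 * drain them.
 */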

#include <xen/config.h>
#include <asm/types.h>
#include <asm/io.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <xen/trace.h>
#include <xen/errno.h>
#include <xen/event.h>
#include <xen/tasklet.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/percpu.h>
#include <xen/cpu.h>
#include <asm/atomic.h>
#include <public/sysctl.h>

#ifdef CONFIG_COMPAT
#include <compat/trace.h>
#define xen_t_buf t_buf
CHECK_t_buf;
#undef xen_t_buf
#else
#define compat_t_rec t_rec
#endif

/* opt_tbuf_size: trace buffer size (in pages) */
static unsigned int opt_tbuf_size = 0;
integer_param("tbuf_size", opt_tbuf_size);

/* Pointers to the meta-data objects for all system trace buffers */
static struct t_info *t_info;
#define T_INFO_PAGES 2  /* Size fixed at 2 pages for now. */
#define T_INFO_SIZE ((T_INFO_PAGES)*(PAGE_SIZE))
static DEFINE_PER_CPU_READ_MOSTLY(struct t_buf *, t_bufs);
static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, t_data);
static DEFINE_PER_CPU_READ_MOSTLY(spinlock_t, t_lock);
static u32 data_size;
static u32 t_info_first_offset __read_mostly;

/* High water mark for trace buffers; */
/* Send virtual interrupt when buffer level reaches this point */
static u32 t_buf_highwater;

/* Number of records lost due to per-CPU trace buffer being full. */
static DEFINE_PER_CPU(unsigned long, lost_records);
static DEFINE_PER_CPU(unsigned long, lost_records_first_tsc);

/* a flag recording whether initialization has been done */
/* or more properly, if the tbuf subsystem is enabled right now */
int tb_init_done __read_mostly;

/* which CPUs tracing is enabled on */
static cpumask_t tb_cpu_mask = CPU_MASK_ALL;

/* which tracing events are enabled */
static u32 tb_event_mask = TRC_ALL;

/* Return the number of elements of type _type necessary to store at least
 * _x bytes of data, i.e. sizeof(_type) * ans >= _x. */
#define fit_to_type(_type, _x) (((_x)+sizeof(_type)-1) / sizeof(_type))
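/*
 * Worked example: fit_to_type(uint32_t, 10) == 3, since 3 * sizeof(uint32_t)
 * = 12 >= 10 while 2 * sizeof(uint32_t) = 8 would fall short.
 */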

static void calc_tinfo_first_offset(void)
{
    int offset_in_bytes = offsetof(struct t_info, mfn_offset[NR_CPUS]);
    t_info_first_offset = fit_to_type(uint32_t, offset_in_bytes);
}

/**
 * check_tbuf_size - check to make sure that the proposed size will fit
 * in the currently sized struct t_info and allows prod and cons to
 * reach double the value without overflow.
 */
static int check_tbuf_size(u32 pages)
{
    struct t_buf dummy;
    typeof(dummy.prod) size;

    size = ((typeof(dummy.prod))pages) * PAGE_SIZE;

    return (size / PAGE_SIZE != pages)
           || (size + size < size)
           || (num_online_cpus() * pages + t_info_first_offset > T_INFO_SIZE / sizeof(uint32_t));
}
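
/*
 * Note on the checks above: prod and cons are allowed to run over the range
 * [0, 2 * buffer size), so both the page count scaled to bytes and its double
 * must fit in typeof(t_buf.prod); the final clause ensures the per-CPU MFN
 * lists still fit in the fixed T_INFO_SIZE metadata area.
 */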

/**
 * alloc_trace_bufs - performs initialization of the per-cpu trace buffers.
 *
 * This function is called at start of day in order to initialize the per-cpu
 * trace buffers. The trace buffers are then available for debugging use, via
 * the %TRACE_xD macros exported in <xen/trace.h>.
 *
 * This function may also be called later when enabling trace buffers
 * via the SET_SIZE hypercall.
 */
static int alloc_trace_bufs(void)
{
    int i, cpu, order;
    unsigned long nr_pages;
    /* Start after a fixed-size array of NR_CPUS */
    uint32_t *t_info_mfn_list;
    int offset;

    if ( opt_tbuf_size == 0 )
        return -EINVAL;

    if ( check_tbuf_size(opt_tbuf_size) )
    {
        printk("Xen trace buffers: tb size %d too large. "
               "Tracing disabled.\n",
               opt_tbuf_size);
        return -EINVAL;
    }

    /* t_info size is fixed for now. Currently this works great, so there
     * seems to be no need to make it dynamic. */
    t_info = alloc_xenheap_pages(get_order_from_pages(T_INFO_PAGES), 0);
    if ( t_info == NULL )
    {
        printk("Xen trace buffers: t_info allocation failed! "
               "Tracing disabled.\n");
        return -ENOMEM;
    }

    for ( i = 0; i < T_INFO_PAGES; i++ )
        share_xen_page_with_privileged_guests(
            virt_to_page(t_info) + i, XENSHARE_readonly);

    t_info_mfn_list = (uint32_t *)t_info;
    offset = t_info_first_offset;

    t_info->tbuf_size = opt_tbuf_size;
    printk(XENLOG_INFO "tbuf_size %d\n", t_info->tbuf_size);

    nr_pages = opt_tbuf_size;
    order = get_order_from_pages(nr_pages);

    /*
     * First, allocate buffers for all of the cpus.  If any
     * fails, deallocate what you have so far and exit.
     */
    for_each_online_cpu(cpu)
    {
        int flags;
        char *rawbuf;
        struct t_buf *buf;

        if ( (rawbuf = alloc_xenheap_pages(
                  order, MEMF_bits(32 + PAGE_SHIFT))) == NULL )
        {
            printk("Xen trace buffers: memory allocation failed\n");
            opt_tbuf_size = 0;
            goto out_dealloc;
        }

        spin_lock_irqsave(&per_cpu(t_lock, cpu), flags);

        per_cpu(t_bufs, cpu) = buf = (struct t_buf *)rawbuf;
        buf->cons = buf->prod = 0;
        per_cpu(t_data, cpu) = (unsigned char *)(buf + 1);

        spin_unlock_irqrestore(&per_cpu(t_lock, cpu), flags);
    }

    /*
     * Now share the pages so that xentrace can map them, and write them
     * into the global t_info structure.
     */
    for_each_online_cpu(cpu)
    {
        /* Share pages so that xentrace can map them. */
        char *rawbuf;

        if ( (rawbuf = (char *)per_cpu(t_bufs, cpu)) )
        {
            struct page_info *p = virt_to_page(rawbuf);
            uint32_t mfn = virt_to_mfn(rawbuf);

            for ( i = 0; i < nr_pages; i++ )
            {
                share_xen_page_with_privileged_guests(
                    p + i, XENSHARE_writable);

                t_info_mfn_list[offset + i] = mfn + i;
            }
            /* Write list first, then write per-cpu offset. */
            wmb();
            t_info->mfn_offset[cpu] = offset;
            printk(XENLOG_INFO "p%d mfn %"PRIx32" offset %d\n",
                   cpu, mfn, offset);
            offset += i;
        }
    }

    data_size = (opt_tbuf_size * PAGE_SIZE - sizeof(struct t_buf));
    t_buf_highwater = data_size >> 1; /* 50% high water */

    return 0;
out_dealloc:
    for_each_online_cpu(cpu)
    {
        int flags;
        char *rawbuf;

        spin_lock_irqsave(&per_cpu(t_lock, cpu), flags);
        if ( (rawbuf = (char *)per_cpu(t_bufs, cpu)) )
        {
            per_cpu(t_bufs, cpu) = NULL;
            ASSERT(!(virt_to_page(rawbuf)->count_info & PGC_allocated));
            free_xenheap_pages(rawbuf, order);
        }
        spin_unlock_irqrestore(&per_cpu(t_lock, cpu), flags);
    }

    return -ENOMEM;
}
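
/*
 * Resulting t_info layout (as built above): tbuf_size, then the per-CPU
 * mfn_offset[] array, then, starting at t_info_first_offset (counted in
 * uint32_t words), one list of MFNs per online CPU.  xentrace reads this
 * read-only metadata to find and map each CPU's buffer.
 */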

/**
 * tb_set_size - handle the logic involved with dynamically
 * allocating and deallocating tbufs
 *
 * This function is called when the SET_SIZE hypercall is done.
 */
static int tb_set_size(int size)
{
    /*
     * Setting size is a one-shot operation. It can be done either at
     * boot time or via control tools, but not by both. Once buffers
     * are created they cannot be destroyed.
     */
    int ret = 0;

    if ( opt_tbuf_size != 0 )
    {
        if ( size != opt_tbuf_size )
            gdprintk(XENLOG_INFO, "tb_set_size from %d to %d not implemented\n",
                     opt_tbuf_size, size);
        return -EINVAL;
    }

    if ( size <= 0 )
        return -EINVAL;

    opt_tbuf_size = size;

    if ( (ret = alloc_trace_bufs()) != 0 )
    {
        opt_tbuf_size = 0;
        return ret;
    }

    printk("Xen trace buffers: initialized\n");
    return 0;
}

int trace_will_trace_event(u32 event)
{
    if ( !tb_init_done )
        return 0;

    /*
     * Copied from __trace_var()
     */
    if ( (tb_event_mask & event) == 0 )
        return 0;

    /* match class */
    if ( ((tb_event_mask >> TRC_CLS_SHIFT) & (event >> TRC_CLS_SHIFT)) == 0 )
        return 0;

    /* then match subclass */
    if ( (((tb_event_mask >> TRC_SUBCLS_SHIFT) & 0xf )
          & ((event >> TRC_SUBCLS_SHIFT) & 0xf )) == 0 )
        return 0;

    if ( !cpu_isset(smp_processor_id(), tb_cpu_mask) )
        return 0;

    return 1;
}

static int cpu_callback(
    struct notifier_block *nfb, unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;

    if ( action == CPU_UP_PREPARE )
        spin_lock_init(&per_cpu(t_lock, cpu));

    return NOTIFY_DONE;
}

static struct notifier_block cpu_nfb = {
    .notifier_call = cpu_callback
};

/**
 * init_trace_bufs - performs initialization of the per-cpu trace buffers.
 *
 * This function is called at start of day in order to initialize the per-cpu
 * trace buffers. The trace buffers are then available for debugging use, via
 * the %TRACE_xD macros exported in <xen/trace.h>.
 */
void __init init_trace_bufs(void)
{
    int i;

    /* Calculate offset in u32 of first mfn */
    calc_tinfo_first_offset();

    /* Per-cpu t_lock initialisation. */
    for_each_online_cpu ( i )
        spin_lock_init(&per_cpu(t_lock, i));
    register_cpu_notifier(&cpu_nfb);

    if ( opt_tbuf_size == 0 )
    {
        printk("Xen trace buffers: disabled\n");
        goto fail;
    }

    if ( alloc_trace_bufs() != 0 )
    {
        dprintk(XENLOG_INFO, "Xen trace buffers: "
                "allocation size %d failed, disabling\n",
                opt_tbuf_size);
        goto fail;
    }

    printk("Xen trace buffers: initialised\n");
    wmb(); /* above must be visible before tb_init_done flag set */
    tb_init_done = 1;
    return;

 fail:
    opt_tbuf_size = 0;
}

/**
 * tb_control - sysctl operations on trace buffers.
 * @tbc: a pointer to a xen_sysctl_tbuf_op_t to be filled out
 */
int tb_control(xen_sysctl_tbuf_op_t *tbc)
{
    static DEFINE_SPINLOCK(lock);
    int rc = 0;

    spin_lock(&lock);

    switch ( tbc->cmd )
    {
    case XEN_SYSCTL_TBUFOP_get_info:
        tbc->evt_mask = tb_event_mask;
        tbc->buffer_mfn = t_info ? virt_to_mfn(t_info) : 0;
        tbc->size = T_INFO_PAGES * PAGE_SIZE;
        break;
    case XEN_SYSCTL_TBUFOP_set_cpu_mask:
        rc = xenctl_cpumap_to_cpumask(&tb_cpu_mask, &tbc->cpu_mask);
        break;
    case XEN_SYSCTL_TBUFOP_set_evt_mask:
        tb_event_mask = tbc->evt_mask;
        break;
    case XEN_SYSCTL_TBUFOP_set_size:
        rc = tb_set_size(tbc->size);
        break;
    case XEN_SYSCTL_TBUFOP_enable:
        /* Enable trace buffers. Check buffers are already allocated. */
        if ( opt_tbuf_size == 0 )
            rc = -EINVAL;
        else
            tb_init_done = 1;
        break;
    case XEN_SYSCTL_TBUFOP_disable:
    {
        /*
         * Disable trace buffers. Just stops new records from being written,
         * does not deallocate any memory.
         */
        int i;

        tb_init_done = 0;
        wmb();
        /* Clear any lost-record info so we don't get phantom lost records
         * next time we start tracing.  Grab the lock to make sure we're not
         * racing anyone.  After this hypercall returns, no more records
         * should be placed into the buffers. */
        for_each_online_cpu(i)
        {
            int flags;
            spin_lock_irqsave(&per_cpu(t_lock, i), flags);
            per_cpu(lost_records, i) = 0;
            spin_unlock_irqrestore(&per_cpu(t_lock, i), flags);
        }
    }
        break;
    default:
        rc = -EINVAL;
        break;
    }

    spin_unlock(&lock);

    return rc;
}

static inline unsigned int calc_rec_size(bool_t cycles, unsigned int extra)
{
    unsigned int rec_size = 4;

    if ( cycles )
        rec_size += 8;
    rec_size += extra;
    return rec_size;
}
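
/*
 * Example: a record carrying a TSC and three extra u32s occupies
 * 4 (header) + 8 (cycles) + 12 (extra) = 24 bytes.
 */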

static inline bool_t bogus(u32 prod, u32 cons)
{
    if ( unlikely(prod & 3) || unlikely(prod >= 2 * data_size) ||
         unlikely(cons & 3) || unlikely(cons >= 2 * data_size) )
    {
        tb_init_done = 0;
        printk(XENLOG_WARNING "trc#%u: bogus prod (%08x) and/or cons (%08x)\n",
               smp_processor_id(), prod, cons);
        return 1;
    }
    return 0;
}

static inline u32 calc_unconsumed_bytes(const struct t_buf *buf)
{
    u32 prod = buf->prod, cons = buf->cons;
    s32 x;

    barrier(); /* must read buf->prod and buf->cons only once */
    if ( bogus(prod, cons) )
        return data_size;

    x = prod - cons;
    if ( x < 0 )
        x += 2*data_size;

    ASSERT(x >= 0);
    ASSERT(x <= data_size);

    return x;
}
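
/*
 * prod and cons are kept in the range [0, 2*data_size) rather than
 * [0, data_size) so that a completely full buffer (prod - cons == data_size,
 * modulo 2*data_size) can be distinguished from an empty one (prod == cons).
 */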

static inline u32 calc_bytes_to_wrap(const struct t_buf *buf)
{
    u32 prod = buf->prod, cons = buf->cons;
    s32 x;

    barrier(); /* must read buf->prod and buf->cons only once */
    if ( bogus(prod, cons) )
        return 0;

    x = data_size - prod;
    if ( x <= 0 )
        x += data_size;

    ASSERT(x > 0);
    ASSERT(x <= data_size);

    return x;
}

static inline u32 calc_bytes_avail(const struct t_buf *buf)
{
    return data_size - calc_unconsumed_bytes(buf);
}

static inline struct t_rec *next_record(const struct t_buf *buf,
                                        uint32_t *next)
{
    u32 x = buf->prod, cons = buf->cons;

    barrier(); /* must read buf->prod and buf->cons only once */
    *next = x;
    if ( !tb_init_done || bogus(x, cons) )
        return NULL;

    if ( x >= data_size )
        x -= data_size;

    ASSERT(x < data_size);

    return (struct t_rec *)&this_cpu(t_data)[x];
}

static inline void __insert_record(struct t_buf *buf,
                                   unsigned long event,
                                   unsigned int extra,
                                   bool_t cycles,
                                   unsigned int rec_size,
                                   const void *extra_data)
{
    struct t_rec *rec;
    unsigned char *dst;
    unsigned int extra_word = extra / sizeof(u32);
    unsigned int local_rec_size = calc_rec_size(cycles, extra);
    uint32_t next;

    BUG_ON(local_rec_size != rec_size);
    BUG_ON(extra & 3);

    rec = next_record(buf, &next);
    if ( !rec )
        return;
    /* Double-check once more that we have enough space.
     * Don't bugcheck here, in case the userland tool is doing
     * something stupid. */
    if ( (unsigned char *)rec + rec_size > this_cpu(t_data) + data_size )
    {
        if ( printk_ratelimit() )
            printk(XENLOG_WARNING
                   "%s: size=%08x prod=%08x cons=%08x rec=%u\n",
                   __func__, data_size, next, buf->cons, rec_size);
        return;
    }

    rec->event = event;
    rec->extra_u32 = extra_word;
    dst = (unsigned char *)rec->u.nocycles.extra_u32;
    if ( (rec->cycles_included = cycles) != 0 )
    {
        u64 tsc = (u64)get_cycles();
        rec->u.cycles.cycles_lo = (uint32_t)tsc;
        rec->u.cycles.cycles_hi = (uint32_t)(tsc >> 32);
        dst = (unsigned char *)rec->u.cycles.extra_u32;
    }

    if ( extra_data && extra )
        memcpy(dst, extra_data, extra);

    wmb();

    next += rec_size;
    if ( next >= 2*data_size )
        next -= 2*data_size;
    ASSERT(next < 2*data_size);
    buf->prod = next;
}

static inline void insert_wrap_record(struct t_buf *buf,
                                      unsigned int size)
{
    u32 space_left = calc_bytes_to_wrap(buf);
    unsigned int extra_space = space_left - sizeof(u32);
    bool_t cycles = 0;

    BUG_ON(space_left > size);

    /* We may need to add cycles to take up enough space... */
    if ( (extra_space/sizeof(u32)) > TRACE_EXTRA_MAX )
    {
        cycles = 1;
        extra_space -= sizeof(u64);
        ASSERT((extra_space/sizeof(u32)) <= TRACE_EXTRA_MAX);
    }

    __insert_record(buf, TRC_TRACE_WRAP_BUFFER, extra_space, cycles,
                    space_left, NULL);
}
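
/*
 * Size accounting for the wrap record: the 4-byte header plus the optional
 * 8-byte cycle count plus extra_space of padding add up to exactly
 * space_left, so the wrap record consumes the remainder of the buffer and
 * the next record starts again at offset 0.
 */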

#define LOST_REC_SIZE (4 + 8 + 16) /* header + tsc + sizeof(struct ed) */

static inline void insert_lost_records(struct t_buf *buf)
{
    struct {
        u32 lost_records;
        u32 did:16, vid:16;
        u64 first_tsc;
    } __attribute__((packed)) ed;

    ed.vid = current->vcpu_id;
    ed.did = current->domain->domain_id;
    ed.lost_records = this_cpu(lost_records);
    ed.first_tsc = this_cpu(lost_records_first_tsc);

    this_cpu(lost_records) = 0;

    __insert_record(buf, TRC_LOST_RECORDS, sizeof(ed), 1 /* cycles */,
                    LOST_REC_SIZE, &ed);
}

/*
 * Notification is performed in a tasklet to avoid deadlocks with contexts
 * which __trace_var() may be called from (e.g., scheduler critical regions).
 */
static void trace_notify_dom0(unsigned long unused)
{
    send_guest_global_virq(dom0, VIRQ_TBUF);
}
static DECLARE_TASKLET(trace_notify_dom0_tasklet, trace_notify_dom0, 0);

/**
 * __trace_var - Enters a trace record into the trace buffer for the current CPU.
 * @event:      the event type being logged
 * @cycles:     include a TSC timestamp in the record?
 * @extra:      size in bytes of the additional event data
 * @extra_data: pointer to the additional event data
 *
 * Logs a trace record into the appropriate buffer.  Does nothing if the
 * trace buffers are not yet initialised or the event is filtered out.
 */
void __trace_var(u32 event, bool_t cycles, unsigned int extra,
                 const void *extra_data)
{
    struct t_buf *buf;
    unsigned long flags;
    u32 bytes_to_tail, bytes_to_wrap;
    unsigned int rec_size, total_size;
    unsigned int extra_word;
    bool_t started_below_highwater;

    if ( !tb_init_done )
        return;

    /* Convert byte count into word count, rounding up */
    extra_word = (extra / sizeof(u32));
    if ( (extra % sizeof(u32)) != 0 )
        extra_word++;

    ASSERT(extra_word <= TRACE_EXTRA_MAX);
    extra_word = min_t(int, extra_word, TRACE_EXTRA_MAX);

    /* Round size up to nearest word */
    extra = extra_word * sizeof(u32);

    if ( (tb_event_mask & event) == 0 )
        return;

    /* match class */
    if ( ((tb_event_mask >> TRC_CLS_SHIFT) & (event >> TRC_CLS_SHIFT)) == 0 )
        return;

    /* then match subclass */
    if ( (((tb_event_mask >> TRC_SUBCLS_SHIFT) & 0xf )
          & ((event >> TRC_SUBCLS_SHIFT) & 0xf )) == 0 )
        return;

    if ( !cpu_isset(smp_processor_id(), tb_cpu_mask) )
        return;

    /* Read tb_init_done /before/ t_bufs. */
    rmb();

    spin_lock_irqsave(&this_cpu(t_lock), flags);

    buf = this_cpu(t_bufs);

    if ( unlikely(!buf) )
    {
        /* Make gcc happy */
        started_below_highwater = 0;
        goto unlock;
    }

    started_below_highwater = (calc_unconsumed_bytes(buf) < t_buf_highwater);

    /* Calculate the record size */
    rec_size = calc_rec_size(cycles, extra);

    /* How many bytes are available in the buffer? */
    bytes_to_tail = calc_bytes_avail(buf);

    /* How many bytes until the next wrap-around? */
    bytes_to_wrap = calc_bytes_to_wrap(buf);

    /*
     * Calculate expected total size to commit this record by
     * doing a dry-run.
     */
    total_size = 0;

    /* First, check to see if we need to include a lost_record. */
    if ( this_cpu(lost_records) )
    {
        if ( LOST_REC_SIZE > bytes_to_wrap )
        {
            total_size += bytes_to_wrap;
            bytes_to_wrap = data_size;
        }
        total_size += LOST_REC_SIZE;
        bytes_to_wrap -= LOST_REC_SIZE;

        /* LOST_REC might line up perfectly with the buffer wrap */
        if ( bytes_to_wrap == 0 )
            bytes_to_wrap = data_size;
    }

    if ( rec_size > bytes_to_wrap )
    {
        total_size += bytes_to_wrap;
    }
    total_size += rec_size;

    /* Do we have enough space for everything? */
    if ( total_size > bytes_to_tail )
    {
        if ( ++this_cpu(lost_records) == 1 )
            this_cpu(lost_records_first_tsc) = (u64)get_cycles();
        started_below_highwater = 0;
        goto unlock;
    }

    /*
     * Now, actually write information
     */
    bytes_to_wrap = calc_bytes_to_wrap(buf);

    if ( this_cpu(lost_records) )
    {
        if ( LOST_REC_SIZE > bytes_to_wrap )
        {
            insert_wrap_record(buf, LOST_REC_SIZE);
            bytes_to_wrap = data_size;
        }
        insert_lost_records(buf);
        bytes_to_wrap -= LOST_REC_SIZE;

        /* LOST_REC might line up perfectly with the buffer wrap */
        if ( bytes_to_wrap == 0 )
            bytes_to_wrap = data_size;
    }

    if ( rec_size > bytes_to_wrap )
        insert_wrap_record(buf, rec_size);

    /* Write the original record */
    __insert_record(buf, event, extra, cycles, rec_size, extra_data);

unlock:
    spin_unlock_irqrestore(&this_cpu(t_lock), flags);

    /* Notify trace buffer consumer that we've crossed the high water mark. */
    if ( likely(buf != NULL)
         && started_below_highwater
         && (calc_unconsumed_bytes(buf) >= t_buf_highwater) )
        tasklet_schedule(&trace_notify_dom0_tasklet);
}
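
/*
 * Illustrative call site (not part of this file): hypervisor components
 * normally go through the TRACE_xD() convenience wrappers from
 * <xen/trace.h> rather than calling __trace_var() directly, e.g.
 *
 *     TRACE_2D(TRC_SCHED_YIELD, current->domain->domain_id,
 *              current->vcpu_id);
 *
 * The event code and arguments shown here are only an example.
 */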

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */