/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

#include "mono/utils/mono-compiler.h"

/*
 * User-space ABI bits:
 */
/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE			= 0,
	PERF_TYPE_SOFTWARE			= 1,
	PERF_TYPE_TRACEPOINT			= 2,
	PERF_TYPE_HW_CACHE			= 3,
	PERF_TYPE_RAW				= 4,
	PERF_TYPE_BREAKPOINT			= 5,

	PERF_TYPE_MAX,				/* non-ABI */
};
/*
 * Generalized performance event event_id types, used by the
 * attr.event_id parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES		= 0,
	PERF_COUNT_HW_INSTRUCTIONS		= 1,
	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
	PERF_COUNT_HW_CACHE_MISSES		= 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_HW_BRANCH_MISSES		= 5,
	PERF_COUNT_HW_BUS_CYCLES		= 6,

	PERF_COUNT_HW_MAX,			/* non-ABI */
};
/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D			= 0,
	PERF_COUNT_HW_CACHE_L1I			= 1,
	PERF_COUNT_HW_CACHE_LL			= 2,
	PERF_COUNT_HW_CACHE_DTLB		= 3,
	PERF_COUNT_HW_CACHE_ITLB		= 4,
	PERF_COUNT_HW_CACHE_BPU			= 5,

	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ		= 0,
	PERF_COUNT_HW_CACHE_OP_WRITE		= 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2,

	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
};
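
/*
 * Example: for PERF_TYPE_HW_CACHE events, the three enums above are
 * combined into attr.config as (id) | (op_id << 8) | (result_id << 16).
 * A minimal sketch selecting "L1 data-cache read misses":
 *
 *	attr.type   = PERF_TYPE_HW_CACHE;
 *	attr.config = PERF_COUNT_HW_CACHE_L1D |
 *		      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */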
/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and sw events of the kernel (and allow the profiling of them as
 * well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK			= 0,
	PERF_COUNT_SW_TASK_CLOCK		= 1,
	PERF_COUNT_SW_PAGE_FAULTS		= 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES		= 3,
	PERF_COUNT_SW_CPU_MIGRATIONS		= 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6,
	PERF_COUNT_SW_ALIGNMENT_FAULTS		= 7,
	PERF_COUNT_SW_EMULATION_FAULTS		= 8,

	PERF_COUNT_SW_MAX,			/* non-ABI */
};
/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP				= 1U << 0,
	PERF_SAMPLE_TID				= 1U << 1,
	PERF_SAMPLE_TIME			= 1U << 2,
	PERF_SAMPLE_ADDR			= 1U << 3,
	PERF_SAMPLE_READ			= 1U << 4,
	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
	PERF_SAMPLE_ID				= 1U << 6,
	PERF_SAMPLE_CPU				= 1U << 7,
	PERF_SAMPLE_PERIOD			= 1U << 8,
	PERF_SAMPLE_STREAM_ID			= 1U << 9,
	PERF_SAMPLE_RAW				= 1U << 10,

	PERF_SAMPLE_MAX = 1U << 11,		/* non-ABI */
};
/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64		value;
 *	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_RUNNING
 *	  { u64		id;           } && PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64		nr;
 *	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_RUNNING
 *	  { u64		value;
 *	    { u64	id;           } && PERF_FORMAT_ID
 *	  }		cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED		= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING		= 1U << 1,
	PERF_FORMAT_ID				= 1U << 2,
	PERF_FORMAT_GROUP			= 1U << 3,

	PERF_FORMAT_MAX = 1U << 4,		/* non-ABI */
};
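
/*
 * Example: a minimal sketch of reading a single (non-group) event opened
 * with read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_TOTAL_TIME_RUNNING. The two time fields allow scaling the
 * count when the event was multiplexed; "fd" is assumed to be a perf
 * event fd and error handling is elided:
 *
 *	struct {
 *		__u64 value;
 *		__u64 time_enabled;
 *		__u64 time_running;
 *	} rf;
 *	__u64 scaled;
 *
 *	read(fd, &rf, sizeof(rf));
 *	scaled = rf.time_running ?
 *		(__u64)((double)rf.value * rf.time_enabled / rf.time_running) :
 *		rf.value;
 */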
#define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */
/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32			type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32			size;

	/*
	 * Type specific configuration information.
	 */
	__u64			config;

	union {
		__u64		sample_period;
		__u64		sample_freq;
	};

	__u64			sample_type;
	__u64			read_format;

	__u64			disabled       :  1, /* off by default        */
				inherit	       :  1, /* children inherit it   */
				pinned	       :  1, /* must always be on PMU */
				exclusive      :  1, /* only group on PMU     */
				exclude_user   :  1, /* don't count user      */
				exclude_kernel :  1, /* ditto kernel          */
				exclude_hv     :  1, /* ditto hypervisor      */
				exclude_idle   :  1, /* don't count when idle */
				mmap           :  1, /* include mmap data     */
				comm	       :  1, /* include comm data     */
				freq           :  1, /* use freq, not period  */
				inherit_stat   :  1, /* per task counts       */
				enable_on_exec :  1, /* next exec enables     */
				task           :  1, /* trace fork/exit       */
				watermark      :  1, /* wakeup_watermark      */
				/*
				 * precise_ip:
				 *
				 *  0 - SAMPLE_IP can have arbitrary skid
				 *  1 - SAMPLE_IP must have constant skid
				 *  2 - SAMPLE_IP requested to have 0 skid
				 *  3 - SAMPLE_IP must have 0 skid
				 *
				 *  See also PERF_RECORD_MISC_EXACT_IP
				 */
				precise_ip     :  2, /* skid constraint       */
				mmap_data      :  1, /* non-exec mmap data    */

				__reserved_1   : 46;

	union {
		__u32		wakeup_events;	  /* wakeup every n events */
		__u32		wakeup_watermark; /* bytes before wakeup   */
	};

	__u32			bp_type;
	__u64			bp_addr;
	__u64			bp_len;
};
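
/*
 * Example: a minimal user-space sketch of opening a counter with this ABI.
 * sys_perf_event_open() has no libc wrapper, so it is invoked via
 * syscall(2) with __NR_perf_event_open from <asm/unistd.h>; error handling
 * is elided. The arguments are (attr, pid, cpu, group_fd, flags); pid 0
 * and cpu -1 mean "this thread, on any CPU".
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/perf_event.h>
 *
 *	static int open_cycle_counter(void)
 *	{
 *		struct perf_event_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.type     = PERF_TYPE_HARDWARE;
 *		attr.size     = sizeof(attr);
 *		attr.config   = PERF_COUNT_HW_CPU_CYCLES;
 *		attr.disabled = 1;	// enable later via ioctl
 *
 *		return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	}
 */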
/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)
enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
};
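
/*
 * Example: a typical count-a-region cycle using the ioctls above. Passing
 * PERF_IOC_FLAG_GROUP as the argument applies the operation to the whole
 * event group rather than a single event. "fd" and workload() are
 * illustrative; error handling is elided:
 *
 *	ioctl(fd, PERF_EVENT_IOC_RESET,  0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	workload();
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 */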
/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw events in user-space.
	 *
	 *   u32 seq;
	 *   s64 count;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *
	 *     barrier()
	 *     if (pc->index) {
	 *       count = pmc_read(pc->index - 1);
	 *       count += pc->offset;
	 *     } else
	 *       goto regular_read;
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reason this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware event identifier */
	__s64	offset;			/* add to hardware event value */
	__u64	time_enabled;		/* time event active */
	__u64	time_running;		/* time event on cpu */

		/*
		 * Hole for extension of the self monitor capabilities
		 */

	__u64	__reserved[123];	/* align to 1k */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space reading the @data_head value should issue an rmb(), on
	 * SMP capable platforms, after reading this value -- see
	 * perf_event_wakeup().
	 *
	 * When the mapping is PROT_WRITE the @data_tail value should be
	 * written by userspace to reflect the last read data. In this case
	 * the kernel will not over-write unread data.
	 */
	__u64   data_head;		/* head in the data section */
	__u64	data_tail;		/* user-space written tail */
};
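
/*
 * Example: a sketch of the consumer side of the protocol described above.
 * The data area is assumed to start one page after this control page and
 * to span nr_data_pages pages (a power of two); rmb() and consume() stand
 * in for a platform read barrier and an illustrative record handler, and
 * records wrapping the buffer edge would need to be copied out first:
 *
 *	static __u64 tail;	// read position, persists across calls
 *	char  *data = (char *)pc + page_size;
 *	__u64  mask = nr_data_pages * page_size - 1;
 *	__u64  head = pc->data_head;
 *
 *	rmb();			// order record reads after the head read
 *	while (tail != head) {
 *		struct perf_event_header *hdr =
 *			(struct perf_event_header *)(data + (tail & mask));
 *		consume(hdr);
 *		tail += hdr->size;
 *	}
 *	pc->data_tail = tail;	// kernel may now overwrite consumed data
 */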
#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)

/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)
struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};
enum perf_event_type {

	/*
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 * };
	 */
	PERF_RECORD_MMAP			= 1,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	u64				lost;
	 * };
	 */
	PERF_RECORD_LOST			= 2,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	char				comm[];
	 * };
	 */
	PERF_RECORD_COMM			= 3,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_EXIT			= 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				stream_id;
	 * };
	 */
	PERF_RECORD_THROTTLE			= 5,
	PERF_RECORD_UNTHROTTLE			= 6,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_FORK			= 7,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *
	 *	struct read_format		values;
	 * };
	 */
	PERF_RECORD_READ			= 8,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	{ u64			ip;	  } && PERF_SAMPLE_IP
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
	 *	{ u64			id;	  } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
	 *
	 *	{ u64			nr,
	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt to
	 *	# the stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32			size;
	 *	  char			data[size]; } && PERF_SAMPLE_RAW
	 * };
	 */
	PERF_RECORD_SAMPLE			= 9,

	PERF_RECORD_MAX,			/* non-ABI */
};
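
/*
 * Example: the sample body is the concatenation, in the order listed
 * above, of exactly the fields selected in attr.sample_type. A sketch of
 * the layout for sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME:
 *
 *	struct sample {
 *		struct perf_event_header header;  // type == PERF_RECORD_SAMPLE
 *		__u64 ip;			  // PERF_SAMPLE_IP
 *		__u32 pid, tid;			  // PERF_SAMPLE_TID
 *		__u64 time;			  // PERF_SAMPLE_TIME
 *	};
 */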
enum perf_callchain_context {
	PERF_CONTEXT_HV			= (__u64)-32,
	PERF_CONTEXT_KERNEL		= (__u64)-128,
	PERF_CONTEXT_USER		= (__u64)-512,

	PERF_CONTEXT_GUEST		= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,

	PERF_CONTEXT_MAX		= (__u64)-4095,
};
#define PERF_FLAG_FD_NO_GROUP	(1U << 0)
#define PERF_FLAG_FD_OUTPUT	(1U << 1)

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */
#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif
struct perf_guest_info_callbacks {
	int (*is_in_guest) (void);
	int (*is_user_mode) (void);
	unsigned long (*get_guest_ip) (void);
};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/jump_label_ref.h>
#include <asm/atomic.h>
#include <asm/local.h>

#define PERF_MAX_STACK_DEPTH		255
struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
	u32				size;
	void				*data;
};

struct perf_branch_entry {
	__u64				from;
	__u64				to;
	__u64				flags;
};

struct perf_branch_stack {
	__u64				nr;
	struct perf_branch_entry	entries[0];
};

struct task_struct;
/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		idx;
			int		last_cpu;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct task_struct		*bp_target;
		};
#endif
	};
	int				state;
	local64_t			prev_count;
	u64				sample_period;
	u64				last_period;
	local64_t			period_left;
	u64				interrupts;

	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};
/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1
/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	int * __percpu			pmu_disable_count;
	struct perf_cpu_context * __percpu pmu_cpu_context;
	int				task_ctx_nr;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Try and initialize the event for this PMU.
	 * Should return -ENOENT when the @event doesn't match this PMU.
	 */
	int (*event_init)		(struct perf_event *event);

#define PERF_EF_START	0x01		/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04		/* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU, can be done inside
	 * a transaction, see the ->*_txn() methods.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU. The PMI handler
	 * should stop the counter when perf_event_overflow() returns
	 * !0. ->start() will be used to continue.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group events scheduling is treated as a transaction, add
	 * group events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group.
	 *
	 * Start the transaction, after this ->add() doesn't need to
	 * do schedulability tests.
	 */
	void (*start_txn)		(struct pmu *pmu); /* optional */
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 */
	int  (*commit_txn)		(struct pmu *pmu); /* optional */
	/*
	 * Will cancel the transaction, assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 */
	void (*cancel_txn)		(struct pmu *pmu); /* optional */
};
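
/*
 * Example: a hedged sketch of the minimal set of callbacks a software-style
 * PMU would supply before calling perf_pmu_register() (declared below).
 * The dummy_* functions are illustrative, not part of this header:
 *
 *	static struct pmu dummy_pmu = {
 *		.task_ctx_nr	= perf_sw_context,
 *		.event_init	= dummy_event_init,	// -ENOENT if event isn't ours
 *		.add		= dummy_add,		// flags may contain PERF_EF_START
 *		.del		= dummy_del,
 *		.start		= dummy_start,
 *		.stop		= dummy_stop,
 *		.read		= dummy_read,
 *	};
 */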
/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};
#define PERF_BUFFER_WRITABLE		0x01

struct perf_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				writable;	/* are we writable   */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
					struct perf_sample_data *,
					struct pt_regs *regs);

enum perf_group_flag {
	PERF_GROUP_SOFTWARE = 0x1,
};

#define SWEVENT_HLIST_BITS	8
#define SWEVENT_HLIST_SIZE	(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head	heads[SWEVENT_HLIST_SIZE];
	struct rcu_head		rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04
/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	struct list_head		group_entry;
	struct list_head		event_entry;
	struct list_head		sibling_list;
	struct hlist_node		hlist_entry;
	int				nr_siblings;
	int				group_flags;
	struct perf_event		*group_leader;
	struct pmu			*pmu;

	enum perf_event_active_state	state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	event was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	struct perf_event_attr		attr;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	struct file			*filp;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;
	int				mmap_locked;
	struct user_struct		*mmap_user;
	struct perf_buffer		*buffer;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct irq_work			pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	perf_overflow_handler_t		overflow_handler;

#ifdef CONFIG_EVENT_TRACING
	struct ftrace_event_call	*tp_event;
	struct event_filter		*filter;
#endif

#endif /* CONFIG_PERF_EVENTS */
};
enum perf_event_context_type {
	task_context,
	cpu_context,
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	enum perf_event_context_type	type;
	struct pmu			*pmu;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events. Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		pinned_groups;
	struct list_head		flexible_groups;
	struct list_head		event_list;
	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	atomic_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
	struct rcu_head			rcu_head;
};
/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;
	struct list_head		rotation_list;
	int				jiffies_interval;
};
struct perf_output_handle {
	struct perf_event		*event;
	struct perf_buffer		*buffer;
	unsigned long			wakeup;
	unsigned long			size;
	void				*addr;
	int				page;
	int				nmi;
	int				sample;
};
#ifdef CONFIG_PERF_EVENTS

extern int perf_pmu_register(struct pmu *pmu);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *task,
					struct task_struct *next);
extern atomic_t perf_task_events;
static inline void perf_event_task_sched_in(struct task_struct *task)
{
	COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
}

static inline
void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
{
	COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
}
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				int cpu,
				struct task_struct *task,
				perf_overflow_handler_t callback);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);
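
/*
 * Example: a hedged sketch of an in-kernel cycle counter on one CPU, in
 * the style of perf_event_create_kernel_counter() users such as the NMI
 * watchdog; "overflow_cb" is an illustrative perf_overflow_handler_t:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(attr),
 *		.sample_period	= 1000000,
 *	};
 *	struct perf_event *event =
 *		perf_event_create_kernel_counter(&attr, cpu, NULL, overflow_cb);
 *
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 */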
struct perf_sample_data {
	u64				type;

	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				addr;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	u64				period;
	struct perf_callchain_entry	*callchain;
	struct perf_raw_record		*raw;
};

static inline
void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
{
	data->addr = addr;
	data->raw  = NULL;
}
extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event, int nmi,
			       struct perf_sample_data *data,
			       struct pt_regs *regs);
/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->pmu->task_ctx_nr == perf_sw_context;
}

extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
#ifndef perf_arch_fetch_caller_regs
static inline void
perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(*regs));

	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}
static MONO_ALWAYS_INLINE void
perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
	struct pt_regs hot_regs;

	JUMP_LABEL(&perf_swevent_enabled[event_id], have_event);
	return;

have_event:
	if (!regs) {
		perf_fetch_caller_regs(&hot_regs);
		regs = &hot_regs;
	}
	__perf_sw_event(event_id, nr, nmi, regs, addr);
}
extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);
/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry,
				struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry,
				  struct pt_regs *regs);

static inline void
perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}
extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_event_paranoid > 1;
}
extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
#define perf_misc_flags(regs)	(user_mode(regs) ? PERF_RECORD_MISC_USER : \
				 PERF_RECORD_MISC_KERNEL)
#define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif
extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size,
			     int nmi, int sample);
extern void perf_output_end(struct perf_output_handle *handle);
extern void perf_output_copy(struct perf_output_handle *handle,
			     const void *buf, unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_task_tick(void);
#else
static inline void
perf_event_task_sched_in(struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *task,
			  struct task_struct *next)			{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_delayed_put(struct task_struct *task)	{ }
static inline void perf_event_print_debug(void)			{ }
static inline int perf_event_task_disable(void)			{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }

static inline void
perf_sw_event(u32 event_id, u64 nr, int nmi,
	      struct pt_regs *regs, u64 addr)				{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
static inline void perf_event_comm(struct task_struct *tsk)		{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_init(void)				{ }
static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)	{ }
static inline void perf_event_enable(struct perf_event *event)		{ }
static inline void perf_event_disable(struct perf_event *event)	{ }
static inline void perf_event_task_tick(void)				{ }
#endif /* CONFIG_PERF_EVENTS */
#define perf_output_put(handle, x) \
	perf_output_copy((handle), &(x), sizeof(x))

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
#define perf_cpu_notifier(fn)					\
do {								\
	static struct notifier_block fn##_nb __cpuinitdata =	\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF }; \
	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,		\
		(void *)(unsigned long)smp_processor_id());	\
	fn(&fn##_nb, (unsigned long)CPU_STARTING,		\
		(void *)(unsigned long)smp_processor_id());	\
	fn(&fn##_nb, (unsigned long)CPU_ONLINE,			\
		(void *)(unsigned long)smp_processor_id());	\
	register_cpu_notifier(&fn##_nb);			\
} while (0)
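
/*
 * Example: perf_cpu_notifier() replays UP_PREPARE/STARTING/ONLINE for the
 * current CPU and then registers the callback for future hotplug events;
 * "my_perf_cpu_callback" is an illustrative notifier function:
 *
 *	perf_cpu_notifier(my_perf_cpu_callback);
 */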
#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_EVENT_H */