/*
 * irix-657m-src/irix/kern/sys/rtmon.h
 */
/**************************************************************************
* *
* Copyright (C) 1995, Silicon Graphics, Inc. *
* *
* These coded instructions, statements, and computer programs contain *
* unpublished proprietary information of Silicon Graphics, Inc., and *
* are protected by Federal copyright law. They may not be disclosed *
* to third parties or copied or duplicated in any form, in whole or *
* in part, without the prior written consent of Silicon Graphics, Inc. *
* *
**************************************************************************/
#ifndef __SYS_RTMON_H__
#define __SYS_RTMON_H__
#include <sys/types.h>
/* include files for Wind River programs */
#include <sys/wr_eventP.h>
/*
* Event classes for the LOG_TSTAMP_EVENT() macro:
*
* If you add a new event class you'll need to add corresponding code in XXX
* other places in order to complete the addition:
*
* 1. Add code to rtmond's event_to_class() function to translate events
* belonging to the new event class to the new class.
* 2. Add code to rtmon-dump/rtmon-client in util.c to allow the new event
* class to be selected. If the new event class is to be part of par's
* event classes, you should also add the new event class to the RTMON_PAR
* in util.h.
* 3. You should think about adding code to par/padc to allow the new event
* class to be selected.
* 4. Add code to librtmon print.c to support selecting reporting events
* associated with the new event class.
* 5. Similar changes for items number 2-4 above will need to be done for
* IRIXview.
*/
#define RTMON_DEBUG 0x0000000000000001LL
#define RTMON_SIGNAL 0x0000000000000002LL /* signal handling */
#define RTMON_SYSCALL 0x0000000000000004LL /* trace system calls */
#define RTMON_XXX 0x0000000000000008LL /* was rescheds */
#define RTMON_TASK 0x0000000000000010LL /* task state changes */
#define RTMON_INTR 0x0000000000000020LL /* trace interrupts */
#define RTMON_FRAMESCHED 0x0000000000000040LL /* frame scheduler events */
#define RTMON_PROFILE 0x0000000000000080LL /* profile trace */
#define RTMON_VM 0x0000000000000100LL /* VM event tracing */
/* NOTE(review): 0x120 is not a single bit -- it overlaps RTMON_VM|RTMON_INTR,
 * so a mask test for RTMON_SCHEDULER also fires when either of those classes
 * is enabled.  Looks like it was meant to be a fresh bit (e.g. 0x4000);
 * confirm before relying on mask arithmetic.  Left unchanged here because
 * the value is part of the rtmond wire protocol.
 */
#define RTMON_SCHEDULER 0x0000000000000120LL /* Scheduler Event Tracing */
#define RTMON_DISK 0x0000000000000200LL /* disk i/o tracing */
#define RTMON_NETSCHED 0x0000000000000400LL /* network scheduling */
#define RTMON_NETFLOW 0x0000000000000800LL /* network data flow */
#define RTMON_ALLOC 0x0000000000001000LL /* mem. alloc/free */
/*
* RTMON_TASKPROC is a subset of RTMON_TASK. RTMON_TASK applies to all
* execution vehicles (sthread, xthreads and uthreads) while the event
* tracing mode is active. RTMON_TASKPROC only applies to uthreads of
* processes which have been specially marked (SPARSWTCH). Note that we
* could have implemented this as a more general ``process-based event
* filter'' where only events being generated by marked processes would be
* logged but we really only care about filtering system call and context
* switch events and system calls are already implicitly filtered.
* Developing a more general scheme would have required modifying every
* event log point or complicating the event logging routine.
*/
#define RTMON_TASKPROC 0x0000000000002000LL /* process task state changes */
/*
* The following aren't really event masks per se but are really client flags
* to rtmond to parameterize the client/server connection.
*/
#define RTMON_NOTASKNAMES 0x8000000000000000LL /* don't synthesize task names */
#define RTMON_ALL 0xffffffffffffffffLL
/*
* The following event classes include per-process information.
* When any of these classes are enabled for tracing the kernel
* also emits events when a process forks, execs, or exits.
*/
#define RTMON_PIDAWARE (RTMON_SIGNAL|RTMON_SYSCALL|RTMON_TASK|RTMON_TASKPROC)
#ifdef _KERNEL
/*
* This macro checks to see if we are looking to log time stamp events on a
* cpu before we call log_tstamp_event to avoid the procedure call overhead.
*/
/* The argument is parenthesized so that a combined mask such as
 * IS_TSTAMP_EVENT_ACTIVE(RTMON_VM|RTMON_TASK) associates correctly
 * (without the parentheses, & binds tighter than | and the test is wrong).
 */
#define IS_TSTAMP_EVENT_ACTIVE(class) \
(private.p_tstamp_mask & (class))
extern void log_tstamp_event(event_t evt, int64_t, int64_t, int64_t, int64_t);
/*
 * Log one timestamp event of type `evt' with four 64-bit qualifiers,
 * but only when some class in `class' is enabled on this cpu.  The mask
 * test is done inline so the common tracing-disabled case avoids the
 * procedure-call overhead of log_tstamp_event(); the frequency hint
 * tells the MIPSpro compiler the taken branch is rare.
 */
static __inline void
LOG_TSTAMP_EVENT(uint64_t class, event_t evt,
uint64_t qual0, uint64_t qual1,
uint64_t qual2, uint64_t qual3)
{
if (IS_TSTAMP_EVENT_ACTIVE(class)) {
#pragma mips_frequency_hint NEVER
log_tstamp_event(evt, qual0, qual1, qual2, qual3);
}
}
/*
 * Kernel-internal entry points for variable-length event logging and for
 * managing the per-cpu user timestamp buffers (create/delete, physical
 * address lookup, event mask, end-of-buffer mode, reader wait and
 * read-pointer update).  NOTE(review): per-function semantics inferred
 * from names -- confirm against os/tstamp.c.
 */
extern void log_tstamp_vevent(event_t evt, const void*, uint);
extern int tstamp_user_create(int cpu, int nentries, paddr_t* buffer_paddr);
extern int tstamp_user_delete(int cpu);
extern int tstamp_user_addr(int cpu, paddr_t* buffer_paddr);
extern int tstamp_user_mask(int cpu, uint64_t mask, uint64_t* omask);
extern int tstamp_user_eob_mode(int cpu, uint mode, uint* omode);
extern int tstamp_user_wait(int cpu, int tout);
extern int tstamp_user_update(int cpu, int nread);
#endif /* _KERNEL */
/*
* Kernel interfaces for REACT timestamps.
*/
/*
 * structure of user/kernel timestamps
 */
#define TSTAMP_NUM_QUALS 4
/*
 * One fixed-size timestamp event record as stored in the kernel and
 * user fifos.  Variable-length ("jumbo") events are stored as a run of
 * these entries; jumbocnt on the first entry gives the number of
 * continuation entries that follow.
 */
typedef struct tstamp_event_entry {
event_t evt; /* event type (TSTAMP_EV_*, KERNEL_EVENT(), ...) */
ushort cpu; /* cpu the event was logged on */
ushort jumbocnt; /* count of following events that
* are part of this event
*/
__int64_t tstamp; /* timestamp (ticks; see tstampfreq in config event) */
__uint64_t qual[TSTAMP_NUM_QUALS]; /* event-specific qualifiers */
} tstamp_event_entry_t;
#if defined(_KERNEL) || defined(_KMEMUSER)
/*
* System call event data format.
*
* System call events are variable length. The call number and
* parameters follow the tstamp field in a normal event record.
* The fixed block of info is followed by a variable length set
* of parameters and, possibly, indirect parameters. The numparams
* field has the count of params in the top 4 bits and the count
* of indirect parameters in the low 4 bits. Parameters that
* follow come as records of the form:
* descriptor (short)
* length (short)
* data value ..
*
* The amount of data returned for indirect parameters (like
* read/write) is restricted. An ioctl is available to manipulate
* the amount of indirect data that may be returned; within a
* fixed range.
*/
/*
 * Fixed-size head of a system call event; the variable-length parameter
 * records described in the comment above follow params[].
 * NOTE(review): the format comment above says both counts are packed
 * into a single numparams field, but the struct carries separate
 * numparams/numiparams bytes -- the struct appears authoritative.
 */
typedef struct {
__int64_t k_id; /* kthread ID */
__int64_t cookie; /* distribution cookie */
pid_t pid; /* process ID */
unchar abi; /* execution ABI */
unchar pad1; /* alignment padding */
ushort callno; /* system call number */
unchar numparams; /* # of direct params that follow */
unchar numiparams; /* # of indirect params that follow */
uint pad2; /* ...to 64-bit boundary */
usysarg_t params[1]; /* more of these ... (pre-C99 idiom for a
* trailing variable-length array) */
} tstamp_event_syscall_data_t;
/*
* Thread reschedule event w/ wait channel.
*/
typedef struct {
__int64_t k_id; /* kthread ID */
__int64_t stuff; /* packed priority and flags (presumably built
* with RTMON_PACKPRI -- verify at log sites) */
__int64_t wchan; /* wait channel */
__int64_t wchan2; /* wait channel2 */
char wchanname[16]; /* wait channel name */
} tstamp_event_resched_data_t;
/*
* Process fork event w/ process name.
*/
typedef struct {
__int64_t pkid; /* parent kthread ID */
__int64_t ckid; /* child kthread ID */
char name[16]; /* parent/child process name (fixed 16 bytes;
* truncation/termination not shown here) */
} tstamp_event_fork_data_t;
/*
* Process exec event w/ process name.
*/
typedef struct {
__int64_t k_id; /* kthread ID */
pid_t pid; /* Process ID */
char name[20]; /* new process name (fixed 20 bytes) */
} tstamp_event_exec_data_t;
#endif /* _KERNEL || _KMEMUSER */
/*
 * Bookkeeping shared between the producer and the consumer of a
 * timestamp fifo.
 */
typedef struct tstamp_shared_state {
int lost_counter; /* count lost timestamps */
int tstamp_counter; /* outstanding timestamps */
int curr_index; /* current fifo buffer ins idx */
} tstamp_shared_state_t;
#define NUMB_USER_TSTAMPS 2048
#define NUMB_KERNEL_TSTAMPS 2048
/*
 * Fifo of user-generated timestamp events, read by the merging daemon.
 */
typedef struct {
tstamp_shared_state_t state; /* producer/consumer bookkeeping */
uint water_mark; /* fill threshold -- presumably triggers a
* reader wakeup; verify against rtmond */
uint nentries; /* capacity of tstamp_event_entry[] */
/* All entries have their evt field cleared to 0 at initialization and
 * after the merging daemon reads them; only non-0 events are
 * considered valid.  evt 0 is the control event BEGIN, which occurs
 * only once and is never put into either the kernel or user queue,
 * so 0 is safe as the "empty slot" marker.
 */
tstamp_event_entry_t tstamp_event_entry[NUMB_USER_TSTAMPS];
uint enabled; /* if timestamps are being entered */
} user_queue_t;
/*
 * Kernel-side view of a timestamp fifo: unlike user_queue_t, the entry
 * array and shared state are referenced through pointers rather than
 * embedded in the struct.
 */
typedef struct {
tstamp_event_entry_t *tstamp_event_entry; /* event fifo */
tstamp_shared_state_t *state; /* producer/consumer bookkeeping */
} kern_queue_t;
/*
* Translate between rtmon-style event numbers and utrace-style event names
*/
/*
 * One entry of the rtmon-event-number <-> utrace-event-name translation
 * table (utrace_trtbl[] in os/tstamp.c).
 */
typedef struct utr_trtbl_s {
unsigned int evt; /* rtmon-style event number */
char *name; /* utrace-style event name */
} utrace_trtbl_t;
#define TSTAMP_SHARED_STATE_LEN 256
#define RT_TSTAMP_EOB_STOP 0
#define RT_TSTAMP_EOB_WRAP 1
#define RT_TSTAMP_MAX_ENTRIES (32*1024)
#if !defined(_KERNEL)
/*
* The following is the same size as the tstamp_event_entry for ease
* of reading and writing
*/
/*
 * Stream configuration record; tstamp marks the start of the trace, so
 * this is presumably the first record a client sees.  The spare fields
 * pad the struct out to the size of a tstamp_event_entry_t so readers
 * can treat it as an ordinary event record.
 */
typedef struct tstamp_config_event {
event_t evt; /* TSTAMP_EV_CONFIG */
ushort cpu;
ushort jumbocnt;
__int64_t tstamp; /* start of trace */
__int32_t revision; /* protocol revision (TSTAMP_REVISION) */
ushort cputype; /* type of cpu (TSTAMP_CPUTYPE_*) */
ushort cpufreq; /* speed of the cpu */
__int64_t eventmask; /* mask of events traced (RTMON_*) */
__uint32_t tstampfreq; /* tstamp ticks per sec */
unchar kabi; /* kernel ABI */
unchar spare1;
ushort spare2;
__int64_t spare[TSTAMP_NUM_QUALS-3]; /* pad to event-record size */
} tstamp_config_event_t;
#define TSTAMP_REVISION 19970320 /* protocol revision: March 20, 1997 */
#define TSTAMP_CPUTYPE_MIPS 40 /* CPU_FAMILY */
#define TSTAMP_CPUTYPE_R3000 41 /* CPU */
/* #define TSTAMP_CPUTYPE_R33000 42 ** reserved */
/* #define TSTAMP_CPUTYPE_R33020 43 ** reserved */
#define TSTAMP_CPUTYPE_R4000 44 /* CPU */
#define TSTAMP_CPUTYPE_R4300 45 /* CPU */
#define TSTAMP_CPUTYPE_R4400 46 /* CPU */
#define TSTAMP_CPUTYPE_R4600 47 /* CPU */
#define TSTAMP_CPUTYPE_R8000 48 /* CPU */
#define TSTAMP_CPUTYPE_R10000 49 /* CPU */
#define TSTAMP_CPUTYPE_R5000 50 /* CPU */
#define TSTAMP_CPUTYPE_R12000 51 /* CPU */
#define TSTAMP_CPUTYPE_R4650 52 /* CPU */
#define TSTAMP_CPUTYPE_R4700 53 /* CPU */
#define TSTAMP_CPUTYPE_R6000 54 /* CPU */
/*
* Task name message for mapping PID to string.
*/
typedef struct {
event_t evt; /* TSTAMP_EV_TASKNAME */
ushort cpu;
ushort jumbocnt;
__int64_t tstamp;
__int64_t k_id; /* kthread ID */
__int32_t pid; /* process ID */
char name[20]; /* task name (fixed 20 bytes) */
} tstamp_taskname_event_t;
#endif /* !_KERNEL */
/*
* All kernel user events are assumed to have all 6 qualifiers filled
*
*/
/*
 * Kernel events are numbered upward from MIN_KERNEL_ID.  Macro
 * arguments are fully parenthesized so that expression arguments
 * (e.g. KERNEL_EVENT(base + off), or a conditional expression)
 * associate correctly.
 */
#define MIN_KERNEL_ID 60000
#define KERNEL_EVENT(id) (MIN_KERNEL_ID + (id))
#define IS_KERNEL_USER_EVENT(event) \
(((event) >= MIN_KERNEL_ID) && ((event) <= MAX_USER_ID))
#define UNDEFINED_TSTAMP_SYS KERNEL_EVENT(0) /* currently unused */
#define UNDEFINED_TSTAMP_DAEMON KERNEL_EVENT(1)
/* pack kthread priority info and swtch flags into a 64-bit qualifier:
 * bits 32..63 = flags, bits 16..31 = kt->k_basepri, bits 0..15 = kt->k_pri
 */
#define RTMON_PACKPRI(kt,flags) \
(((__int64_t)(flags)<<32)|(((kt)->k_basepri<<16)&0xffff0000LL)|((kt)->k_pri&0xffffLL))
/*
* Event types for the LOG_TSTAMP_EVENT() macro:
*
* If you add a new event type you'll need to add corresponding code in three
* other places in order to complete the addition:
*
* 1. Add an entry to the UTRACE event type name translation table,
* utrace_trtbl[], in os/tstamp.c.
* 2. Add code to rtmond's event_to_class() function to translate the new
* event type to the event class that it is emitted under.
* 3. Add code to print out this new event type in librtmon in print.c.
* 4. Similar changes for item number 3 above will need to be done for
* IRIXview.
*/
#define TSTAMP_EV_INTRENTRY EVENT_INT_ENT(0)
#define TSTAMP_EV_INTREXIT EVENT_INT_EXIT
#define TSTAMP_EV_EVINTRENTRY EVENT_INT_ENT(1)
#define TSTAMP_EV_EVINTREXIT EVENT_INT_EXIT
#define TSTAMP_EV_EODISP KERNEL_EVENT(2)
#define TSTAMP_EV_EOSWITCH EVENT_WIND_EXIT_DISPATCH
#define TSTAMP_EV_EOSWITCH_RTPRI EVENT_WIND_EXIT_DISPATCH_PI
#define TSTAMP_EV_START_OF_MAJOR KERNEL_EVENT(16)
#define TSTAMP_EV_START_OF_MINOR KERNEL_EVENT(17)
#define TSTAMP_EV_ULI EVENT_INT_ENT(4)
#define TSTAMP_EV_YIELD EVENT_INT_ENT(5)
#define TSTAMP_EV_CPUCOUNTER_INTR EVENT_INT_ENT(6)
#define TSTAMP_EV_RTCCOUNTER_INTR EVENT_INT_ENT(7)
#define TSTAMP_EV_TIMEOUT EVENT_WINDTICKTIMEOUT /* XXX unused */
#define TSTAMP_EV_PROFCOUNTER_INTR EVENT_INT_ENT(8)
#define TSTAMP_EV_GROUP_INTR EVENT_INT_ENT(9)
#define TSTAMP_EV_EOPRESUME LEVEL1_EVENT(5)
#define TSTAMP_EV_YIELD2 KERNEL_EVENT(3)
#define TSTAMP_EV_CPUINTR EVENT_INT_ENT(11)
#define TSTAMP_EV_NETINTR EVENT_INT_ENT(12)
#define TSTAMP_EV_VSYNC_INTR EVENT_INT_ENT(13)
#define TSTAMP_EV_XRUN KERNEL_EVENT(4)
#define TSTAMP_EV_INTRQUAL KERNEL_EVENT(5)
#define TSTAMP_EV_SIGSEND EVENT_KILL
#define TSTAMP_EV_SIGRECV EVENT_SIGNAL
#define TSTAMP_EV_EXIT EVENT_WINDDELETE
#define TSTAMP_EV_TIMEIN KERNEL_EVENT(6)
#define TSTAMP_EV_QUEUERUN KERNEL_EVENT(7)
#define TSTAMP_EV_PROF_STACK32 KERNEL_EVENT(18)
#define TSTAMP_EV_PROF_STACK64 KERNEL_EVENT(19)
#define TSTAMP_EV_JOB_MIGRATION KERNEL_EVENT(20)
#define TSTAMP_EV_PROC_NAME KERNEL_EVENT(21)
#define TSTAMP_EV_SYSCALL_BEGIN KERNEL_EVENT(22)
#define TSTAMP_EV_SYSCALL_END KERNEL_EVENT(23)
#define TSTAMP_EV_EXEC KERNEL_EVENT(24)
#define TSTAMP_EV_CONFIG KERNEL_EVENT(25)
#define TSTAMP_EV_TASKNAME KERNEL_EVENT(26)
#define TSTAMP_EV_FORK KERNEL_EVENT(27)
#define TSTAMP_EV_ALLOC KERNEL_EVENT(28)
#define START_KERN_FUNCTION KERNEL_EVENT(1000)
#define END_KERN_FUNCTION KERNEL_EVENT(1001)
#define TSTAMP_EV_SORECORD KERNEL_EVENT(2000)
/*
* VM events - reserved KERNEL_EVENT(100) through KERNEL_EVENT(199)
*/
#ifndef VM_TSTAMP_OFF
/*
 * Log a VM-class timestamp event.  Each qualifier is parenthesized
 * before the cast so an expression argument is cast as a whole (a cast
 * binds tighter than most operators, so "(__int64_t)a + b" would cast
 * only "a").
 */
#define VM_LOG_TSTAMP_EVENT(evt, qual0, qual1, qual2, qual3) \
LOG_TSTAMP_EVENT(RTMON_VM, evt, (__int64_t)(qual0), (__int64_t)(qual1), \
(__int64_t)(qual2), (__int64_t)(qual3))
#else /* !VM_TSTAMP_OFF */
/*
 * Tracing compiled out: expand to a no-op expression rather than a bare
 * ';' so call sites written as "VM_LOG_TSTAMP_EVENT(...);" remain one
 * statement (a bare ';' produced ";;", which breaks brace-less if/else
 * bodies).  Note the qualifier expressions are not evaluated in this
 * configuration, matching the original behavior.
 */
#define VM_LOG_TSTAMP_EVENT(evt, qual0, qual1, qual2, qual3) ((void)0)
#endif /* !VM_TSTAMP_OFF */
#define VM_EVENT_TFAULT_ENTRY KERNEL_EVENT(100)
#define VM_EVENT_TFAULT_EXIT KERNEL_EVENT(101)
#define VM_EVENT_PFAULT_ENTRY KERNEL_EVENT(102)
#define VM_EVENT_PFAULT_EXIT KERNEL_EVENT(103)
#define VM_EVENT_PFAULT_RLACQ KERNEL_EVENT(104)
#define VM_EVENT_PFAULT_NOTHV KERNEL_EVENT(105)
#define VM_EVENT_PFAULT_ISMOD KERNEL_EVENT(106)
#define VM_EVENT_PFAULT_STARTF KERNEL_EVENT(107)
#define VM_EVENT_PFAULT_NOTCW KERNEL_EVENT(108)
#define VM_EVENT_PFAULT_CW KERNEL_EVENT(109)
#define VM_EVENT_VFAULT_ENTRY KERNEL_EVENT(110)
#define VM_EVENT_VFAULT_EXIT KERNEL_EVENT(111)
#define VM_EVENT_VFAULT_DFILLSTART KERNEL_EVENT(112)
#define VM_EVENT_VFAULT_DFILLEND KERNEL_EVENT(113)
#define VM_EVENT_VFAULT_RLACQ KERNEL_EVENT(114)
#define VM_EVENT_MIGR_PLIST_ENTRY KERNEL_EVENT(115)
#define VM_EVENT_MIGR_PLIST_EXIT KERNEL_EVENT(116)
#define VM_EVENT_MIGR_PLIST_FAIL KERNEL_EVENT(117)
#define VM_EVENT_MIGR_PMOVE_ENTRY KERNEL_EVENT(118)
#define VM_EVENT_MIGR_PMOVE_EXIT KERNEL_EVENT(119)
#define VM_EVENT_MIGR_PAGE_ENTRY KERNEL_EVENT(120)
#define VM_EVENT_MIGR_PAGE_EXIT KERNEL_EVENT(121)
#define VM_EVENT_MIGR_PAGE_FAIL KERNEL_EVENT(122)
#define VM_EVENT_MIGR_FRAME_ENTRY KERNEL_EVENT(123)
#define VM_EVENT_MIGR_FRAME_TLBSTART KERNEL_EVENT(124)
#define VM_EVENT_MIGR_FRAME_TLBEND KERNEL_EVENT(125)
#define VM_EVENT_MIGR_FRAME_CACHESTART KERNEL_EVENT(126)
#define VM_EVENT_MIGR_FRAME_CACHEEND KERNEL_EVENT(127)
#define VM_EVENT_MIGR_FRAME_EXIT KERNEL_EVENT(128)
#define VM_EVENT_VFAULT_ANONINS KERNEL_EVENT(129)
#define VM_EVENT_VFAULT_ADDMAP_START KERNEL_EVENT(130)
#define VM_EVENT_VFAULT_ADDMAP_END KERNEL_EVENT(131)
#define VM_EVENT_VFAULT_DROPIN KERNEL_EVENT(132)
/*
* Scheduler event - reserved KERNEL_EVENT(200) through KERNEL_EVENT(299)
*/
#define SCHED_EVENT_SAME_JOB KERNEL_EVENT(200)
#define SCHED_EVENT_DIFFERENT_JOB KERNEL_EVENT(201)
#define SCHED_EVENT_LOAD_BALANCE KERNEL_EVENT(202)
#define SCHED_EVENT_VQFACT_SET KERNEL_EVENT(203)
#define SCHED_EVENT_BAD_DATA KERNEL_EVENT(204)
/*
* Disk i/o event - reserved KERNEL_EVENT(300) through KERNEL_EVENT(399)
*/
#define DISK_EVENT_BASE KERNEL_EVENT(300)
#define DISK_EVENT_QUEUED (DISK_EVENT_BASE+0)
#define DISK_EVENT_START (DISK_EVENT_BASE+1)
#define DISK_EVENT_DONE (DISK_EVENT_BASE+2)
/*
* Network events - reserved KERNEL_EVENT(400) through KERNEL_EVENT(499)
*/
#define NET_EVENT_BASE KERNEL_EVENT(400)
#define NET_EVENT_NEW (NET_EVENT_BASE+0)
#define NET_EVENT_SLEEP (NET_EVENT_BASE+1)
#define NET_EVENT_WAKEUP (NET_EVENT_BASE+2)
#define NET_EVENT_WAKING (NET_EVENT_BASE+3)
#define NET_EVENT_FLOW (NET_EVENT_BASE+4)
#define NET_EVENT_DROP (NET_EVENT_BASE+5)
#define NET_EVENT_EVENT_DONE (NET_EVENT_BASE+6)
/* pack network subtoken, reason, and cpu id into a 64-bit qualifier:
 * bits 48..63 = subtoken, bits 32..47 = reason, bits 0..31 = cpuid()
 */
#define RTMON_PACKNET(s,r) \
(((__int64_t)(s)<<48)|((__int64_t)(r)<<32)|cpuid())
/*
* Over/Underrun Recovery Timestamps.
* DETECTED_UNDERRUN and DETECTED_OVERRUN are logged whenever
* the system detects an under/overrun, whether the process in
* the current context is ignoring them or not. The current number
* of over/underruns is returned in qual2.
* NORECOVERY, INJECTFRAME, EFRAME_STRETCH, and EFRAME_STEAL
* identify the kind of recovery that's taking place for a
* process not ignoring under/overruns.
* TOOMANY is logged when the recovery procedure has detected
* a number of consecutive errors greater than a max set by
* the user.
*/
#define TSTAMP_EV_DETECTED_UNDERRUN KERNEL_EVENT(8) /* qual2: # underruns */
#define TSTAMP_EV_DETECTED_OVERRUN KERNEL_EVENT(9) /* qual2: # overruns */
#define TSTAMP_EV_RECV_NORECOVERY KERNEL_EVENT(10)
#define TSTAMP_EV_RECV_INJECTFRAME KERNEL_EVENT(11)
#define TSTAMP_EV_RECV_EFRAME_STRETCH KERNEL_EVENT(12)
#define TSTAMP_EV_RECV_EFRAME_STEAL KERNEL_EVENT(13)
#define TSTAMP_EV_RECV_TOOMANY KERNEL_EVENT(14)
/* we'll use TSTAMP_EV_LOST_TSTAMP to have the daemon indicate that
* it noticed that there were timestamps that were dropped */
#define TSTAMP_EV_LOST_TSTAMP KERNEL_EVENT(15)
/* when instrumenting applications we will want to add more
we should do so off of this base*/
#define USER_EVENT_BASE MIN_USER_ID
/*
* UTRACE stuff.
*/
/*
* UTRACE()s have a special event code, so icrash/idbg can decode them
* and rtmond can ignore them. Also, in wrap mode, "jumbo" events are
* stored as multiple events, with all but the first labeled "JUMBO_EVENT"
*/
#define UTRACE_EVENT KERNEL_EVENT(3000)
#define JUMBO_EVENT KERNEL_EVENT(3001)
#ifdef _KERNEL
extern int utrace_bufsize; /* presumably the utrace buffer size -- see os/tstamp.c */
extern long long utrace_mask; /* presumably classes enabled for UTRACE() -- verify */
/* Pack two 32-bit numbers into 64 */
#define UTPACK(hi,lo) ((__uint64_t)(hi)<<32 | (__uint32_t)(lo))
/* Like UTPACK but `lo' is not masked to 32 bits, so a `lo' value wider
 * than 32 bits would OR into the high word.  NOTE(review): callers
 * presumably guarantee lo fits in 32 bits -- verify.
 */
#define UTN(hi,lo) ((__uint64_t)(hi)<<32 | (__uint64_t)(lo))
/*
 * Log a utrace record when some class in `class' is enabled on this
 * cpu.  The record is logged under the single UTRACE_EVENT code (so
 * rtmond can ignore it while icrash/idbg can decode it); the caller's
 * event code `evt' travels in the third qualifier.
 */
static __inline void
UTRACE(uint64_t class, uint64_t evt, uint64_t qual1, uint64_t qual2)
{
if (IS_TSTAMP_EVENT_ACTIVE(class)) {
#pragma mips_frequency_hint NEVER
log_tstamp_event(UTRACE_EVENT, qual1, qual2, evt, 0);
}
}
/*
 * Unconditional variant of UTRACE(): logs the record regardless of the
 * cpu's timestamp mask.
 */
static __inline void
utrace(uint64_t evt, uint64_t qual1, uint64_t qual2)
{
log_tstamp_event(UTRACE_EVENT, qual1, qual2, evt, 0);
}
#endif /* _KERNEL */
/*
* These are the flags to the EVENT_TASK_STATECHANGE event
*/
#define WIND_READY 0x00 /* ready to run (no blocking bits set) */
#define WIND_SUSPEND 0x01 /* explicitly suspended */
#define WIND_PEND 0x02 /* pending on semaphore */
#define WIND_DELAY 0x04 /* task delay (or timeout) */
#define WIND_DEAD 0x08 /* dead task */
#define WIND_LOCKED 0x10 /* locked task */
#define WIND_INHERITED 0x20 /* priority inherited task */
#define WIND_RUNNING 0x40 /* running */
#if !defined(_KERNEL)
/*
* These don't belong here, but are present in lieu of shipping <rtmon.h>.
*/
#if defined(__cplusplus)
extern "C" {
#endif
/*
 * User-level librtmon entry point: log an application timestamp event
 * with four 64-bit qualifiers.
 */
extern void rtmon_log_user_tstamp(event_t /*event*/,
unsigned long long /*arg0*/, unsigned long long /*arg1*/,
unsigned long long /*arg2*/, unsigned long long /*arg3*/);
/* XXX these don't belong here and shouldn't be exported by the library */
extern int atomicIncWithWrap(int */*addr*/, int /*mod_val*/);
extern int atomicInc(volatile int */*addr*/);
extern int atomicDec(volatile int */*addr*/);
#if defined(__cplusplus)
}
#endif
#endif /* !_KERNEL */
#endif /* __SYS_RTMON_H__ */