
This functionality is required for BSD top(1), which obtains the counters through the CTL_KERN KERN_CP_TIME sysctl(2) call. The idea is that the total time spent in the system is divided into five categories. While NetBSD uses separate categories for the kernel ("system") and interrupts, we redefine "system" to mean userspace system services and "interrupts" to mean time spent in the kernel. This provides the same categories as MINIX3's own top(1), while adding the "nice" category which, as on NetBSD, covers time spent by processes whose priority was lowered by the system administrator.

Change-Id: I2114148d1e07d9635055ceca7b163f337c53c43a
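For context, a minimal sketch (not part of this change) of how a userland consumer such as BSD top(1) might read these counters through the CTL_KERN KERN_CP_TIME sysctl(2) interface described above. The file name and output format are illustrative only; the CP_* indices and CPUSTATES come from <sys/sched.h>, as in the kernel code below.

/* cp_time_example.c - hypothetical userland example, not part of this change. */
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/sched.h>	/* CP_USER, CP_NICE, CP_SYS, CP_INTR, CP_IDLE, CPUSTATES */
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	uint64_t cp_time[CPUSTATES];
	size_t len = sizeof(cp_time);
	int mib[2] = { CTL_KERN, KERN_CP_TIME };

	/* Ask the kernel for the per-state clock tick counters. */
	if (sysctl(mib, 2, cp_time, &len, NULL, 0) == -1) {
		perror("sysctl");
		return EXIT_FAILURE;
	}

	printf("user %" PRIu64 " nice %" PRIu64 " sys %" PRIu64
	    " intr %" PRIu64 " idle %" PRIu64 "\n",
	    cp_time[CP_USER], cp_time[CP_NICE], cp_time[CP_SYS],
	    cp_time[CP_INTR], cp_time[CP_IDLE]);
	return EXIT_SUCCESS;
}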
/* ARM-specific clock functions. */
#include "kernel/kernel.h"

#include "kernel/clock.h"
#include "kernel/proc.h"
#include "kernel/interrupt.h"
#include <minix/u64.h>
#include <minix/board.h>
#include "kernel/glo.h"
#include "kernel/profile.h"

#include <assert.h>

#include <sys/sched.h> /* for CP_*, CPUSTATES */
#if CPUSTATES != MINIX_CPUSTATES
/* If this breaks, the code in this file may have to be adapted accordingly. */
#error "MINIX_CPUSTATES value is out of sync with NetBSD's!"
#endif

#include "kernel/spinlock.h"

#ifdef CONFIG_SMP
#include "kernel/smp.h"
#endif

#include "bsp_timer.h"
#include "bsp_intr.h"

/* Free-running counter ("TSC") ticks per millisecond, and accumulated ticks
 * per CPU state, for each CPU. */
static unsigned tsc_per_ms[CONFIG_MAX_CPUS];
static uint64_t tsc_per_state[CONFIG_MAX_CPUS][CPUSTATES];

int init_local_timer(unsigned freq)
{
	bsp_timer_init(freq);

	if (BOARD_IS_BBXM(machine.board_id)) {
		/* BeagleBoard-xM: counter ticks per millisecond. */
		tsc_per_ms[0] = 16250;
	} else if (BOARD_IS_BB(machine.board_id)) {
		/* BeagleBone (and variants): counter ticks per millisecond. */
		tsc_per_ms[0] = 15000;
	} else {
		panic("Cannot do the clock setup. machine (0x%08x) is unknown",
		    machine.board_id);
	}

	return 0;
}

void stop_local_timer(void)
{
	bsp_timer_stop();
}

void arch_timer_int_handler(void)
{
	bsp_timer_int_handler();
}

void cycles_accounting_init(void)
{
	/* Record the current counter value as the baseline for cycle
	 * accounting on this CPU. */
	read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));

	get_cpu_var(cpu, cpu_last_tsc) = 0;
	get_cpu_var(cpu, cpu_last_idle) = 0;
}

void context_stop(struct proc * p)
{
	u64_t tsc;
	u32_t tsc_delta;
	unsigned int counter;
	u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);

	read_tsc_64(&tsc);
	assert(tsc >= *__tsc_ctr_switch);
	tsc_delta = tsc - *__tsc_ctr_switch;
	p->p_cycles += tsc_delta;

	if (kbill_ipc) {
		kbill_ipc->p_kipc_cycles += tsc_delta;
		kbill_ipc = NULL;
	}

	if (kbill_kcall) {
		kbill_kcall->p_kcall_cycles += tsc_delta;
		kbill_kcall = NULL;
	}

	/*
	 * Deduct the just consumed cpu cycles from the cpu time left for this
	 * process during its current quantum. Skip IDLE and other pseudo
	 * kernel tasks, except for accounting purposes.
	 */
	if (p->p_endpoint >= 0) {
		/* On MINIX3, the "system" counter covers system processes. */
		if (p->p_priv != priv_addr(USER_PRIV_ID))
			counter = CP_SYS;
		else if (p->p_misc_flags & MF_NICED)
			counter = CP_NICE;
		else
			counter = CP_USER;

#if DEBUG_RACE
		p->p_cpu_time_left = 0;
#else
		if (tsc_delta < p->p_cpu_time_left) {
			p->p_cpu_time_left -= tsc_delta;
		} else {
			p->p_cpu_time_left = 0;
		}
#endif
	} else {
		/* On MINIX3, the "interrupts" counter covers the kernel. */
		if (p->p_endpoint == IDLE)
			counter = CP_IDLE;
		else
			counter = CP_INTR;
	}

	/* Account the consumed cycles to the CPU state selected above. */
	tsc_per_state[0][counter] += tsc_delta;

	*__tsc_ctr_switch = tsc;
}

void context_stop_idle(void)
{
	int is_idle;
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	is_idle = get_cpu_var(cpu, cpu_is_idle);
	get_cpu_var(cpu, cpu_is_idle) = 0;

	context_stop(get_cpulocal_var_ptr(idle_proc));

	if (is_idle)
		restart_local_timer();
#if SPROFILE
	if (sprofiling)
		get_cpulocal_var(idle_interrupted) = 1;
#endif
}

void restart_local_timer(void)
{
}

int register_local_timer_handler(const irq_handler_t handler)
{
	return bsp_register_timer_handler(handler);
}

u64_t ms_2_cpu_time(unsigned ms)
{
	return (u64_t)tsc_per_ms[cpuid] * ms;
}

unsigned cpu_time_2_ms(u64_t cpu_time)
{
	return (unsigned)(cpu_time / tsc_per_ms[cpuid]);
}

short cpu_load(void)
{
	/* CPU load reporting is not implemented on this architecture. */
	return 0;
}

/*
 * Return the number of clock ticks spent in each of a predefined number of
 * CPU states.
 */
void
get_cpu_ticks(unsigned int cpu, uint64_t ticks[CPUSTATES])
{
	unsigned int tsc_per_tick;
	int i;

	/* Only CPU 0 is accounted for; convert its counters to clock ticks. */
	tsc_per_tick = tsc_per_ms[0] * 1000 / system_hz;

	for (i = 0; i < CPUSTATES; i++)
		ticks[i] = tsc_per_state[0][i] / tsc_per_tick;
}