Kernel: synchronize i386 and ARM clock code
Change-Id: Ie5c4653299e47aaefc9d35a8af491ad0b2eab1ab
parent 6077d1ad24
commit f12160c14d
@@ -3,15 +3,12 @@
 #include "kernel/kernel.h"
 
 #include "kernel/clock.h"
-#include "kernel/proc.h"
 #include "kernel/interrupt.h"
 #include <minix/u64.h>
 #include <minix/board.h>
 #include "kernel/glo.h"
 #include "kernel/profile.h"
 
-#include <assert.h>
-
 #include <sys/sched.h>	/* for CP_*, CPUSTATES */
 #if CPUSTATES != MINIX_CPUSTATES
 /* If this breaks, the code in this file may have to be adapted accordingly. */
@@ -22,6 +19,7 @@
 
 #ifdef CONFIG_SMP
 #include "kernel/smp.h"
+#error CONFIG_SMP is unsupported on ARM
 #endif
 
 #include "bsp_timer.h"
@@ -60,6 +58,10 @@ void arch_timer_int_handler(void)
 
 void cycles_accounting_init(void)
 {
+#ifdef CONFIG_SMP
+	unsigned cpu = cpuid;
+#endif
+
 	read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));
 
 	get_cpu_var(cpu, cpu_last_tsc) = 0;
@@ -68,22 +70,26 @@ void cycles_accounting_init(void)
 
 void context_stop(struct proc * p)
 {
-	u64_t tsc;
-	u32_t tsc_delta;
-	unsigned int counter, tpt;
+	u64_t tsc, tsc_delta;
 	u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);
+	unsigned int cpu, tpt, counter;
 
+#ifdef CONFIG_SMP
+#error CONFIG_SMP is unsupported on ARM
+#else
 	read_tsc_64(&tsc);
-	assert(tsc >= *__tsc_ctr_switch);
-	tsc_delta = tsc - *__tsc_ctr_switch;
-	p->p_cycles += tsc_delta;
+	p->p_cycles = p->p_cycles + tsc - *__tsc_ctr_switch;
+	cpu = 0;
+#endif
 
-	if(kbill_ipc) {
+	tsc_delta = tsc - *__tsc_ctr_switch;
+
+	if (kbill_ipc) {
 		kbill_ipc->p_kipc_cycles += tsc_delta;
 		kbill_ipc = NULL;
 	}
 
-	if(kbill_kcall) {
+	if (kbill_kcall) {
 		kbill_kcall->p_kcall_cycles += tsc_delta;
 		kbill_kcall = NULL;
 	}
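Both ports now share the same accounting scheme in context_stop(): read the free-running cycle counter, take the delta since the previous context stop, and charge that delta to the outgoing process as well as to any process with pending kernel-call or IPC billing (kbill_kcall, kbill_ipc). A minimal standalone sketch of that idea follows; struct task, account_stop and the two globals are illustrative names, not kernel API.

#include <stddef.h>
#include <stdint.h>

struct task { uint64_t cycles; };

static uint64_t last_switch;	/* counter value at the previous stop */
static struct task *biller;	/* task to bill for kernel work, if any */

/* Hypothetical model of the accounting core of context_stop(). */
static void account_stop(struct task *curr, uint64_t now)
{
	uint64_t delta = now - last_switch;

	curr->cycles += delta;		/* charge the outgoing task */
	if (biller != NULL) {		/* bill pending kernel work once */
		biller->cycles += delta;
		biller = NULL;
	}
	last_switch = now;		/* the next delta starts here */
}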
@@ -98,7 +104,7 @@ void context_stop(struct proc * p)
 	 * the code below is a loop, but the loop will in by far most cases not
 	 * be executed more than once, and often be skipped at all.
 	 */
-	tpt = tsc_per_tick[0];
+	tpt = tsc_per_tick[cpu];
 
 	p->p_tick_cycles += tsc_delta;
 	while (tpt > 0 && p->p_tick_cycles >= tpt) {
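The comment in this hunk describes how per-process CPU time, kept in cycles, is converted to whole clock ticks: leftover cycles accumulate until a full tick's worth (tpt) has built up, and since tsc_delta is normally far below one tick, the loop body rarely runs more than once. A rough standalone sketch of that conversion, with illustrative names only:

#include <stdint.h>

static unsigned int charge_ticks(uint64_t *tick_cycles, uint64_t tsc_delta,
	uint64_t tpt)	/* TSC cycles per clock tick */
{
	unsigned int ticks = 0;

	*tick_cycles += tsc_delta;
	while (tpt > 0 && *tick_cycles >= tpt) {
		*tick_cycles -= tpt;
		ticks++;	/* one full tick of CPU time consumed */
	}
	return ticks;
}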
@@ -116,7 +122,7 @@ void context_stop(struct proc * p)
 	/*
 	 * deduct the just consumed cpu cycles from the cpu time left for this
 	 * process during its current quantum. Skip IDLE and other pseudo kernel
-	 * tasks, except for accounting purposes.
+	 * tasks, except for global accounting purposes.
 	 */
 	if (p->p_endpoint >= 0) {
 		/* On MINIX3, the "system" counter covers system processes. */
@@ -132,7 +138,9 @@ void context_stop(struct proc * p)
 #else
 		if (tsc_delta < p->p_cpu_time_left) {
 			p->p_cpu_time_left -= tsc_delta;
-		} else p->p_cpu_time_left = 0;
+		} else {
+			p->p_cpu_time_left = 0;
+		}
 #endif
 	} else {
 		/* On MINIX3, the "interrupts" counter covers the kernel. */
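The quantum bookkeeping above subtracts the consumed cycles from the time left in the process's current quantum, saturating at zero rather than wrapping the unsigned counter. As a standalone sketch (quantum_left is an illustrative name, not a kernel function):

#include <stdint.h>

static uint64_t quantum_left(uint64_t time_left, uint64_t tsc_delta)
{
	if (tsc_delta < time_left)
		return time_left - tsc_delta;	/* quantum partially used */
	return 0;				/* quantum fully consumed */
}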
@@ -140,9 +148,10 @@ void context_stop(struct proc * p)
 			counter = CP_IDLE;
 		else
 			counter = CP_INTR;
+
 	}
 
-	tsc_per_state[0][counter] += tsc_delta;
+	tsc_per_state[cpu][counter] += tsc_delta;
 
 	*__tsc_ctr_switch = tsc;
 }
@@ -177,7 +186,7 @@ int register_local_timer_handler(const irq_handler_t handler)
 
 u64_t ms_2_cpu_time(unsigned ms)
 {
-	return (u64_t)(tsc_per_ms[cpuid]) * ms;
+	return (u64_t)tsc_per_ms[cpuid] * ms;
 }
 
 unsigned cpu_time_2_ms(u64_t cpu_time)
@@ -187,7 +196,40 @@ unsigned cpu_time_2_ms(u64_t cpu_time)
 
 short cpu_load(void)
 {
-	return 0;
+	u64_t current_tsc, *current_idle;
+	u64_t tsc_delta, idle_delta, busy;
+	struct proc *idle;
+	short load;
+#ifdef CONFIG_SMP
+	unsigned cpu = cpuid;
+#endif
+
+	u64_t *last_tsc, *last_idle;
+
+	last_tsc = get_cpu_var_ptr(cpu, cpu_last_tsc);
+	last_idle = get_cpu_var_ptr(cpu, cpu_last_idle);
+
+	idle = get_cpu_var_ptr(cpu, idle_proc);;
+	read_tsc_64(&current_tsc);
+	current_idle = &idle->p_cycles; /* ptr to idle proc */
+
+	/* calculate load since last cpu_load invocation */
+	if (*last_tsc) {
+		tsc_delta = current_tsc - *last_tsc;
+		idle_delta = *current_idle - *last_idle;
+
+		busy = tsc_delta - idle_delta;
+		busy = busy * 100;
+		load = ex64lo(busy / tsc_delta);
+
+		if (load > 100)
+			load = 100;
+	} else
+		load = 0;
+
+	*last_tsc = current_tsc;
+	*last_idle = *current_idle;
+	return load;
 }
 
 /*
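The new ARM cpu_load() mirrors the i386 version: between two invocations, busy time is the total elapsed TSC cycles minus the cycles the idle process accumulated, expressed as a percentage of the interval. A rough standalone model of the calculation, with plain uint64_t standing in for the kernel's u64_t and ex64lo() handling, and load_percent an illustrative name:

#include <stdint.h>

static short load_percent(uint64_t tsc_delta, uint64_t idle_delta)
{
	uint64_t busy;
	short load;

	if (tsc_delta == 0)
		return 0;	/* no measurement interval yet */

	busy = (tsc_delta - idle_delta) * 100;
	load = (short)(busy / tsc_delta);

	/* The idle count is sampled separately; clamp any overshoot. */
	return load > 100 ? 100 : load;
}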
@@ -199,6 +241,7 @@ get_cpu_ticks(unsigned int cpu, uint64_t ticks[CPUSTATES])
 {
 	int i;
 
+	/* TODO: make this inter-CPU safe! */
 	for (i = 0; i < CPUSTATES; i++)
-		ticks[i] = tsc_per_state[0][i] / tsc_per_tick[0];
+		ticks[i] = tsc_per_state[cpu][i] / tsc_per_tick[cpu];
 }
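get_cpu_ticks() now divides each per-state cycle counter by the same CPU's cycles-per-tick rate, so the reported tick counts remain comparable even if CPUs run at different clock speeds. A rough standalone version (the parameters stand in for the kernel's tsc_per_state and tsc_per_tick globals; CPUSTATES comes from <sys/sched.h> as in the file itself, and per_tick is assumed nonzero):

#include <stdint.h>
#include <sys/sched.h>	/* for CPUSTATES */

static void cycles_to_ticks(const uint64_t per_state[CPUSTATES],
	uint64_t per_tick, uint64_t ticks[CPUSTATES])
{
	int i;

	for (i = 0; i < CPUSTATES; i++)
		ticks[i] = per_state[i] / per_tick;
}

The hunks below are the i386 half of the synchronization, in the file that opens with the "i386-specific clock functions" comment; presumably kernel/arch/i386/arch_clock.c, with the ARM file above presumably being kernel/arch/earm/arch_clock.c.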
@@ -1,4 +1,3 @@
-
 /* i386-specific clock functions. */
 
 #include <machine/ports.h>
@@ -9,7 +8,7 @@
 #include "kernel/clock.h"
 #include "kernel/interrupt.h"
 #include <minix/u64.h>
-#include "glo.h"
+#include "kernel/glo.h"
 #include "kernel/profile.h"
 
 #include <sys/sched.h>	/* for CP_*, CPUSTATES */
@@ -207,8 +206,8 @@ void cycles_accounting_init(void)
 
 	read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));
 
-        get_cpu_var(cpu, cpu_last_tsc) = 0;
-        get_cpu_var(cpu, cpu_last_idle) = 0;
+	get_cpu_var(cpu, cpu_last_tsc) = 0;
+	get_cpu_var(cpu, cpu_last_idle) = 0;
 }
 
 void context_stop(struct proc * p)
@@ -274,18 +273,16 @@ void context_stop(struct proc * p)
 	p->p_cycles = p->p_cycles + tsc - *__tsc_ctr_switch;
 	cpu = 0;
 #endif
 
-
 	tsc_delta = tsc - *__tsc_ctr_switch;
 
 	if (kbill_ipc) {
-		kbill_ipc->p_kipc_cycles =
-			kbill_ipc->p_kipc_cycles + tsc_delta;
+		kbill_ipc->p_kipc_cycles += tsc_delta;
 		kbill_ipc = NULL;
 	}
 
 	if (kbill_kcall) {
-		kbill_kcall->p_kcall_cycles =
-			kbill_kcall->p_kcall_cycles + tsc_delta;
+		kbill_kcall->p_kcall_cycles += tsc_delta;
 		kbill_kcall = NULL;
 	}
+
@@ -331,12 +328,9 @@ void context_stop(struct proc * p)
 #if DEBUG_RACE
 	p->p_cpu_time_left = 0;
 #else
-		/* if (tsc_delta < p->p_cpu_time_left) in 64bit */
-		if (ex64hi(tsc_delta) < ex64hi(p->p_cpu_time_left) ||
-			(ex64hi(tsc_delta) == ex64hi(p->p_cpu_time_left) &&
-			ex64lo(tsc_delta) < ex64lo(p->p_cpu_time_left)))
-			p->p_cpu_time_left = p->p_cpu_time_left - tsc_delta;
-		else {
+		if (tsc_delta < p->p_cpu_time_left) {
+			p->p_cpu_time_left -= tsc_delta;
+		} else {
 			p->p_cpu_time_left = 0;
 		}
 #endif
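The branch removed here spelled out a 64-bit "less than" with the ex64hi()/ex64lo() word-access macros, a leftover from the days when MINIX's u64_t was a two-word struct; with u64_t a native integer type, the direct comparison that replaces it is equivalent. A small sketch of the equivalence (u64_less_wordwise is an illustrative name):

#include <stdint.h>

static int u64_less_wordwise(uint64_t a, uint64_t b)
{
	uint32_t ahi = (uint32_t)(a >> 32), alo = (uint32_t)a;
	uint32_t bhi = (uint32_t)(b >> 32), blo = (uint32_t)b;

	/* High words order the values; equal high words fall to low words. */
	return ahi < bhi || (ahi == bhi && alo < blo);
}

For any a and b, u64_less_wordwise(a, b) equals (a < b).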
@@ -365,7 +359,7 @@ void context_stop_idle(void)
 #ifdef CONFIG_SMP
 	unsigned cpu = cpuid;
 #endif
-	
+
 	is_idle = get_cpu_var(cpu, cpu_is_idle);
 	get_cpu_var(cpu, cpu_is_idle) = 0;
 
@@ -421,7 +415,7 @@ short cpu_load(void)
 			load = 100;
 	} else
 		load = 0;
-	
+
 	*last_tsc = current_tsc;
 	*last_idle = *current_idle;
 	return load;