Mirror of https://github.com/vlang/v.git (synced 2025-09-15 18:36:37 -04:00)
tools: add v repeat 'sleep 1' 'sleep 2' (rename repeat.v to vrepeat.v), improve its output, add its flags to vcomplete.v (#20114)
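
For orientation, a usage sketch of the renamed tool; the first invocation is taken from the commit title, the second just illustrates some of the flags added in this commit (output not shown):

    v repeat 'sleep 1' 'sleep 2'
    v repeat --runs 20 --series 3 --warmup 3 --nmaxs 5 'sleep 1' 'sleep 2'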
parent c121f56ffa
commit e2b75bde9e
6 .github/workflows/other_ci.yml (vendored)
@@ -59,11 +59,11 @@ jobs:
      - name: V versions
        run: ./v version && ./vmaster/v version
      - name: Build the repeat tool
        run: ./v cmd/tools/repeat.v
        run: ./v cmd/tools/vrepeat.v
      - name: Repeat -o hw.c examples/hello_world.v
        run: cmd/tools/repeat --max_time 251 --series 3 --count 20 --nmins 2 --nmaxs 5 --warmup 3 --fail_percent 10 -t 'cd {T} ; ./v -show-timings -o hw.c examples/hello_world.v' . ./vmaster
        run: ./v repeat --max_time 251 --series 3 --runs 20 --nmins 2 --nmaxs 5 --warmup 3 --fail_percent 10 -t 'cd {T} ; ./v -o hw.c examples/hello_world.v' . ./vmaster
      - name: Repeat -o v.c cmd/v
        run: cmd/tools/repeat --max_time 1731 --series 3 --count 20 --nmins 2 --nmaxs 5 --warmup 3 --fail_percent 10 -t 'cd {T} ; ./v -show-timings -o v.c cmd/v' . ./vmaster
        run: ./v repeat --max_time 1731 --series 3 --runs 20 --nmins 2 --nmaxs 5 --warmup 3 --fail_percent 10 -t 'cd {T} ; ./v -o v.c cmd/v' . ./vmaster

  misc-tooling:
    runs-on: ubuntu-20.04
@@ -95,6 +95,7 @@ const auto_complete_commands = [
	'gret',
	'ls',
	'repl',
	'repeat',
	'self',
	'setup-freetype',
	'shader',
@@ -271,6 +272,34 @@ const auto_complete_flags_bump = [
const auto_complete_flags_self = [
	'-prod',
]
const auto_complete_flags_repeat = [
	'--help',
	'-h',
	'--runs',
	'-r',
	'--series',
	'-s',
	'--warmup',
	'-w',
	'--newline',
	'-n',
	'--output',
	'-O',
	'--max_time',
	'-m',
	'--fail_percent',
	'-f',
	'--template',
	'-t',
	'--parameter',
	'-p',
	'--nmins',
	'-i',
	'--nmaxs',
	'-a',
	'--ignore',
	'-e',
]
const auto_complete_compilers = [
	'cc',
	'gcc',
@@ -444,6 +473,9 @@ fn auto_complete_request(args []string) []string {
			'bump' { // 'v bump -<tab>' -> flags.
				list = get_flags(auto_complete_flags_bump, part)
			}
			'repeat' { // 'v repeat -<tab>' -> flags.
				list = get_flags(auto_complete_flags_repeat, part)
			}
			else {
				for flag in auto_complete_flags {
					if flag == part {
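
For illustration only: assuming shell completion has already been set up through the vcomplete tool (v complete), and that get_flags filters the list by the typed prefix, the new match arm means that typing

    v repeat --n<tab>

should now offer the matching entries from auto_complete_flags_repeat above, i.e. --newline, --nmins and --nmaxs.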
@@ -5,7 +5,22 @@ import flag
import time
import term
import math
import scripting

const max_fail_percent = 100 * 1000
const max_time = 60 * 1000 // ms

const performance_regression_label = 'Performance regression detected, failing since '

struct Aints {
	values []i64 // uS
mut:
	imin i64 // uS
	imax i64 // uS
	average f64
	stddev f64
	nmins int // number of discarded fastest results
	nmaxs int // number of discarded slowest results
}

struct CmdResult {
mut:
@@ -13,15 +28,15 @@ mut:
	cmd string
	icmd int
	outputs []string
	oms map[string][]int
	ous map[string][]i64 // uS
	summary map[string]Aints
	timings []int
	timings []i64
	atiming Aints
}

struct Context {
mut:
	count int
	run_count int
	series int
	warmup int
	show_help bool
@@ -38,69 +53,31 @@ mut:
	cgoback string
	nmins int // number of minimums to discard
	nmaxs int // number of maximums to discard
	ignore_failed bool // ignore commands that exit with != 0 exit code
}

@[unsafe]
fn (mut result CmdResult) free() {
	unsafe {
		result.cmd.free()
		result.outputs.free()
		result.oms.free()
		result.summary.free()
		result.timings.free()
		result.atiming.free()
	}
}

@[unsafe]
fn (mut context Context) free() {
	unsafe {
		context.commands.free()
		context.results.free()
		context.cmd_template.free()
		context.cmd_params.free()
		context.cline.free()
		context.cgoback.free()
	}
}

struct Aints {
	values []int
mut:
	imin int
	imax int
	average f64
	stddev f64
	nmins int // number of discarded fastest results
	nmaxs int // number of discarded slowest results
}

@[unsafe]
fn (mut a Aints) free() {
	unsafe { a.values.free() }
}

fn new_aints(ovals []int, extreme_mins int, extreme_maxs int) Aints {
fn new_aints(ovals []i64, extreme_mins int, extreme_maxs int) Aints {
	mut res := Aints{
		values: ovals // remember the original values
		nmins: extreme_mins
		nmaxs: extreme_maxs
	}
	mut sum := i64(0)
	mut imin := int(max_i32)
	mut imax := int(-max_i32)
	// discard the extremes:
	mut vals := []int{}
	// discard the extremes, if needed:
	mut vals := []i64{}
	for x in ovals {
		vals << x
	}
	vals.sort()
	if vals.len > extreme_mins + extreme_maxs {
		vals = vals[extreme_mins..vals.len - extreme_maxs].clone()
	} else {
		vals = []
	if extreme_mins > 0 {
		vals = vals#[extreme_mins..].clone()
	}
	if extreme_maxs > 0 {
		vals = vals#[..-extreme_maxs].clone()
	}
	// statistical processing of the remaining values:
	mut sum := i64(0)
	mut imin := i64(max_i64)
	mut imax := i64(-max_i64)
	for i in vals {
		sum += i
		if i < imin {
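
The new Aints/new_aints code above stores timings as i64 microseconds and discards the nmins fastest and nmaxs slowest samples before computing statistics. A minimal standalone sketch of that idea (robust_mean is a hypothetical helper written for this note, not part of vrepeat.v):

fn robust_mean(ovals []i64, nmins int, nmaxs int) f64 {
	mut vals := ovals.clone()
	vals.sort()
	if nmins > 0 {
		vals = vals#[nmins..].clone() // drop the fastest samples
	}
	if nmaxs > 0 {
		vals = vals#[..-nmaxs].clone() // drop the slowest samples
	}
	if vals.len == 0 {
		return 0.0
	}
	mut sum := i64(0)
	for x in vals {
		sum += x
	}
	return f64(sum) / f64(vals.len)
}

fn main() {
	println(robust_mean([i64(5), 100, 7, 6, 8], 1, 1)) // 7.0 (5 and 100 are discarded)
}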
@@ -127,76 +104,16 @@ fn new_aints(ovals []int, extreme_mins int, extreme_maxs int) Aints {
}

fn bold(s string) string {
	return term.colorize(term.bold, s)
	return term.colorize(term.green, term.colorize(term.bold, s))
}

fn (a Aints) str() string {
	return bold('${a.average:6.2f}') +
		'ms ± σ: ${a.stddev:4.1f}ms, min: ${a.imin:4}ms, max: ${a.imax:4}ms, runs:${a.values.len:3}, nmins:${a.nmins:2}, nmaxs:${a.nmaxs:2}'
}

const max_fail_percent = 100 * 1000
const max_time = 60 * 1000 // ms

const performance_regression_label = 'Performance regression detected, failing since '

fn main() {
	mut context := Context{}
	context.parse_options()!
	context.run()
	context.show_diff_summary()
}

fn (mut context Context) parse_options() ! {
	mut fp := flag.new_flag_parser(os.args)
	fp.application(os.file_name(os.executable()))
	fp.version('0.0.1')
	fp.description('Repeat command(s) and collect statistics. Note: you have to quote each command, if it contains spaces.')
	fp.arguments_description('CMD1 CMD2 ...')
	fp.skip_executable()
	fp.limit_free_args_to_at_least(1)!
	context.count = fp.int('count', `c`, 10, 'Repetition count.')
	context.series = fp.int('series', `s`, 2, 'Series count. `-s 2 -c 4 a b` => aaaabbbbaaaabbbb, while `-s 3 -c 2 a b` => aabbaabbaabb.')
	context.warmup = fp.int('warmup', `w`, 2, 'Warmup runs. These are done *only at the start*, and are ignored.')
	context.show_help = fp.bool('help', `h`, false, 'Show this help screen.')
	context.use_newline = fp.bool('newline', `n`, false, 'Use \\n, do not overwrite the last line. Produces more output, but easier to diagnose.')
	context.show_output = fp.bool('output', `O`, false, 'Show command stdout/stderr in the progress indicator for each command. Note: slower, for verbose commands.')
	context.verbose = fp.bool('verbose', `v`, false, 'Be more verbose.')
	context.fail_on_maxtime = fp.int('max_time', `m`, max_time, 'Fail with exit code 2, when first cmd takes above M milliseconds (regression).')
	context.fail_on_regress_percent = fp.int('fail_percent', `f`, max_fail_percent, 'Fail with exit code 3, when first cmd is X% slower than the rest (regression).')
	context.cmd_template = fp.string('template', `t`, '{T}', 'Command template. {T} will be substituted with the current command.')
	cmd_params := fp.string_multi('parameter', `p`, 'A parameter substitution list. `{p}=val1,val2,val2` means that {p} in the template, will be substituted with each of val1, val2, val3.')
	context.nmins = fp.int('nmins', `i`, 0, 'Ignore the BOTTOM X results (minimum execution time). Makes the results more robust to performance flukes.')
	context.nmaxs = fp.int('nmaxs', `a`, 1, 'Ignore the TOP X results (maximum execution time). Makes the results more robust to performance flukes.')
	for p in cmd_params {
		parts := p.split(':')
		if parts.len > 1 {
			context.cmd_params[parts[0]] = parts[1].split(',')
		}
	}
	if context.show_help {
		println(fp.usage())
		exit(0)
	}
	if context.verbose {
		scripting.set_verbose(true)
	}
	commands := fp.finalize() or {
		eprintln('Error: ${err}')
		exit(1)
	}
	context.commands = context.expand_all_commands(commands)
	context.results = []CmdResult{len: context.commands.len, cap: 20, init: CmdResult{
		outputs: []string{cap: 500}
		timings: []int{cap: 500}
	}}
	if context.use_newline {
		context.cline = '\n'
		context.cgoback = '\n'
	} else {
		context.cline = '\r' + term.h_divider('')
		context.cgoback = '\r'
	}
	avg := bold('${a.average / 1000:5.1f}ms')
	tdev := term.colorize(term.red, '${a.stddev / 1000:5.1f}ms')
	baseline := '${avg} ± σ: ${tdev},'
	tmin := term.colorize(term.bright_cyan, '${f64(a.imin) / 1000:5.1f}ms')
	tmax := term.colorize(term.bright_blue, '${f64(a.imax) / 1000:5.1f}ms')
	return '${baseline:-46s} ${tmin} … ${tmax}'
}

fn flushed_print(s string) {
@@ -241,40 +158,50 @@ fn (mut context Context) expand_all_commands(commands []string) []string {
}

fn (mut context Context) run() {
	mut run_warmups := 0
	for si in 1 .. context.series + 1 {
	for si in 0 .. context.series {
		for icmd, cmd in context.commands {
			mut runs := 0
			mut duration := 0
			mut sum := 0
			mut sum := i64(0)
			mut avg := f64(0)
			mut duration := i64(0)
			mut oldres := ''
			println('Series: ${si:4}/${context.series:-4}, command: ${cmd}')
			if context.warmup > 0 && run_warmups < context.commands.len {
				for i in 1 .. context.warmup + 1 {
					flushed_print('${context.cgoback}warming up run: ${i:4}/${context.warmup:-4} for ${cmd:-50s} took ${duration:6} ms ...')
			series_label := '${icmd + 1}/${context.commands.len}, ${si + 1}/${context.series}'
			line_prefix := '${context.cgoback}Command: ${term.colorize(term.gray, cmd)}, ${series_label:9}'
			if context.series != 1 || context.commands.len != 1 {
				flushed_print(line_prefix)
			}
			if context.warmup > 0 {
				for i in 0 .. context.warmup {
					flushed_print('${line_prefix}, warm up run: ${i + 1:4}/${context.warmup:-4} took ${f64(duration) / 1000:6.1f}ms ...')
					mut sw := time.new_stopwatch()
					res := os.execute(cmd)
					if res.exit_code != 0 {
					duration = i64(sw.elapsed().microseconds())
					if res.exit_code != 0 && !context.ignore_failed {
						eprintln('')
						eprintln('Command exited with exit code: ${res.exit_code} in ${f64(duration) / 1000:6.1f}ms .')
						eprintln('The failed command was: `${cmd}` .')
						eprintln('Use -e or --ignore to ignore the failed commands.')
						eprintln('')
						continue
					}
					duration = int(sw.elapsed().milliseconds())
				}
				run_warmups++
			}
			context.clear_line()
			for i in 1 .. (context.count + 1) {
				avg := f64(sum) / f64(i)
				flushed_print('${context.cgoback}Average: ${avg:9.3f}ms | run: ${i:4}/${context.count:-4} | took ${duration:6} ms')
			for i in 0 .. context.run_count {
				mut sw := time.new_stopwatch()
				res := os.execute(cmd)
				duration = i64(sw.elapsed().microseconds())
				//
				if res.exit_code != 0 && !context.ignore_failed {
					eprintln('${i + 1:10} non 0 exit code for cmd: ${cmd}')
					continue
				}
				sum += duration
				runs++
				avg = (f64(sum) / f64(i + 1))
				flushed_print('${line_prefix}, current average: ${avg / 1000:9.3f}ms, run ${i + 1:4}/${context.run_count:-4} took ${f64(duration) / 1000:6} ms')
				if context.show_output {
					flushed_print(' | result: ${oldres:s}')
				}
				mut sw := time.new_stopwatch()
				res := scripting.exec(cmd) or { continue }
				duration = int(sw.elapsed().milliseconds())
				if res.exit_code != 0 {
					eprintln('${i:10} non 0 exit code for cmd: ${cmd}')
					continue
				}
				trimmed_output := res.output.trim_right('\r\n')
				trimmed_normalized := trimmed_output.replace('\r\n', '\n')
				lines := trimmed_normalized.split('\n')
@@ -282,8 +209,6 @@ fn (mut context Context) run() {
					context.results[icmd].outputs << line
				}
				context.results[icmd].timings << duration
				sum += duration
				runs++
				oldres = res.output.replace('\n', ' ')
			}
			context.results[icmd].cmd = cmd
@@ -293,50 +218,61 @@ fn (mut context Context) run() {
				context.nmaxs)
			context.clear_line()
			flushed_print(context.cgoback)
			mut m := map[string][]int{}
			ioutputs := context.results[icmd].outputs
			for o in ioutputs {
				x := o.split(':')
				if x.len > 1 {
					k := x[0]
					v := x[1].trim_left(' ').int()
					m[k] << v
				}
			}
			mut summary := map[string]Aints{}
			for k, v in m {
				// show a temporary summary for the current series/cmd cycle
				s := new_aints(v, context.nmins, context.nmaxs)
				println(' ${k}: ${s}')
				summary[k] = s
			}
			// merge current raw results to the previous ones
			old_oms := context.results[icmd].oms.move()
			mut new_oms := map[string][]int{}
			for k, v in m {
				if old_oms[k].len == 0 {
					new_oms[k] = v
				} else {
					new_oms[k] << old_oms[k]
					new_oms[k] << v
				}
			}
			context.results[icmd].oms = new_oms.move()
			// println('')
			context.show_timings_details(si, icmd, cmd)
		}
	}
	// create full summaries, taking account of all runs
	for icmd in 0 .. context.results.len {
		mut new_full_summary := map[string]Aints{}
		for k, v in context.results[icmd].oms {
		for k, v in context.results[icmd].ous {
			new_full_summary[k] = new_aints(v, context.nmins, context.nmaxs)
		}
		context.results[icmd].summary = new_full_summary.move()
		context.results[icmd].summary = new_full_summary.clone()
	}
}

fn (mut context Context) show_diff_summary() {
	context.results.sort_with_compare(fn (a &CmdResult, b &CmdResult) int {
fn (mut context Context) show_timings_details(si int, icmd int, cmd string) {
	if !cmd.contains('-show-timings') {
		return
	}
	// This is specific to V compilations, when they are run with `-show-timings`,
	// which will show more details about each compilation stage.
	mut m := map[string][]i64{}
	ioutputs := context.results[icmd].outputs
	for o in ioutputs {
		x := o.split('ms ')
		if x.len > 1 {
			v := i64(x[0].trim_left(' ').f64() * 1000)
			k := x[1].all_before(' ')
			m[k] << v
		}
	}
	if m.len == 0 {
		return
	}
	println('> Timing details for series ${si + 1}, icmd: ${icmd + 1}, cmd: `${cmd}`:')
	mut summary := map[string]Aints{}
	for k, v in m {
		// show a temporary summary for the current series/cmd cycle
		s := new_aints(v, context.nmins, context.nmaxs)
		println(' ${k:-40s}: ${s}')
		summary[k] = s
	}
	// merge current raw results to the previous ones
	old_ous := context.results[icmd].ous.clone()
	mut new_ous := map[string][]i64{}
	for k, v in m {
		if old_ous[k].len == 0 {
			new_ous[k] = v
		} else {
			new_ous[k] << old_ous[k]
			new_ous[k] << v
		}
	}
	context.results[icmd].ous = new_ous.clone()
}

fn compare_by_average(a &CmdResult, b &CmdResult) int {
	if a.atiming.average < b.atiming.average {
		return -1
	}
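
As a hedged aside on the parsing in show_timings_details above: it assumes output lines of roughly the form '<number>ms <stage name> ...'. A tiny standalone sketch with a made up input line (the real -show-timings output format may differ):

fn main() {
	line := '12.5ms SCAN of all files' // made up example line
	x := line.split('ms ')
	v := i64(x[0].trim_left(' ').f64() * 1000) // 12500 (microseconds)
	k := x[1].all_before(' ') // 'SCAN'
	println('${k}: ${v} uS')
}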
@@ -344,11 +280,17 @@ fn (mut context Context) show_diff_summary() {
		return 1
	}
	return 0
	})
	println('Summary (commands are ordered by ascending mean time), after ${context.series} series of ${context.count} repetitions:')
}

fn (mut context Context) show_diff_summary() {
	base := context.results[0].atiming.average
	context.results.sort_with_compare(compare_by_average)
	mut first_cmd_percentage := f64(100.0)
	mut first_marker := ''
	if context.results.len == 1 {
		context.show_summary_title('${term.colorize(term.yellow, context.results[0].cmd):-57s}\n${context.results[0].atiming}, ${context.series} series, ${context.run_count} runs')
	} else {
		context.show_summary_title('Summary after ${context.series} series x ${context.run_count} runs (`>` is the first cmd)')
		for i, r in context.results {
			first_marker = ' '
			cpercent := (r.atiming.average / base) * 100 - 100
@@ -356,14 +298,20 @@ fn (mut context Context) show_diff_summary() {
				first_marker = bold('>')
				first_cmd_percentage = cpercent
			}
			println(' ${first_marker}${(i + 1):3} | ${cpercent:5.1f}% slower | ${r.cmd:-57s} | ${r.atiming}')
			mut comparison := '==='
			if r.atiming.average != base {
				comparison = '${cpercent:+7.1f}%'
			}
			println(' ${first_marker}${(i + 1):3} ${comparison:9} `${r.cmd}`')
			println(' ${r.atiming}')
		}
	}
	$if debugcontext ? {
		println('context: ${context}')
	}
	if int(base) > context.fail_on_maxtime {
	if base > f64(context.fail_on_maxtime * 1000) {
		flushed_print(performance_regression_label)
		println('average time: ${base:6.1f} ms > ${context.fail_on_maxtime} ms threshold.')
		println('average time: ${base / 1000:6.1f} ms > ${context.fail_on_maxtime} ms threshold.')
		exit(2)
	}
	if context.fail_on_regress_percent == max_fail_percent || context.results.len < 2 {
@@ -376,3 +324,71 @@ fn (mut context Context) show_diff_summary() {
		exit(3)
	}
}

fn (mut context Context) show_summary_title(line string) {
	mut msg := [line]
	if context.nmins > 0 {
		msg << 'discard mins: ${context.nmins:2}'
	}
	if context.nmaxs > 0 {
		msg << 'discard maxs: ${context.nmaxs:2}'
	}
	println(msg.join(', '))
}

fn (mut context Context) parse_options() ! {
	mut fp := flag.new_flag_parser(os.args#[1..])
	fp.application(os.file_name(os.executable()))
	fp.version('0.0.2')
	fp.description('Repeat command(s) and collect statistics.\nNote: quote each command (argument), when it contains spaces.')
	fp.arguments_description('CMD1 CMD2 ...')
	fp.skip_executable()
	fp.limit_free_args_to_at_least(1)!
	context.show_help = fp.bool('help', `h`, false, 'Show this help screen.')
	context.run_count = fp.int('runs', `r`, 10, 'Run count. Default: 10')
	context.warmup = fp.int('warmup', `w`, 2, 'Warmup run count. These are done *at the start* of each series, and the timings are ignored. Default: 2')
	context.series = fp.int('series', `s`, 1, 'Series count. `-s 2 -r 4 a b` => aaaabbbbaaaabbbb, while `-s 3 -r 2 a b` => aabbaabbaabb. Default: 1')
	context.ignore_failed = fp.bool('ignore', `e`, false, 'Ignore failed commands (returning a non 0 exit code).')
	context.use_newline = fp.bool('newline', `n`, false, 'Use \\n, do not overwrite the last line. Produces more output, but easier to diagnose.')
	context.show_output = fp.bool('output', `O`, false, 'Show command stdout/stderr in the progress indicator for each command. Note: slower, for verbose commands.')
	context.verbose = fp.bool('verbose', `v`, false, 'Be more verbose.')
	context.fail_on_maxtime = fp.int('max_time', `m`, max_time, 'Fail with exit code 2, when first cmd takes above M milliseconds (regression). Default: ${max_time}')
	context.fail_on_regress_percent = fp.int('fail_percent', `f`, max_fail_percent, 'Fail with exit code 3, when first cmd is X% slower than the rest (regression). Default: ${max_fail_percent}')
	context.cmd_template = fp.string('template', `t`, '{T}', 'Command template. {T} will be substituted with the current command. Default: {T}')
	cmd_params := fp.string_multi('parameter', `p`, 'A parameter substitution list. `{p}=val1,val2,val2` means that {p} in the template, will be substituted with each of val1, val2, val3.')
	context.nmins = fp.int('nmins', `i`, 0, 'Ignore the BOTTOM X results (minimum execution time). Makes the results more robust to performance flukes. Default: 0')
	context.nmaxs = fp.int('nmaxs', `a`, 0, 'Ignore the TOP X results (maximum execution time). Makes the results more robust to performance flukes. Default: 0')
	for p in cmd_params {
		parts := p.split(':')
		if parts.len > 1 {
			context.cmd_params[parts[0]] = parts[1].split(',')
		}
	}
	if context.show_help {
		println(fp.usage())
		exit(0)
	}
	commands := fp.finalize() or {
		eprintln('Error: ${err}')
		exit(1)
	}
	context.commands = context.expand_all_commands(commands)
	context.results = []CmdResult{len: context.commands.len, cap: 20, init: CmdResult{
		outputs: []string{cap: 500}
		timings: []i64{cap: 500}
	}}
	if context.use_newline {
		context.cline = '\n'
		context.cgoback = '\n'
	} else {
		context.cline = '\r' + term.h_divider('') + '\r'
		context.cgoback = '\r'
	}
}

fn main() {
	mut context := Context{}
	context.parse_options()!
	context.run()
	context.show_diff_summary()
}
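
Tying the flag descriptions above back to the CI workflow at the top of this diff, a hedged sketch of how the thresholds map to exit codes (the invocation is copied from the workflow; the interpretations follow the --max_time and --fail_percent help texts):

    ./v repeat --max_time 251 --series 3 --runs 20 --nmins 2 --nmaxs 5 --warmup 3 --fail_percent 10 -t 'cd {T} ; ./v -o hw.c examples/hello_world.v' . ./vmaster
    # exit 2: the first command's average time goes above 251 ms
    # exit 3: the first command is more than 10% slower than the rest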
@@ -30,6 +30,7 @@ const external_tools = [
	'ls',
	'missdoc',
	'repl',
	'repeat',
	'self',
	'setup-freetype',
	'shader',
@@ -2,8 +2,8 @@ module help

import os

// Topics whose module uses the cli module.
const cli_topics = ['new', 'init']
// Topics whose module uses the cli or flag modules (both support --help).
const cli_topics = ['new', 'init', 'repeat']

fn hdir(base string) string {
	return os.join_path(base, 'vlib', 'v', 'help')