diff --git a/cmd/tools/modules/testing/common.v b/cmd/tools/modules/testing/common.v
index 8b9a823cf7..b86aa65b55 100644
--- a/cmd/tools/modules/testing/common.v
+++ b/cmd/tools/modules/testing/common.v
@@ -48,7 +48,7 @@ pub const all_processes = get_all_processes()
 
 pub const header_bytes_to_search_for_module_main = 500
 
-pub const separator = '-'.repeat(max_header_len)
+pub const separator = '-'.repeat(max_header_len) + '\n'
 
 pub const max_compilation_retries = get_max_compilation_retries()
 
@@ -531,6 +531,7 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
 	} else {
 		fname_without_extension
 	}
+	reproduce_options := cmd_options.clone()
 	generated_binary_fpath := os.join_path_single(test_folder_path, generated_binary_fname)
 	if produces_file_output {
 		if ts.rm_binaries {
@@ -548,6 +549,7 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
 	if ts.show_stats {
 		skip_running = ''
 	}
+	reproduce_cmd := '${os.quoted_path(ts.vexe)} ${reproduce_options.join(' ')} ${os.quoted_path(file)}'
 	cmd := '${os.quoted_path(ts.vexe)} ${skip_running} ${cmd_options.join(' ')} ${os.quoted_path(file)}'
 	run_cmd := if run_js {
 		'node ${os.quoted_path(generated_binary_fpath)}'
@@ -620,7 +622,7 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
 			}
 			ts.benchmark.fail()
 			tls_bench.fail()
-			ts.add_failed_cmd(cmd)
+			ts.add_failed_cmd(reproduce_cmd)
 			return pool.no_result
 		}
 	} else {
@@ -650,7 +652,7 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
 				cmd_duration,
 				preparation: compile_cmd_duration
 			), cmd_duration, mtc)
-			ts.add_failed_cmd(cmd)
+			ts.add_failed_cmd(reproduce_cmd)
 			return pool.no_result
 		}
 		tls_bench.step_restart()
@@ -678,8 +680,10 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
 				// retry running at least 1 more time, to avoid CI false positives as much as possible
 				details.retry++
 			}
-			failure_output.write_string(separator)
-			failure_output.writeln('\n retry: 0 ; max_retry: ${details.retry} ; r.exit_code: ${r.exit_code} ; trimmed_output.len: ${trimmed_output.len}')
+			if details.retry != 0 {
+				failure_output.write_string(separator)
+				failure_output.writeln(' retry: 0 ; max_retry: ${details.retry} ; r.exit_code: ${r.exit_code} ; trimmed_output.len: ${trimmed_output.len}')
+			}
 			failure_output.writeln(trimmed_output)
 			os.setenv('VTEST_RETRY_MAX', '${details.retry}', true)
 			for retry = 1; retry <= details.retry; retry++ {
@@ -702,7 +706,7 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
 				}
 				trimmed_output = r.output.trim_space()
 				failure_output.write_string(separator)
-				failure_output.writeln('\n retry: ${retry} ; max_retry: ${details.retry} ; r.exit_code: ${r.exit_code} ; trimmed_output.len: ${trimmed_output.len}')
+				failure_output.writeln(' retry: ${retry} ; max_retry: ${details.retry} ; r.exit_code: ${r.exit_code} ; trimmed_output.len: ${trimmed_output.len}')
 				failure_output.writeln(trimmed_output)
 				time.sleep(fail_retry_delay_ms)
 			}
@@ -722,11 +726,10 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
 			tls_bench.fail()
 			cmd_duration = d_cmd.elapsed() - (fail_retry_delay_ms * details.retry)
 			ts.append_message_with_duration(.fail, tls_bench.step_message_with_label_and_duration(benchmark.b_fail,
-				'${normalised_relative_file}\n retry: ${retry}\n comp_cmd: ${cmd}\n run_cmd: ${run_cmd}\nfailure code: ${r.exit_code}; foutput.len: ${full_failure_output.len}; failure output:\n${full_failure_output}',
-				cmd_duration,
+				'${normalised_relative_file}\n${full_failure_output}', cmd_duration,
 				preparation: compile_cmd_duration
 			), cmd_duration, mtc)
-			ts.add_failed_cmd(cmd)
+			ts.add_failed_cmd(reproduce_cmd)
 			return pool.no_result
 		}
 	}
diff --git a/cmd/tools/modules/testing/output_normal.v b/cmd/tools/modules/testing/output_normal.v
index 2d2f3072be..f789b27b60 100644
--- a/cmd/tools/modules/testing/output_normal.v
+++ b/cmd/tools/modules/testing/output_normal.v
@@ -78,6 +78,6 @@ pub fn (r NormalReporter) worker_threads_finish(mut ts TestSession) {
 
 pub fn (r NormalReporter) list_of_failed_commands(failed_cmds []string) {
 	for i, cmd in failed_cmds {
-		eprintln(term.failed('Failed command ${i + 1}:') + ' ${cmd}')
+		eprintln(term.failed('To reproduce just failure ${i + 1} run:') + ' ${cmd}')
 	}
 }
diff --git a/cmd/tools/vcreate/vcreate_new_test.v b/cmd/tools/vcreate/vcreate_new_test.v
index a2fbc18e24..cd1728c3a1 100644
--- a/cmd/tools/vcreate/vcreate_new_test.v
+++ b/cmd/tools/vcreate/vcreate_new_test.v
@@ -32,7 +32,9 @@ fn prepare_test_path() ! {
 fn test_new_with_no_arg_input() {
 	prepare_test_path()!
 	project_name := 'my_project'
-	os.execute_opt('${expect_exe} ${os.join_path(expect_tests_path, 'new_with_no_arg.expect')} ${vroot} ${project_name}') or {
+	cmd := '${expect_exe} ${os.join_path(expect_tests_path, 'new_with_no_arg.expect')} ${vroot} ${project_name}'
+	os.execute_opt(cmd) or {
+		dump(cmd)
 		assert false, err.msg()
 	}
 	// Assert mod data set in `new_no_arg.expect`.
@@ -49,7 +51,9 @@ fn test_new_with_no_arg_input() {
 fn test_new_with_name_arg_input() {
 	prepare_test_path()!
 	project_name := 'my_other_project'
-	os.execute_opt('${expect_exe} ${os.join_path(expect_tests_path, 'new_with_name_arg.expect')} ${vroot} ${project_name}') or {
+	cmd := '${expect_exe} ${os.join_path(expect_tests_path, 'new_with_name_arg.expect')} ${vroot} ${project_name}'
+	os.execute_opt(cmd) or {
+		dump(cmd)
 		assert false, err.msg()
 	}
 	// Assert mod data set in `new_with_name_arg.expect`.
@@ -67,7 +71,9 @@ fn test_new_with_model_arg_input() {
 	prepare_test_path()!
 	project_name := 'my_lib'
 	model := '--lib'
-	os.execute_opt('${expect_exe} ${os.join_path(expect_tests_path, 'new_with_model_arg.expect')} ${vroot} ${model} ${project_name}') or {
+	cmd := '${expect_exe} ${os.join_path(expect_tests_path, 'new_with_model_arg.expect')} ${vroot} ${model} ${project_name}'
+	os.execute_opt(cmd) or {
+		dump(cmd)
 		assert false, err.msg()
 	}
 	project_path := os.join_path(test_module_path, project_name)
diff --git a/cmd/tools/vtest_test.v b/cmd/tools/vtest_test.v
index 02f16814fc..955d2fe7e5 100644
--- a/cmd/tools/vtest_test.v
+++ b/cmd/tools/vtest_test.v
@@ -71,7 +71,7 @@ fn test_partial_failure() {
 	assert res.exit_code == 1
 	assert res.output.contains('assert 5 == 7'), res.output
 	assert res.output.contains(' 1 failed, 1 passed, 2 total'), res.output
-	assert res.output.contains('Failed command'), res.output
+	assert res.output.contains('To reproduce just failure'), res.output
 }
 
 fn test_with_stats_and_partial_failure() {
@@ -79,5 +79,5 @@ fn test_with_stats_and_partial_failure() {
 	assert res.exit_code == 1
 	assert res.output.contains('assert 5 == 7'), res.output
 	assert res.output.contains(' 1 failed, 1 passed, 2 total'), res.output
-	assert res.output.contains('Failed command'), res.output
+	assert res.output.contains('To reproduce just failure'), res.output
 }