mirror of https://github.com/cuberite/polarssl.git
synced 2025-11-04 04:32:24 -05:00
	Move test running and reporting functions into their own class
This makes the structure of the code more apparent. No behavior change.
This commit is contained in:
parent b86b6d32f9
commit 2460933a6f
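To make the pattern named in the commit message concrete, here is a minimal, self-contained sketch (a hypothetical illustration, not code from this repository): a small class keeps the test count and the error list as state and exposes run/report methods, instead of free functions passing (count, errors) tuples around. The actual Tests class appears in the diff below.

import sys


class MiniTests:
    """Hypothetical miniature of the Tests class introduced in the diff below."""

    def __init__(self):
        self.count = 0      # how many comparisons have been made
        self.errors = []    # mismatches accumulated as state, not returned

    def check_one(self, expected, actual):
        """Record one comparison and remember any mismatch."""
        self.count += 1
        if expected != actual:
            self.errors.append((expected, actual))

    def report(self, out):
        """Write one line per mismatch, then a PASS/FAIL summary line."""
        for expected, actual in self.errors:
            out.write('expected "{}", got "{}"\n'.format(expected, actual))
        out.write('{} test cases'.format(self.count))
        if self.errors:
            out.write(', {} FAIL\n'.format(len(self.errors)))
        else:
            out.write(' PASS\n')


tests = MiniTests()
tests.check_one('PSA_SUCCESS', 'PSA_SUCCESS')
tests.check_one('PSA_ERROR_GENERIC_ERROR', 'PSA_ERROR_CORRUPTION_DETECTED')
tests.report(sys.stdout)    # prints the mismatch, then "2 test cases, 1 FAIL"
if tests.errors:
    sys.exit(1)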
					
@@ -338,48 +338,52 @@ def collect_values(inputs, type_word, include_path=None, keep_c=False):
                    include_path=include_path, keep_c=keep_c)
     return expressions, values
 
-def do_test(options, inputs, type_word):
-    """Test psa_constant_names for the specified type.
+class Tests:
+    """An object representing tests and their results."""
 
-    Run program on names.
-    Use inputs to figure out what arguments to pass to macros that
-    take arguments.
+    def __init__(self, options):
+        self.options = options
+        self.count = 0
+        self.errors = []
 
-    Return ``(count, errors)`` where ``count`` is the number of expressions
-    that have been tested and ``errors`` is the list of errors that were
-    encountered.
-    """
-    expressions, values = collect_values(inputs, type_word,
-                                         include_path=options.include,
-                                         keep_c=options.keep_c)
-    output = subprocess.check_output([options.program, type_word] + values)
-    outputs = output.decode('ascii').strip().split('\n')
-    errors = [(type_word, expr, value, output)
-              for (expr, value, output) in zip(expressions, values, outputs)
-              if normalize(expr) != normalize(output)]
-    return len(expressions), errors
+    def run_one(self, inputs, type_word):
+        """Test psa_constant_names for the specified type.
 
-def report_errors(errors):
-    """Describe each case where the output is not as expected."""
-    for type_word, name, value, output in errors:
-        print('For {} "{}", got "{}" (value: {})'
-              .format(type_word, name, output, value))
+        Run the program on the names for this type.
+        Use the inputs to figure out what arguments to pass to macros that
+        take arguments.
+        """
+        expressions, values = collect_values(inputs, type_word,
+                                             include_path=self.options.include,
+                                             keep_c=self.options.keep_c)
+        output = subprocess.check_output([self.options.program, type_word] +
+                                         values)
+        outputs = output.decode('ascii').strip().split('\n')
+        self.count += len(expressions)
+        for expr, value, output in zip(expressions, values, outputs):
+            if normalize(expr) != normalize(output):
+                self.errors.append((type_word, expr, value, output))
 
-def run_tests(options, inputs):
-    """Run psa_constant_names on all the gathered inputs.
+    def run_all(self, inputs):
+        """Run psa_constant_names on all the gathered inputs."""
+        for type_word in ['status', 'algorithm', 'ecc_curve', 'dh_group',
+                          'key_type', 'key_usage']:
+            self.run_one(inputs, type_word)
 
-    Return a tuple (count, errors) where count is the total number of inputs
-    that were tested and errors is the list of cases where the output was
-    not as expected.
-    """
-    count = 0
-    errors = []
-    for type_word in ['status', 'algorithm', 'ecc_curve', 'dh_group',
-                      'key_type', 'key_usage']:
-        c, e = do_test(options, inputs, type_word)
-        count += c
-        errors += e
-    return count, errors
+    def report(self, out):
+        """Describe each case where the output is not as expected.
+
+        Write the errors to ``out``.
+        Also write a total.
+        """
+        for type_word, name, value, output in self.errors:
+            out.write('For {} "{}", got "{}" (value: {})\n'
+                      .format(type_word, name, output, value))
+        out.write('{} test cases'.format(self.count))
+        if self.errors:
+            out.write(', {} FAIL\n'.format(len(self.errors)))
+        else:
+            out.write(' PASS\n')
 
 HEADERS = ['psa/crypto.h', 'psa/crypto_extra.h', 'psa/crypto_values.h']
 TEST_SUITES = ['tests/suites/test_suite_psa_crypto_metadata.data']
@@ -401,12 +405,10 @@ def main():
     options = parser.parse_args()
     headers = [os.path.join(options.include[0], h) for h in HEADERS]
     inputs = gather_inputs(headers, TEST_SUITES)
-    count, errors = run_tests(options, inputs)
-    report_errors(errors)
-    if errors == []:
-        print('{} test cases PASS'.format(count))
-    else:
-        print('{} test cases, {} FAIL'.format(count, len(errors)))
+    tests = Tests(options)
+    tests.run_all(inputs)
+    tests.report(sys.stdout)
+    if tests.errors:
         exit(1)
 
 if __name__ == '__main__':
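A consequence of the new report(out) signature, sketched as hypothetical usage (not part of this commit): because report() writes to a file-like object instead of calling print(), its output can be redirected or captured. The snippet assumes `tests` is a Tests instance that run_all() has already populated, as in the new main() above.

import io

buffer = io.StringIO()
tests.report(buffer)           # assumption: `tests` populated as in main() above
summary = buffer.getvalue()    # e.g. "<count> test cases PASS\n"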