|
| 1 | +#!/usr/bin/env python3 |
| 2 | + |
| 3 | +# This file is part of the MicroPython project, http://micropython.org/ |
| 4 | +# The MIT License (MIT) |
| 5 | +# Copyright (c) 2019 Damien P. George |
| 6 | + |
| 7 | +import os |
| 8 | +import subprocess |
| 9 | +import sys |
| 10 | +import argparse |
| 11 | + |
| 12 | +sys.path.append('../tools') |
| 13 | +import pyboard |
| 14 | + |
# Paths for host executables
# Both are overridable via environment variables so CI or developers can
# point at custom builds.
CPYTHON3 = os.getenv('MICROPY_CPYTHON3', 'python3')
MICROPYTHON = os.getenv('MICROPY_MICROPYTHON', '../ports/unix/micropython_coverage')

# Directory containing the pre-built dynamic native module examples (.mpy files).
NATMOD_EXAMPLE_DIR = '../examples/natmod/'

# Supported tests and their corresponding mpy module
# Maps a substring of the test filename to the .mpy path template; the
# literal token $(ARCH) is substituted with the target architecture at
# run time (see run_tests).
TEST_MAPPINGS = {
    'btree': 'btree/btree_$(ARCH).mpy',
    'framebuf': 'framebuf/framebuf_$(ARCH).mpy',
    'uheapq': 'uheapq/uheapq_$(ARCH).mpy',
    'ure': 'ure/ure_$(ARCH).mpy',
    'uzlib': 'uzlib/uzlib_$(ARCH).mpy',
}
| 29 | + |
# Code to allow a target MicroPython to import an .mpy from RAM
# This script is prepended to each test before it is sent to the target.
# It mounts a minimal in-memory filesystem whose only file is
# '__injected.mpy', backed by the global __buf (a bytes object that
# run_tests embeds ahead of this code), imports that file, and registers
# the resulting module in sys.modules under the real module name supplied
# via str.format.  The string itself is target-side code and must not be
# altered here.
injected_import_hook_code = """\
import sys, uos, uio
class __File(uio.IOBase):
  def __init__(self):
    self.off = 0
  def ioctl(self, request, arg):
    return 0
  def readinto(self, buf):
    buf[:] = memoryview(__buf)[self.off:self.off + len(buf)]
    self.off += len(buf)
    return len(buf)
class __FS:
  def mount(self, readonly, mkfs):
    pass
  def chdir(self, path):
    pass
  def stat(self, path):
    if path == '__injected.mpy':
      return tuple(0 for _ in range(10))
    else:
      raise OSError(-2) # ENOENT
  def open(self, path, mode):
    return __File()
uos.mount(__FS(), '/__remote')
uos.chdir('/__remote')
sys.modules['{}'] = __import__('__injected')
"""
| 58 | + |
class TargetSubprocess:
    """Run test scripts by piping them into a locally spawned interpreter."""

    def __init__(self, cmd):
        # Command line (list form) of the interpreter to spawn per script.
        self.cmd = cmd

    def close(self):
        # Nothing to release: a fresh subprocess is spawned for each script.
        pass

    def run_script(self, script):
        """Execute `script` (bytes) on the target; return (output, error)."""
        try:
            proc = subprocess.run(
                self.cmd,
                input=script,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
            )
        except subprocess.CalledProcessError as exc:
            # NOTE(review): subprocess.run only raises CalledProcessError when
            # check=True is passed, which it is not here — this path looks
            # unreachable; confirm before relying on it.
            return b'', exc
        return proc.stdout, None
| 72 | + |
class TargetPyboard:
    """Run test scripts on an attached board via a pyboard.Pyboard link."""

    def __init__(self, pyb):
        self.pyb = pyb
        self.pyb.enter_raw_repl()

    def close(self):
        # Leave the raw REPL before dropping the serial connection.
        self.pyb.exit_raw_repl()
        self.pyb.close()

    def run_script(self, script):
        """Execute `script` (bytes) on the board; return (output, error)."""
        try:
            # Re-enter the raw REPL for every script so each test starts clean.
            self.pyb.enter_raw_repl()
            raw = self.pyb.exec_(script)
        except pyboard.PyboardError as exc:
            return b'', exc
        # Normalize the board's CRLF line endings to LF.
        return raw.replace(b'\r\n', b'\n'), None
| 90 | + |
def run_tests(target_truth, target, args, stats):
    """Run each test in args.files on `target`, checking output against truth.

    For every test file: embed the matching native .mpy plus the import hook,
    run the combined script on `target`, and compare the output against the
    .exp file if present, otherwise against `target_truth` (CPython).
    Results are tallied into the `stats` dict in place.
    """
    for test_file in args.files:
        # Locate which supported module this test exercises (by substring).
        match = next(
            ((mod, tmpl) for mod, tmpl in TEST_MAPPINGS.items() if mod in test_file),
            None,
        )
        if match is None:
            print('---- {} - no matching mpy'.format(test_file))
            continue
        test_module, mpy_template = match
        test_mpy = mpy_template.replace('$(ARCH)', args.arch)

        # Read the test source.
        with open(test_file, 'rb') as f:
            test_file_data = f.read()

        # Build the full script: embedded .mpy buffer + import hook + test body.
        try:
            with open(NATMOD_EXAMPLE_DIR + test_mpy, 'rb') as f:
                test_script = b'__buf=' + bytes(repr(f.read()), 'ascii') + b'\n'
        except OSError:
            print('---- {} - mpy file not compiled'.format(test_file))
            continue
        test_script += bytes(injected_import_hook_code.format(test_module), 'ascii')
        test_script += test_file_data

        # Run test under MicroPython.
        result_out, error = target.run_script(test_script)

        # Classify the outcome (branches are mutually exclusive).
        extra = ''
        if error is not None:
            result = 'FAIL'
            extra = ' - ' + str(error)
        elif result_out == b'SKIP\n':
            result = 'SKIP'
        else:
            # Expected output comes from a .exp file when one exists,
            # otherwise from running the plain test under the truth target.
            try:
                with open(test_file + '.exp', 'rb') as f:
                    result_exp = f.read()
                error = None
            except OSError:
                result_exp, error = target_truth.run_script(test_file_data)
            if error is not None:
                result = 'TRUTH FAIL'
            elif result_out != result_exp:
                result = 'FAIL'
                print(result_out)
            else:
                result = 'pass'

        # Accumulate statistics.
        stats['total'] += 1
        stats[{'pass': 'pass', 'SKIP': 'skip'}.get(result, 'fail')] += 1

        # Print result
        print('{:4} {}{}'.format(result, test_file, extra))
| 154 | + |
def main():
    """Parse arguments, choose the targets, run the tests and print a summary.

    Exits with status 1 if any test failed.
    """
    parser = argparse.ArgumentParser(description='Run dynamic-native-module tests under MicroPython')
    parser.add_argument('-p', '--pyboard', action='store_true', help='run tests via pyboard.py')
    parser.add_argument('-d', '--device', default='/dev/ttyACM0', help='the device for pyboard.py')
    parser.add_argument('-a', '--arch', default='x64', help='native architecture of the target')
    parser.add_argument('files', nargs='*', help='input test files')
    args = parser.parse_args()

    # Truth target is host CPython; the test target is either an attached
    # board (via pyboard.py) or the local unix coverage build.
    target_truth = TargetSubprocess([CPYTHON3])
    target = (
        TargetPyboard(pyboard.Pyboard(args.device))
        if args.pyboard
        else TargetSubprocess([MICROPYTHON])
    )

    stats = dict.fromkeys(('total', 'pass', 'fail', 'skip'), 0)
    run_tests(target_truth, target, args, stats)

    target.close()
    target_truth.close()

    # Summary.
    print('{} tests performed'.format(stats['total']))
    print('{} tests passed'.format(stats['pass']))
    if stats['fail']:
        print('{} tests failed'.format(stats['fail']))
    if stats['skip']:
        print('{} tests skipped'.format(stats['skip']))

    if stats['fail']:
        sys.exit(1)

if __name__ == "__main__":
    main()