parallel-vm.py

#!/usr/bin/env python2
#
# Parallel VM test case executor
# Copyright (c) 2014, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
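#
# Example invocation (run from the directory containing vm-run.sh, since the
# helper scripts are invoked via relative paths):
#   ./parallel-vm.py 8 --codecov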

import curses
import fcntl
import os
import subprocess
import sys
import time
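
# Collect the test case result lines (START/PASS/FAIL/SKIP) that run-tests.py
# has printed on each VM's stdout so far.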
def get_results():
    global vm
    started = []
    passed = []
    failed = []
    skipped = []
    for i in range(0, num_servers):
        lines = vm[i]['out'].splitlines()
        started += [ l for l in lines if l.startswith('START ') ]
        passed += [ l for l in lines if l.startswith('PASS ') ]
        failed += [ l for l in lines if l.startswith('FAIL ') ]
        skipped += [ l for l in lines if l.startswith('SKIP ') ]
    return (started, passed, failed, skipped)
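
# curses view: one status line per VM plus an aggregate "Total" line. This
# function also acts as the scheduler, feeding the next test case name to
# whichever VM reports that it is ready for more work.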
def show_progress(scr):
    global num_servers
    global vm
    global dir
    global timestamp
    global tests

    total_tests = len(tests)

    scr.leaveok(1)
    scr.addstr(0, 0, "Parallel test execution status", curses.A_BOLD)
    for i in range(0, num_servers):
        scr.addstr(i + 1, 0, "VM %d:" % (i + 1), curses.A_BOLD)
        scr.addstr(i + 1, 10, "starting VM")
    scr.addstr(num_servers + 1, 0, "Total:", curses.A_BOLD)
    scr.addstr(num_servers + 1, 20, "TOTAL={} STARTED=0 PASS=0 FAIL=0 SKIP=0".format(total_tests))
    scr.refresh()
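
    # Poll the VM processes every 0.5 seconds until all of them have exited.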
    while True:
        running = False
        updated = False
        for i in range(0, num_servers):
            if not vm[i]['proc']:
                continue
            if vm[i]['proc'].poll() is not None:
                vm[i]['proc'] = None
                scr.move(i + 1, 10)
                scr.clrtoeol()
                log = '{}/{}.srv.{}/console'.format(dir, timestamp, i + 1)
                with open(log, 'r') as f:
                    if "Kernel panic" in f.read():
                        scr.addstr("kernel panic")
                    else:
                        scr.addstr("completed run")
                updated = True
                continue
            running = True
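            # The pipes were set to non-blocking in main(); a read with no
            # data available raises an exception, which is simply ignored.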
            try:
                err = vm[i]['proc'].stderr.read()
                vm[i]['err'] += err
            except:
                pass
            try:
                out = vm[i]['proc'].stdout.read()
                if "READY" in out or "PASS" in out or "FAIL" in out or "SKIP" in out:
                    if not tests:
                        vm[i]['proc'].stdin.write('\n')
                    else:
                        name = tests.pop(0)
                        vm[i]['proc'].stdin.write(name + '\n')
            except:
                continue
            #print("VM {}: '{}'".format(i, out))
            vm[i]['out'] += out
            lines = vm[i]['out'].splitlines()
            last = [ l for l in lines if l.startswith('START ') ]
            if len(last) > 0:
                try:
                    info = last[-1].split(' ')
                    scr.move(i + 1, 10)
                    scr.clrtoeol()
                    scr.addstr(info[1])
                    updated = True
                except:
                    pass

        if not running:
            break

        if updated:
            (started, passed, failed, skipped) = get_results()
            scr.move(num_servers + 1, 10)
            scr.clrtoeol()
            scr.addstr("{} %".format(int(100.0 * (len(passed) + len(failed) + len(skipped)) / total_tests)))
            scr.addstr(num_servers + 1, 20, "TOTAL={} STARTED={} PASS={} FAIL={} SKIP={}".format(total_tests, len(started), len(passed), len(failed), len(skipped)))
            if len(failed) > 0:
                scr.move(num_servers + 2, 0)
                scr.clrtoeol()
                scr.addstr("Failed test cases: ")
                for f in failed:
                    scr.addstr(f.split(' ')[1])
                    scr.addstr(' ')
            scr.refresh()

        time.sleep(0.5)

    scr.refresh()
    time.sleep(0.3)
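
# Command line handling, test case list construction, VM startup, and final
# result/log reporting.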
def main():
    global num_servers
    global vm
    global dir
    global timestamp
    global tests

    if len(sys.argv) < 2:
        sys.exit("Usage: %s <number of VMs> [--codecov] [params..]" % sys.argv[0])
    num_servers = int(sys.argv[1])
    if num_servers < 1:
        sys.exit("Too small number of VMs")

    timestamp = int(time.time())

    if len(sys.argv) > 2 and sys.argv[2] == "--codecov":
        idx = 3
        print "Code coverage - build separate binaries"
        logdir = "/tmp/hwsim-test-logs/" + str(timestamp)
        os.makedirs(logdir)
        subprocess.check_call(['./build-codecov.sh', logdir])
        codecov_args = ['--codecov_dir', logdir]
        codecov = True
    else:
        idx = 2
        codecov_args = []
        codecov = False
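
    # Build the list of test cases to run by asking run-tests.py to list them
    # (-L); the test case name is the first token on each output line.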
    tests = []
    cmd = [ '../run-tests.py', '-L' ] + sys.argv[idx:]
    lst = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    for l in lst.stdout.readlines():
        name = l.split(' ')[0]
        tests.append(name)
    if len(tests) == 0:
        sys.exit("No test cases selected")

    if '-f' in sys.argv[idx:]:
        extra_args = sys.argv[idx:]
    else:
        extra_args = [x for x in sys.argv[idx:] if x not in tests]

    dir = '/tmp/hwsim-test-logs'
    try:
        os.mkdir(dir)
    except:
        pass

    if num_servers > 2 and len(tests) > 100:
        # Move test cases with long duration to the beginning as an
        # optimization to avoid last part of the test execution running a long
        # duration test case on a single VM while all other VMs have already
        # completed their work.
        long = [ "ap_wps_pbc_timeout",
                 "grpform_cred_ready_timeout",
                 "grpform_cred_ready_timeout2",
                 "discovery_pd_retries",
                 "ibss_wpa_none",
                 "concurrent_p2pcli",
                 "wpas_ap_wps",
                 "ibss_rsn",
                 "wext_pmksa_cache",
                 "ap_ht_40mhz_intolerant_ap",
                 "ap_wps_setup_locked_timeout",
                 "ap_vht160",
                 "dfs_radar",
                 "dfs" ]
        for l in long:
            if l in tests:
                tests.remove(l)
                tests.insert(0, l)
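
    # Start one vm-run.sh instance per VM in interactive mode (-i); test case
    # names are written to its stdin and results are read from its stdout.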
    vm = {}
    for i in range(0, num_servers):
        print("\rStarting virtual machine {}/{}".format(i + 1, num_servers)),
        cmd = ['./vm-run.sh', '--timestamp', str(timestamp),
               '--ext', 'srv.%d' % (i + 1),
               '-i'] + codecov_args + extra_args
        vm[i] = {}
        vm[i]['proc'] = subprocess.Popen(cmd,
                                         stdin=subprocess.PIPE,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
        vm[i]['out'] = ""
        vm[i]['err'] = ""
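        # Switch the VM's stdout/stderr pipes to non-blocking mode so that the
        # progress loop can poll them without stalling on an idle VM.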
        for stream in [ vm[i]['proc'].stdout, vm[i]['proc'].stderr ]:
            fd = stream.fileno()
            fl = fcntl.fcntl(fd, fcntl.F_GETFL)
            fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
    print

    curses.wrapper(show_progress)
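
    # Write the combined stdout/stderr from all VMs into a single log file and
    # print the final summary.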
    with open('{}/{}-parallel.log'.format(dir, timestamp), 'w') as f:
        for i in range(0, num_servers):
            f.write('VM {}\n{}\n{}\n'.format(i, vm[i]['out'], vm[i]['err']))

    (started, passed, failed, skipped) = get_results()

    if len(failed) > 0:
        print "Failed test cases:"
        for f in failed:
            print f.split(' ')[1],
        print
    print("TOTAL={} PASS={} FAIL={} SKIP={}".format(len(started), len(passed), len(failed), len(skipped)))
    print "Logs: " + dir + '/' + str(timestamp)

    for i in range(0, num_servers):
        log = '{}/{}.srv.{}/console'.format(dir, timestamp, i + 1)
        with open(log, 'r') as f:
            if "Kernel panic" in f.read():
                print "Kernel panic in " + log

    if codecov:
        print "Code coverage - preparing report"
        for i in range(num_servers):
            subprocess.check_call(['./process-codecov.sh',
                                   logdir + ".srv.%d" % (i + 1),
                                   str(i)])
        subprocess.check_call(['./combine-codecov.sh', logdir])
        print "file://%s/index.html" % logdir

if __name__ == "__main__":
    main()