#!/usr/bin/env python3
"""
compare.py - versatile benchmark output compare tool
"""

import argparse
from argparse import ArgumentParser
import json
import sys
import os
import unittest

import gbench
from gbench import util, report
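
# gbench is the helper package shipped alongside this script: gbench.util is
# used below to classify, run, or load the benchmark inputs, and gbench.report
# to build the comparison (difference) report.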


def check_inputs(in1, in2, flags):
    """
    Perform checking on the user-provided inputs and diagnose any abnormalities
    """
    in1_kind, in1_err = util.classify_input_file(in1)
    in2_kind, in2_err = util.classify_input_file(in2)
    output_file = util.find_benchmark_flag('--benchmark_out=', flags)
    output_type = util.find_benchmark_flag('--benchmark_out_format=', flags)
    if in1_kind == util.IT_Executable and in2_kind == util.IT_Executable and output_file:
        print(("WARNING: '--benchmark_out=%s' will be passed to both "
               "benchmarks, causing it to be overwritten") % output_file)
    if in1_kind == util.IT_JSON and in2_kind == util.IT_JSON:
        # When both sides are JSON the only supported flag is
        # --benchmark_filter=
        for flag in util.remove_benchmark_flags('--benchmark_filter=', flags):
            print("WARNING: passing %s has no effect since both "
                  "inputs are JSON" % flag)
    if output_type is not None and output_type != 'json':
        print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py'"
               " is not supported.") % output_type)
        sys.exit(1)


def create_parser():
    parser = ArgumentParser(
        description='versatile benchmark output compare tool')

    parser.add_argument(
        '-a',
        '--display_aggregates_only',
        dest='display_aggregates_only',
        action="store_true",
        help="If there are repetitions, by default we display everything: the "
             "actual runs and the aggregates computed. Sometimes it is "
             "desirable to only view the aggregates, e.g. when there are a "
             "lot of repetitions. Do note that only the display is affected; "
             "internally, all the actual runs are still used, e.g. for the U test.")

    parser.add_argument(
        '--no-color',
        dest='color',
        default=True,
        action="store_false",
        help="Do not use colors in the terminal output"
    )

    parser.add_argument(
        '-d',
        '--dump_to_json',
        dest='dump_to_json',
        help="Additionally, dump benchmark comparison output to this file in JSON format.")

    utest = parser.add_argument_group()
    utest.add_argument(
        '--no-utest',
        dest='utest',
        default=True,
        action="store_false",
        help=("The tool can do a two-tailed Mann-Whitney U test with the null "
              "hypothesis that it is equally likely that a randomly selected "
              "value from one sample will be less than or greater than a "
              "randomly selected value from a second sample.\n"
              "WARNING: requires a **LARGE** (no less than {}) number of "
              "repetitions to be meaningful!\n"
              "The test is run by default if at least {} repetitions were done.\n"
              "This option can disable the U Test.").format(
            report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS))
    alpha_default = 0.05
    utest.add_argument(
        "--alpha",
        dest='utest_alpha',
        default=alpha_default,
        type=float,
        help=("significance level alpha. If the calculated p-value is below "
              "this value, then the result is said to be statistically "
              "significant and the null hypothesis is rejected.\n"
              "(default: %0.4f)") % alpha_default)

    subparsers = parser.add_subparsers(
        help='This tool has multiple modes of operation:',
        dest='mode')

    parser_a = subparsers.add_parser(
        'benchmarks',
        help='The simplest use case: compare all the output of these two benchmarks')
    baseline = parser_a.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test_baseline',
        metavar='test_baseline',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    contender = parser_a.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'test_contender',
        metavar='test_contender',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    parser_a.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    parser_b = subparsers.add_parser(
        'filters',
        help='Compare one filter of a benchmark against another filter of the same benchmark')
    baseline = parser_b.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test',
        metavar='test',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    baseline.add_argument(
        'filter_baseline',
        metavar='filter_baseline',
        type=str,
        nargs=1,
        help='The first filter, which will be used as the baseline')
    contender = parser_b.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'filter_contender',
        metavar='filter_contender',
        type=str,
        nargs=1,
        help='The second filter, which will be compared against the baseline')
    parser_b.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    parser_c = subparsers.add_parser(
        'benchmarksfiltered',
        help='Compare one filter of the first benchmark against another filter of the second benchmark')
    baseline = parser_c.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test_baseline',
        metavar='test_baseline',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    baseline.add_argument(
        'filter_baseline',
        metavar='filter_baseline',
        type=str,
        nargs=1,
        help='The first filter, which will be used as the baseline')
    contender = parser_c.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'test_contender',
        metavar='test_contender',
        type=argparse.FileType('r'),
        nargs=1,
        help='The second benchmark executable or JSON output file, which will be compared against the baseline')
    contender.add_argument(
        'filter_contender',
        metavar='filter_contender',
        type=str,
        nargs=1,
        help='The second filter, which will be compared against the baseline')
    parser_c.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    return parser
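

# Illustrative invocations of the three modes defined above; <...> are
# placeholders, and any trailing arguments are forwarded to the benchmark
# executables as benchmark_options:
#   compare.py benchmarks <baseline> <contender> [benchmark_options...]
#   compare.py filters <benchmark> <filter_baseline> <filter_contender> [benchmark_options...]
#   compare.py benchmarksfiltered <baseline> <filter_baseline> <contender> <filter_contender> [benchmark_options...]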


def main():
    # Parse the command line flags
    parser = create_parser()
    args, unknown_args = parser.parse_known_args()

    if args.mode is None:
        parser.print_help()
        exit(1)

    assert not unknown_args
    benchmark_options = args.benchmark_options

    if args.mode == 'benchmarks':
        test_baseline = args.test_baseline[0].name
        test_contender = args.test_contender[0].name
        filter_baseline = ''
        filter_contender = ''

        # NOTE: if test_baseline == test_contender, you are analyzing the stdev

        description = 'Comparing %s to %s' % (test_baseline, test_contender)
    elif args.mode == 'filters':
        test_baseline = args.test[0].name
        test_contender = args.test[0].name
        filter_baseline = args.filter_baseline[0]
        filter_contender = args.filter_contender[0]

        # NOTE: if filter_baseline == filter_contender, you are analyzing the
        # stdev

        description = 'Comparing %s to %s (from %s)' % (
            filter_baseline, filter_contender, args.test[0].name)
    elif args.mode == 'benchmarksfiltered':
        test_baseline = args.test_baseline[0].name
        test_contender = args.test_contender[0].name
        filter_baseline = args.filter_baseline[0]
        filter_contender = args.filter_contender[0]

        # NOTE: if test_baseline == test_contender and
        # filter_baseline == filter_contender, you are analyzing the stdev

        description = 'Comparing %s (from %s) to %s (from %s)' % (
            filter_baseline, test_baseline, filter_contender, test_contender)
    else:
        # should never happen
        print("Unrecognized mode of operation: '%s'" % args.mode)
        parser.print_help()
        exit(1)

    check_inputs(test_baseline, test_contender, benchmark_options)

    if args.display_aggregates_only:
        benchmark_options += ['--benchmark_display_aggregates_only=true']

    options_baseline = []
    options_contender = []

    if filter_baseline and filter_contender:
        options_baseline = ['--benchmark_filter=%s' % filter_baseline]
        options_contender = ['--benchmark_filter=%s' % filter_contender]

    # Run the benchmarks and report the results
    json1 = json1_orig = gbench.util.sort_benchmark_results(
        gbench.util.run_or_load_benchmark(
            test_baseline, benchmark_options + options_baseline))
    json2 = json2_orig = gbench.util.sort_benchmark_results(
        gbench.util.run_or_load_benchmark(
            test_contender, benchmark_options + options_contender))

    # Now, filter the benchmarks so that the difference report can work
    if filter_baseline and filter_contender:
        replacement = '[%s vs. %s]' % (filter_baseline, filter_contender)
        json1 = gbench.report.filter_benchmark(
            json1_orig, filter_baseline, replacement)
        json2 = gbench.report.filter_benchmark(
            json2_orig, filter_contender, replacement)

    # Diff the two result sets and print the report
    diff_report = gbench.report.get_difference_report(
        json1, json2, args.utest)
    output_lines = gbench.report.print_difference_report(
        diff_report,
        args.display_aggregates_only,
        args.utest, args.utest_alpha, args.color)
    print(description)
    for ln in output_lines:
        print(ln)

    # Optionally, also dump the diff report to a JSON file
    if args.dump_to_json is not None:
        with open(args.dump_to_json, 'w') as f_json:
            json.dump(diff_report, f_json)


class TestParser(unittest.TestCase):
    def setUp(self):
        self.parser = create_parser()
        testInputs = os.path.join(
            os.path.dirname(
                os.path.realpath(__file__)),
            'gbench',
            'Inputs')
        self.testInput0 = os.path.join(testInputs, 'test1_run1.json')
        self.testInput1 = os.path.join(testInputs, 'test1_run2.json')

    def test_benchmarks_basic(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_without_utest(self):
        parsed = self.parser.parse_args(
            ['--no-utest', 'benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertFalse(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.05)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_display_aggregates_only(self):
        parsed = self.parser.parse_args(
            ['-a', 'benchmarks', self.testInput0, self.testInput1])
        self.assertTrue(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_with_utest_alpha(self):
        parsed = self.parser.parse_args(
            ['--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.314)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_without_utest_with_utest_alpha(self):
        parsed = self.parser.parse_args(
            ['--no-utest', '--alpha=0.314', 'benchmarks',
             self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertFalse(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.314)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_with_remainder(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1, 'd'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.benchmark_options, ['d'])

    def test_benchmarks_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1, '--', 'e'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.benchmark_options, ['e'])

    def test_filters_basic(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertFalse(parsed.benchmark_options)

    def test_filters_with_remainder(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd', 'e'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertEqual(parsed.benchmark_options, ['e'])

    def test_filters_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd', '--', 'f'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertEqual(parsed.benchmark_options, ['f'])

    def test_benchmarksfiltered_basic(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarksfiltered_with_remainder(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c',
             self.testInput1, 'e', 'f'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertEqual(parsed.benchmark_options[0], 'f')

    def test_benchmarksfiltered_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c',
             self.testInput1, 'e', '--', 'g'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertEqual(parsed.benchmark_options[0], 'g')
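

# The TestParser cases above only exercise argument parsing. They can be run
# with the standard unittest runner (e.g. `python3 -m unittest compare` from
# this directory, assuming the gbench package and its Inputs JSON files are
# present), or by re-enabling the unittest.main() call below.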
if __name__ == '__main__':
    # unittest.main()
    main()

# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;