GoogleGit

blob: ea0c778dfbcbd117c8ff47cfbdf33dcc115ed6e6 [file] [log] [blame]
  1. # Shell class for a test, inherited by all individual tests
  2. #
  3. # Methods:
  4. # __init__ initialise
  5. # initialize run once for each job
  6. # setup run once for each new version of the test installed
  7. # run run the test (wrapped by job.run_test())
  8. #
  9. # Data:
  10. # job backreference to the job this test instance is part of
  11. # outputdir eg. results/<job>/<testname.tag>
  12. # resultsdir eg. results/<job>/<testname.tag>/results
  13. # profdir eg. results/<job>/<testname.tag>/profiling
  14. # debugdir eg. results/<job>/<testname.tag>/debug
  15. # bindir eg. tests/<test>
  16. # src eg. tests/<test>/src
  17. # tmpdir eg. tmp/<tempname>_<testname.tag>
  18. #pylint: disable-msg=C0111
  19. import fcntl, json, os, re, sys, shutil, stat, tempfile, time, traceback
  20. import logging
  21. from autotest_lib.client.bin import utils
  22. from autotest_lib.client.common_lib import error
class base_test(object):
    """Shell class for a test, inherited by all individual tests.

    Key directories (see also the file header comment):
      outputdir   results/<job>/<testname.tag>
      resultsdir  results/<job>/<testname.tag>/results
      profdir     results/<job>/<testname.tag>/profiling
      debugdir    results/<job>/<testname.tag>/debug
      bindir      tests/<test>
      srcdir      tests/<test>/src
      tmpdir      tmp/<tempname>_<testname.tag>
    """
    # Class-level defaults; subclasses may override.
    # preserve_srcdir: if True, utils.update_version() keeps srcdir contents.
    preserve_srcdir = False
    # network_destabilizing: if True, _exec() disables NETWORK warnings
    # around the test run.
    network_destabilizing = False

    def __init__(self, job, bindir, outputdir):
        """Initialise the test instance and create its output directories.

        @param job: backreference to the job this test instance is part of.
        @param bindir: directory containing the test, eg. tests/<test>.
        @param outputdir: results/<job>/<testname.tag>; must already exist,
                the results/profiling/debug subdirectories must not.
        """
        self.job = job
        self.pkgmgr = job.pkgmgr
        self.autodir = job.autodir

        self.outputdir = outputdir
        self.tagged_testname = os.path.basename(self.outputdir)
        self.resultsdir = os.path.join(self.outputdir, 'results')
        os.mkdir(self.resultsdir)
        self.profdir = os.path.join(self.outputdir, 'profiling')
        os.mkdir(self.profdir)
        self.debugdir = os.path.join(self.outputdir, 'debug')
        os.mkdir(self.debugdir)
        # TODO(ericli): figure out how autotest crash handler work with cros
        # Once this is re-enabled import getpass. crosbug.com/31232
        # crash handler, we should restore it in near term.
        # if getpass.getuser() == 'root':
        #     self.configure_crash_handler()
        # else:
        self.crash_handling_enabled = False
        self.bindir = bindir
        self.srcdir = os.path.join(self.bindir, 'src')
        # tmpdir is created fresh per instance under the job's tmpdir.
        self.tmpdir = tempfile.mkdtemp("_" + self.tagged_testname,
                                       dir=job.tmpdir)
        # _keyvals: list of {'attr': dict, 'perf': dict} per iteration.
        self._keyvals = []
        # _new_keyval: set when a keyval was written since the last
        # analyze_perf_constraints() call.
        self._new_keyval = False
        self.failed_constraints = []
        self.iteration = 0
        self.before_iteration_hooks = []
        self.after_iteration_hooks = []

        # Flag to indicate if the test has succeeded or failed.
        self.success = False

    def configure_crash_handler(self):
        """Stub; crash handler configuration is currently disabled (crosbug.com/31232)."""
        pass

    def crash_handler_report(self):
        """Stub; crash handler reporting is currently disabled."""
        pass

    def assert_(self, expr, msg='Assertion failed.'):
        """Raise error.TestError(msg) unless expr is true."""
        if not expr:
            raise error.TestError(msg)

    def write_test_keyval(self, attr_dict):
        """Write test-level attribute keyvals into outputdir."""
        utils.write_keyval(self.outputdir, attr_dict,
                           tap_report=self.job._tap)

    @staticmethod
    def _append_type_to_keys(dictionary, typename):
        """Return a copy of dictionary with every key rewritten to "key{typename}"."""
        new_dict = {}
        for key, value in dictionary.iteritems():
            new_key = "%s{%s}" % (key, typename)
            new_dict[new_key] = value
        return new_dict

    def output_perf_value(self, description, value, units=None,
                          higher_is_better=None, graph=None, replacement='_'):
        """
        Records a measured performance value in an output file.

        The output file will subsequently be parsed by the TKO parser to have
        the information inserted into the results database.

        @param description: A string describing the measured perf value. Must
                be maximum length 256, and may only contain letters, numbers,
                periods, dashes, and underscores.  For example:
                "page_load_time", "scrolling-frame-rate".
        @param value: A number representing the measured perf value, or a list
                of measured values if a test takes multiple measurements.
                Measured perf values can be either ints or floats.
        @param units: A string describing the units associated with the
                measured perf value. Must be maximum length 32, and may only
                contain letters, numbers, periods, dashes, and underscores.
                For example: "msec", "fps", "score", "runs_per_second".
        @param higher_is_better: A boolean indicating whether or not a "higher"
                measured perf value is considered to be better. If False, it is
                assumed that a "lower" measured value is considered to be
                better. This impacts dashboard plotting and email notification.
                Pure autotests are expected to specify either True or False!
                This value can be set to "None" to indicate that the perf
                dashboard should apply the rules encoded via Chromium
                unit-info.json. This is only used for tracking Chromium based
                tests (in particular telemetry).
        @param graph: A string indicating the name of the graph on which
                the perf value will be subsequently displayed on the chrome perf
                dashboard. This allows multiple metrics be grouped together on
                the same graphs. Defaults to None, indicating that the perf
                value should be displayed individually on a separate graph.
        @param replacement: string to replace illegal characters in
                |description| and |units| with.
        """
        if len(description) > 256:
            raise ValueError('The description must be at most 256 characters.')
        # NOTE(review): this raises TypeError when units is None (the
        # default) — looks like it should be guarded with "if units and
        # ..."; confirm against callers before relying on units=None.
        if len(units) > 32:
            raise ValueError('The units must be at most 32 characters.')

        # If |replacement| is legal replace illegal characters with it.
        string_regex = re.compile(r'[^-\.\w]')
        if replacement is None or re.search(string_regex, replacement):
            raise ValueError('Invalid replacement string to mask illegal '
                             'characters. May only contain letters, numbers, '
                             'periods, dashes, and underscores. '
                             'replacement: %s' % replacement)
        description = re.sub(string_regex, replacement, description)
        units = re.sub(string_regex, replacement, units) if units else None

        entry = {
            'description': description,
            'value': value,
            'units': units,
            'higher_is_better': higher_is_better,
            'graph': graph
        }

        # One JSON object per line, appended so multiple measurements
        # accumulate across calls.
        output_path = os.path.join(self.resultsdir, 'perf_measurements')
        with open(output_path, 'a') as fp:
            fp.write(json.dumps(entry, sort_keys=True) + '\n')

    def write_perf_keyval(self, perf_dict):
        """Write performance-only keyvals for the current iteration."""
        self.write_iteration_keyval({}, perf_dict,
                                    tap_report=self.job._tap)

    def write_attr_keyval(self, attr_dict):
        """Write attribute-only keyvals for the current iteration."""
        self.write_iteration_keyval(attr_dict, {},
                                    tap_report=self.job._tap)

    def write_iteration_keyval(self, attr_dict, perf_dict, tap_report=None):
        """Write attr and perf keyvals for one iteration into resultsdir.

        Also records the raw dicts on self._keyvals so that
        analyze_perf_constraints() can evaluate against the latest iteration.
        """
        # append the dictionaries before they have the {perf} and {attr} added
        self._keyvals.append({'attr':attr_dict, 'perf':perf_dict})
        self._new_keyval = True

        if attr_dict:
            attr_dict = self._append_type_to_keys(attr_dict, "attr")
            utils.write_keyval(self.resultsdir, attr_dict, type_tag="attr",
                               tap_report=tap_report)

        if perf_dict:
            perf_dict = self._append_type_to_keys(perf_dict, "perf")
            utils.write_keyval(self.resultsdir, perf_dict, type_tag="perf",
                               tap_report=tap_report)

        # Blank line separates iterations in the keyval file (py2 print
        # chevron; the file object is closed by refcounting).
        keyval_path = os.path.join(self.resultsdir, "keyval")
        print >> open(keyval_path, "a"), ""

    def analyze_perf_constraints(self, constraints):
        """Evaluate each constraint expression against the latest perf keyvals.

        Failures are accumulated per-iteration on self.failed_constraints and
        reported later by process_failed_constraints().
        """
        if not self._new_keyval:
            return

        # create a dict from the keyvals suitable as an environment for eval
        keyval_env = self._keyvals[-1]['perf'].copy()
        # Disable builtins so constraint expressions can only reference
        # the perf keyvals (constraints come from the control file).
        keyval_env['__builtins__'] = None
        self._new_keyval = False
        failures = []

        # evaluate each constraint using the current keyvals
        for constraint in constraints:
            logging.info('___________________ constraint = %s', constraint)
            logging.info('___________________ keyvals = %s', keyval_env)

            try:
                if not eval(constraint, keyval_env):
                    failures.append('%s: constraint was not met' % constraint)
            except:
                failures.append('could not evaluate constraint: %s'
                                % constraint)

        # keep track of the errors for each iteration
        self.failed_constraints.append(failures)

    def process_failed_constraints(self):
        """Raise error.TestFail summarizing all failed constraints, if any."""
        msg = ''
        for i, failures in enumerate(self.failed_constraints):
            if failures:
                msg += 'iteration %d:%s ' % (i, ','.join(failures))

        if msg:
            raise error.TestFail(msg)

    def register_before_iteration_hook(self, iteration_hook):
        """
        This is how we expect test writers to register a before_iteration_hook.
        This adds the method to the list of hooks which are executed
        before each iteration.

        @param iteration_hook: Method to run before each iteration. A valid
                               hook accepts a single argument which is the
                               test object.
        """
        self.before_iteration_hooks.append(iteration_hook)

    def register_after_iteration_hook(self, iteration_hook):
        """
        This is how we expect test writers to register an after_iteration_hook.
        This adds the method to the list of hooks which are executed
        after each iteration.

        @param iteration_hook: Method to run after each iteration. A valid
                               hook accepts a single argument which is the
                               test object.
        """
        self.after_iteration_hooks.append(iteration_hook)

    def initialize(self):
        """Run once for each job; override in subclasses as needed."""
        pass

    def setup(self):
        """Run once for each new version of the test installed; override as needed."""
        pass

    def warmup(self, *args, **dargs):
        """Optional warm-up phase, called before run_once; override as needed."""
        pass

    def drop_caches_between_iterations(self):
        """Drop OS caches between iterations if the job requests it."""
        if self.job.drop_caches_between_iterations:
            utils.drop_caches()

    def _call_run_once_with_retry(self, constraints, profile_only,
                                  postprocess_profiled_run, args, dargs):
        """Thin wrapper around _call_run_once that retries unsuccessful tests.

        If the job object's attribute test_retry is > 0 retry any tests that
        ran unsuccessfully X times.
        *Note this does not competely re-initialize the test, it only
        re-executes code once all the initial job set up (packages,
        sysinfo, etc) is complete.
        """
        if self.job.test_retry != 0:
            logging.info('Test will be retried a maximum of %d times',
                         self.job.test_retry)

        max_runs = self.job.test_retry
        for retry_run in xrange(0, max_runs+1):
            try:
                self._call_run_once(constraints, profile_only,
                                    postprocess_profiled_run, args, dargs)
                break
            # Only TestFailRetry triggers a retry; other exceptions propagate.
            except error.TestFailRetry as err:
                if retry_run == max_runs:
                    raise
                self.job.record('INFO', None, None, 'Run %s failed with %s' % (
                        retry_run, err))
        if retry_run > 0:
            self.write_test_keyval({'test_retries_before_success': retry_run})

    def _call_run_once(self, constraints, profile_only,
                       postprocess_profiled_run, args, dargs):
        """Run a single iteration: hooks, run_once (or profiled run), postprocess."""
        self.drop_caches_between_iterations()

        # execute iteration hooks
        for hook in self.before_iteration_hooks:
            hook(self)

        try:
            if profile_only:
                if not self.job.profilers.present():
                    self.job.record('WARN', None, None,
                                    'No profilers have been added but '
                                    'profile_only is set - nothing '
                                    'will be run')
                self.run_once_profiling(postprocess_profiled_run,
                                        *args, **dargs)
            else:
                self.before_run_once()
                self.run_once(*args, **dargs)
                self.after_run_once()

            self.postprocess_iteration()
            self.analyze_perf_constraints(constraints)
        # After-iteration hooks run even when the iteration raised.
        finally:
            for hook in self.after_iteration_hooks:
                hook(self)

    def execute(self, iterations=None, test_length=None, profile_only=None,
                _get_time=time.time, postprocess_profiled_run=None,
                constraints=(), *args, **dargs):
        """
        This is the basic execute method for the tests inherited from base_test.
        If you want to implement a benchmark test, it's better to implement
        the run_once function, to cope with the profiling infrastructure. For
        other tests, you can just override the default implementation.

        @param test_length: The minimum test length in seconds. We'll run the
            run_once function for a number of times large enough to cover the
            minimum test length.

        @param iterations: A number of iterations that we'll run the run_once
            function. This parameter is incompatible with test_length and will
            be silently ignored if you specify both.

        @param profile_only: If true run X iterations with profilers enabled.
            If false run X iterations and one with profiling if profiles are
            enabled. If None, default to the value of job.default_profile_only.

        @param _get_time: [time.time] Used for unit test time injection.

        @param postprocess_profiled_run: Run the postprocessing for the
            profiled run.
        """

        # For our special class of tests, the benchmarks, we don't want
        # profilers to run during the test iterations. Let's reserve only
        # the last iteration for profiling, if needed. So let's stop
        # all profilers if they are present and active.
        profilers = self.job.profilers
        if profilers.active():
            profilers.stop(self)
        if profile_only is None:
            profile_only = self.job.default_profile_only
        # If the user called this test in an odd way (specified both iterations
        # and test_length), let's warn them.
        if iterations and test_length:
            logging.debug('Iterations parameter ignored (timed execution)')
        if test_length:
            # Timed mode: keep running iterations until test_length seconds
            # have elapsed.
            test_start = _get_time()
            time_elapsed = 0
            timed_counter = 0
            logging.debug('Test started. Specified %d s as the minimum test '
                          'length', test_length)
            while time_elapsed < test_length:
                timed_counter = timed_counter + 1
                if time_elapsed == 0:
                    logging.debug('Executing iteration %d', timed_counter)
                elif time_elapsed > 0:
                    logging.debug('Executing iteration %d, time_elapsed %d s',
                                  timed_counter, time_elapsed)
                self._call_run_once_with_retry(constraints, profile_only,
                                               postprocess_profiled_run, args,
                                               dargs)
                test_iteration_finish = _get_time()
                time_elapsed = test_iteration_finish - test_start
            logging.debug('Test finished after %d iterations, '
                          'time elapsed: %d s', timed_counter, time_elapsed)
        else:
            if iterations is None:
                iterations = 1
            if iterations > 1:
                logging.debug('Test started. Specified %d iterations',
                              iterations)
            # self.iteration is deliberately the loop variable so hooks and
            # keyval writers see the current 1-based iteration number.
            for self.iteration in xrange(1, iterations + 1):
                if iterations > 1:
                    logging.debug('Executing iteration %d of %d',
                                  self.iteration, iterations)
                self._call_run_once_with_retry(constraints, profile_only,
                                               postprocess_profiled_run, args,
                                               dargs)

        if not profile_only:
            # One extra (profiled) iteration after the normal ones.
            self.iteration += 1
            self.run_once_profiling(postprocess_profiled_run, *args, **dargs)

        # Do any postprocessing, normally extracting performance keyvals, etc
        self.postprocess()
        self.process_failed_constraints()

    def run_once_profiling(self, postprocess_profiled_run, *args, **dargs):
        """Run one iteration with profilers started, if any are present."""
        profilers = self.job.profilers
        # Do a profiling run if necessary
        if profilers.present():
            self.drop_caches_between_iterations()
            profilers.before_start(self)

            self.before_run_once()
            profilers.start(self)
            logging.debug('Profilers present. Profiling run started')

            try:
                self.run_once(*args, **dargs)

                # Priority to the run_once() argument over the attribute.
                postprocess_attribute = getattr(self,
                                                'postprocess_profiled_run',
                                                False)

                if (postprocess_profiled_run or
                    (postprocess_profiled_run is None and
                     postprocess_attribute)):
                    self.postprocess_iteration()

            finally:
                profilers.stop(self)
                profilers.report(self)

            self.after_run_once()

    def postprocess(self):
        """Run after all iterations; override to extract performance keyvals."""
        pass

    def postprocess_iteration(self):
        """Run after each iteration; override to record per-iteration keyvals."""
        pass

    def cleanup(self):
        """Run after execute(); override to undo initialize()/run side effects."""
        pass

    def before_run_once(self):
        """
        Override in tests that need it, will be called before any run_once()
        call including the profiling run (when it's called before starting
        the profilers).
        """
        pass

    def after_run_once(self):
        """
        Called after every run_once (including from a profiled run when it's
        called after stopping the profilers).
        """
        pass

    @staticmethod
    def _make_writable_to_others(directory):
        """Add read/write/execute permission for 'others' on directory."""
        mode = os.stat(directory).st_mode
        mode = mode | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH
        os.chmod(directory, mode)

    def _exec(self, args, dargs):
        """Drive the full test life cycle: keyvals, setup, warmup, execute, cleanup.

        Exceptions raised by test code are logged, cleanup is attempted, and
        uncategorized errors are wrapped in error.UnhandledTestError.
        """
        self.job.logging.tee_redirect_debug_dir(self.debugdir,
                                                log_name=self.tagged_testname)
        try:
            if self.network_destabilizing:
                self.job.disable_warnings("NETWORK")

            # write out the test attributes into a keyval
            dargs   = dargs.copy()
            run_cleanup = dargs.pop('run_cleanup', self.job.run_test_cleanup)
            keyvals = dargs.pop('test_attributes', {}).copy()
            # NOTE(review): self.version is expected to be set by the test
            # machinery before _exec runs — not assigned in this class.
            keyvals['version'] = self.version
            for i, arg in enumerate(args):
                keyvals['param-%d' % i] = repr(arg)
            for name, arg in dargs.iteritems():
                keyvals['param-%s' % name] = repr(arg)
            self.write_test_keyval(keyvals)

            _validate_args(args, dargs, self.initialize, self.setup,
                           self.execute, self.cleanup)

            try:
                # Make resultsdir and tmpdir accessible to everyone. We may
                # output data to these directories as others, e.g., chronos.
                self._make_writable_to_others(self.tmpdir)
                self._make_writable_to_others(self.resultsdir)

                # Initialize:
                _cherry_pick_call(self.initialize, *args, **dargs)

                # Serialize setup across tests sharing the job tmpdir via an
                # exclusive flock.
                lockfile = open(os.path.join(self.job.tmpdir, '.testlock'), 'w')
                try:
                    fcntl.flock(lockfile, fcntl.LOCK_EX)
                    # Setup: (compile and install the test, if needed)
                    p_args, p_dargs = _cherry_pick_args(self.setup, args, dargs)
                    utils.update_version(self.srcdir, self.preserve_srcdir,
                                         self.version, self.setup,
                                         *p_args, **p_dargs)
                finally:
                    fcntl.flock(lockfile, fcntl.LOCK_UN)
                    lockfile.close()

                # Execute:
                os.chdir(self.outputdir)

                # call self.warmup cherry picking the arguments it accepts and
                # translate exceptions if needed
                _call_test_function(_cherry_pick_call, self.warmup,
                                    *args, **dargs)

                if hasattr(self, 'run_once'):
                    p_args, p_dargs = _cherry_pick_args(self.run_once,
                                                        args, dargs)
                    # pull in any non-* and non-** args from self.execute
                    for param in _get_nonstar_args(self.execute):
                        if param in dargs:
                            p_dargs[param] = dargs[param]
                else:
                    p_args, p_dargs = _cherry_pick_args(self.execute,
                                                        args, dargs)

                _call_test_function(self.execute, *p_args, **p_dargs)
            except Exception:
                # Save the exception while we run our cleanup() before
                # reraising it, but log it to so actual time of error is known.
                exc_info = sys.exc_info()
                logging.warning('Autotest caught exception when running test:',
                                exc_info=True)

                try:
                    try:
                        if run_cleanup:
                            _cherry_pick_call(self.cleanup, *args, **dargs)
                    except Exception:
                        logging.error('Ignoring exception during cleanup() '
                                      'phase:')
                        traceback.print_exc()
                        logging.error('Now raising the earlier %s error',
                                      exc_info[0])
                    self.crash_handler_report()
                finally:
                    self.job.logging.restore()
                    try:
                        # Re-raise the original exception (py2 3-arg raise
                        # preserves the traceback).
                        raise exc_info[0], exc_info[1], exc_info[2]
                    finally:
                        # http://docs.python.org/library/sys.html#sys.exc_info
                        # Be nice and prevent a circular reference.
                        del exc_info
            else:
                try:
                    if run_cleanup:
                        _cherry_pick_call(self.cleanup, *args, **dargs)
                    self.crash_handler_report()
                finally:
                    self.job.logging.restore()
        except error.AutotestError:
            if self.network_destabilizing:
                self.job.enable_warnings("NETWORK")
            # Pass already-categorized errors on up.
            raise
        except Exception, e:
            if self.network_destabilizing:
                self.job.enable_warnings("NETWORK")
            # Anything else is an ERROR in our own code, not execute().
            raise error.UnhandledTestError(e)
        else:
            if self.network_destabilizing:
                self.job.enable_warnings("NETWORK")

    def runsubtest(self, url, *args, **dargs):
        """
        Execute another autotest test from inside the current test's scope.

        @param test: Parent test.
        @param url: Url of new test.
        @param tag: Tag added to test name.
        @param args: Args for subtest.
        @param dargs: Dictionary with args for subtest.
        @iterations: Number of subtest iterations.
        @profile_only: If true execute one profiled run.
        """
        dargs["profile_only"] = dargs.get("profile_only", False)
        # Nest the subtest's output under this test's output directory.
        test_basepath = self.outputdir[len(self.job.resultdir + "/"):]
        return self.job.run_test(url, master_testpath=test_basepath,
                                 *args, **dargs)
  489. def _get_nonstar_args(func):
  490. """Extract all the (normal) function parameter names.
  491. Given a function, returns a tuple of parameter names, specifically
  492. excluding the * and ** parameters, if the function accepts them.
  493. @param func: A callable that we want to chose arguments for.
  494. @return: A tuple of parameters accepted by the function.
  495. """
  496. return func.func_code.co_varnames[:func.func_code.co_argcount]
  497. def _cherry_pick_args(func, args, dargs):
  498. """Sanitize positional and keyword arguments before calling a function.
  499. Given a callable (func), an argument tuple and a dictionary of keyword
  500. arguments, pick only those arguments which the function is prepared to
  501. accept and return a new argument tuple and keyword argument dictionary.
  502. Args:
  503. func: A callable that we want to choose arguments for.
  504. args: A tuple of positional arguments to consider passing to func.
  505. dargs: A dictionary of keyword arguments to consider passing to func.
  506. Returns:
  507. A tuple of: (args tuple, keyword arguments dictionary)
  508. """
  509. # Cherry pick args:
  510. if func.func_code.co_flags & 0x04:
  511. # func accepts *args, so return the entire args.
  512. p_args = args
  513. else:
  514. p_args = ()
  515. # Cherry pick dargs:
  516. if func.func_code.co_flags & 0x08:
  517. # func accepts **dargs, so return the entire dargs.
  518. p_dargs = dargs
  519. else:
  520. # Only return the keyword arguments that func accepts.
  521. p_dargs = {}
  522. for param in _get_nonstar_args(func):
  523. if param in dargs:
  524. p_dargs[param] = dargs[param]
  525. return p_args, p_dargs
  526. def _cherry_pick_call(func, *args, **dargs):
  527. """Cherry picks arguments from args/dargs based on what "func" accepts
  528. and calls the function with the picked arguments."""
  529. p_args, p_dargs = _cherry_pick_args(func, args, dargs)
  530. return func(*p_args, **p_dargs)
  531. def _validate_args(args, dargs, *funcs):
  532. """Verify that arguments are appropriate for at least one callable.
  533. Given a list of callables as additional parameters, verify that
  534. the proposed keyword arguments in dargs will each be accepted by at least
  535. one of the callables.
  536. NOTE: args is currently not supported and must be empty.
  537. Args:
  538. args: A tuple of proposed positional arguments.
  539. dargs: A dictionary of proposed keyword arguments.
  540. *funcs: Callables to be searched for acceptance of args and dargs.
  541. Raises:
  542. error.AutotestError: if an arg won't be accepted by any of *funcs.
  543. """
  544. all_co_flags = 0
  545. all_varnames = ()
  546. for func in funcs:
  547. all_co_flags |= func.func_code.co_flags
  548. all_varnames += func.func_code.co_varnames[:func.func_code.co_argcount]
  549. # Check if given args belongs to at least one of the methods below.
  550. if len(args) > 0:
  551. # Current implementation doesn't allow the use of args.
  552. raise error.TestError('Unnamed arguments not accepted. Please '
  553. 'call job.run_test with named args only')
  554. # Check if given dargs belongs to at least one of the methods below.
  555. if len(dargs) > 0:
  556. if not all_co_flags & 0x08:
  557. # no func accepts *dargs, so:
  558. for param in dargs:
  559. if not param in all_varnames:
  560. raise error.AutotestError('Unknown parameter: %s' % param)
  561. def _installtest(job, url):
  562. (group, name) = job.pkgmgr.get_package_name(url, 'test')
  563. # Bail if the test is already installed
  564. group_dir = os.path.join(job.testdir, "download", group)
  565. if os.path.exists(os.path.join(group_dir, name)):
  566. return (group, name)
  567. # If the group directory is missing create it and add
  568. # an empty __init__.py so that sub-directories are
  569. # considered for import.
  570. if not os.path.exists(group_dir):
  571. os.makedirs(group_dir)
  572. f = file(os.path.join(group_dir, '__init__.py'), 'w+')
  573. f.close()
  574. logging.debug("%s: installing test url=%s", name, url)
  575. tarball = os.path.basename(url)
  576. tarball_path = os.path.join(group_dir, tarball)
  577. test_dir = os.path.join(group_dir, name)
  578. job.pkgmgr.fetch_pkg(tarball, tarball_path,
  579. repo_url = os.path.dirname(url))
  580. # Create the directory for the test
  581. if not os.path.exists(test_dir):
  582. os.mkdir(os.path.join(group_dir, name))
  583. job.pkgmgr.untar_pkg(tarball_path, test_dir)
  584. os.remove(tarball_path)
  585. # For this 'sub-object' to be importable via the name
  586. # 'group.name' we need to provide an __init__.py,
  587. # so link the main entry point to this.
  588. os.symlink(name + '.py', os.path.join(group_dir, name,
  589. '__init__.py'))
  590. # The test is now installed.
  591. return (group, name)
  592. def _call_test_function(func, *args, **dargs):
  593. """Calls a test function and translates exceptions so that errors
  594. inside test code are considered test failures."""
  595. try:
  596. return func(*args, **dargs)
  597. except error.AutotestError:
  598. raise
  599. except Exception, e:
  600. # Other exceptions must be treated as a FAIL when
  601. # raised during the test functions
  602. raise error.UnhandledTestFail(e)
def runtest(job, url, tag, args, dargs,
            local_namespace={}, global_namespace={},
            before_test_hook=None, after_test_hook=None,
            before_iteration_hook=None, after_iteration_hook=None):
    """Locate (downloading if needed), instantiate and run a test.

    @param job: the job object this test runs under.
    @param url: plain test name, or a '.tar.bz2' url to download and install.
    @param tag: optional tag appended to the test's output directory name.
    @param args: positional args forwarded to the test (must be empty,
            see _validate_args).
    @param dargs: keyword args forwarded to the test; 'master_testpath'
            is popped here to nest subtest output.
    @param local_namespace: namespace for the import/instantiation exec
            (copied immediately, so the mutable default is safe).
    @param global_namespace: namespace the instantiated test is bound
            into (copied immediately as well).
    @param before_test_hook: callable run with the test before _exec.
    @param after_test_hook: callable run with the test after _exec.
    @param before_iteration_hook: registered to run before each iteration.
    @param after_iteration_hook: registered to run after each iteration.
    """
    local_namespace = local_namespace.copy()
    global_namespace = global_namespace.copy()
    # if this is not a plain test name then download and install the
    # specified test
    if url.endswith('.tar.bz2'):
        (testgroup, testname) = _installtest(job, url)
        bindir = os.path.join(job.testdir, 'download', testgroup, testname)
        importdir = os.path.join(job.testdir, 'download')
        modulename = '%s.%s' % (re.sub('/', '.', testgroup), testname)
        classname = '%s.%s' % (modulename, testname)
        path = testname
    else:
        # If the test is local, it may be under either testdir or site_testdir.
        # Tests in site_testdir override tests defined in testdir
        testname = path = url
        testgroup = ''
        path = re.sub(':', '/', testname)
        modulename = os.path.basename(path)
        classname = '%s.%s' % (modulename, modulename)

        # Try installing the test package
        # The job object may be either a server side job or a client side job.
        # 'install_pkg' method will be present only if it's a client side job.
        if hasattr(job, 'install_pkg'):
            try:
                bindir = os.path.join(job.testdir, testname)
                job.install_pkg(testname, 'test', bindir)
            except error.PackageInstallError:
                # continue as a fall back mechanism and see if the test code
                # already exists on the machine
                pass

        bindir = None
        # Later directories win: site_testdir overrides testdir.
        for dir in [job.testdir, getattr(job, 'site_testdir', None)]:
            if dir is not None and os.path.exists(os.path.join(dir, path)):
                importdir = bindir = os.path.join(dir, path)
        if not bindir:
            raise error.TestError(testname + ': test does not exist')

    subdir = os.path.join(dargs.pop('master_testpath', ""), testname)
    outputdir = os.path.join(job.resultdir, subdir)
    if tag:
        outputdir += '.' + tag

    local_namespace['job'] = job
    local_namespace['bindir'] = bindir
    local_namespace['outputdir'] = outputdir

    # Import the test module and instantiate the test class inside the
    # provided namespaces; importdir is temporarily put first on sys.path.
    sys.path.insert(0, importdir)
    try:
        exec ('import %s' % modulename, local_namespace, global_namespace)
        exec ("mytest = %s(job, bindir, outputdir)" % classname,
              local_namespace, global_namespace)
    finally:
        sys.path.pop(0)

    pwd = os.getcwd()
    os.chdir(outputdir)

    try:
        mytest = global_namespace['mytest']
        mytest.success = False
        if before_test_hook:
            before_test_hook(mytest)

        # we use the register iteration hooks methods to register the passed
        # in hooks
        if before_iteration_hook:
            mytest.register_before_iteration_hook(before_iteration_hook)
        if after_iteration_hook:
            mytest.register_after_iteration_hook(after_iteration_hook)
        mytest._exec(args, dargs)
        mytest.success = True
    finally:
        # Always restore cwd, run the after hook and remove the tmpdir,
        # whether or not the test raised.
        os.chdir(pwd)
        if after_test_hook:
            after_test_hook(mytest)
        shutil.rmtree(mytest.tmpdir, ignore_errors=True)