| # Copyright 2018 The Chromium Authors. All rights reserved. |
| # Use of this source code is governed by a BSD-style license that can be |
| # found in the LICENSE file. |
| |
| # Script to automate updating existing WPR benchmarks from live versions of the |
| # sites. Only supported on Mac/Linux. |
| |
| import argparse |
| import datetime |
| import json |
| import os |
| import random |
| import re |
| import shutil |
| import subprocess |
| import tempfile |
| import time |
| import webbrowser |
| |
| from core import cli_helpers |
| from core.services import luci_auth |
| from core.services import pinpoint_service |
| from core.services import request |
| |
| |
| SRC_ROOT = os.path.abspath( |
| os.path.join(os.path.dirname(__file__), '..', '..', '..', '..')) |
| RESULTS2JSON = os.path.join( |
| SRC_ROOT, 'third_party', 'catapult', 'tracing', 'bin', 'results2json') |
| HISTOGRAM2CSV = os.path.join( |
| SRC_ROOT, 'third_party', 'catapult', 'tracing', 'bin', 'histograms2csv') |
| RUN_BENCHMARK = os.path.join(SRC_ROOT, 'tools', 'perf', 'run_benchmark') |
| DATA_DIR = os.path.join(SRC_ROOT, 'tools', 'perf', 'page_sets', 'data') |
| RECORD_WPR = os.path.join(SRC_ROOT, 'tools', 'perf', 'record_wpr') |
| DEFAULT_REVIEWERS = ['perezju@chromium.org'] |
| |
| |
def _GetBranchName():
  """Returns the current git branch name ('HEAD' when in detached state).

  The output of `git rev-parse` ends with a newline; strip it so callers
  can compare the result against plain branch names like 'HEAD'.
  """
  return subprocess.check_output(
      ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
| |
| |
def _OpenBrowser(url):
  """Opens |url| in the default browser with its stdout/stderr silenced.

  Duplicates fds 1 and 2, points them at /dev/null while the browser is
  being launched, then restores them. Based on
  https://stackoverflow.com/a/2323563.
  """
  stdout_backup = os.dup(1)
  stderr_backup = os.dup(2)
  os.close(1)
  os.close(2)
  # Lowest available fds (1 and 2) now point at /dev/null.
  os.open(os.devnull, os.O_RDWR)
  try:
    webbrowser.open(url)
  finally:
    # Restore the original stdout/stderr even if opening the browser failed.
    os.dup2(stdout_backup, 1)
    os.dup2(stderr_backup, 2)
| |
| |
def _SendCLForReview(comment):
  """Publishes |comment| on the current branch's CL, sending it for review."""
  command = ['git', 'cl', 'comments', '--publish', '--add-comment', comment]
  subprocess.check_call(command)
| |
| |
def _EnsureEditor():
  """Ensures the EDITOR environment variable is set, prompting if missing."""
  if 'EDITOR' not in os.environ:
    os.environ['EDITOR'] = cli_helpers.Prompt(
        'Looks like EDITOR environment variable is not defined. Please enter '
        'the command to view logs: ')
| |
| |
| def _OpenEditor(filepath): |
| subprocess.check_call([os.environ['EDITOR'], filepath]) |
| |
| |
class WprUpdater(object):
  """Automates updating the WPR recording for a single system health story.

  Wraps the whole workflow: running the story against live sites, recording
  a fresh WPR archive, replaying it, uploading the archive to Google Storage,
  creating a CL and triggering Pinpoint tryjobs. Individual stages are
  exposed as public methods; AutoRun() chains them interactively.
  """

  def __init__(self, args):
    self.story = args.story
    # TODO(sergiyb): Implement a method that auto-detects a single connected
    # device when device_id is set to 'auto'. This should take advantage of the
    # method adb_wrapper.Devices in catapult repo.
    self.device_id = args.device_id
    self.repeat = args.repeat
    self.binary = args.binary
    # Scratch directory where all logs and intermediate files are collected.
    self.output_dir = tempfile.mkdtemp()
    self.bug_id = args.bug_id
    self.reviewers = args.reviewers or DEFAULT_REVIEWERS

  def _PrepareEnv(self):
    """Returns a copy of os.environ with the locale pinned to en_US.UTF-8."""
    # Enforce the same local settings for recording and replays on the bots.
    env = os.environ.copy()
    env['LC_ALL'] = 'en_US.UTF-8'
    return env

  def _Run(self, command, ok_fail=False):
    """Runs |command| via cli_helpers.Run using the canonical environment."""
    return cli_helpers.Run(command, ok_fail=ok_fail, env=self._PrepareEnv())

  def _CheckLog(self, command, log_name):
    """Runs |command| and captures its output in a timestamped log file.

    This is a wrapper around cli_helpers.CheckLog that adds timestamp to the
    log filename and substitutes placeholders such as {src}, {story},
    {device_id} in the command.

    Returns:
      Path to the log file capturing the command output.
    """
    story_regex = '^%s$' % re.escape(self.story)
    command = [
        c.format(src=SRC_ROOT, story=story_regex, device_id=self.device_id)
        for c in command]
    timestamp = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
    log_path = os.path.join(self.output_dir, '%s_%s' % (log_name, timestamp))
    cli_helpers.CheckLog(command, log_path=log_path, env=self._PrepareEnv())
    return log_path

  def _IsDesktop(self):
    """True when no device id is given, i.e. we run in desktop mode."""
    return self.device_id is None

  def _ExistingWpr(self):
    """Returns a path to the current WPR archive for specified story."""
    config_file = os.path.join(DATA_DIR, 'system_health_%s.json' % (
        'desktop' if self._IsDesktop() else 'mobile'))
    with open(config_file) as f:
      config = json.load(f)
    archives = config['archives']
    archive = archives.get(self.story)
    if archive is None:
      return None
    archive = archive['DEFAULT']
    return os.path.join(DATA_DIR, archive)

  def _DeleteExistingWpr(self):
    """Deletes current WPR archive and its .sha1 artifact, if present."""
    archive = self._ExistingWpr()
    if archive is None:
      return
    cli_helpers.Info('Deleting WPR: {archive}', archive=archive)
    if os.path.exists(archive):
      os.remove(archive)
    archive_sha1 = archive + '.sha1'
    if os.path.exists(archive_sha1):
      os.remove(archive_sha1)

  def _ExtractLogFile(self, out_file):
    """Copies the Chrome log next to the benchmark log to preserve it.

    This method extracts the name of the chrome log file from the
    run_benchmark output log and copies it to the temporary directory next to
    the log file, which ensures that it is not overridden by the next run.
    """
    try:
      line = subprocess.check_output(
          ['grep', 'Chrome log file will be saved in', out_file])
      os.rename(line.split()[-1], out_file + '.chrome.log')
    except subprocess.CalledProcessError as e:
      cli_helpers.Error('Could not find log file: {error}', error=e)

  def _ExtractResultsFile(self, out_file):
    """Moves results.html next to the log file so the next run can't clobber it."""
    results_file = out_file + '.results.html'
    os.rename(os.path.join(self.output_dir, 'results.html'), results_file)

  def _BrowserArgs(self):
    """Generates args to be passed to RUN_BENCHMARK and UPDATE_WPR scripts."""
    if self.binary:
      return [
        '--browser-executable=%s' % self.binary,
        '--browser=exact',
      ]
    elif self._IsDesktop():
      return ['--browser=system']
    else:
      return ['--browser=android-system-chrome']

  def _RunSystemHealthMemoryBenchmark(self, log_name, live=False):
    """Runs the system health memory benchmark for the configured story.

    Args:
      log_name: Prefix for the produced log file name.
      live: Whether to run against live sites instead of the WPR recording.

    Returns:
      Path to the benchmark output log.
    """
    args = [RUN_BENCHMARK, 'run'] + self._BrowserArgs()

    if self._IsDesktop():
      args.append('system_health.memory_desktop')
    else:
      # Must be append, not extend: extend() with a string would add every
      # character as a separate command-line argument.
      args.append('system_health.memory_mobile')

    args.extend([
        '--output-format=html', '--show-stdout',
        '--reset-results', '--story-filter={story}',
        '--browser-logging-verbosity=verbose',
        '--pageset-repeat=%s' % self.repeat,
        '--output-dir', self.output_dir])
    if live:
      args.append('--use-live-sites')
    out_file = self._CheckLog(args, log_name=log_name)
    self._ExtractResultsFile(out_file)
    self._ExtractLogFile(out_file)
    return out_file

  def _PrintResultsHTMLInfo(self, out_file):
    """Converts results.html to CSV and prints the console error metrics."""
    results_file = out_file + '.results.html'
    histogram_json = out_file + '.hist.json'
    histogram_csv = out_file + '.hist.csv'

    self._Run([RESULTS2JSON, results_file, histogram_json])
    self._Run([HISTOGRAM2CSV, histogram_json, histogram_csv])

    cli_helpers.Info('Metrics results: file://{path}', path=results_file)
    names = set([
        'console:error:network',
        'console:error:js',
        'console:error:all',
        'console:error:security'])
    with open(histogram_csv) as f:
      for line in f.readlines():
        line = line.split(',')
        if line[0] in names:
          cli_helpers.Info('    %-26s%s' % ('[%s]:' % line[0], line[2]))

  def _PrintRunInfo(self, out_file, results_details=True):
    """Prints a summary of a benchmark run: metrics and console error stats.

    Args:
      out_file: Path to the benchmark output log.
      results_details: Whether to also parse and print results.html metrics.
    """
    try:
      if results_details:
        self._PrintResultsHTMLInfo(out_file)
    except Exception as e:
      cli_helpers.Error('Could not print results.html tests: %s' % e)

    def shell(cmd):
      # Runs a shell pipeline and returns its output without the trailing
      # newline.
      return subprocess.check_output(cmd, shell=True).rstrip()

    def statsFor(name, filters='wc -l'):
      # Counts (or otherwise aggregates via |filters|) DevTools console lines
      # of category |name| in the output log and prints the result.
      cmd = 'grep "DevTools console .%s." "%s"' % (name, out_file)
      cmd += ' | ' + filters
      output = shell(cmd) or '0'
      if len(output) > 7:
        # Long multi-line output gets printed indented on its own lines.
        cli_helpers.Info('    %-26s%s' % ('[%s]:' % name, cmd))
        cli_helpers.Info('      ' + output.replace('\n', '\n      '))
      else:
        cli_helpers.Info('    %-16s%-8s %s' % ('[%s]:' % name, output, cmd))

    cli_helpers.Info('Stdout/Stderr Log: %s' % out_file)
    cli_helpers.Info('Chrome Log: %s.chrome.log' % out_file)
    cli_helpers.Info(
        '    Total output:   %s' %
        subprocess.check_output(['wc', '-l', out_file]).rstrip())
    cli_helpers.Info(
        '    Total Console:  %s' %
        shell('grep "DevTools console" "%s" | wc -l' % out_file))
    statsFor('security')
    statsFor('network', 'cut -d " " -f 20- | sort | uniq -c | sort -nr')

    chrome_log = '%s.chrome.log' % out_file
    if os.path.isfile(chrome_log):
      cmd = 'grep "Uncaught .*Error" "%s"' % chrome_log
      count = shell(cmd + '| wc -l')
      cli_helpers.Info('    %-16s%-8s %s' % ('[javascript]:', count, cmd))

  def _UploadArchiveToGoogleStorage(self, archive):
    """Uploads specified WPR archive to the GS."""
    cli_helpers.Run([
      'upload_to_google_storage.py', '--bucket=chrome-partner-telemetry',
      archive])

  def _GitAddArtifactHash(self, archive):
    """Stages changes into SHA1 file for commit.

    Returns:
      Whether the .sha1 artifact existed and was staged successfully.
    """
    archive_sha1 = archive + '.sha1'
    if not os.path.exists(archive_sha1):
      cli_helpers.Error(
          'Could not find upload artifact: {sha}', sha=archive_sha1)
      return False
    cli_helpers.Run(['git', 'add', archive_sha1])
    return True

  def _GetBranchIssueUrl(self):
    """Returns the Gerrit issue URL associated with the current branch."""
    output_file = os.path.join(self.output_dir, 'git_cl_issue.json')
    subprocess.check_output(['git', 'cl', 'issue', '--json', output_file])
    with open(output_file, 'r') as output_fd:
      return json.load(output_fd)['issue_url']

  def _CreateBranch(self):
    """Creates a new randomly-suffixed branch named after the story."""
    sanitized_story = re.sub(r'[^A-Za-z0-9-_.]', r'-', self.story)
    subprocess.check_call([
      'git', 'new-branch',
      'update-wpr-%s-%d' % (sanitized_story, random.randint(0, 10000)),
    ])

  def _FilterLogForDiff(self, log_filename):
    """Removes unimportant details from console logs for cleaner diffs.

    For example, log line from file `log_filename`

      2018-02-01 22:23:22,123 operation abcdef01-abcd-abcd-0123-abcdef012345
      from /tmp/tmpX34v/results.html took 22145ms when accessed via
      https://127.0.0.1:1233/endpoint

    would become

      <timestamp> operation <guid> from /tmp/tmp<random>/results.html took
      <duration> when accessed via https://127.0.0.1:<port>/endpoint

    Returns:
      Path to the filtered log.
    """
    with open(log_filename) as src, tempfile.NamedTemporaryFile(
        suffix='diff', dir=self.output_dir, delete=False) as dest:
      for line in src:
        # Remove timestamps.
        line = re.sub(
            r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}', r'<timestamp>', line)
        # Remove GUIDs.
        line = re.sub(
            r'[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}', r'<guid>', line)
        # Remove random letters in paths to temp dirs and files.
        line = re.sub(r'(/tmp/tmp)[^/\s]+', r'\1<random>', line)
        # Remove random port in localhost URLs.
        line = re.sub(r'(://127.0.0.1:)\d+', r'\1<port>', line)
        # Remove random durations in ms.
        line = re.sub(r'\d+ ms', r'<duration>', line)
        dest.write(line)
      return dest.name

  def _StartPinpointJob(self, configuration):
    """Creates, starts a Pinpoint job and returns its URL.

    Returns:
      URL of the started job, or None when the job failed to start.
    """
    try:
      resp = pinpoint_service.NewJob(
          start_git_hash='HEAD',
          end_git_hash='HEAD',
          target='performance_test_suite',
          patch=self._GetBranchIssueUrl(),
          bug_id=self.bug_id or '',
          story=self.story,
          extra_test_args='--pageset-repeat=%d' % self.repeat,
          configuration=configuration,
          benchmark='system_health.common_%s' % (
            'desktop' if self._IsDesktop() else 'mobile'))
    except request.RequestError as e:
      cli_helpers.Comment(
          'Failed to start a Pinpoint job for {config} automatically:\n {err}',
          config=configuration, err=e.content)
      return

    cli_helpers.Info(
        'Started a Pinpoint job for {configuration} at {url}',
        configuration=configuration, url=resp['jobUrl'])
    return resp['jobUrl']

  def LiveRun(self):
    """Runs the story against the live sites and prints the run summary."""
    cli_helpers.Step('LIVE RUN: %s' % self.story)
    out_file = self._RunSystemHealthMemoryBenchmark(
        log_name='live', live=True)
    self._PrintRunInfo(out_file)
    return out_file

  def Cleanup(self):
    """Offers to remove the temporary directory with all collected logs."""
    if cli_helpers.Ask('Should I clean up the temp dir with logs?'):
      shutil.rmtree(self.output_dir, ignore_errors=True)
    else:
      cli_helpers.Comment(
        'No problem. All logs will remain in %s - feel free to remove that '
        'directory when done.' % self.output_dir)

  def RecordWpr(self):
    """Deletes the existing archive and records a new WPR for the story."""
    cli_helpers.Step('RECORD WPR: %s' % self.story)
    self._DeleteExistingWpr()
    args = [RECORD_WPR, '--story-filter={story}'] + self._BrowserArgs()
    if self._IsDesktop():
      args.append('desktop_system_health_story_set')
    else:
      args.extend(['--device={device_id}', 'mobile_system_health_story_set'])
    out_file = self._CheckLog(args, log_name='record')
    self._PrintRunInfo(out_file, results_details=False)

  def ReplayWpr(self):
    """Replays the story from the recording and prints the run summary."""
    cli_helpers.Step('REPLAY WPR: %s' % self.story)
    out_file = self._RunSystemHealthMemoryBenchmark(
        log_name='replay', live=False)
    self._PrintRunInfo(out_file)
    return out_file

  def UploadWpr(self):
    """Uploads the WPR archive to Google Storage and stages its SHA1 file.

    Returns:
      Whether the archive was uploaded and its SHA1 staged successfully.
    """
    cli_helpers.Step('UPLOAD WPR: %s' % self.story)
    archive = self._ExistingWpr()
    if archive is None:
      cli_helpers.Error('NO WPR FOUND, use the "record" subcommand')
      # Guard in case cli_helpers.Error does not terminate the process:
      # without an archive there is nothing to upload.
      return False
    self._UploadArchiveToGoogleStorage(archive)
    return self._GitAddArtifactHash(archive)

  def UploadCL(self, short_description=False):
    """Commits staged changes and uploads a CL for review.

    Args:
      short_description: Use a terse commit message, e.g. when updating an
        already-existing CL.

    Returns:
      Exit code of `git cl upload` (0 on success).
    """
    cli_helpers.Step('UPLOAD CL: %s' % self.story)
    if short_description:
      commit_message = 'Automated upload'
    else:
      commit_message = (
          'Add %s system health story\n\nThis CL was created automatically '
          'with tools/perf/update_wpr script' % self.story)
    if self.bug_id:
      commit_message += '\n\nBug: %s' % self.bug_id
    cli_helpers.Run(['git', 'commit', '-a', '-m', commit_message])
    commit_msg_file = os.path.join(self.output_dir, 'commit_message.tmp')
    with open(commit_msg_file, 'w') as fd:
      fd.write(commit_message)
    return cli_helpers.Run([
        'git', 'cl', 'upload',
        '--reviewers', ','.join(self.reviewers),
        '--force',  # to prevent message editor from appearing
        '--message-file', commit_msg_file], ok_fail=True)

  def StartPinpointJobs(self, configs=None):
    """Starts Pinpoint tryjobs for the given (or default) configurations.

    Returns:
      A (job_urls, failed_configs) pair: URLs of started jobs and the list
      of configurations that failed to trigger.
    """
    job_urls = []
    failed_configs = []
    if not configs:
      if self._IsDesktop():
        configs = ['linux-perf', 'win-10-perf', 'mac-10_12_laptop_low_end-perf']
      else:
        configs = ['Android Nexus5 Perf']
    for config in configs:
      job_url = self._StartPinpointJob(config)
      if not job_url:
        failed_configs.append(config)
      else:
        job_urls.append(job_url)
    return job_urls, failed_configs

  def AutoRun(self):
    """Interactively drives the whole workflow from live run to review."""
    # Let the quest begin...
    cli_helpers.Comment(
        'This script will help you update the recording of a story. It will go '
        'through the following stages, which you can also invoke manually via '
        'subcommand specified in parentheses:')
    cli_helpers.Comment('  - help create a new branch if needed')
    cli_helpers.Comment('  - run story with live network connection (live)')
    cli_helpers.Comment('  - record story (record)')
    cli_helpers.Comment('  - replay the recording (replay)')
    cli_helpers.Comment('  - upload the recording to Google Storage (upload)')
    cli_helpers.Comment(
        '  - upload CL with updated recording reference (review)')
    cli_helpers.Comment('  - trigger pinpoint tryjobs (pinpoint)')
    cli_helpers.Comment('  - post links to these jobs on the CL')
    cli_helpers.Comment(
        'Note that you can always enter prefix of the answer to any of the '
        'questions asked below, e.g. "y" for "yes" or "j" for "just-replay".')

    # TODO(sergiyb): Detect if benchmark is not implemented and try to add it
    # automatically by copying the same benchmark without :<current-year> suffix
    # and changing name of the test, name of the benchmark and the year tag.

    # Create branch if needed. Strip defensively: `git rev-parse` output ends
    # with a newline, which would otherwise defeat the 'HEAD' comparison.
    reuse_cl = False
    branch = _GetBranchName().strip()
    if branch == 'HEAD':
      cli_helpers.Comment('You are not on a branch.')
      if not cli_helpers.Ask(
          'Should script create a new branch automatically?'):
        cli_helpers.Comment(
            'Please create a new branch and start this script again')
        return
      self._CreateBranch()
    else:
      issue = self._GetBranchIssueUrl()
      if issue is not None:
        issue_message = 'with an associated issue: %s' % issue
      else:
        issue_message = 'without an associated issue'
      cli_helpers.Comment(
          'You are on a branch {branch} {issue_message}. Please commit or '
          'stash any changes unrelated to the updated story before '
          'proceeding.', branch=branch, issue_message=issue_message)
      ans = cli_helpers.Ask(
          'Should the script create a new branch automatically, reuse '
          'existing one or exit?', answers=['create', 'reuse', 'exit'],
          default='create')
      if ans == 'create':
        self._CreateBranch()
      elif ans == 'reuse':
        reuse_cl = issue is not None
      elif ans == 'exit':
        return

    # Live run.
    live_out_file = self.LiveRun()
    cli_helpers.Comment(
        'Please inspect the live run results above for any errors.')
    ans = None
    while ans != 'continue':
      ans = cli_helpers.Ask(
          'Should I continue with recording, view metric results in a browser, '
          'view stdout/stderr output or stop?',
          ['continue', 'metrics', 'output', 'stop'], default='continue')
      if ans == 'stop':
        cli_helpers.Comment(
            'Please update the story class to resolve the observed issues and '
            'then run this script again.')
        return
      elif ans == 'metrics':
        _OpenBrowser('file://%s.results.html' % live_out_file)
      elif ans == 'output':
        _OpenEditor(live_out_file)

    # Record & replay.
    action = 'record'
    while action != 'continue':
      if action == 'record':
        self.RecordWpr()
      if action in ['record', 'just-replay']:
        replay_out_file = self.ReplayWpr()
        cli_helpers.Comment(
            'Check that the console:error:all metrics above have low values '
            'and are similar to the live run above.')
      if action == 'diff':
        diff_path = os.path.join(self.output_dir, 'live_replay.diff')
        with open(diff_path, 'w') as diff_file:
          subprocess.call([
            'diff', '--color', self._FilterLogForDiff(live_out_file),
            self._FilterLogForDiff(replay_out_file)], stdout=diff_file)
        _OpenEditor(diff_path)
      if action == 'stop':
        return
      action = cli_helpers.Ask(
          'Should I record and replay again, just replay, continue with '
          'uploading CL, stop and exit, or would you prefer to see diff '
          'between live/replay console logs?',
          ['record', 'just-replay', 'continue', 'stop', 'diff'],
          default='continue')

    # Upload WPR and create a WIP CL for the new story.
    if not self.UploadWpr():
      return
    while self.UploadCL(short_description=reuse_cl) != 0:
      if not cli_helpers.Ask('Upload failed. Should I try again?'):
        return

    # Gerrit needs some time to sync its backends, hence we sleep here for 20
    # seconds. Otherwise, pinpoint app may get an answer that the CL that we've
    # just uploaded does not exist yet.
    cli_helpers.Comment(
        'Waiting 20 seconds for the Gerrit backends to sync, so that Pinpoint '
        'app can detect the newly-created CL.')
    time.sleep(20)

    # Trigger pinpoint jobs.
    configs_to_trigger = None
    job_urls = []
    while True:
      new_job_urls, configs_to_trigger = self.StartPinpointJobs(
          configs_to_trigger)
      job_urls.extend(new_job_urls)
      if not configs_to_trigger or not cli_helpers.Ask(
          'Do you want to try triggering the failed configs again?'):
        break

    if configs_to_trigger:
      if not cli_helpers.Ask(
          'Some jobs failed to trigger. Do you still want to send created '
          'CL for review?', default='no'):
        return

    # Post a link to the triggered jobs, publish CL for review and open it.
    _SendCLForReview(
        'Started the following Pinpoint jobs:\n%s' %
        '\n'.join('  - %s' % url for url in job_urls))
    cli_helpers.Comment(
        'Posted a message with Pinpoint job URLs on the CL and sent it for '
        'review. Opening the CL in a browser...')
    _OpenBrowser(self._GetBranchIssueUrl())

    # Hooray, you won! :-)
    cli_helpers.Comment(
        'Thank you, you have successfully updated the recording for %s. Please '
        'wait for LGTM and land the created CL.' % self.story)
| |
| |
def Main(argv):
  """Parses command-line arguments and dispatches to the requested stage."""
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '-s', '--story', dest='story', required=True,
      help='Benchmark story to be recorded, replayed or uploaded.')
  parser.add_argument(
      '-d', '--device-id', dest='device_id',
      help='Specify the device serial number listed by `adb devices`. When not '
           'specified, the script runs in desktop mode.')
  parser.add_argument(
      '-b', '--bug', dest='bug_id',
      help='Bug ID to be referenced on created CL')
  parser.add_argument(
      '-r', '--reviewer', action='append', dest='reviewers',
      help='Email of the reviewer(s) for the created CL.')
  parser.add_argument(
      '--pageset-repeat', type=int, default=1, dest='repeat',
      help='Number of times to repeat the entire pageset.')
  parser.add_argument(
      '--binary', default=None,
      help='Path to the Chromium/Chrome binary relative to output directory. '
           'Defaults to default Chrome browser installed if not specified.')

  # Declare one subcommand per workflow stage; `auto` chains them all.
  subparsers = parser.add_subparsers(
      title='Mode in which to run this script', dest='command')
  for name, description in [
      ('auto', 'interactive mode automating updating a recording'),
      ('live', 'run story on a live website'),
      ('record', 'record story from a live website'),
      ('replay', 'replay story from the recording'),
      ('upload', 'upload recording to the Google Storage'),
      ('review', 'create a CL with updated recording'),
      ('pinpoint', 'trigger Pinpoint jobs to test the recording')]:
    subparsers.add_parser(name, help=description)

  args = parser.parse_args(argv)
  updater = WprUpdater(args)

  if args.command == 'auto':
    _EnsureEditor()
    luci_auth.CheckLoggedIn()
    updater.AutoRun()
  elif args.command == 'pinpoint':
    luci_auth.CheckLoggedIn()
    updater.StartPinpointJobs()
  else:
    # Simple single-stage commands map directly onto updater methods.
    handler = {
        'live': updater.LiveRun,
        'record': updater.RecordWpr,
        'replay': updater.ReplayWpr,
        'upload': updater.UploadWpr,
        'review': updater.UploadCL,
    }.get(args.command)
    if handler is not None:
      handler()
  updater.Cleanup()