diff --git a/tools/android/checkstyle/chromium-style-5.0.xml b/tools/android/checkstyle/chromium-style-5.0.xml
index e2033a5..e790323 100644
--- a/tools/android/checkstyle/chromium-style-5.0.xml
+++ b/tools/android/checkstyle/chromium-style-5.0.xml
@@ -17,6 +17,10 @@
     <module name="IllegalCatch">
       <property name="severity" value="info"/>
     </module>
+    <module name="ModifierOrder">
+      <message key="mod.order" value="&quot;{0}&quot; modifier out of order with the JLS suggestions. The correct order: public protected private abstract default static final transient volatile synchronized native strictfp. See https://crbug.com/1003711 for details."/>
+      <property name="severity" value="error"/>
+    </module>
     <module name="RedundantImport">
       <message key="import.redundant" value="Redundant import: {0}. Use :JavaImportOrganize (ECLIM) or Ctrl+Shift+O (Eclipse) to sort imports"/>
       <property name="severity" value="error"/>
diff --git a/tools/clang/scripts/update.py b/tools/clang/scripts/update.py
index 0b49a56..1bbaa76 100755
--- a/tools/clang/scripts/update.py
+++ b/tools/clang/scripts/update.py
@@ -37,9 +37,9 @@
 # Do NOT CHANGE this if you don't know what you're doing -- see
 # https://chromium.googlesource.com/chromium/src/+/master/docs/updating_clang.md
 # Reverting problematic clang rolls is safe, though.
-CLANG_REVISION = '8455294f2ac13d587b13d728038a9bffa7185f2b'
-CLANG_SVN_REVISION = '371202'
-CLANG_SUB_REVISION = 2
+CLANG_REVISION = 'b4160cb94c54f0b31d0ce14694950dac7b6cd83f'
+CLANG_SVN_REVISION = '371856'
+CLANG_SUB_REVISION = 1
 
 PACKAGE_VERSION = '%s-%s-%s' % (CLANG_SVN_REVISION, CLANG_REVISION[:8],
                                 CLANG_SUB_REVISION)
diff --git a/tools/metrics/actions/actions.xml b/tools/metrics/actions/actions.xml
index bccaea2..04a353b 100644
--- a/tools/metrics/actions/actions.xml
+++ b/tools/metrics/actions/actions.xml
@@ -10696,10 +10696,10 @@
   </description>
 </action>
 
-<action name="ManualFallback_CreditCard_OpenAddCreditCard">
+<action name="ManualFallback_CreditCard_OpenAddPaymentMethod">
   <owner>javierrobles@chromium.org</owner>
   <description>
-    The user tapped on &quot;Add credit card&quot; on the Password Manual
+    The user tapped on &quot;Add Payment Method...&quot; on the Password Manual
     Fallback view.
   </description>
 </action>
@@ -11511,6 +11511,22 @@
   </description>
 </action>
 
+<action name="MobileAddCreditCard.AddPaymentMethodButton">
+  <owner>gambard@chromium.org</owner>
+  <description>
+    The user tapped on &quot;Add Payment Method...&quot; in the Settings,
+    Payment Methods menu.
+  </description>
+</action>
+
+<action name="MobileAddCreditCard.CardSaved">
+  <owner>gambard@chromium.org</owner>
+  <description>
+    The user saved a new credit card by tapping on &quot;Add&quot; in the
+    Settings, Add Payment Methods menu.
+  </description>
+</action>
+
 <action name="MobileBeamCallbackSuccess">
   <owner>Please list the metric's owners. Add more owner tags as needed.</owner>
   <description>Please enter the description of this user action.</description>
diff --git a/tools/metrics/histograms/histograms.xml b/tools/metrics/histograms/histograms.xml
index fff74fff..a643d08 100644
--- a/tools/metrics/histograms/histograms.xml
+++ b/tools/metrics/histograms/histograms.xml
@@ -36058,6 +36058,17 @@
   </summary>
 </histogram>
 
+<histogram name="Enterprise.HeartbeatSignal" enum="BooleanSuccess"
+    expires_after="2020-04-01">
+  <owner>ayaelattar@chromium.org</owner>
+  <owner>poromov@chromium.org</owner>
+  <summary>
+    Result of a single attempt to signal a device heartbeat, which is used for
+    monitoring the device connectivity. By default, heartbeats for a single
+    device are sent every 2 minutes.
+  </summary>
+</histogram>
+
 <histogram name="Enterprise.InitialEnrollmentRequirement"
     enum="EnterpriseInitialEnrollmentRequirement" expires_after="M77">
   <owner>pmarko@chromium.org</owner>
@@ -110745,6 +110756,16 @@
   </summary>
 </histogram>
 
+<histogram name="Printing.CUPS.PrintJobDatabasePrintJobSaved"
+    enum="BooleanSuccess" expires_after="2021-09-11">
+  <owner>nikitapodguzov@chromium.org</owner>
+  <owner>skau@chromium.org</owner>
+  <summary>
+    Indicates whether saving a print job to the print job database was
+    successful.
+  </summary>
+</histogram>
+
 <histogram name="Printing.CUPS.PrintJobsQueued" units="count">
   <owner>skau@chromium.org</owner>
   <summary>
@@ -149616,7 +149637,7 @@
 </histogram>
 
 <histogram name="UpgradeDetector.RollbackReason"
-    enum="UpgradeDetectorRollbackReason" expires_after="M80">
+    enum="UpgradeDetectorRollbackReason" expires_after="M85">
   <owner>isandrk@chromium.org</owner>
   <owner>poromov@chromium.org</owner>
   <summary>
diff --git a/tools/metrics/ukm/ukm.xml b/tools/metrics/ukm/ukm.xml
index 7bd6fb9..3b36acb4 100644
--- a/tools/metrics/ukm/ukm.xml
+++ b/tools/metrics/ukm/ukm.xml
@@ -213,7 +213,7 @@
   </metric>
 </event>
 
-<event name="AdPageLoad">
+<event name="AdPageLoad" singular="True">
   <owner>johnidel@chromium.org</owner>
   <owner>jkarlin@chromium.org</owner>
   <summary>
diff --git a/tools/metrics/ukm/validate_format.py b/tools/metrics/ukm/validate_format.py
index b95d49d..3c6982a4 100755
--- a/tools/metrics/ukm/validate_format.py
+++ b/tools/metrics/ukm/validate_format.py
@@ -30,7 +30,7 @@
 
     results = dict();
 
-    if not metricCheckSuccess or not metricCheckSuccess:
+    if not ownerCheckSuccess or not metricCheckSuccess:
       results['Errors'] = ownerCheckErrors + metricCheckErrors
     if metricCheckWarnings and not IGNORE_METRIC_CHECK_WARNINGS:
       results['Warnings'] = metricCheckWarnings
diff --git a/tools/metrics/ukm/xml_validations.py b/tools/metrics/ukm/xml_validations.py
index b4d4327b..f832e3f 100644
--- a/tools/metrics/ukm/xml_validations.py
+++ b/tools/metrics/ukm/xml_validations.py
@@ -60,7 +60,7 @@
     enums, _ = extract_histograms.ExtractEnumsFromXmlTree(enum_tree)
 
     for event_node in self.config.getElementsByTagName('event'):
-      for metric_node in self.config.getElementsByTagName('metric'):
+      for metric_node in event_node.getElementsByTagName('metric'):
         if metric_node.hasAttribute('enum'):
           enum_name = metric_node.getAttribute('enum');
           # Check if the enum is defined in enums.xml.
diff --git a/tools/metrics/ukm/xml_validations_test.py b/tools/metrics/ukm/xml_validations_test.py
index 17a0074..cba3bb9 100644
--- a/tools/metrics/ukm/xml_validations_test.py
+++ b/tools/metrics/ukm/xml_validations_test.py
@@ -54,7 +54,10 @@
   def testMetricHasUndefinedEnum(self):
     ukm_config = self.toUkmConfig("""
         <ukm-configuration>
-          <event name="Event">
+          <event name="Event1">
+            <metric name="Metric2" enum="FeatureObserver"/>
+          </event>
+          <event name="Event2">
             <metric name="Metric1" enum="BadEnum"/>
             <metric name="Metric2" enum="FeatureObserver"/>
             <metric name="Metric3" unit="ms"/>
@@ -63,12 +66,12 @@
         </ukm-configuration>
         """.strip())
     expected_errors = [
-        "Unknown enum BadEnum in ukm metric Event:Metric1.",
+        "Unknown enum BadEnum in ukm metric Event2:Metric1.",
     ]
 
     expected_warnings = [
         "Warning: Neither 'enum' or 'unit' is specified for ukm metric "
-        "Event:Metric4.",
+        "Event2:Metric4.",
     ]
 
     validator = UkmXmlValidation(ukm_config)
diff --git a/tools/perf/core/results_processor/__init__.py b/tools/perf/core/results_processor/__init__.py
index d5b3e2f..bef1eea8 100644
--- a/tools/perf/core/results_processor/__init__.py
+++ b/tools/perf/core/results_processor/__init__.py
@@ -2,7 +2,7 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-from core.results_processor.processor import ArgumentParser
-from core.results_processor.processor import ProcessOptions
+from core.results_processor.command_line import ArgumentParser
+from core.results_processor.command_line import ProcessOptions
 from core.results_processor.processor import ProcessResults
 from core.results_processor.processor import main
diff --git a/tools/perf/core/results_processor/command_line.py b/tools/perf/core/results_processor/command_line.py
new file mode 100644
index 0000000..faf113e
--- /dev/null
+++ b/tools/perf/core/results_processor/command_line.py
@@ -0,0 +1,152 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Implements the interface of the results_processor module.
+
+Provides functions to parse command line arguments and process options.
+"""
+
+import argparse
+import datetime
+import os
+import re
+import sys
+
+from py_utils import cloud_storage
+
+
+SUPPORTED_FORMATS = ['none', 'json-test-results']
+
+
+def ArgumentParser(standalone=False, legacy_formats=None):
+  """Create an ArgumentParser defining options required by the processor."""
+  all_output_formats = sorted(
+      set(SUPPORTED_FORMATS).union(legacy_formats or ()))
+  parser, group = _CreateTopLevelParser(standalone)
+  group.add_argument(
+      '--output-format', action='append', dest='output_formats',
+      metavar='FORMAT', choices=all_output_formats, required=standalone,
+      help=Sentences(
+          'Output format to produce.',
+          'May be used multiple times to produce multiple outputs.',
+          'Available formats: %s.' % ', '.join(all_output_formats),
+          '' if standalone else 'Defaults to: html.'))
+  group.add_argument(
+      '--intermediate-dir', metavar='DIR_PATH', required=standalone,
+      help=Sentences(
+          'Path to a directory where intermediate results are stored.',
+          '' if standalone else 'If not provided, the default is to create a '
+          'new directory within "{output_dir}/artifacts/".'))
+  group.add_argument(
+      '--output-dir', default=_DefaultOutputDir(), metavar='DIR_PATH',
+      help=Sentences(
+          'Path to a directory where to write final results.',
+          'Default: %(default)s.'))
+  group.add_argument(
+      '--reset-results', action='store_true',
+      help=Sentences(
+          'Overwrite any previous output files in the output directory.',
+          'The default is to append to existing results.'))
+  group.add_argument(
+      '--results-label', metavar='LABEL',
+      help='Label to identify the results generated by this run.')
+  group.add_argument(
+      '--upload-results', action='store_true',
+      help='Upload generated artifacts to cloud storage.')
+  group.add_argument(
+      '--upload-bucket', default='output', metavar='BUCKET',
+      help=Sentences(
+          'Storage bucket to use for uploading artifacts.',
+          'Supported values are: %s; or a valid cloud storage bucket name.'
+          % ', '.join(sorted(cloud_storage.BUCKET_ALIASES)),
+          'Defaults to: %(default)s.'))
+  group.set_defaults(legacy_output_formats=[])
+  return parser
+
+
+def ProcessOptions(options):
+  """Adjust result processing options as needed before running benchmarks.
+
+  Note: The intended scope of this function is limited to only adjust options
+  defined by the ArgumentParser above. One should not attempt to read or modify
+  any other attributes that the options object may have.
+
+  Currently the main job of this function is to tease out and separate output
+  formats to be handled by the results processor, from those that should fall
+  back to the legacy output formatters in Telemetry.
+
+  Args:
+    options: An options object with values parsed from the command line.
+  """
+  # The output_dir option is None or missing if the selected Telemetry command
+  # does not involve output generation, e.g. "run_benchmark list", and the
+  # argument parser defined above was not invoked.
+  if getattr(options, 'output_dir', None) is None:
+    return
+
+  def resolve_dir(path):
+    return os.path.realpath(os.path.expanduser(path))
+
+  options.output_dir = resolve_dir(options.output_dir)
+
+  if options.intermediate_dir:
+    options.intermediate_dir = resolve_dir(options.intermediate_dir)
+  else:
+    if options.results_label:
+      filesafe_label = re.sub(r'\W+', '_', options.results_label)
+    else:
+      filesafe_label = 'run'
+    start_time = datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%SZ')
+    options.intermediate_dir = os.path.join(
+        options.output_dir, 'artifacts', '%s_%s' % (filesafe_label, start_time))
+
+  if options.upload_results:
+    options.upload_bucket = cloud_storage.BUCKET_ALIASES.get(
+        options.upload_bucket, options.upload_bucket)
+  else:
+    options.upload_bucket = None
+
+  if options.output_formats:
+    chosen_formats = sorted(set(options.output_formats))
+  else:
+    chosen_formats = ['html']
+
+  options.output_formats = []
+  for output_format in chosen_formats:
+    if output_format == 'none':
+      continue
+    elif output_format in SUPPORTED_FORMATS:
+      options.output_formats.append(output_format)
+    else:
+      options.legacy_output_formats.append(output_format)
+
+
+def _CreateTopLevelParser(standalone):
+  """Create top level parser, and group for result options."""
+  if standalone:
+    parser = argparse.ArgumentParser(
+        description='Standalone command line interface to results_processor.')
+    # In standalone mode, both the parser and group are the same thing.
+    return parser, parser
+  else:
+    parser = argparse.ArgumentParser(add_help=False)
+    group = parser.add_argument_group(title='Result processor options')
+    return parser, group
+
+
+def _DefaultOutputDir():
+  """Default output directory.
+
+  Points to the directory of the benchmark runner script, if found, or the
+  current working directory otherwise.
+  """
+  main_module = sys.modules['__main__']
+  if hasattr(main_module, '__file__'):
+    return os.path.realpath(os.path.dirname(main_module.__file__))
+  else:
+    return os.getcwd()
+
+
+def Sentences(*args):
+  return ' '.join(s for s in args if s)
diff --git a/tools/perf/core/results_processor/processor_unittest.py b/tools/perf/core/results_processor/command_line_unittest.py
similarity index 88%
rename from tools/perf/core/results_processor/processor_unittest.py
rename to tools/perf/core/results_processor/command_line_unittest.py
index a6e7c25..2220cd01 100644
--- a/tools/perf/core/results_processor/processor_unittest.py
+++ b/tools/perf/core/results_processor/command_line_unittest.py
@@ -16,12 +16,13 @@
 
 import mock
 
+from core.results_processor import command_line
 from core.results_processor import processor
 
 
-# To easily mock module level symbols within the processor module.
+# To easily mock module level symbols within the command_line module.
 def module(symbol):
-  return 'core.results_processor.processor.' + symbol
+  return 'core.results_processor.command_line.' + symbol
 
 
 class ProcessOptionsTestCase(unittest.TestCase):
@@ -52,10 +53,10 @@
     mock.patch.stopall()
 
   def ParseArgs(self, args):
-    parser = processor.ArgumentParser(
+    parser = command_line.ArgumentParser(
         standalone=self.standalone, legacy_formats=self.legacy_formats)
     options = parser.parse_args(args)
-    processor.ProcessOptions(options)
+    command_line.ProcessOptions(options)
     return options
 
 
@@ -132,7 +133,7 @@
     with self.assertRaises(SystemExit):
       self.ParseArgs(['--output-format', 'unknown'])
 
-  @mock.patch.dict(module('SUPPORTED_FORMATS'), {'new-format': None})
+  @mock.patch(module('SUPPORTED_FORMATS'), ['new-format'])
   def testOutputFormatsSplit(self):
     self.legacy_formats = ['old-format']
     options = self.ParseArgs(
@@ -140,7 +141,7 @@
     self.assertEqual(options.output_formats, ['new-format'])
     self.assertEqual(options.legacy_output_formats, ['old-format'])
 
-  @mock.patch.dict(module('SUPPORTED_FORMATS'), {'new-format': None})
+  @mock.patch(module('SUPPORTED_FORMATS'), ['new-format'])
   def testNoDuplicateOutputFormats(self):
     self.legacy_formats = ['old-format']
     options = self.ParseArgs(
@@ -159,15 +160,23 @@
     with self.assertRaises(SystemExit):
       self.ParseArgs([])
 
-  @mock.patch.dict(module('SUPPORTED_FORMATS'), {'new-format': None})
+  @mock.patch(module('SUPPORTED_FORMATS'), ['new-format'])
   def testIntermediateDirRequired(self):
     with self.assertRaises(SystemExit):
       self.ParseArgs(['--output-format', 'new-format'])
 
-  @mock.patch.dict(module('SUPPORTED_FORMATS'), {'new-format': None})
+  @mock.patch(module('SUPPORTED_FORMATS'), ['new-format'])
   def testSuccessful(self):
     options = self.ParseArgs(
         ['--output-format', 'new-format', '--intermediate-dir', 'some_dir'])
     self.assertEqual(options.output_formats, ['new-format'])
     self.assertEqual(options.intermediate_dir, '/path/to/curdir/some_dir')
     self.assertEqual(options.output_dir, '/path/to/output_dir')
+
+
+class TestSupportedFormats(unittest.TestCase):
+  def testAllSupportedFormatsHaveFormatters(self):
+    for output_format in command_line.SUPPORTED_FORMATS:
+      if output_format == 'none':
+        continue
+      self.assertIn(output_format, processor.FORMATTERS)
diff --git a/tools/perf/core/results_processor/processor.py b/tools/perf/core/results_processor/processor.py
index 9aab4c4..ad13da4 100644
--- a/tools/perf/core/results_processor/processor.py
+++ b/tools/perf/core/results_processor/processor.py
@@ -4,132 +4,24 @@
 
 """Implements the interface of the results_processor module.
 
-Provides functions to parse command line arguments, process options, and the
-entry point to start the processing of results.
+Provides functions to process intermediate results, and the entry point to
+the standalone version of Results Processor.
 """
 
-import argparse
-import datetime
 import json
 import os
-import re
-import sys
 
-from py_utils import cloud_storage
+from core.results_processor import command_line
 from core.results_processor import json3_output
 
 
 HTML_TRACE_NAME = 'trace.html'
 TELEMETRY_RESULTS = '_telemetry_results.jsonl'
-SUPPORTED_FORMATS = {
-    'none': NotImplemented,
+FORMATTERS = {
     'json-test-results': json3_output,
 }
 
 
-def ArgumentParser(standalone=False, legacy_formats=None):
-  """Create an ArgumentParser defining options required by the processor."""
-  all_output_formats = sorted(
-      set(SUPPORTED_FORMATS).union(legacy_formats or ()))
-  parser, group = _CreateTopLevelParser(standalone)
-  group.add_argument(
-      '--output-format', action='append', dest='output_formats',
-      metavar='FORMAT', choices=all_output_formats, required=standalone,
-      help=Sentences(
-          'Output format to produce.',
-          'May be used multiple times to produce multiple outputs.',
-          'Avaliable formats: %s.' % ', '.join(all_output_formats),
-          '' if standalone else 'Defaults to: html.'))
-  group.add_argument(
-      '--intermediate-dir', metavar='DIR_PATH', required=standalone,
-      help=Sentences(
-          'Path to a directory where intermediate results are stored.',
-          '' if standalone else 'If not provided, the default is to create a '
-          'new directory within "{output_dir}/artifacts/".'))
-  group.add_argument(
-      '--output-dir', default=_DefaultOutputDir(), metavar='DIR_PATH',
-      help=Sentences(
-          'Path to a directory where to write final results.',
-          'Default: %(default)s.'))
-  group.add_argument(
-      '--reset-results', action='store_true',
-      help=Sentences(
-          'Overwrite any previous output files in the output directory.',
-          'The default is to append to existing results.'))
-  group.add_argument(
-      '--results-label', metavar='LABEL',
-      help='Label to identify the results generated by this run.')
-  group.add_argument(
-      '--upload-results', action='store_true',
-      help='Upload generated artifacts to cloud storage.')
-  group.add_argument(
-      '--upload-bucket', default='output', metavar='BUCKET',
-      help=Sentences(
-          'Storage bucket to use for uploading artifacts.',
-          'Supported values are: %s; or a valid cloud storage bucket name.'
-          % ', '.join(sorted(cloud_storage.BUCKET_ALIASES)),
-          'Defaults to: %(default)s.'))
-  group.set_defaults(legacy_output_formats=[])
-  return parser
-
-
-def ProcessOptions(options):
-  """Adjust result processing options as needed before running benchmarks.
-
-  Note: The intended scope of this function is limited to only adjust options
-  defined by the ArgumentParser above. One should not attempt to read or modify
-  any other attributes that the options object may have.
-
-  Currently the main job of this function is to tease out and separate output
-  formats to be handled by the results processor, from those that should fall
-  back to the legacy output formatters in Telemetry.
-
-  Args:
-    options: An options object with values parsed from the command line.
-  """
-  # The output_dir option is None or missing if the selected Telemetry command
-  # does not involve output generation, e.g. "run_benchmark list", and the
-  # argument parser defined above was not invoked.
-  if getattr(options, 'output_dir', None) is None:
-    return
-
-  def resolve_dir(path):
-    return os.path.realpath(os.path.expanduser(path))
-
-  options.output_dir = resolve_dir(options.output_dir)
-
-  if options.intermediate_dir:
-    options.intermediate_dir = resolve_dir(options.intermediate_dir)
-  else:
-    if options.results_label:
-      filesafe_label = re.sub(r'\W+', '_', options.results_label)
-    else:
-      filesafe_label = 'run'
-    start_time = datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%SZ')
-    options.intermediate_dir = os.path.join(
-        options.output_dir, 'artifacts', '%s_%s' % (filesafe_label, start_time))
-
-  if options.upload_results:
-    options.upload_bucket = cloud_storage.BUCKET_ALIASES.get(
-        options.upload_bucket, options.upload_bucket)
-  else:
-    options.upload_bucket = None
-
-  if options.output_formats:
-    chosen_formats = sorted(set(options.output_formats))
-  else:
-    chosen_formats = ['html']
-
-  options.output_formats = []
-  for output_format in chosen_formats:
-    if output_format == 'none':
-      continue
-    elif output_format in SUPPORTED_FORMATS:
-      options.output_formats.append(output_format)
-    else:
-      options.legacy_output_formats.append(output_format)
-
-
 def ProcessResults(options):
   """Process intermediate results and produce the requested outputs.
 
@@ -152,26 +44,13 @@
   _UploadArtifacts(intermediate_results, options.upload_bucket)
 
   for output_format in options.output_formats:
-    if output_format not in SUPPORTED_FORMATS:
+    if output_format not in FORMATTERS:
       raise NotImplementedError(output_format)
 
-    formatter = SUPPORTED_FORMATS[output_format]
+    formatter = FORMATTERS[output_format]
     formatter.Process(intermediate_results, options.output_dir)
 
 
-def _CreateTopLevelParser(standalone):
-  """Create top level parser, and group for result options."""
-  if standalone:
-    parser = argparse.ArgumentParser(
-        description='Standalone command line interface to results_processor.')
-    # In standalone mode, both the parser and group are the same thing.
-    return parser, parser
-  else:
-    parser = argparse.ArgumentParser(add_help=False)
-    group = parser.add_argument_group(title='Result processor options')
-    return parser, group
-
-
 def _LoadIntermediateResults(intermediate_file):
   """Load intermediate results from a file into a single dict."""
   results = {'benchmarkRun': {}, 'testResults': []}
@@ -219,26 +98,9 @@
         assert 'remoteUrl' in artifact
 
 
-def _DefaultOutputDir():
-  """Default output directory.
-
-  Points to the directory of the benchmark runner script, if found, or the
-  current working directory otherwise.
-  """
-  main_module = sys.modules['__main__']
-  if hasattr(main_module, '__file__'):
-    return os.path.realpath(os.path.dirname(main_module.__file__))
-  else:
-    return os.getcwd()
-
-
-def Sentences(*args):
-  return ' '.join(s for s in args if s)
-
-
 def main(args=None):
   """Entry point for the standalone version of the results_processor script."""
-  parser = ArgumentParser(standalone=True)
+  parser = command_line.ArgumentParser(standalone=True)
   options = parser.parse_args(args)
-  ProcessOptions(options)
+  command_line.ProcessOptions(options)
   return ProcessResults(options)