Improve documentation

- Update Readme.md
- Improve `describe` command to print tabular data by default
- Add tabulate package

Change-Id: I83ff99ec46786ce4d1a95f336cff8f0799b86b50
Reviewed-on: https://chromium-review.googlesource.com/c/crossbench/+/3909888
Reviewed-by: Patrick Thier <pthier@chromium.org>
diff --git a/README.md b/README.md
index bb5d243..09ca659 100644
--- a/README.md
+++ b/README.md
@@ -1,16 +1,17 @@
-Crossbench
-==========
+# Crossbench
 
 Crossbench is a cross-browser/cross-benchmark runner to extract performance
 numbers.
 
-This project uses [poetry](https://python-poetry.org/) deps and package scripts to setup the correct environment for testing and debugging.
+## Setup:
+This project uses [poetry](https://python-poetry.org/) dependencies and package
+scripts to set up the correct environment for testing and debugging.
 
 ```
 pip3 install poetry
 ```
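+
+After installing poetry itself, install the project's dependencies from the
+repository checkout (a standard poetry step):
+
+```
+poetry install
+```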
 
-Example usage:
+## Basic usage:
 
 ```
 poetry run crossbench speedometer_2.0 \
@@ -20,25 +21,70 @@
     --probe=v8.log
 ```
 
-Describe *all* subcommands with stories and all probes:
+## Main Components
+
+### Browsers
+Crossbench supports running benchmarks on one or more browser configurations.
+The main implementation uses [selenium](https://www.selenium.dev/) for maximum
+system independence.
+
+You can specify a single browser with `--browser=<name>`:
+
 ```
-poetry run crossbench describe
+poetry run crossbench speedometer_2.0 \
+    --browser=/path/to/chromium \
+    -- \
+    --browser-flag-foo \
+    --browser-flag-bar
 ```
 
+For more complex scenarios you can use a
+[browser.config.hjson](browser.config.example.hjson) file.
+It allows you to specify multiple browser and flag configurations in a single
+file and produce performance numbers with a single invocation.
 
-Main Components
----------------
-Browsers
-:   Interface to start, interact and stop browsers.
-    The main implementions use [selenium](https://www.selenium.dev/) for
-    maximum system independence.
+```
+poetry run crossbench speedometer_2.0 \
+    --browser-config=config.hjson
+```
 
-Probes
-:   Probes define a way to extract arbitrary (performance) numbers from a
-    host or running browser. This can reach from running simple JS-snippets to
-    extract page-specific numbers to system-wide profiling.
+The [example file](browser.config.example.hjson) lists and explains all
+configuration details.
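+
+As a rough illustration, such a config might look like the following (the key
+names here are illustrative only; the example file documents the exact format):
+
+```
+{
+  browsers: {
+    stable: {
+      path: "/path/to/chrome-stable",
+      flags: ["--incognito"]
+    }
+  }
+}
+```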
 
-Stories
-:   Stories define sequences of browser interactions. This can be simply
-    loading a URL and waiting for a given period of time, or in more complex
-    scenarios, actively interact with a page and navigate multiple times.
+### Probes
+Probes define a way to extract arbitrary (performance) numbers from a
+host or running browser. This can range from simple JS snippets that extract
+page-specific numbers to system-wide profiling.
+
+Multiple probes can be added with repeated `--probe=XXX` options.
+You can use the `describe` subcommand to list all probes:
+
+```
+poetry run crossbench describe probes
+```
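+
+Several probes can be combined in a single run by repeating the option (the
+second probe name below is only a placeholder; `describe probes` shows the
+real names):
+
+```
+poetry run crossbench speedometer_2.0 \
+    --browser=/path/to/chromium \
+    --probe=v8.log \
+    --probe=<other-probe>
+```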
+
+### Benchmarks
+Use the `describe` command to list all benchmark details:
+
+```
+poetry run crossbench describe benchmarks
+```
+
+### Stories
+Stories define sequences of browser interactions. This can be as simple as
+loading a URL and waiting for a given period of time, or, in more complex
+scenarios, actively interacting with a page and navigating multiple times.
+
+Use `--help` or the `describe` command to list all stories for a benchmark:
+
+```
+poetry run crossbench speedometer_2.0 --help
+```
+
+Use `--stories` to select individual stories as a comma-separated list of
+names, or use a regular expression as a filter.
+
+```
+poetry run crossbench speedometer_2.0 \
+    --browser=/path/to/chromium \
+    --stories=VanillaJS.*
+```
diff --git a/crossbench/cli.py b/crossbench/cli.py
index f7cb34f..4315891 100644
--- a/crossbench/cli.py
+++ b/crossbench/cli.py
@@ -9,6 +9,7 @@
 import json
 import logging
 import pathlib
+from tabulate import tabulate
 from typing import Dict, Iterable, List, Optional, Tuple, Type, Union
 
 import crossbench as cb
@@ -251,10 +252,22 @@
   def _setup_subparser(self):
     self.subparsers = self.parser.add_subparsers(
         title="Subcommands", dest="subcommand", required=True)
+
     for benchmark_cls in self.BENCHMARKS:
       self._setup_benchmark_subparser(benchmark_cls)
+
     describe_parser = self.subparsers.add_parser(
-        "describe", help="Print all benchmarks and stories")
+        "describe", aliases=["desc"], help="Print all benchmarks and stories")
+    describe_parser.add_argument(
+        "filter",
+        nargs="?",
+        choices=["all", "benchmarks", "probes"],
+        default="all",
+        help="Limit output to the given category, defaults to 'all'")
+    describe_parser.add_argument("--json",
+                                 default=False,
+                                 action="store_true",
+                                 help="Print the data as JSON")
     describe_parser.set_defaults(subcommand=self.describe_subcommand)
 
   def describe_subcommand(self, args):
@@ -268,7 +281,33 @@
             for probe_cls in cb.probes.GENERAL_PURPOSE_PROBES
         }
     }
-    print(json.dumps(data, indent=2))
+    if args.json:
+      if args.filter == "probes":
+        data = data["probes"]
+      elif args.filter == "benchmarks":
+        data = data["benchmarks"]
+      else:
+        assert args.filter == "all"
+      print(json.dumps(data, indent=2))
+      return
+    # Create tabular format
+    if args.filter == "all" or args.filter == "benchmarks":
+      table = [["Benchmark", "Property", "Value"]]
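+      # Each benchmark gets a name-only row; its properties follow below.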
+      for benchmark_name, values in data["benchmarks"].items():
+        table.append([benchmark_name])
+        for name, value in values.items():
+          if isinstance(value, (tuple, list)):
+            value = "\n".join(value)
+          elif isinstance(value, dict):
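+            # Render nested dicts as a small plain-text sub-table in the cell.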
+            value = tabulate(value.items(), tablefmt="plain")
+          table.append([None, name, value])
+      print(tabulate(table, tablefmt="grid"))
+    if args.filter == "all" or args.filter == "probes":
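+      # Probes only need a flat two-column table: probe name and help text.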
+      print(
+          tabulate(data["probes"].items(),
+                   headers=["Probe", "Help"],
+                   tablefmt="grid"))
+
 
   def _setup_benchmark_subparser(self, benchmark_cls):
     subparser = benchmark_cls.add_cli_parser(self.subparsers)
diff --git a/poetry.lock b/poetry.lock
index 5e5395c..1e6d142 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -363,7 +363,7 @@
 name = "tabulate"
 version = "0.8.10"
 description = "Pretty-print tabular data"
-category = "dev"
+category = "main"
 optional = false
 python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
 
@@ -466,7 +466,7 @@
 [metadata]
 lock-version = "1.1"
 python-versions = ">=3.8,<=3.10"
-content-hash = "b8e8fd41db34ee76e7dc00ba101070a5c69c20cdb2eca42556532ab2921e350c"
+content-hash = "68ba45d54b31f4222bd0af88e825efd681b36236f58c1d5b916cc2dab9ad5095"
 
 [metadata.files]
 async-generator = [
diff --git a/pyproject.toml b/pyproject.toml
index 377887e..4373de5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -20,6 +20,7 @@
 hjson = "^3.1.0"
 psutil = "^5.9.1"
 selenium = "^4.4.3"
+tabulate = "^0.8.10"
 
 [tool.poetry.scripts]
 crossbench = 'scripts:crossbench'