#!/usr/bin/python
# Copyright 2011 Google Inc. All Rights Reserved.

import sys
import textwrap

from settings_factory import BenchmarkSettings
from settings_factory import GlobalSettings
from settings_factory import LabelSettings


class Help(object):
  def GetUsage(self):
    return """%s [OPTIONS] [ACTION] EXPERIMENT_FILE""" % (sys.argv[0])

  def _WrapLine(self, line):
    return "\n".join(textwrap.wrap(line, 80))

  def _GetFieldDescriptions(self, fields):
    res = ""
    for field_name in fields:
      field = fields[field_name]
      res += "Field:\t\t%s\n" % field.name
      res += self._WrapLine("Description:\t%s" % field.description) + "\n"
      res += "Type:\t\t%s\n" % type(field).__name__.replace("Field", "")
      res += "Required:\t%s\n" % field.required
      if field.default:
        res += "Default:\t%s\n" % field.default
      res += "\n"
    return res
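
  # Hedged illustration of the text _GetFieldDescriptions emits for a single
  # field. The field name, description, type, and default shown here are
  # placeholders for demonstration, not values taken from the real settings
  # definitions, and spaces stand in for the tab characters used above:
  #
  #   Field:        name
  #   Description:  The name of the experiment.
  #   Type:         Text
  #   Required:     False
  #   Default:      experiment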

  def GetHelp(self):
    global_fields = self._GetFieldDescriptions(GlobalSettings("").fields)
    benchmark_fields = self._GetFieldDescriptions(BenchmarkSettings("").fields)
    label_fields = self._GetFieldDescriptions(LabelSettings("").fields)

    return """%s is a script for running performance experiments on ChromeOS. It
allows one to run ChromeOS Autotest benchmarks over several images and compare
the results to determine whether there is a performance difference.
Comparing several images using %s is referred to as running an
"experiment". An "experiment file" is a configuration file which holds all the
information that describes the experiment and how it should be run. An example
of a simple experiment file is below:
--------------------------------- test.exp ---------------------------------
name: my_experiment
board: x86-alex
remote: chromeos-alex5 172.18.122.132
benchmark: PageCycler {
iterations: 3
}
my_first_image {
chromeos_image: /usr/local/chromeos-1/chromiumos_image.bin
}
my_second_image {
chromeos_image: /usr/local/chromeos-2/chromiumos_image.bin
}
----------------------------------------------------------------------------
This experiment file names the experiment "my_experiment". It will be run
on the board x86-alex. Benchmarks will be run using two remote devices,
one specified by a hostname and the other by its IP address. Benchmarks will be
run in parallel across these devices.
There is currently no way to specify which benchmark will run on each device.
We define one "benchmark" that will be run, PageCycler. This benchmark has one
"field" which specifies how many iterations it will run for.
We specify two "labels", or images, which will be compared. The PageCycler benchmark
will be run on each of these images 3 times and a result table will be output
which compares the two.
The full list of fields that can be specified is as follows:
=================
Global Fields
=================
%s
=================
Benchmark Fields
=================
%s
=================
Label Fields
=================
%s
Note that global fields are overridden by label or benchmark fields when a
field can be specified in both places. Fields that are specified as arguments
override fields specified in experiment files.
%s is invoked by passing it a path to an experiment file, as well as an action
to execute on that experiment file. The possible actions to use are:
run\t\tRun the experiment and cache the results.
table\t\tDisplay cached results of an experiment, without running anything.
email\t\tEmail a summary of the results to the user.
do\t\tThe default action. Executes the following actions: run, table, email.
""" % (sys.argv[0], sys.argv[0], global_fields,
       benchmark_fields, label_fields, sys.argv[0])
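

# Illustrative usage sketch (not part of the original module): how a calling
# script might use this class to print help. The argument check below is an
# assumption for demonstration only; the real entry point does its own option
# parsing.
if __name__ == "__main__":
  help_printer = Help()
  if len(sys.argv) < 2:
    # No experiment file or action given: show the one-line usage string and
    # the full help text, which includes the generated field descriptions.
    print(help_printer.GetUsage())
    print(help_printer.GetHelp())
    sys.exit(1)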