Updated to arc-runtime-43.4410.290.0
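
This rolls src/build/DEPS.chrome to r317549 and src/build/DEPS.naclsdk to
the Chrome 43 (pepper_43) canary bundles, and updates the chromium-ppapi
and native_client submodules to match. Build/test tooling changes are
bundled with the roll:

* launch_chrome.py: pass --silent-launch for the automated test modes
  (atftest/perftest) on Linux, and use platform_util instead of
  sys.platform for the Mac check.
* lint_source.py: add TestConfigLinter for
  src/integration_tests/expectations/, let linters customize the
  subprocess environment, and rewrite UpstreamLinter's parsing to fix the
  known bug with '=' in descriptions.
* suite_runner_config.py: load expectation files with a restricted eval()
  (_read_test_config) instead of imp.load_source(), and validate '*' and
  '#' usage in suite_test_expectations patterns.
* suite_runner_util.py: support '*' and 'class#*' patterns when merging
  expectation maps, and assert that every override pattern is actually
  used.
* ninja_generator.py: capture the caller's stack frame in __new__ and
  emit it as comments in the generated ninja files.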
diff --git a/src/build/DEPS.chrome b/src/build/DEPS.chrome
index 1abcc3d..003f3c8 100644
--- a/src/build/DEPS.chrome
+++ b/src/build/DEPS.chrome
@@ -1 +1 @@
-317019
+317549
diff --git a/src/build/DEPS.naclsdk b/src/build/DEPS.naclsdk
index c7dc241..abcfd11 100644
--- a/src/build/DEPS.naclsdk
+++ b/src/build/DEPS.naclsdk
@@ -385,63 +385,63 @@
       "archives": [
         {
           "checksum": {
-            "sha1": "896ad2bf7769b7a09db7eba63b53b3448941106b"
+            "sha1": "6d620b673d05dd18b91dc27cee6a87212fba6683"
           },
           "host_os": "all",
-          "size": 78708022,
-          "url": "https://storage.googleapis.com/nativeclient-mirror/nacl/nacl_sdk/trunk.317052/naclports.tar.bz2"
+          "size": 78713336,
+          "url": "https://storage.googleapis.com/nativeclient-mirror/nacl/nacl_sdk/trunk.317615/naclports.tar.bz2"
         },
         {
           "checksum": {
-            "sha1": "e59668626184222cd5b31bc2ec311fbea8520dc2"
+            "sha1": "d5aa55ceee736a2d44649eebe14086db12aeb49c"
           },
           "host_os": "linux",
-          "size": 373139577,
-          "url": "https://storage.googleapis.com/nativeclient-mirror/nacl/nacl_sdk/trunk.317052/naclsdk_linux.tar.bz2"
+          "size": 383995641,
+          "url": "https://storage.googleapis.com/nativeclient-mirror/nacl/nacl_sdk/trunk.317615/naclsdk_linux.tar.bz2"
         },
         {
           "checksum": {
-            "sha1": "2c0b46d070b6e81579ab001588ec3af5775f595f"
+            "sha1": "bd38d050d1d9c1b156130882034ed4591566f5c0"
           },
           "host_os": "mac",
-          "size": 367040724,
-          "url": "https://storage.googleapis.com/nativeclient-mirror/nacl/nacl_sdk/trunk.317052/naclsdk_mac.tar.bz2"
+          "size": 378318039,
+          "url": "https://storage.googleapis.com/nativeclient-mirror/nacl/nacl_sdk/trunk.317615/naclsdk_mac.tar.bz2"
         },
         {
           "checksum": {
-            "sha1": "bb763b2754bee92922bb24134ab4abedfae44509"
+            "sha1": "80233ff1bed2d2a127cdceade5fb3e2ff27ccdf0"
           },
           "host_os": "win",
-          "size": 367819359,
-          "url": "https://storage.googleapis.com/nativeclient-mirror/nacl/nacl_sdk/trunk.317052/naclsdk_win.tar.bz2"
+          "size": 378637778,
+          "url": "https://storage.googleapis.com/nativeclient-mirror/nacl/nacl_sdk/trunk.317615/naclsdk_win.tar.bz2"
         }
       ],
-      "description": "Chrome 42 bundle. Chrome revision: 317052. NaCl revision: cc52724dde60334c60fed1e248f4ebf858db86a5",
+      "description": "Chrome 43 bundle. Chrome revision: 317615. NaCl revision: 456d6e41cd963607ca2ce966d6d9a8d9895ea973",
       "name": "pepper_canary",
       "recommended": "no",
-      "repath": "pepper_42",
-      "revision": 317052,
+      "repath": "pepper_43",
+      "revision": 317615,
       "stability": "canary",
-      "version": 42
+      "version": 43
     },
     {
       "archives": [
         {
           "checksum": {
-            "sha1": "6567df1c2407bc356432d91c83ce75c900b3ec72"
+            "sha1": "3d8ea2efe6b4db124bf76b5002b23d5adba31f8e"
           },
           "host_os": "linux",
-          "size": 426327761,
-          "url": "https://storage.googleapis.com/nativeclient-mirror/nacl/nacl_sdk/trunk.317114/naclsdk_bionic.tar.bz2"
+          "size": 437083266,
+          "url": "https://storage.googleapis.com/nativeclient-mirror/nacl/nacl_sdk/trunk.317630/naclsdk_bionic.tar.bz2"
         }
       ],
-      "description": "Chrome 42 bundle. Chrome revision: 317114. NaCl revision: 2c9f79345c54506498a3c2dfbc43920180886e7b",
+      "description": "Chrome 43 bundle. Chrome revision: 317630. NaCl revision: 456d6e41cd963607ca2ce966d6d9a8d9895ea973",
       "name": "bionic_canary",
       "recommended": "no",
-      "repath": "pepper_42",
-      "revision": 317114,
+      "repath": "pepper_43",
+      "revision": 317630,
       "stability": "canary",
-      "version": 42
+      "version": 43
     }
   ],
   "manifest_version": 2
diff --git a/src/build/launch_chrome.py b/src/build/launch_chrome.py
index b5c80f1..177142f 100755
--- a/src/build/launch_chrome.py
+++ b/src/build/launch_chrome.py
@@ -669,9 +669,19 @@
     # LANGUAGE takes priority over --lang option in Linux.
     os.environ['LANGUAGE'] = parsed_args.lang
     # In Mac, there is no handy way to change the locale.
-    if sys.platform == 'darwin':
+    if platform_util.is_running_on_mac():
       print '\nWARNING: --lang is not supported in Mac.'
 
+  if (parsed_args.mode in ('atftest', 'perftest') and
+      not platform_util.is_running_on_chromeos() and
+      not platform_util.is_running_on_mac()):
+    # --silent-launch runs ARC without creating a browser window. We only do
+    # this for automated tests; otherwise the user may want to do something
+    # like examine the Chromium settings ("about:flags" for example), which
+    # requires using the browser window. Note that this does not work on Mac,
+    # and should be unnecessary on a remote Chromebook target.
+    params.append('--silent-launch')
+
   params.extend(_compute_chrome_plugin_params(parsed_args))
   params.extend(_compute_chrome_sandbox_params(parsed_args))
   params.extend(_compute_chrome_graphics_params(parsed_args))
diff --git a/src/build/lint_source.py b/src/build/lint_source.py
index a5f8f2a..b1ba0ad 100755
--- a/src/build/lint_source.py
+++ b/src/build/lint_source.py
@@ -7,6 +7,7 @@
 import argparse
 import cPickle
 import collections
+import itertools
 import json
 import logging
 import os
@@ -140,8 +141,9 @@
 
   def run(self, path):
     command = self._build_command(path)
+    env = self._build_env()
     try:
-      subprocess.check_output(command, stderr=subprocess.STDOUT)
+      subprocess.check_output(command, stderr=subprocess.STDOUT, env=env)
       return True
     except OSError:
       logging.exception('Unable to invoke %s', command)
@@ -155,9 +157,20 @@
       return False
 
   def _build_command(self, path):
+    """Builds the commandline to run a subprocess, and returns it."""
     # All subclasses must implement this.
     raise NotImplementedError()
 
+  def _build_env(self):
+    """Builds the env dict for a subprocess, and returns it.
+
+    By default returns None, which means to use the current os.environ
+    as is.
+    """
+    # Subclasses can override this to set up an environment variable dict
+    # for the subprocess.
+    return None
+
 
 class CppLinter(CommandLineLinterBase):
   """Linter for C/C++ source/header files."""
@@ -242,6 +255,35 @@
             path]
 
 
+class TestConfigLinter(CommandLineLinterBase):
+  """Linter for src/integration_tests/expectations/"""
+  # This list must be kept in sync with suite_runner_config's evaluation
+  # context. See also suite_runner_config._read_test_config().
+  _BUILTIN_VARS = [
+      # Expectation flags.
+      'PASS', 'FAIL', 'TIMEOUT', 'NOT_SUPPORTED', 'LARGE', 'FLAKY',
+      'REQUIRES_OPENGL',
+
+      # OPTIONS is commonly used in the conditions.
+      'OPTIONS',
+  ]
+
+  def __init__(self):
+    super(TestConfigLinter, self).__init__(
+        'testconfig', target_groups=[_GROUP_PY])
+
+  def should_run(self, path):
+    return path.startswith('src/integration_tests/expectations')
+
+  def _build_command(self, path):
+    return ['src/build/flake8', '--ignore=E501', path]
+
+  def _build_env(self):
+    env = os.environ.copy()
+    env['PYFLAKES_BUILTINS'] = ','.join(TestConfigLinter._BUILTIN_VARS)
+    return env
+
+
 class CopyrightLinter(CommandLineLinterBase):
   """Linter to check copyright notice."""
 
@@ -264,6 +306,8 @@
 class UpstreamLinter(Linter):
   """Linter to check the contents of upstream note in mods/upstream."""
 
+  _VAR_PATTERN = re.compile(r'^\s*([A-Z_]+)\s*=(.*)$')
+
   def __init__(self):
     super(UpstreamLinter, self).__init__('upstreamlint')
 
@@ -272,31 +316,37 @@
     # run this linter.
     if open_source.is_open_source_repo():
       return False
-    return path.startswith(analyze_diffs.UPSTREAM_BASE_PATH + os.path.sep)
+    return path.startswith(analyze_diffs.UPSTREAM_BASE_PATH)
 
   def run(self, path):
-    # TODO(20150228): This implementation has a bug (if '=' is contained the
-    # description, the calculation of the number of description does not work
-    # properly.)
-    description_line_count = 0
-    vars = {}
     with open(path) as f:
       lines = f.read().splitlines()
-    for line in lines:
-      line = line.strip()
-      pos = line.find('=')
-      if pos != -1:
-        vars[line[:pos].strip()] = line[pos + 1:].strip()
-      elif line and not vars:
-        description_line_count += 1
 
-    if 'ARC_COMMIT' in vars and vars['ARC_COMMIT'] == '':
+    # The description is the leading lines before the variable definitions.
+    description = list(itertools.takewhile(
+        lambda line: not UpstreamLinter._VAR_PATTERN.search(line), lines))
+
+    # Parse variables from the trailing lines.
+    var_map = {}
+    for line in lines[len(description):]:
+      m = UpstreamLinter._VAR_PATTERN.search(line)
+      if not m:
+        continue
+      var_map[m.group(1)] = m.group(2).strip()
+
+    # If ARC_COMMIT is present, it must not be empty.
+    if var_map.get('ARC_COMMIT') == '':
       logging.error('Upstream file has empty commit info: %s', path)
       return False
-    if 'UPSTREAM' not in vars:
+
+    # The 'UPSTREAM' var must be present.
+    if 'UPSTREAM' not in var_map:
       logging.error('Upstream file has no upstream info: %s', path)
       return False
-    if description_line_count == 0 and not vars['UPSTREAM']:
+
+    # If the 'UPSTREAM' var is empty, there must be a (non-empty) description.
+    if (not var_map['UPSTREAM'] and
+        sum(1 for line in description if line.strip()) == 0):
       logging.error(
           'Upstream file has no upstream URL and no description: %s', path)
       return False
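
As an illustration, a hypothetical mods/upstream note that passes the
rewritten checks (free-form description lines come first, then VAR=value
lines; the URL and commit hash below are made up):

    Fix the foo allocator so that the bar path no longer leaks.

    UPSTREAM=https://codereview.chromium.org/123456
    ARC_COMMIT=0123456789abcdef0123456789abcdef01234567

If UPSTREAM= is left empty, the description above it must be non-empty.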
@@ -412,8 +462,9 @@
     files, if necessary.
   """
   runner = LinterRunner(
-      [CppLinter(), JsLinter(), PyLinter(), CopyrightLinter(),
-       UpstreamLinter(), LicenseLinter(), DiffLinter(output_dir)],
+      [CppLinter(), JsLinter(), PyLinter(), TestConfigLinter(),
+       CopyrightLinter(), UpstreamLinter(), LicenseLinter(),
+       DiffLinter(output_dir)],
       ignore_rule)
   result = True
   for path in target_file_list:
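
The PYFLAKES_BUILTINS override matters because expectation files refer to
names like PASS and OPTIONS without importing them; with the variable set,
pyflakes treats those names as builtins instead of reporting them as
undefined. A minimal sketch of what TestConfigLinter.run() ends up
executing (the expectations path is illustrative, and this assumes
src/build/flake8 wraps the stock flake8 tool):

    import os
    import subprocess

    env = os.environ.copy()
    env['PYFLAKES_BUILTINS'] = ('PASS,FAIL,TIMEOUT,NOT_SUPPORTED,LARGE,'
                                'FLAKY,REQUIRES_OPENGL,OPTIONS')
    subprocess.check_output(
        ['src/build/flake8', '--ignore=E501',
         'src/integration_tests/expectations/defaults'],
        stderr=subprocess.STDOUT, env=env)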
diff --git a/src/build/ninja_generator.py b/src/build/ninja_generator.py
index aee4aec..2d7a34f 100644
--- a/src/build/ninja_generator.py
+++ b/src/build/ninja_generator.py
@@ -14,6 +14,7 @@
 import os
 import StringIO
 import sys
+import traceback
 
 import analyze_diffs
 import build_common
@@ -279,6 +280,14 @@
   # Default implicit dependencies.
   _default_implicit = []
 
+  def __new__(cls, *args, **kwargs):
+    obj = super(NinjaGenerator, cls).__new__(cls, *args, **kwargs)
+    # Installs the debuginfo into the instance.
+    # [-1] is __new__().
+    # [-2] is the caller of ninja generators.
+    obj._debuginfo = traceback.format_stack()[-2]
+    return obj
+
   def __init__(self, module_name, ninja_name=None,
                host=False, generate_path=True, base_path=None,
                implicit=None, target_groups=None,
@@ -313,6 +322,9 @@
     # TODO(crbug.com/366751): remove notice_archive hack when possible
     self._notice_archive = None
 
+    for line in self._debuginfo.split('\n'):
+      self.comment(line)
+
   @staticmethod
   def emit_common_rules(n):
     n.variable('extract_test_list', NinjaGenerator._EXTRACT_TEST_LIST_PATH)
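
For context on the debuginfo hook above: traceback.format_stack() returns
the current stack as a list of formatted strings, so index [-2] is the
frame that invoked the constructor. A minimal standalone sketch of the
same pattern (the class name is illustrative):

    import traceback

    class Tracked(object):
      def __new__(cls, *args, **kwargs):
        obj = super(Tracked, cls).__new__(cls)
        # format_stack()[-1] is this __new__ frame; [-2] is the caller.
        obj._debuginfo = traceback.format_stack()[-2]
        return obj

    t = Tracked()
    print(t._debuginfo)  # e.g. 'File "example.py", line 12, in <module>...'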
diff --git a/src/build/run_integration_tests_test.py b/src/build/run_integration_tests_test.py
index 892cd8f..0879972 100755
--- a/src/build/run_integration_tests_test.py
+++ b/src/build/run_integration_tests_test.py
@@ -134,7 +134,7 @@
   def _make_flaky_suite_configuration(*unused, **kwunused):
     # We must return a new dictionary for every call.
     return dict(
-        flags=flags.FLAKY, bug=None, metadata=None, deadline=300,
+        flags=flags.PASS | flags.FLAKY, bug=None, metadata=None, deadline=300,
         test_order={}, suite_test_expectations={})
   return patch(
       'util.test.suite_runner_config._evaluate',
diff --git a/src/build/util/test/suite_runner_config.py b/src/build/util/test/suite_runner_config.py
index cabc763..c06d127 100644
--- a/src/build/util/test/suite_runner_config.py
+++ b/src/build/util/test/suite_runner_config.py
@@ -92,10 +92,19 @@
 
     # The value must be an ExpectationFlagSet or a dict.
     if isinstance(outer_expectation, flags.ExclusiveFlagSet):
+      assert '*' == outer_name or '*' not in outer_name, (
+          'suite_test_expectations pattern "%s" is not allowed. Only "*" is '
+          'allowed.' % outer_name)
+      assert outer_name.count('#') <= 1, (
+          'suite_test_expectations pattern "%s" is not allowed. The "#" '
+          'character is expected at most once.' % outer_name)
       continue
     assert isinstance(outer_expectation, dict), (
         'suite_test_expectations %s needs to be either a dict or an '
         'expectation flag set: %s' % (outer_name, outer_expectation))
+    assert '*' not in outer_name, (
+        'suite_test_expectations "%s" is not a valid name (no asterisks '
+        'allowed)' % outer_name)
     for inner_name, inner_expectation in outer_expectation.iteritems():
       # Inner dict must be a map from string to an expectation flag set.
       assert isinstance(inner_name, basestring), (
@@ -104,6 +113,13 @@
       assert isinstance(inner_expectation, flags.ExclusiveFlagSet), (
           'suite_test_expectations %s#%s is not an expectation flag set: '
           '%s' % (outer_name, inner_name, inner_expectation))
+      assert '*' == inner_name or '*' not in inner_name, (
+          'suite_test_expectations pattern "%s#%s" is not allowed. Only "%s#*" '
+          'is allowed.' % (outer_name, inner_name, outer_name))
+      assert '#' not in inner_name, (
+          'suite_test_expectations pattern "%s#%s" is not allowed. The "#" '
+          'character is expected at most once.' % (
+              outer_name, inner_name))
 
 
 def _validate_configurations(value):
@@ -202,6 +218,43 @@
   return result
 
 
+def _read_test_config(path):
+  """Reads the file, and eval() it with the test config context."""
+  if not os.path.exists(path):
+    return {}
+
+  with open(path) as stream:
+    content = stream.read()
+  test_context = {
+      '__builtin__': None,  # Do not inherit the current context.
+
+      # Expectation flags.
+      'PASS': flags.PASS,
+      'FAIL': flags.FAIL,
+      'TIMEOUT': flags.TIMEOUT,
+      'NOT_SUPPORTED': flags.NOT_SUPPORTED,
+      'LARGE': flags.LARGE,
+      'FLAKY': flags.FLAKY,
+      'REQUIRES_OPENGL': flags.REQUIRES_OPENGL,
+
+      # OPTIONS is commonly used for the conditions.
+      'OPTIONS': OPTIONS,
+  }
+
+  try:
+    raw_config = eval(content, test_context)
+  except Exception as e:
+    e.args = (e.args[0] + '\neval() failed: ' + path,) + e.args[1:]
+    raise
+
+  try:
+    _validate(raw_config)
+  except Exception as e:
+    e.args = (e.args[0] + '\nValidation failed: ' + path,) + e.args[1:]
+    raise
+  return raw_config
+
+
 def default_run_configuration():
   return _evaluate({
       'configurations': [{
@@ -229,24 +282,12 @@
 
 
 # TODO(crbug.com/384028): The class will eventually eliminate the need for
-# make_suite_run_configs and default_run_configuration above, and make it
-# easier to clean up _SuiteRunConfiguration too.
+# make_suite_run_configs and default_run_configuration above.
 class SuiteExpectationsLoader(object):
   def __init__(self, base_path):
     self._base_path = base_path
     self._cache = {}
 
-  def _get_raw_expectations_dict(self, suite_name):
-    suite_expectations_path = os.path.join(self._base_path, suite_name + '.py')
-    if not os.path.exists(suite_expectations_path):
-      return {}
-    with open(suite_expectations_path) as suite_expectations:
-      sys.dont_write_bytecode = True
-      config_module = imp.load_source('', suite_expectations_path,
-                                      suite_expectations)
-      sys.dont_write_bytecode = False
-      return config_module.get_expectations()
-
   def get(self, suite_name):
     parent_config = None
     components = suite_name.split('.')
@@ -254,7 +295,8 @@
       partial_name = '.'.join(components[:i]) if i else 'defaults'
       config = self._cache.get(partial_name)
       if config is None:
-        raw_config = self._get_raw_expectations_dict(partial_name)
+        raw_config = _read_test_config(
+            os.path.join(self._base_path, partial_name))
         config = _evaluate(raw_config, defaults=parent_config)
         self._cache[partial_name] = config
       parent_config = config
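
For reference, a hypothetical expectations file as consumed by
_read_test_config() (the file content is a single dict expression that is
eval()'d in the restricted context above; the shape follows the deleted
defaults.py below, and the suite/test names are made up):

    {
        'flags': PASS,
        'deadline': 300,  # Seconds.
        'suite_test_expectations': {
            'MyTestClass': {
                'testFeature': FLAKY,
                '*': PASS,
            },
            'OtherClass#testSlow': TIMEOUT,
        },
        'configurations': [{
            'enable_if': OPTIONS.weird(),
            'flags': FLAKY,
        }],
    }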
diff --git a/src/build/util/test/suite_runner_config_test.py b/src/build/util/test/suite_runner_config_test.py
index 37fce3f..6f69a1c 100755
--- a/src/build/util/test/suite_runner_config_test.py
+++ b/src/build/util/test/suite_runner_config_test.py
@@ -82,16 +82,21 @@
     self.assertEquals(PASS._mask, flags._mask)
 
 
+def _evaluate(raw_config, defaults=None):
+  return suite_runner_config._evaluate(raw_config, defaults=defaults)
+
+
+def _evaluate_test_expectations(suite_test_expectations):
+  return _evaluate(dict(suite_test_expectations=suite_test_expectations))[
+      'suite_test_expectations']
+
+
 class SuiteRunConfigInputTests(unittest.TestCase):
   """Tests the evaluation of the input configuration."""
 
-  @staticmethod
-  def _evaluate(raw_config, defaults=None):
-    return suite_runner_config._evaluate(raw_config, defaults=defaults)
-
   def test_defaults_applied(self):
-    result = self._evaluate({'flags': PASS},
-                            defaults={'bug': 'crbug.com/1234', 'flags': FAIL})
+    result = _evaluate({'flags': PASS},
+                       defaults={'bug': 'crbug.com/1234', 'flags': FAIL})
     self.assertEquals('crbug.com/1234', result['bug'])
     self.assertEquals(suite_runner_config._DEFAULT_OUTPUT_TIMEOUT,
                       result['deadline'])
@@ -99,22 +104,22 @@
     self.assertNotIn(FAIL, result['flags'])
 
   def test_simple_passing_test(self):
-    self.assertIn(PASS, self._evaluate(None)['flags'])
-    self.assertIn(PASS, self._evaluate({})['flags'])
-    self.assertIn(PASS, self._evaluate({'flags': PASS})['flags'])
+    self.assertIn(PASS, _evaluate(None)['flags'])
+    self.assertIn(PASS, _evaluate({})['flags'])
+    self.assertIn(PASS, _evaluate({'flags': PASS})['flags'])
 
   def test_simple_failing_test(self):
-    result = self._evaluate({'flags': FAIL})
+    result = _evaluate({'flags': FAIL})
     self.assertNotIn(PASS, result['flags'])
     self.assertIn(FAIL, result['flags'])
 
   def test_configured_to_fail_for_target(self):
-    result = self._evaluate({'configurations': [{'flags': FAIL | FLAKY}]})
+    result = _evaluate({'configurations': [{'flags': FAIL | FLAKY}]})
     self.assertNotIn(PASS, result['flags'])
     self.assertIn(FAIL, result['flags'])
     self.assertIn(FLAKY, result['flags'])
 
-    result = self._evaluate({'configurations': [{
+    result = _evaluate({'configurations': [{
         'enable_if': False,
         'flags': FAIL | FLAKY
     }]})
@@ -122,27 +127,73 @@
     self.assertNotIn(FAIL, result['flags'])
     self.assertNotIn(FLAKY, result['flags'])
 
-  def test_suite_test_expectations(self):
-    result = self._evaluate({
-        'suite_test_expectations': {
-            'foo': {'bar': FLAKY}
-        }
-    })
-    expectations = result['suite_test_expectations']
-    self.assertIn(PASS, expectations['foo#bar'])
-    self.assertIn(FLAKY, expectations['foo#bar'])
-    self.assertNotIn(FAIL, expectations['foo#bar'])
-    self.assertNotIn(NOT_SUPPORTED, expectations['foo#bar'])
+  def test_flat_suite_test_expectations(self):
+    result = _evaluate_test_expectations({'x': FLAKY})
+    self.assertEqual(PASS | FLAKY, result['x'])
+
+    result = _evaluate_test_expectations({'*': FLAKY})
+    self.assertEqual(PASS | FLAKY, result['*'])
+
+    # Only a simple '*' pattern is allowed.
+    # (Though this pattern still allows us to do a prefix match later, we
+    # disallow it.)
+    with self.assertRaisesRegexp(AssertionError, r'"x\*" is not allowed'):
+      _evaluate_test_expectations({'x*': PASS})
+
+    # Only a simple '*' pattern is allowed.
+    # (This allows us to do a simple prefix match later.)
+    with self.assertRaisesRegexp(AssertionError, r'"\*x" is not allowed'):
+      _evaluate_test_expectations({'*x': PASS})
+
+    # A "class#method" style name is allowed.
+    result = _evaluate_test_expectations({'x#y': FLAKY})
+    self.assertEqual(PASS | FLAKY, result['x#y'])
+
+    # Only one '#' is allowed.
+    with self.assertRaisesRegexp(AssertionError, r'"x#y#z" is not allowed'):
+      _evaluate_test_expectations({'x#y#z': PASS})
+
+  def test_hierarchical_suite_test_expectations(self):
+    result = _evaluate_test_expectations({'x': {'y': FLAKY}})
+    self.assertEqual(PASS | FLAKY, result['x#y'])
+
+    result = _evaluate_test_expectations({'x': {'*': FLAKY}})
+    self.assertEqual(PASS | FLAKY, result['x#*'])
+
+    # Only a simple '*' pattern is allowed.
+    # (Though this pattern still allows us to do a prefix match later, we
+    # disallow it.)
+    with self.assertRaisesRegexp(AssertionError, r'"x#y\*" is not allowed'):
+      _evaluate_test_expectations({'x': {'y*': FLAKY}})
+
+    # Only a simple '*' pattern is allowed.
+    # (This allows us to use a simple prefix match later)
+    with self.assertRaisesRegexp(AssertionError, r'"x#\*y" is not allowed'):
+      _evaluate_test_expectations({'x': {'*y': FLAKY}})
+
+    # If there is an asterisk wildcard, it must be in the leaf.
+    # (This allows us to do a simple prefix match later.)
+    with self.assertRaisesRegexp(AssertionError, r'"\*" is not a valid name'):
+      _evaluate_test_expectations({'*': {'x': FLAKY}})
+
+    # If there is an asterisk wildcard, it must be in the leaf.
+    # (This allows us to do a simple prefix match later.)
+    with self.assertRaisesRegexp(AssertionError, r'"\*" is not a valid name'):
+      _evaluate_test_expectations({'*': {'*': FLAKY}})
+
+    # Only one '#' is allowed.
+    with self.assertRaisesRegexp(AssertionError, r'"x#y#z" is not allowed'):
+      _evaluate_test_expectations({'x': {'y#z': FLAKY}})
 
   def test_suite_test_order(self):
-    result = self._evaluate({
+    result = _evaluate({
         'configurations': [{
-            'test_order': {'foo': 1}
+            'test_order': {'x': 1}
         }]
     })
     test_order = result['test_order']
-    self.assertIn('foo', test_order)
-    self.assertEquals(test_order['foo'], 1)
+    self.assertIn('x', test_order)
+    self.assertEquals(test_order['x'], 1)
 
 
 class SuiteRunConfigIntegrationTests(unittest.TestCase):
diff --git a/src/build/util/test/suite_runner_util.py b/src/build/util/test/suite_runner_util.py
index c2d8700..7b27b4d 100644
--- a/src/build/util/test/suite_runner_util.py
+++ b/src/build/util/test/suite_runner_util.py
@@ -7,6 +7,83 @@
 from util.test import suite_runner_config_flags as flags
 
 
+class _GlobalExpectationMatcher(object):
+  """Handles the special case of a single {"*": <expectation>} entry."""
+  def __init__(self, expectations):
+    self._expectation = expectations['*']
+    assert len(expectations) == 1, (
+        'Using the test expectation pattern "*" with anything else is '
+        'ambiguous. Use either "*" alone, or specify just the patterns:\n'
+        '    "%s"' % '"\n    "'.join(
+            name for name in expectations if name != '*'))
+
+  def __getitem__(self, name):
+    return self._expectation
+
+  def check_unused(self):
+    # "*" is allowed to match nothing.
+    pass
+
+
+class _ExpectationMatcher(object):
+  """Handles the general case of a list of exact or class names."""
+  def __init__(self, expectations):
+    for name in expectations:
+      assert name.count('#') == 1, (
+          'The test expectation pattern "%s" does not match the expected form. '
+          'A name like "class_name#test_name" is expected' % name)
+      if not name.endswith('#*'):
+        class_name = name.split('#', 1)[0]
+        assert (class_name + '#*') not in expectations, (
+            'The test expectation patterns "%s" and "%s#*" are ambiguous. '
+            'Mixing an exact match with a class name match is not allowed.' % (
+                name, class_name))
+
+    self._expectations = expectations
+    self._unused = set(expectations)
+
+  def __getitem__(self, name):
+    # If this triggers, we would need to handle it in a special way, and we
+    # might not be able to set an override expectation without additional work.
+    assert not name.endswith('#*'), (
+        'Test name "%s" ends with a reserved sequence "#*".' % name)
+
+    match_name = name
+    expectation = self._expectations.get(match_name)
+    if expectation is None:
+      # If the exact match failed to find an expectation, see if there is an
+      # expectation for the class name.
+      match_name = name.split('#', 1)[0] + '#*'
+      expectation = self._expectations.get(match_name)
+
+    # Mark the match as used.
+    if expectation is not None and match_name in self._unused:
+      self._unused.remove(match_name)
+
+    return expectation
+
+  def check_unused(self):
+    # Every name should have been used. If not, display a message so the
+    # configuration can be cleaned up.
+    assert not self._unused, (
+        'The expectations configuration includes patterns with no match:\n'
+        '    %s\n'
+        'Please remove the ones that are no longer used.' % (
+            '\n    '.join(sorted(self._unused))))
+
+
+def _merge(base_expectation, override_expectation, default_expectation):
+  # |default_expectation| must be on the left-hand side, because the '|'
+  # operator for the expectation set is asymmetric
+  # (cf. suite_runner_config_flags.py).
+  # TODO(crbug.com/437402): Clean this up.
+  if override_expectation:
+    return default_expectation | override_expectation
+  elif flags.PASS in default_expectation:
+    return default_expectation | base_expectation
+  else:
+    return default_expectation
+
+
 def merge_expectation_map(
     base_expectation_map, override_expectation_map, default_expectation):
   # In test cases, |base_expectation_map| is stubbed out as {'*': flags.PASS}.
@@ -19,31 +96,15 @@
   if base_expectation_map == {'*': flags.PASS}:
     return base_expectation_map
 
-  # First, check the integrity of our CTS configuration.
-  unknown_test_list = [test_name for test_name in override_expectation_map
-                       if test_name not in base_expectation_map]
-  assert not unknown_test_list, (
-      'Unknown tests found:\n%s' % '\n'.join(unknown_test_list))
+  if '*' in override_expectation_map:
+    overrides = _GlobalExpectationMatcher(override_expectation_map)
+  else:
+    overrides = _ExpectationMatcher(override_expectation_map)
 
-  # Then merge the expectation dicts as follows:
-  # 1) If the test's expectation is in |override_expectation_map|, choose it.
-  # 2) If there is default expectation and it is not PASS, choose it.
-  # 3) Otherwise, choose the original expectation.
-  # In any case, default expectation is applied again if specified,
-  # in order to expand some flags, such as LARGE or FLAKY.
-  if default_expectation and default_expectation == flags.PASS:
-    default_expectation = None
-
-  result = {}
-  for test_name, original_expectation in base_expectation_map.iteritems():
-    expectation = override_expectation_map.get(
-        test_name, default_expectation or original_expectation)
-    if default_expectation:
-      # |default_expectation| must be left hand side, because '|' operator for
-      # the expectation set is asymmetric. (cf suite_runner_config_flags.py).
-      # TODO(crbug.com/437402): Clean up this.
-      expectation = default_expectation | expectation
-    result[test_name] = expectation
+  result = dict((name, _merge(expectation, overrides[name],
+                              default_expectation))
+                for name, expectation in base_expectation_map.iteritems())
+  overrides.check_unused()
   return result
 
 
diff --git a/src/build/util/test/suite_runner_util_test.py b/src/build/util/test/suite_runner_util_test.py
index 753c0f6..60ab789 100755
--- a/src/build/util/test/suite_runner_util_test.py
+++ b/src/build/util/test/suite_runner_util_test.py
@@ -13,78 +13,146 @@
 class SuiteRunnerUtilTest(unittest.TestCase):
   def test_merge_expectation_map(self):
     base_map = {
-        'test1': flags.PASS,
-        'test2': flags.FAIL,
+        'c#test1': flags.PASS,
+        'c#test2': flags.FAIL,
     }
 
-    # Not overriden.
+    # With no override expectations, the base expectations should be used.
     self.assertEquals(
         {
-            'test1': flags.PASS,
-            'test2': flags.FAIL,
+            'c#test1': flags.PASS,
+            'c#test2': flags.FAIL,
         },
-        suite_runner_util.merge_expectation_map(base_map, {}, None))
+        suite_runner_util.merge_expectation_map(base_map, {}, flags.PASS))
 
-    # "'*': PASS" does not override anything.
+    # test1 should be overridden to FAIL, test2 should keep the base FAIL.
     self.assertEquals(
         {
-            'test1': flags.PASS,
-            'test2': flags.FAIL,
+            'c#test1': flags.FAIL,
+            'c#test2': flags.FAIL,
         },
         suite_runner_util.merge_expectation_map(
-            base_map, {}, flags.PASS))
+            base_map, {'c#test1': flags.FAIL}, flags.PASS))
 
-    # Both should be overriden.
+    # The pure flags from the default expectation should end up in the output
+    # expectation map.
     self.assertEquals(
         {
-            'test1': flags.NOT_SUPPORTED,
-            'test2': flags.NOT_SUPPORTED,
+            'c#test1': flags.PASS | flags.FLAKY,
+            'c#test2': flags.FAIL | flags.FLAKY,
         },
         suite_runner_util.merge_expectation_map(
-            base_map, {}, flags.NOT_SUPPORTED))
+            base_map, {}, flags.PASS | flags.FLAKY))
 
-    # Only "test1" should be overriden.
+    # If the default expectation is TIMEOUT, all the tests inside should be
+    # TIMEOUT too, unless a test-level override is given.
     self.assertEquals(
         {
-            'test1': flags.FAIL,
-            'test2': flags.FAIL,
+            'c#test1': flags.PASS | flags.FLAKY,
+            'c#test2': flags.TIMEOUT,
         },
         suite_runner_util.merge_expectation_map(
-            base_map, {'test1': flags.FAIL}, None))
+            base_map, {'c#test1': flags.PASS | flags.FLAKY}, flags.TIMEOUT))
 
-    # Only "test1" should be overriden.
+    # A suite level FLAKY flag should cause all tests to be marked FLAKY,
+    # regardless of whether the base or override expectation is used.
     self.assertEquals(
         {
-            'test1': flags.FAIL,
-            'test2': flags.FAIL,
+            'c#test1': flags.FAIL | flags.FLAKY | flags.LARGE,
+            'c#test2': flags.FAIL | flags.FLAKY,
         },
         suite_runner_util.merge_expectation_map(
-            base_map, {'test1': flags.FAIL}, flags.PASS))
+            base_map, {'c#test1': flags.FAIL | flags.LARGE},
+            flags.PASS | flags.FLAKY))
 
-    # Only "test1" should be overriden to FAIL, and "test2" to
-    # NOT_SUPPORTED (default value).
+    # A suite level LARGE flag should cause all tests to be marked LARGE,
+    # regardless of whether the base or override expectation is used.
     self.assertEquals(
         {
-            'test1': flags.FAIL,
-            'test2': flags.NOT_SUPPORTED,
+            'c#test1': flags.PASS | flags.LARGE,
+            'c#test2': flags.PASS | flags.LARGE,
         },
         suite_runner_util.merge_expectation_map(
-            base_map, {'test1': flags.FAIL}, flags.NOT_SUPPORTED))
-
-    self.assertEquals(
-        # Each should be overriden.
-        {
-            'test1': flags.FAIL,
-            'test2': flags.PASS,
-        },
-        suite_runner_util.merge_expectation_map(
-            base_map,
-            {'test1': flags.FAIL, 'test2': flags.PASS}, None))
+            base_map, {'c#test2': flags.PASS}, flags.PASS | flags.LARGE))
 
     with self.assertRaises(AssertionError):
       # Raise an exception if suite_expectations contains an unknown test name.
       suite_runner_util.merge_expectation_map(
-          base_map, {'test3': flags.PASS}, None)
+          base_map, {'c#test3': flags.PASS}, flags.PASS)
+
+  def _check_simple(self, expected, patterns):
+    self.assertEquals(
+        expected,
+        suite_runner_util.merge_expectation_map(
+            dict.fromkeys(expected, flags.PASS), patterns, flags.PASS))
+
+  def test_merge_star_matches_all(self):
+    # An entry of '*' should match all tests.
+    self._check_simple({
+        'x#m1': flags.TIMEOUT,
+        'y#m2': flags.TIMEOUT,
+        'z#m1': flags.TIMEOUT,
+    }, {
+        '*': flags.TIMEOUT,
+    })
+
+  def test_merge_class_name_matching(self):
+    # An entry like 'classname#*' should match "classname#any_method_name".
+    self._check_simple({
+        'x#m1': flags.FAIL,
+        'x#m2': flags.FAIL,
+        'x#m3': flags.FAIL,
+        'y#m1': flags.TIMEOUT,
+        'y#m2': flags.TIMEOUT,
+        'z#m1': flags.PASS,
+    }, {
+        'x#*': flags.FAIL,
+        'y#*': flags.TIMEOUT
+    })
+
+  def test_merge_all_patterns_used(self):
+    # The logic should verify that all patterns were matched.
+    with self.assertRaisesRegexp(
+        AssertionError, r'.*patterns with no match:\s+y#\*'):
+      self._check_simple({
+          'x#m1': flags.PASS,
+      }, {
+          'y#*': flags.FAIL,
+      })
+
+  def test_merge_star_can_match_no_tests(self):
+    # '*' as a special case is allowed to match no tests.
+    self._check_simple({}, {'*': flags.FAIL})
+
+  def test_merge_patterns_cannot_overlap(self):
+    # No two patterns can match the same test name.
+    with self.assertRaisesRegexp(
+        AssertionError,
+        r'The test expectation patterns "x#m1" and "x#\*" are ambiguous'):
+      self._check_simple({
+          'x#m1': flags.PASS,
+      }, {
+          'x#*': flags.FAIL,
+          'x#m1': flags.FAIL,
+      })
+
+  def test_merge_star_is_exclusive(self):
+    # A global '*' cannot be used with any other patterns.
+    with self.assertRaisesRegexp(
+        AssertionError,
+        (r'Using the test expectation pattern "\*" with anything else is '
+         r'ambiguous')):
+      self._check_simple({
+          'x#m1': flags.PASS,
+      }, {
+          '*': flags.TIMEOUT,
+          'x#*': flags.FAIL,
+          'x#m1': flags.FAIL,
+      })
 
   def test_create_gtest_filter_list(self):
     # Result should be empty for an empty list.
diff --git a/src/integration_tests/expectations/defaults.py b/src/integration_tests/expectations/defaults.py
deleted file mode 100644
index 5fefdad..0000000
--- a/src/integration_tests/expectations/defaults.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright 2014 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-from build_options import OPTIONS
-from util.test.suite_runner_config_flags import FLAKY
-from util.test.suite_runner_config_flags import PASS
-
-
-def get_expectations():
-  return {
-      'flags': PASS,
-      'suite_test_expectations': {},
-      'deadline': 300,  # Seconds
-      'configurations': [{
-          'enable_if': OPTIONS.weird(),
-          'flags': FLAKY,
-      }],
-      'metadata': {}
-  }
diff --git a/third_party/chromium-ppapi b/third_party/chromium-ppapi
index 395240b..e8e88da 160000
--- a/third_party/chromium-ppapi
+++ b/third_party/chromium-ppapi
@@ -1 +1 @@
-Subproject commit 395240b4e8e66980b4c1d2d173f29688c54c3ad0
+Subproject commit e8e88dadc0cdf7c8b1c510c171f5ad20b22b9980
diff --git a/third_party/native_client b/third_party/native_client
index cc52724..456d6e4 160000
--- a/third_party/native_client
+++ b/third_party/native_client
@@ -1 +1 @@
-Subproject commit cc52724dde60334c60fed1e248f4ebf858db86a5
+Subproject commit 456d6e41cd963607ca2ce966d6d9a8d9895ea973