Updated to arc-runtime-43.4410.330.0
diff --git a/.gitignore b/.gitignore
index c16872f..dd78846 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,6 +6,7 @@
 .ninja_log
 build.ninja
 botlogs/
+cache/
 out/
 # The mods/android/dalvik/vm/mterp/out directory is special and should not be
 # ignored. It contains machine-generated source for dalvikvm.
@@ -26,4 +27,4 @@
 .cproject
 .pydevproject
 .subversion
-internal
\ No newline at end of file
+internal
diff --git a/mods/android/bionic/libc/arch-nacl/syscalls/nacl_dirent.h b/mods/android/bionic/libc/arch-nacl/syscalls/nacl_dirent.h
new file mode 100644
index 0000000..a21c1ac
--- /dev/null
+++ b/mods/android/bionic/libc/arch-nacl/syscalls/nacl_dirent.h
@@ -0,0 +1,36 @@
+// ARC MOD TRACK "third_party/nacl-glibc/sysdeps/nacl/nacl_dirent.h"
+/*
+ * Copyright 2008 The Native Client Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can
+ * be found in the LICENSE file.
+ */
+
+#ifndef _NACL_DIRENT_H
+#define _NACL_DIRENT_H
+
+/* nacl_stat.h is required for nacl_abi_{ino_t,off_t} */
+#include <nacl_stat.h>
+
+/* From native_client/src/trusted/service_runtime/include/sys/dirent.h */
+/* TODO(mikhailt): extract the shared part of the dirent declarations to
+   native_client/src/shared */
+
+/* We need a way to define the maximum size of a name. */
+#ifndef MAXNAMLEN
+# ifdef NAME_MAX
+#  define MAXNAMLEN NAME_MAX
+# else
+#  define MAXNAMLEN 255
+# endif
+#endif
+
+/* dirent represents a single directory entry. */
+struct nacl_abi_dirent
+  {
+    nacl_abi_ino_t nacl_abi_d_ino;
+    nacl_abi_off_t nacl_abi_d_off;
+    uint16_t       nacl_abi_d_reclen;
+    char           nacl_abi_d_name[MAXNAMLEN + 1];
+  };
+
+#endif  /* _NACL_DIRENT_H */
diff --git a/mods/android/bionic/linker/linker.cpp b/mods/android/bionic/linker/linker.cpp
index 3c7c639..c4cc317 100644
--- a/mods/android/bionic/linker/linker.cpp
+++ b/mods/android/bionic/linker/linker.cpp
@@ -935,7 +935,12 @@
 #endif
 
 // ARC MOD END
-static int open_library_on_path(const char* name, const char* const paths[]) {
+// ARC MOD BEGIN bionic-linker-ndk-detection
+// Add |is_in_vendor_lib| argument.
+// TODO(crbug.com/364344): Remove /vendor/lib.
+static int open_library_on_path(const char* name, const char* const paths[],
+                                int* is_in_vendor_lib) {
+// ARC MOD END
   char buf[512];
   for (size_t i = 0; paths[i] != NULL; ++i) {
     int n = __libc_format_buffer(buf, sizeof(buf), "%s/%s", paths[i], name);
@@ -945,13 +950,24 @@
     }
     int fd = TEMP_FAILURE_RETRY(open(buf, O_RDONLY | O_CLOEXEC));
     if (fd != -1) {
+      // ARC MOD BEGIN bionic-linker-ndk-detection
+      // Unlike the MOD in load_library, we do not need to check files
+      // in /data/app-lib as this path is not in LD_LIBRARY_PATH.
+      if (!strcmp(paths[i], kVendorLibDir)) {
+        *is_in_vendor_lib = 1;
+      }
+      // ARC MOD END
       return fd;
     }
   }
   return -1;
 }
 
-static int open_library(const char* name) {
+// ARC MOD BEGIN bionic-linker-ndk-detection
+// Add |is_in_vendor_lib| argument.
+// TODO(crbug.com/364344): Remove /vendor/lib.
+static int open_library(const char* name, int* is_in_vendor_lib) {
+// ARC MOD END
   // ARC MOD BEGIN
   // Note on which code path is used for which case:
   //
@@ -996,7 +1012,7 @@
   // "out/target/nacl_i686_opt/lib/", not in "/lib". Also note that
   // open_library_on_path does nothing as gLdPaths is empty on
   // production ARC and therefore is fast.
-  return open_library_on_path(name, gLdPaths);
+  return open_library_on_path(name, gLdPaths, is_in_vendor_lib);
 
   // We have already tried /system/lib by __nacl_irt_open_resource
   // (before __inject_arc_linker_hooks) or __nacl_irt_open (after
@@ -1018,7 +1034,12 @@
 
 static soinfo* load_library(const char* name) {
     // Open the file.
-    int fd = open_library(name);
+    // ARC MOD BEGIN bionic-linker-ndk-detection
+    // Pass |is_in_vendor_lib| to open_library.
+    // TODO(crbug.com/364344): Remove /vendor/lib.
+    int is_in_vendor_lib = 0;
+    int fd = open_library(name, &is_in_vendor_lib);
+    // ARC MOD END
     if (fd == -1) {
         DL_ERR("library \"%s\" not found", name);
         return NULL;
@@ -1052,11 +1073,13 @@
       si->entry = header.e_entry + elf_reader.load_bias();
     if (!si->phdr)
       DL_ERR("Cannot locate a program header in \"%s\".", name);
-
+    // ARC MOD END
+    // ARC MOD BEGIN bionic-linker-ndk-detection
     // Set is_ndk appropriately. NDK libraries in APKs are in
     // /data/app-lib/<app-name>.
     const char kNdkLibraryDir[] = "/data/app-lib/";
-    si->is_ndk = (!strncmp(name, kNdkLibraryDir, sizeof(kNdkLibraryDir) - 1) ||
+    si->is_ndk = (is_in_vendor_lib ||
+                  !strncmp(name, kNdkLibraryDir, sizeof(kNdkLibraryDir) - 1) ||
                   !strncmp(name, kVendorLibDir, sizeof(kVendorLibDir) - 1));
 #endif
     // ARC MOD END
@@ -1326,6 +1349,8 @@
               lsi = si;
             } else {
 #if defined(__native_client__) || defined(BARE_METAL_BIONIC)
+            // ARC MOD END
+              // ARC MOD BEGIN bionic-linker-ndk-detection
               // If |g_resolve_symbol| is injected, try this first for NDK.
               if (si->is_ndk && g_resolve_symbol) {
                   sym_addr = reinterpret_cast<Elf32_Addr>(
@@ -1334,7 +1359,8 @@
                       goto symbol_found;
                   }
               }
-
+              // ARC MOD END
+              // ARC MOD BEGIN
               // Then look up the symbol following Android's default
               // semantics.
               s = soinfo_do_lookup(si, sym_name, &lsi, needed);
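
For reference, the classification rule these linker MODs implement reduces to a path check: a library is treated as NDK code when open_library_on_path() found it under kVendorLibDir (reported through |is_in_vendor_lib|), or when its name begins with one of the known NDK directories. A minimal Python sketch of that rule, assuming kVendorLibDir is the "/vendor/lib" prefix (the helper itself is illustrative, not part of the linker):

```python
# Illustrative sketch of the bionic-linker-ndk-detection rule; not linker code.
NDK_LIBRARY_DIR = '/data/app-lib/'  # NDK libraries unpacked from APKs
VENDOR_LIB_DIR = '/vendor/lib'      # assumed value of kVendorLibDir

def is_ndk_library(name, is_in_vendor_lib):
    # Mirrors how load_library() sets soinfo::is_ndk: either the open
    # succeeded under kVendorLibDir while scanning LD_LIBRARY_PATH, or
    # the requested name itself carries an NDK directory prefix.
    return (is_in_vendor_lib or
            name.startswith(NDK_LIBRARY_DIR) or
            name.startswith(VENDOR_LIB_DIR))

assert is_ndk_library('/data/app-lib/com.example/libfoo.so', False)
assert is_ndk_library('libbar.so', True)  # found via the /vendor/lib scan
assert not is_ndk_library('/system/lib/libc.so', False)
```

Libraries classified this way get their symbols resolved through g_resolve_symbol first, falling back to Android's default lookup semantics.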
diff --git a/mods/android/bionic/tests/dirent_test.cpp b/mods/android/bionic/tests/dirent_test.cpp
deleted file mode 100644
index cbda90b..0000000
--- a/mods/android/bionic/tests/dirent_test.cpp
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <gtest/gtest.h>
-
-#include <dirent.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <limits.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <algorithm>
-#include <set>
-#include <string>
-
-// ARC MOD BEGIN
-// TODO(crbug.com/348883): dirent.h is not working in unittests.
-#if !defined(__native_client__) && !defined(BARE_METAL_BIONIC)
-// ARC MOD END
-static void CheckProcSelf(std::set<std::string>& names) {
-  // We have a good idea of what should be in /proc/self.
-  ASSERT_TRUE(names.find(".") != names.end());
-  ASSERT_TRUE(names.find("..") != names.end());
-  ASSERT_TRUE(names.find("cmdline") != names.end());
-  ASSERT_TRUE(names.find("fd") != names.end());
-  ASSERT_TRUE(names.find("stat") != names.end());
-}
-
-TEST(dirent, scandir) {
-  // Get everything from /proc/self...
-  dirent** entries;
-  int entry_count = scandir("/proc/self", &entries, NULL, alphasort);
-  ASSERT_GE(entry_count, 0);
-
-  // Turn the directory entries into a set and vector of the names.
-  std::set<std::string> name_set;
-  std::vector<std::string> unsorted_name_list;
-  for (size_t i = 0; i < static_cast<size_t>(entry_count); ++i) {
-    name_set.insert(entries[i]->d_name);
-    unsorted_name_list.push_back(entries[i]->d_name);
-    free(entries[i]);
-  }
-  free(entries);
-
-  // No duplicates.
-  ASSERT_EQ(name_set.size(), unsorted_name_list.size());
-
-  // All entries sorted.
-  std::vector<std::string> sorted_name_list(unsorted_name_list);
-  std::sort(sorted_name_list.begin(), sorted_name_list.end());
-  ASSERT_EQ(sorted_name_list, unsorted_name_list);
-
-  CheckProcSelf(name_set);
-}
-
-TEST(dirent, fdopendir_invalid) {
-  ASSERT_TRUE(fdopendir(-1) == NULL);
-  ASSERT_EQ(EBADF, errno);
-
-  int fd = open("/dev/null", O_RDONLY);
-  ASSERT_NE(fd, -1);
-  ASSERT_TRUE(fdopendir(fd) == NULL);
-  ASSERT_EQ(ENOTDIR, errno);
-  close(fd);
-}
-
-TEST(dirent, fdopendir) {
-  int fd = open("/proc/self", O_RDONLY);
-  DIR* d = fdopendir(fd);
-  ASSERT_TRUE(d != NULL);
-  dirent* e = readdir(d);
-  ASSERT_STREQ(e->d_name, ".");
-  ASSERT_EQ(closedir(d), 0);
-
-  // fdopendir(3) took ownership, so closedir(3) closed our fd.
-  ASSERT_EQ(close(fd), -1);
-  ASSERT_EQ(EBADF, errno);
-}
-
-TEST(dirent, opendir_invalid) {
-  ASSERT_TRUE(opendir("/does/not/exist") == NULL);
-  ASSERT_EQ(ENOENT, errno);
-
-  ASSERT_TRUE(opendir("/dev/null") == NULL);
-  ASSERT_EQ(ENOTDIR, errno);
-}
-
-TEST(dirent, opendir) {
-  DIR* d = opendir("/proc/self");
-  ASSERT_TRUE(d != NULL);
-  dirent* e = readdir(d);
-  ASSERT_STREQ(e->d_name, ".");
-  ASSERT_EQ(closedir(d), 0);
-}
-
-TEST(dirent, closedir_invalid) {
-  DIR* d = NULL;
-  ASSERT_EQ(closedir(d), -1);
-  ASSERT_EQ(EINVAL, errno);
-}
-
-TEST(dirent, closedir) {
-  DIR* d = opendir("/proc/self");
-  ASSERT_TRUE(d != NULL);
-  ASSERT_EQ(closedir(d), 0);
-}
-
-TEST(dirent, readdir) {
-  DIR* d = opendir("/proc/self");
-  ASSERT_TRUE(d != NULL);
-  std::set<std::string> name_set;
-  errno = 0;
-  dirent* e;
-  while ((e = readdir(d)) != NULL) {
-    name_set.insert(e->d_name);
-  }
-  // Reading to the end of the directory is not an error.
-  // readdir(3) returns NULL, but leaves errno as 0.
-  ASSERT_EQ(0, errno);
-  ASSERT_EQ(closedir(d), 0);
-
-  CheckProcSelf(name_set);
-}
-
-TEST(dirent, readdir_r) {
-  DIR* d = opendir("/proc/self");
-  ASSERT_TRUE(d != NULL);
-  std::set<std::string> name_set;
-  errno = 0;
-  dirent storage;
-  dirent* e = NULL;
-  while (readdir_r(d, &storage, &e) == 0 && e != NULL) {
-    name_set.insert(e->d_name);
-  }
-  // Reading to the end of the directory is not an error.
-  // readdir_r(3) returns NULL, but leaves errno as 0.
-  ASSERT_EQ(0, errno);
-  ASSERT_EQ(closedir(d), 0);
-
-  CheckProcSelf(name_set);
-}
-// ARC MOD BEGIN
-#endif
-// ARC MOD END
-
-TEST(dirent, rewinddir) {
-  // ARC MOD BEGIN
-  // nonsfi_loader does not support getdents IRT and it aborts.
-  // TODO(crbug.com/468208): Consider supporting it.
-#if !defined(BARE_METAL_BIONIC)
-  // ARC MOD END
-  DIR* d = opendir("/proc/self");
-  ASSERT_TRUE(d != NULL);
-
-  // Get all the names once...
-  std::vector<std::string> pass1;
-  dirent* e;
-  while ((e = readdir(d)) != NULL) {
-    pass1.push_back(e->d_name);
-  }
-
-  // ...rewind...
-  rewinddir(d);
-
-  // ...and get all the names again.
-  std::vector<std::string> pass2;
-  while ((e = readdir(d)) != NULL) {
-    pass2.push_back(e->d_name);
-  }
-
-  ASSERT_EQ(closedir(d), 0);
-
-  // We should have seen the same names in the same order both times.
-  ASSERT_EQ(pass1.size(), pass2.size());
-  for (size_t i = 0; i < pass1.size(); ++i) {
-    ASSERT_EQ(pass1[i], pass2[i]);
-  }
-  // ARC MOD BEGIN
-#endif
-  // ARC MOD END
-}
diff --git a/mods/graphics_translation/egl/api_entries.cpp b/mods/graphics_translation/egl/api_entries.cpp
index deb6e48..f35a4cb 100644
--- a/mods/graphics_translation/egl/api_entries.cpp
+++ b/mods/graphics_translation/egl/api_entries.cpp
@@ -909,13 +909,34 @@
 void eglBeginFrame(EGLDisplay dpy, EGLSurface surface) {
   EGL_API_ENTRY("%p, %p", dpy, surface);
   EglDisplayImpl* display = EglDisplayImpl::GetDisplay(dpy);
-  if (!display->IsInitialized()) {
+  if (display == NULL || !display->IsInitialized()) {
+    SetError(EGL_BAD_DISPLAY);
     return;
   }
-  SurfacePtr sfc = display->GetSurfaces().Get(surface);
-  if (sfc) {
-    sfc->BeginFrame();
+  SurfacePtr s = display->GetSurfaces().Get(surface);
+  if (!s) {
+    SetError(EGL_BAD_SURFACE);
+    return;
   }
+  const int64_t timestamp = systemTime(SYSTEM_TIME_MONOTONIC);
+  s->SetTimestamp(timestamp);
+}
+
+EGLBoolean eglPresentationTimeANDROID(EGLDisplay dpy, EGLSurface surface,
+                                      EGLnsecsANDROID time) {
+  EGL_API_ENTRY("%p, %p, %lld", dpy, surface, time);
+  EglDisplayImpl* display = EglDisplayImpl::GetDisplay(dpy);
+  if (display == NULL || !display->IsInitialized()) {
+    SetError(EGL_BAD_DISPLAY);
+    return EGL_FALSE;
+  }
+  SurfacePtr s = display->GetSurfaces().Get(surface);
+  if (!s) {
+    SetError(EGL_BAD_SURFACE);
+    return EGL_FALSE;
+  }
+  s->SetTimestamp(time);
+  return EGL_TRUE;
 }
 
 EGLuint64NV eglGetSystemTimeNV() {
@@ -928,12 +949,6 @@
   return seconds_to_nanoseconds(1);
 }
 
-EGLBoolean eglPresentationTimeANDROID(EGLDisplay dpy, EGLSurface surface,
-                                      EGLnsecsANDROID time) {
-  LOG_ALWAYS_FATAL("Unimplemented");
-  return EGL_FALSE;
-}
-
 EGLint eglDupNativeFenceFDANDROID(EGLDisplay dpy, EGLSyncKHR sync) {
   LOG_ALWAYS_FATAL("Unimplemented");
   return 0;
diff --git a/mods/graphics_translation/egl/egl_surface_impl.h b/mods/graphics_translation/egl/egl_surface_impl.h
index 0133e5a..107a2fe 100644
--- a/mods/graphics_translation/egl/egl_surface_impl.h
+++ b/mods/graphics_translation/egl/egl_surface_impl.h
@@ -46,6 +46,7 @@
   virtual void BindTexImage() {}
   virtual void EnsureBufferReady() {}
   virtual void SetSwapInterval(int interval) {}
+  virtual void SetTimestamp(int64_t time) {}
   virtual EGLBoolean SwapBuffers() { return EGL_TRUE; }
 
   EGLSurface GetKey() const { return key_; }
diff --git a/mods/graphics_translation/egl/egl_window_surface_impl.cpp b/mods/graphics_translation/egl/egl_window_surface_impl.cpp
index d46dfd6..71475aa 100644
--- a/mods/graphics_translation/egl/egl_window_surface_impl.cpp
+++ b/mods/graphics_translation/egl/egl_window_surface_impl.cpp
@@ -123,9 +123,8 @@
   return EGL_TRUE;
 }
 
-void EglWindowSurfaceImpl::BeginFrame() {
-  int64_t timestamp = systemTime(SYSTEM_TIME_MONOTONIC);
-  native_window_set_buffers_timestamp(android_window_, timestamp);
+void EglWindowSurfaceImpl::SetTimestamp(int64_t time) {
+  native_window_set_buffers_timestamp(android_window_, time);
 }
 
 void EglWindowSurfaceImpl::EnsureBufferReady() {
diff --git a/mods/graphics_translation/egl/egl_window_surface_impl.h b/mods/graphics_translation/egl/egl_window_surface_impl.h
index 379b1d3..fac0390 100644
--- a/mods/graphics_translation/egl/egl_window_surface_impl.h
+++ b/mods/graphics_translation/egl/egl_window_surface_impl.h
@@ -36,7 +36,7 @@
   virtual EGLBoolean SwapBuffers();
 
   // Add a timestamp to the underlying window buffer.
-  virtual void BeginFrame();
+  virtual void SetTimestamp(int64_t time);
 
   // Specify the swap interval for the underlying window buffer.
   virtual void SetSwapInterval(int interval);
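
Taken together, the EGL changes funnel both timestamp sources through a single virtual hook: eglBeginFrame now stamps the surface with the current monotonic time, the newly implemented eglPresentationTimeANDROID forwards the caller-supplied time, and only the window-surface subclass pushes the value down to the native window. A hedged Python mock of that dispatch (names mirror the C++ above; the native window object is a stand-in):

```python
# Illustrative Python mock of the C++ timestamp dispatch; not translation code.
import time

class EglSurfaceImpl(object):
    def set_timestamp(self, t):
        pass  # base-class default: ignore, like SetTimestamp(int64_t) {}

class EglWindowSurfaceImpl(EglSurfaceImpl):
    def __init__(self, native_window):
        self.native_window = native_window

    def set_timestamp(self, t):
        # Stands in for native_window_set_buffers_timestamp(android_window_, t).
        self.native_window.buffers_timestamp = t

def egl_begin_frame(surface):
    # systemTime(SYSTEM_TIME_MONOTONIC) in the C++ code; approximated here.
    surface.set_timestamp(int(time.time() * 1e9))

def egl_presentation_time_android(surface, t):
    surface.set_timestamp(t)  # app-chosen EGLnsecsANDROID value
    return True
```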
diff --git a/src/build/DEPS.arc-int b/src/build/DEPS.arc-int
index 1073262..9350882 100644
--- a/src/build/DEPS.arc-int
+++ b/src/build/DEPS.arc-int
@@ -1 +1 @@
-48478db6615ed7ddda84b5745ca9fe97bf2f4175
+8633cfcb3b1fbddc15918c778f22615828b4813d
diff --git a/src/build/configure.py b/src/build/configure.py
index 0110e0f..41bf943 100755
--- a/src/build/configure.py
+++ b/src/build/configure.py
@@ -39,7 +39,8 @@
   hooks = {
       'pre-push': os.path.join(script_dir, 'git_pre_push.py'),
       'prepare-commit-msg': os.path.join(script_dir, 'git_prepare_commit.py'),
-      'commit-msg': staging.as_staging('gerrit/commit-msg')}
+      'commit-msg': 'third_party/gerrit/commit-msg',
+  }
   obsolete_hooks = ['pre-commit']  # Replaced by pre-push hook.
 
   git_hooks_dir = os.path.join(build_common.get_arc_root(), '.git', 'hooks')
diff --git a/src/build/download_common.py b/src/build/download_common.py
deleted file mode 100644
index a033c40..0000000
--- a/src/build/download_common.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright 2014 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import logging
-import os
-import sys
-import subprocess
-import tempfile
-import urllib
-
-import build_common
-from util import file_util
-
-
-class BaseGetAndUnpackArchiveFromURL(object):
-  """Handles downloading and extracting a package from a URL."""
-
-  # Override these in a derived class
-  NAME = None
-  DEPS_FILE = None
-  FINAL_DIR = None
-  STAGE_DIR = None
-  DOWNLOAD_NAME = None
-
-  @classmethod
-  def _unpack_update(cls, download_file):
-    raise NotImplementedError('Please implement this in a derived class.')
-
-  @classmethod
-  def _gsretrieve(cls, url, download_file):
-    try:
-      cmd = [build_common.get_gsutil_executable(), 'cp', url, download_file]
-      subprocess.check_call(cmd)
-    except subprocess.CalledProcessError:
-      logging.error('Cannot download ' + url + '. '
-                    'Did you make sure to run prodaccess?')
-      sys.exit(1)
-
-  @classmethod
-  def _fetch_and_stage_update(cls, url):
-    """Downloads an update file to a temp directory, and manages replacing the
-    final directory with the stage directory contents."""
-
-    result = True
-    try:
-      tmp_dir = tempfile.mkdtemp(suffix='.tmp', prefix=cls.DOWNLOAD_NAME)
-      try:
-        file_util.rmtree(cls.STAGE_DIR, ignore_errors=True)
-        os.mkdir(cls.STAGE_DIR)
-
-        download_file = os.path.join(tmp_dir, cls.DOWNLOAD_NAME)
-        if url.startswith('gs://'):
-          cls._gsretrieve(url, download_file)
-        else:
-          urllib.urlretrieve(url, download_file)
-
-        cls._unpack_update(download_file)
-
-        file_util.rmtree(cls.FINAL_DIR, ignore_errors=True)
-        os.rename(cls.STAGE_DIR, cls.FINAL_DIR)
-      finally:
-        file_util.rmtree(cls.STAGE_DIR, ignore_errors=True)
-        file_util.rmtree(tmp_dir, ignore_errors=True)
-    except Exception as e:
-      print e
-      result = False
-    return result
-
-  @classmethod
-  def post_update_work(cls):
-    """Override in derived classes to perform additional work after downloading
-    and unpacking the download."""
-    return True
-
-  @classmethod
-  def check_and_perform_update(cls):
-    """Checks the current and dependency stamps, and performs the update if
-    they are different."""
-
-    deps_file = file_util.read_metadata_file(cls.DEPS_FILE)
-    url = deps_file[0]
-    stamp_file = build_common.StampFile(
-        ','.join(deps_file), os.path.join(cls.FINAL_DIR, 'URL'))
-    if stamp_file.is_up_to_date():
-      return True
-
-    print 'INFO: Updating %s...' % cls.NAME
-    if not cls._fetch_and_stage_update(url):
-      print 'Failed to update %s.' % cls.NAME
-      return False
-
-    stamp_file.update()
-    result = cls.post_update_work()
-    print 'INFO: Done'
-    return result
diff --git a/src/build/download_cts_files.py b/src/build/download_cts_files.py
index 9c9a327..482d5df 100755
--- a/src/build/download_cts_files.py
+++ b/src/build/download_cts_files.py
@@ -5,50 +5,24 @@
 # found in the LICENSE file.
 
 import argparse
-import os
-import os.path
-import subprocess
 import sys
 
-import build_common
-import download_common
-
-
-_ROOT_DIR = build_common.get_arc_root()
-
-
-class BaseAndroidCTSDownload(download_common.BaseGetAndUnpackArchiveFromURL):
-  """Handles syncing a pre-built Android CTS zip file package."""
-
-  @classmethod
-  def _unpack_update(cls, download_file):
-    subprocess.check_call(['unzip', '-d', cls.STAGE_DIR, download_file])
-
-
-class AndroidCTSBaseFiles(BaseAndroidCTSDownload):
-  """The full ready-built .apk files and .xml files describing the tests."""
-  NAME = 'Android CTS'
-  DEPS_FILE = os.path.join(_ROOT_DIR, 'src', 'build', 'DEPS.android-cts')
-  FINAL_DIR = os.path.join(_ROOT_DIR, 'third_party', 'android-cts')
-  STAGE_DIR = os.path.join(_ROOT_DIR, 'third_party', 'android-cts.bak')
-  DOWNLOAD_NAME = 'cts.zip'
-
-
-class AndroidCTSMediaFiles(BaseAndroidCTSDownload):
-  """Approx 1Gb of data specific to the media tests."""
-  NAME = 'Android CTS Media'
-  DEPS_FILE = os.path.join(_ROOT_DIR, 'src', 'build', 'DEPS.android-cts-media')
-  FINAL_DIR = os.path.join(_ROOT_DIR, 'third_party', 'android-cts-media')
-  STAGE_DIR = os.path.join(_ROOT_DIR, 'third_party', 'android-cts-media.bak')
-  DOWNLOAD_NAME = 'cts_media.zip'
+from util import download_package_util
 
 
 def check_and_perform_updates(include_media=False):
-  success = True
-  success &= AndroidCTSBaseFiles.check_and_perform_update()
+  # Downloads the pre-built CTS packages and .xml files.
+  download_package_util.BasicCachedPackage(
+      'src/build/DEPS.android-cts',
+      'third_party/android-cts'
+  ).check_and_perform_update()
+
   if include_media:
-    success &= AndroidCTSMediaFiles.check_and_perform_update()
-  return not success
+    # Approx 1Gb of data specific to the media tests.
+    download_package_util.BasicCachedPackage(
+        'src/build/DEPS.android-cts-media',
+        'third_party/android-cts-media'
+    ).check_and_perform_update()
 
 
 def main():
@@ -60,7 +34,7 @@
 
   args = parser.parse_args()
 
-  return check_and_perform_updates(include_media=args.include_media)
+  check_and_perform_updates(include_media=args.include_media)
 
 
 if __name__ == '__main__':
diff --git a/src/build/download_naclports_files.py b/src/build/download_naclports_files.py
index c3518df..02803f6 100755
--- a/src/build/download_naclports_files.py
+++ b/src/build/download_naclports_files.py
@@ -4,37 +4,20 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-import os
-import os.path
-import subprocess
 import sys
 
-import build_common
-import download_common
-
-
-_ROOT_DIR = build_common.get_arc_root()
-
-
-class NaClPortsDownload(download_common.BaseGetAndUnpackArchiveFromURL):
-  """Handles syncing a pre-built python for NaCl zip file package."""
-  NAME = 'naclports python'
-  DEPS_FILE = os.path.join(_ROOT_DIR, 'src', 'build', 'DEPS.naclports-python')
-  FINAL_DIR = os.path.join(_ROOT_DIR, 'out', 'naclports-python')
-  STAGE_DIR = os.path.join(_ROOT_DIR, 'out', 'naclports-python.bak')
-  DOWNLOAD_NAME = 'python.zip'
-
-  @classmethod
-  def _unpack_update(cls, download_file):
-    subprocess.check_call(['unzip', '-d', cls.STAGE_DIR, download_file])
+from util import download_package_util
 
 
 def check_and_perform_updates():
-  return not NaClPortsDownload.check_and_perform_update()
+  download_package_util.BasicCachedPackage(
+      'src/build/DEPS.naclports-python',
+      'out/naclports-python'
+  ).check_and_perform_update()
 
 
 def main():
-  return check_and_perform_updates()
+  check_and_perform_updates()
 
 
 if __name__ == '__main__':
diff --git a/src/build/download_sdk_and_ndk.py b/src/build/download_sdk_and_ndk.py
index 64a20cd..2b9daa4 100755
--- a/src/build/download_sdk_and_ndk.py
+++ b/src/build/download_sdk_and_ndk.py
@@ -4,150 +4,149 @@
 # found in the LICENSE file.
 
 import argparse
-import io
 import logging
 import os
 import select
 import subprocess
 import sys
 
-import build_common
-import download_common
 import toolchain
-from util import file_util
-
-_ROOT_DIR = build_common.get_arc_root()
+from util import download_package_util
+from util import nonblocking_io
 
 
-class BaseAndroidCompressedTarDownload(
-    download_common.BaseGetAndUnpackArchiveFromURL):
-  """Handle syncing Android source code packages in compressed tar forms."""
-
-  @classmethod
-  def _unpack_update(cls, download_file):
-    subprocess.check_call(['tar', '--extract',
-                           '--use-compress-program=' + cls.COMPRESSION_PROGRAM,
-                           '--directory=' + cls.STAGE_DIR,
-                           '--strip-components=1',
-                           '--file=' + download_file])
+# TODO(lpique): This code really needs to use or otherwise be unified with
+# filtered_subprocess.py
+def _process_sdk_update_output_fragment(process, fragment):
+  # Look for the last newline, and split there
+  if '\n' in fragment:
+    completed, remaining = fragment.rsplit('\n', 1)
+    if completed:
+      sys.stdout.write(completed + '\n')
+  else:
+    remaining = fragment
+  if remaining.startswith('Do you accept the license '):
+    sys.stdout.write(remaining)
+    process.stdin.write('y\n')
+    remaining = ''
+  return remaining
 
 
-class AndroidNDKFiles(BaseAndroidCompressedTarDownload):
-  """The Android NDK."""
-  NAME = 'Android NDK'
-  DEPS_FILE = os.path.join(_ROOT_DIR, 'src', 'build', 'DEPS.ndk')
-  FINAL_DIR = os.path.join(_ROOT_DIR, 'third_party', 'ndk')
-  STAGE_DIR = os.path.join(_ROOT_DIR, 'third_party', 'ndk.bak')
-  DOWNLOAD_NAME = 'ndk.tar.bz2'
-  COMPRESSION_PROGRAM = 'pbzip2'
+# TODO(lpique): This code really needs to use or otherwise be unified with
+# filtered_subprocess.py
+def accept_android_license_subprocess(args):
+  logging.info('accept_android_license_subprocess: %s', args)
+  p = subprocess.Popen(
+      args, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+      stderr=subprocess.PIPE)
+  stdout = nonblocking_io.LineReader(p.stdout)
+  stderr = nonblocking_io.LineReader(p.stderr)
+  current_line = ''
+  while True:
+    select_streams = []
+    if not stdout.closed:
+      select_streams.append(stdout)
+    if not stderr.closed:
+      select_streams.append(stderr)
+    rset = []
+    if select_streams:
+      rset, _, _ = select.select(select_streams, [], [])
+
+    for stream in rset:
+      new_fragment = os.read(stream.fileno(), 4096)
+      if not new_fragment:
+        stream.close()
+        continue
+      current_line = _process_sdk_update_output_fragment(
+          p, current_line + new_fragment)
+    if p.poll() is not None:
+      while not stdout.closed:
+        stdout.read_full_line()
+      while not stderr.closed:
+        stderr.read_full_line()
+      break
+  if p.wait() != 0:
+    raise subprocess.CalledProcessError(p.returncode, args)
 
 
-class AndroidSDKFiles(BaseAndroidCompressedTarDownload):
+class AndroidSDKFiles(download_package_util.BasicCachedPackage):
   """The Android SDK."""
-  NAME = 'Android SDK'
-  DEPS_FILE = os.path.join(_ROOT_DIR, 'src', 'build', 'DEPS.android-sdk')
-  FINAL_DIR = os.path.join(_ROOT_DIR, 'third_party', 'android-sdk')
-  STAGE_DIR = os.path.join(_ROOT_DIR, 'third_party', 'android-sdk.bak')
-  DOWNLOAD_NAME = 'sdk.tgz'
-  COMPRESSION_PROGRAM = 'pigz'
-  # This tag is used for downloading the default version, which may be newer
-  # than the pinned version defined in toolchain.py.
-  SDK_BUILD_TOOLS_TAG = 'Android SDK Build-tools'
 
-  @classmethod
-  def post_update_work(cls):
-    api_tag = file_util.read_metadata_file(cls.DEPS_FILE)[1]
-    android_tool = os.path.join(cls.FINAL_DIR, 'tools', 'android')
-    packages = subprocess.Popen([android_tool, 'list', 'sdk'],
-                                stdout=subprocess.PIPE).communicate()[0]
-    filters = ['platform-tools']
-    for line in packages.split('\n'):
-      if api_tag in line or cls.SDK_BUILD_TOOLS_TAG in line:
-        ind = line.find('-')
-        if ind > 0:
-          filters.append(line[:ind].strip())
-    assert len(filters) >= 3, 'No "%s" or "%s" packages found' % (
-        api_tag, cls.SDK_BUILD_TOOLS_TAG)
+  _SDK_TOOLS_ID = 'tools'
+  _SDK_PLATFORM_TOOLS_ID = 'platform-tools'
 
-    return AndroidSDKFiles._update_sdk(android_tool, filters)
+  def __init__(self, *args, **kwargs):
+    super(AndroidSDKFiles, self).__init__(*args, **kwargs)
+    self.android_tool = os.path.join(
+        self.unpacked_linked_cache_path, 'tools', 'android')
 
-  @staticmethod
-  def _update_sdk(android_tool, filters, extra_args=None):
-    args = [android_tool, 'update', 'sdk', '--no-ui',
-            '--filter', ','.join(filters)]
-    if extra_args:
-      args.extend(extra_args)
+  def _update_component_by_id(self, update_component_ids):
+    if not update_component_ids:
+      return
 
-    p = subprocess.Popen(
-        args, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
-        stderr=subprocess.PIPE)
-    p.stdout = AndroidSDKFiles._reopen_without_buffering(p.stdout)
-    p.stderr = AndroidSDKFiles._reopen_without_buffering(p.stderr)
-    streams = [p.stdout, p.stderr]
-    current_line = ''
-    while True:
-      rset, _, _ = select.select([p.stdout, p.stderr], [], [])
-      for stream in streams:
-        if stream not in rset:
-          continue
-        new_fragment = os.read(stream.fileno(), 4096)
-        if not new_fragment:
-          stream.close()
-          continue
-        current_line = AndroidSDKFiles._process_sdk_update_output_fragment(
-            p, current_line + new_fragment)
-      if p.poll() is not None:
-        break
-    if p.wait() != 0:
-      raise subprocess.CalledProcessError(p.returncode, args)
+    logging.info('Updating Android SDK components: %s',
+                 ','.join(update_component_ids))
+    accept_android_license_subprocess([
+        self.android_tool, 'update', 'sdk', '--all', '--no-ui', '--filter',
+        ','.join(update_component_ids)])
 
-    return True
+    # Ensure the final directory properly links to the cache.
+    self.populate_final_directory()
 
-  @staticmethod
-  def _process_sdk_update_output_fragment(p, fragment):
-    # Look for the last newline, and split there
-    if '\n' in fragment:
-      completed, remaining = fragment.rsplit('\n', 1)
-      if completed:
-        sys.stdout.write(completed + '\n')
-    else:
-      remaining = fragment
-    if remaining.startswith('Do you accept the license '):
-      sys.stdout.write(remaining)
-      p.stdin.write('y\n')
-      remaining = ''
-    return remaining
+  def post_update_work(self):
+    """Perform some one time work after the SDK is first downloaded."""
+    # Perform a self-update on the SDK tools, to ensure we have the latest
+    # version. We do this update before downloading any other components so that
+    # the tools are up to date for even doing that fetch.
+    self._update_component_by_id([AndroidSDKFiles._SDK_TOOLS_ID])
 
-  @staticmethod
-  def _reopen_without_buffering(stream):
-    if not stream:
-      return None
-    new_stream = io.open(os.dup(stream.fileno()), mode='rb', buffering=0)
-    stream.close()
-    return new_stream
+  def _check_platform_tools_update(self, update_component_ids):
+    """Checks and performs update for the platform-tools."""
+    platform_tools_dir = os.path.join(
+        self.unpacked_linked_cache_path, 'build-tools')
+    if not os.path.exists(platform_tools_dir):
+      update_component_ids.append(AndroidSDKFiles._SDK_PLATFORM_TOOLS_ID)
 
-  @classmethod
-  def check_and_perform_pinned_build_tools_update(cls):
+  def _check_sdk_platform_update(self, update_component_ids):
+    """Checks and performs update for the sdk platform."""
+    pinned_version = toolchain.get_android_sdk_build_tools_pinned_version()
+    pinned_id = 'android-' + pinned_version.split('.')[0]
+    pinned_dir = os.path.join(
+        self.unpacked_linked_cache_path, 'platforms', pinned_id)
+    if not os.path.exists(pinned_dir):
+      update_component_ids.append(pinned_id)
+
+  def _check_pinned_build_tools_update(self, update_component_ids):
     """Checks and performs update for the pinned build-tools."""
     pinned_version = toolchain.get_android_sdk_build_tools_pinned_version()
     pinned_id = 'build-tools-' + pinned_version
-    pinned_dir = os.path.join(cls.FINAL_DIR, 'build-tools', pinned_version)
+    pinned_dir = os.path.join(
+        self.unpacked_linked_cache_path, 'build-tools', pinned_version)
     if not os.path.exists(pinned_dir):
-      android_tool = os.path.join(cls.FINAL_DIR, 'tools', 'android')
-      filters = [pinned_id]
-      # Add --all so that the bulid tools package is selected even if it's
-      # obsolete or newer than the installed version.
-      extra_args = ['--all']
-      return AndroidSDKFiles._update_sdk(android_tool, filters, extra_args)
-    return True
+      update_component_ids.append(pinned_id)
+
+  def check_and_perform_component_updates(self):
+    update_component_ids = []
+    self._check_platform_tools_update(update_component_ids)
+    self._check_sdk_platform_update(update_component_ids)
+    self._check_pinned_build_tools_update(update_component_ids)
+    self._update_component_by_id(update_component_ids)
 
 
 def check_and_perform_updates(include_media=False):
-  success = True
-  success &= AndroidNDKFiles.check_and_perform_update()
-  success &= AndroidSDKFiles.check_and_perform_update()
-  success &= AndroidSDKFiles.check_and_perform_pinned_build_tools_update()
-  return not success
+  download_package_util.BasicCachedPackage(
+      'src/build/DEPS.ndk',
+      'third_party/ndk',
+      unpack_method=download_package_util.unpack_tar_archive('pbzip2')
+  ).check_and_perform_update()
+
+  sdk = AndroidSDKFiles(
+      'src/build/DEPS.android-sdk',
+      'third_party/android-sdk',
+      unpack_method=download_package_util.unpack_tar_archive('pigz')
+  )
+  sdk.check_and_perform_update()
+  sdk.check_and_perform_component_updates()
 
 
 def main():
@@ -156,7 +155,7 @@
   args = parser.parse_args(sys.argv[1:])
   if args.verbose:
     logging.getLogger().setLevel(logging.INFO)
-  return check_and_perform_updates()
+  check_and_perform_updates()
 
 
 if __name__ == '__main__':
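
The fragment handling in _process_sdk_update_output_fragment above is easiest to see on a concrete run: complete lines are echoed as they arrive, a trailing partial line is carried over to the next read, and a partial line that is the license prompt gets answered with 'y'. A small self-contained check (FakeProcess is our stand-in, not part of the script):

```python
class FakeProcess(object):
    """Stand-in for subprocess.Popen exposing a recordable stdin."""
    def __init__(self):
        self.answers = []
        self.stdin = self

    def write(self, data):
        self.answers.append(data)

p = FakeProcess()
# One complete line plus a partial one: the partial tail is carried over.
rest = _process_sdk_update_output_fragment(p, 'Fetching...\nDownloadi')
assert rest == 'Downloadi' and p.answers == []
# The carried-over tail completes, then the license prompt arrives without
# a trailing newline; it is echoed and answered with 'y'.
rest = _process_sdk_update_output_fragment(
    p, rest + 'ng done.\nDo you accept the license ...')
assert rest == '' and p.answers == ['y\n']
```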
diff --git a/src/build/file_list_cache_test.py b/src/build/file_list_cache_test.py
index 6f32df5..f72078c 100755
--- a/src/build/file_list_cache_test.py
+++ b/src/build/file_list_cache_test.py
@@ -6,12 +6,14 @@
 
 """Unittest for file_list_cache.py."""
 
+import atexit
 import os
 import re
 import tempfile
 import unittest
 
 import file_list_cache
+from util import file_util
 
 
 def _touch(path):
@@ -32,7 +34,9 @@
 
 class FileListCacheUnittest(unittest.TestCase):
   def setUp(self):
-    os.chdir(tempfile.mkdtemp())
+    tmpdir = tempfile.mkdtemp()
+    os.chdir(tmpdir)
+    atexit.register(lambda: file_util.rmtree(tmpdir, ignore_errors=True))
 
     os.makedirs('foo/bar/baz')
     _touch('foo/bar/baz/hoge.cc')
diff --git a/src/build/interleaved_perftest.py b/src/build/interleaved_perftest.py
index 39a9182..c20bc1a 100755
--- a/src/build/interleaved_perftest.py
+++ b/src/build/interleaved_perftest.py
@@ -106,7 +106,8 @@
       '--confidence-level', type=int, metavar='<%>', default=90,
       help='Confidence level of confidence intervals.')
   compare_parser.add_argument(
-      '--launch-chrome-opt', action='append', default=[], metavar='OPTIONS',
+      '--launch-chrome-opt', action='append',
+      default=['--enable-nacl-list-mappings'], metavar='OPTIONS',
       help=('An Option to pass on to launch_chrome. Repeat as needed for any '
             'options to pass on.'))
   compare_parser.add_argument(
@@ -522,7 +523,7 @@
       significance = '[++]'
     else:
       significance = '[not sgfnt.]'
-    print '     %s: expt=%.0f%s, ctrl=%.0f%s, diffCI=(%+.0f%s,%+.0f%s) %s' % (
+    print '     %s: ctrl=%.0f%s, expt=%.0f%s, diffCI=(%+.0f%s,%+.0f%s) %s' % (
         prefix,
         ctrl_median, unit,
         expt_median, unit,
diff --git a/src/build/launch_chrome.py b/src/build/launch_chrome.py
index 3fcef56..063c266 100755
--- a/src/build/launch_chrome.py
+++ b/src/build/launch_chrome.py
@@ -7,7 +7,6 @@
 import atexit
 import logging
 import os
-import re
 import signal
 import subprocess
 import sys
@@ -19,7 +18,6 @@
 import launch_chrome_options
 import prep_launch_chrome
 import toolchain
-import util.statistics
 from build_options import OPTIONS
 from util import chrome_process
 from util import file_util
@@ -29,6 +27,7 @@
 from util import platform_util
 from util import remote_executor
 from util import signal_util
+from util import startup_stats
 from util.minidump_filter import MinidumpFilter
 from util.output_handler import AtfTestHandler
 from util.output_handler import ArcStraceFilter
@@ -126,111 +125,6 @@
     _USER_DATA_DIR = build_common.get_chrome_default_user_data_dir()
 
 
-class StartupStats:
-  STAT_VARS = ['pre_plugin_time_ms',
-               'pre_embed_time_ms',
-               'plugin_load_time_ms',
-               'on_resume_time_ms',
-               'app_virt_mem',
-               'app_res_mem']
-  DERIVED_STAT_VARS = ['boot_time_ms']
-  ALL_STAT_VARS = STAT_VARS + DERIVED_STAT_VARS
-
-  def __init__(self, num_runs=1):
-    self.num_runs = num_runs
-    for name in StartupStats.STAT_VARS:
-      setattr(self, name, None)
-
-    self.pre_plugin_perf_message_pattern = re.compile(
-        r'W/libplugin.*Time spent before plugin: (\d+)ms = (\d+)ms \+ (\d+)ms')
-    self.start_message_pattern = re.compile(
-        (r'\d+\.\d+s \+ (\d+\.\d+)s = \d+\.\d+s '
-         '\(\+(\d+)M virt\, \+(\d+)M res.*\): '
-         'Activity onResume .*'))
-
-  def check(self):
-    if not self.is_complete():
-      raise Exception('Not all stats were collected')
-
-  def is_complete(self):
-    if any(getattr(self, num) is None for num in StartupStats.STAT_VARS):
-      return False
-    return True
-
-  def parse_pre_plugin_perf_message(self, line):
-    # We use re.search instead of re.match to work around stdout mixing.
-    match = self.pre_plugin_perf_message_pattern.search(line)
-    if match:
-      print line
-      self.pre_plugin_time_ms = int(match.group(1))
-      self.pre_embed_time_ms = int(match.group(2))
-      self.plugin_load_time_ms = int(match.group(3))
-      return True
-    return False
-
-  def parse_app_start_message(self, line):
-    if self.on_resume_time_ms is not None:
-      return  # Ignore subsequent messages
-    # We use re.search instead of re.match to work around stdout mixing.
-    match = self.start_message_pattern.search(line)
-    if match:
-      self.on_resume_time_ms = int(float(match.group(1)) * 1000)
-      self.app_virt_mem = int(match.group(2))
-      self.app_res_mem = int(match.group(3))
-      return True
-    return False
-
-  @property
-  def boot_time_ms(self):
-    return self.pre_plugin_time_ms + self.on_resume_time_ms
-
-  def PrintRawStats(self):
-    rawstats = {key: [getattr(self, key)] for key in StartupStats.ALL_STAT_VARS}
-    print ('VRAWPERF=%s' % rawstats)
-
-  def PrintDetailedStats(self):
-    rawstats = {key: [] for key in StartupStats.ALL_STAT_VARS}
-    for num in StartupStats.ALL_STAT_VARS:
-      for run in getattr(self, 'raw'):
-        rawstats[num].append(getattr(run, num))
-      unit = 'ms' if num.endswith('_ms') else 'MB'
-      val = getattr(self, num)
-      p90 = getattr(self, num + '_90')
-      print ('VPERF=%s: %.2f%s 90%%=%.2f' %
-             (num, val, unit, p90))
-    print ('VRAWPERF=%s' % rawstats)
-
-  def Print(self):
-    # Note: since each value is the median for each data set, they are not
-    # guaranteed to add up.
-    print ('\nPERF=boot:%dms (preEmbed:%dms + pluginLoad:%dms + onResume:%dms),'
-           '\n     virt:%dMB, res:%dMB, runs:%d\n' % (
-               self.boot_time_ms,
-               self.pre_embed_time_ms,
-               self.plugin_load_time_ms,
-               self.on_resume_time_ms,
-               self.app_virt_mem,
-               self.app_res_mem,
-               self.num_runs))
-
-  @staticmethod
-  def compute_stats(stat_list):
-    # Skip incomplete stats (probably crashed during this run).  We collect
-    # enough runs to make up for an occasional missed run.
-    stat_list = filter(lambda s: s.is_complete(), stat_list)
-
-    result = StartupStats(len(stat_list))
-    setattr(result, 'raw', stat_list)
-    for num in StartupStats.ALL_STAT_VARS:
-      values = [getattr(s, num) for s in stat_list]
-      percentiles = util.statistics.compute_percentiles(values, (50, 90))
-
-      # Report median and 90th percentile.
-      setattr(result, num, percentiles[0])
-      setattr(result, num + '_90', percentiles[1])
-    return result
-
-
 def set_environment_for_chrome():
   # Prevent GTK from attempting to move the menu bar, which prints many warnings
   # about undefined symbol "menu_proxy_module_load"
@@ -252,8 +146,7 @@
 def _run_chrome_iterations(parsed_args):
   if not parsed_args.no_cache_warming:
     _maybe_wait_iteration_lock(parsed_args)
-    stats = StartupStats()
-    _run_chrome(parsed_args, stats, cache_warming=True)
+    stats = _run_chrome(parsed_args, cache_warming=True)
     if parsed_args.mode == 'perftest':
       total = (stats.pre_embed_time_ms + stats.plugin_load_time_ms +
                stats.on_resume_time_ms)
@@ -261,24 +154,22 @@
                                      stats.pre_embed_time_ms,
                                      stats.plugin_load_time_ms,
                                      stats.on_resume_time_ms)
-      stats.PrintRawStats()
+      startup_stats.print_raw_stats(stats)
       sys.stdout.flush()
 
   if parsed_args.iterations > 0:
     stat_list = []
     for i in xrange(parsed_args.iterations):
       _maybe_wait_iteration_lock(parsed_args)
-      stats = StartupStats()
-      sys.stderr.write('\nStarting Chrome, test run #%s\n' %
-                       (len(stat_list) + 1))
-      _run_chrome(parsed_args, stats)
-      stats.PrintRawStats()
+
+      sys.stderr.write('\nStarting Chrome, test run #%s\n' % (i + 1))
+      stats = _run_chrome(parsed_args)
+      startup_stats.print_raw_stats(stats)
       sys.stdout.flush()
       stat_list.append(stats)
-    stats = StartupStats.compute_stats(stat_list)
-    if stats.num_runs:
-      stats.PrintDetailedStats()
-    stats.Print()
+
+    startup_stats.print_aggregated_stats(stat_list)
+    sys.stdout.flush()
 
 
 def _check_apk_existence(parsed_args):
@@ -704,7 +595,7 @@
   chrome.wait(_CHROME_KILL_TIMEOUT)
 
 
-def _run_chrome(parsed_args, stats, **kwargs):
+def _run_chrome(parsed_args, **kwargs):
   if parsed_args.logcat is not None:
     # adb process will be terminated in the atexit handler, registered
     # in the signal_util.setup().
@@ -730,6 +621,7 @@
   with open(_CHROME_PID_PATH, 'w') as pid_file:
     pid_file.write('%d\n' % p.pid)
 
+  stats = startup_stats.StartupStats()
   output_handler = _select_output_handler(parsed_args, stats, p, **kwargs)
 
   # Wait for the process to finish or us to be interrupted.
@@ -743,6 +635,8 @@
   if status_code:
     sys.exit(status_code)
 
+  return stats
+
 
 if __name__ == '__main__':
   sys.exit(main())
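
The aggregation that moved into util/startup_stats keeps the shape of the deleted compute_stats above: drop incomplete runs (an occasional crash is expected), then report the median and 90th percentile for each metric. A hedged sketch of that reduction; percentile() is a nearest-rank stand-in for util.statistics.compute_percentiles, and the real print_aggregated_stats may differ in detail:

```python
def percentile(values, pct):
    # Nearest-rank stand-in for util.statistics.compute_percentiles.
    ordered = sorted(values)
    return ordered[int(round(pct / 100.0 * (len(ordered) - 1)))]

def aggregate_startup_stats(stat_list, keys):
    # Skip incomplete stats (probably crashed during that run); enough
    # iterations are collected to absorb an occasional missed one.
    complete = [s for s in stat_list if s.is_complete()]
    summary = {}
    for key in keys:
        values = [getattr(s, key) for s in complete]
        summary[key] = (percentile(values, 50), percentile(values, 90))
    return summary
```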
diff --git a/src/build/run_integration_tests.py b/src/build/run_integration_tests.py
index c4e8ee4..6c98d5f 100755
--- a/src/build/run_integration_tests.py
+++ b/src/build/run_integration_tests.py
@@ -19,7 +19,6 @@
 
 import argparse
 import collections
-import logging
 import multiprocessing
 import os
 import subprocess
@@ -472,7 +471,8 @@
   if args.plan_report:
     util.test.suite_results.initialize(test_driver_list, args, False)
     suite_results.report_expected_results(
-        driver.scoreboard for driver in test_driver_list)
+        driver.scoreboard for driver in sorted(test_driver_list,
+                                               key=lambda driver: driver.name))
     return 0
   elif args.list:
     list_fully_qualified_test_names(
diff --git a/src/build/sync_nacl_sdk.py b/src/build/sync_nacl_sdk.py
index 1538092..3c56957 100755
--- a/src/build/sync_nacl_sdk.py
+++ b/src/build/sync_nacl_sdk.py
@@ -6,116 +6,42 @@
 # Syncs the nacl sdk at a pinned version given in NACLSDK.json
 
 import argparse
-import filecmp
 import logging
 import os
-import shutil
-import subprocess
 import sys
-import time
 import urllib
 
 import build_common
-from util import file_util
 from util import logging_util
+from util import download_package_util
 
 
-_ROOT_DIR = build_common.get_arc_root()
-_NACL_SDK_DIR = os.path.join(_ROOT_DIR, 'third_party', 'nacl_sdk')
-_STAMP_PATH = os.path.join(_NACL_SDK_DIR, 'STAMP')
-_PINNED_MANIFEST = os.path.join(_ROOT_DIR, 'src', 'build', 'DEPS.naclsdk')
+_DEPS_FILE_PATH = 'src/build/DEPS.naclsdk'
 _NACL_MIRROR = 'https://commondatastorage.googleapis.com/nativeclient-mirror'
 _LATEST_MANIFEST_URL = _NACL_MIRROR + '/nacl/nacl_sdk/naclsdk_manifest2.json'
 _NACL_SDK_ZIP_URL = _NACL_MIRROR + '/nacl/nacl_sdk/nacl_sdk.zip'
 
 
-def _log_check_call(log_function, *args, **kwargs):
-  """Log each line of output from a command.
-
-  Args:
-    log_function: Function to call to log.
-    *args: Ordered args.
-    **kwargs: Keyword args.
-  """
-  p = subprocess.Popen(
-      *args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kwargs)
-  for line in p.stdout:
-    log_function(line.rstrip())
-  return_code = p.wait()
-  if return_code:
-    # Unlike subprocess.check_call, as we do not use 'args' kw-arg in this
-    # module, we do not check it.
-    cmd = args[0]
-    raise subprocess.CalledProcessError(return_code, cmd)
-
-
-def _roll_forward_pinned_manifest():
+@build_common.with_retry_on_exception
+def roll_pinned_manifest_forward():
   """Roll forward the pinned manifest to the latest version."""
-  logging.info('Rolling forward the pinned NaCl manifest...')
-
-  @build_common.with_retry_on_exception
-  def retrieve_manifest():
-    urllib.urlretrieve(_LATEST_MANIFEST_URL, _PINNED_MANIFEST)
-  retrieve_manifest()
+  logging.info('Rolling forward the pinned NaCl manifest.')
+  urllib.urlretrieve(_LATEST_MANIFEST_URL, _DEPS_FILE_PATH)
   logging.info('Done.')
 
 
-def _should_delete_nacl_sdk():
-  """Returns True if the SDK tree should be deleted."""
-  if not os.path.exists(_STAMP_PATH):
-    return False
-  # Returns true if _PINNED_MANIFEST is modified. This is necessary because
-  # './naclsdk update' does nothing when _PINNED_MANIFEST is reverted back
-  # to an older revision. We use filecmp.cmp() rather than parsing the manifest
-  # file. Since deleting the SDK is relatively cheap, and updating the SDK is
-  # as slow as installing it from scratch, just comparing files would be okay.
-  return not filecmp.cmp(_PINNED_MANIFEST, _STAMP_PATH)
-
-
-def _ensure_naclsdk_downloaded():
-  """Downloads the naclsdk script if necessary."""
-  if (not _should_delete_nacl_sdk() and
-      os.path.exists(os.path.join(_NACL_SDK_DIR, 'naclsdk'))):
-    return
-
-  # Deleting the obsolete SDK tree usually takes only <1s.
-  logging.info('Deleting old NaCl SDK...')
-  file_util.rmtree(_NACL_SDK_DIR, ignore_errors=True)
-
-  # Download sdk zip if needed. The zip file only contains a set of Python
-  # scripts that download the actual SDK. This step usually takes only <1s.
-  logging.info('Downloading nacl_sdk.zip...')
-  zip_content = build_common.download_content(_NACL_SDK_ZIP_URL)
-  # The archived path starts with nacl_sdk/, so we inflate the contents
-  # into the one level higher directory.
-  file_util.inflate_zip(zip_content, os.path.dirname(_NACL_SDK_DIR))
-  os.chmod(os.path.join(_NACL_SDK_DIR, 'naclsdk'), 0700)
-
-
-def _update_nacl_sdk():
-  """Syncs the NaCL SDK. based on pinned manifest."""
-
-  # In ./naclsdk execution, it sometimes fails due to the server-side or
-  # network errors. So, here we retry on failure sometimes.
+class NaClSDKFiles(download_package_util.BasicCachedPackage):
+  """Handles syncing the NaCl SDK."""
   @build_common.with_retry_on_exception
-  def internal():
-    start = time.time()
-    logging.info('Updating NaCl SDK...')
-    _log_check_call(
-        logging.info,
-        ['./naclsdk', 'update', '-U', 'file://' + _PINNED_MANIFEST,
-         '--force', 'pepper_canary'],
-        cwd=_NACL_SDK_DIR)
-    elapsed_time = time.time() - start
-    if elapsed_time > 1:
-      print 'NaCl SDK update took %0.3fs' % elapsed_time
-    logging.info('Done. [%fs]' % elapsed_time)
-  return internal()
-
-
-def _update_stamp():
-  """Update a stamp file for build tracking."""
-  shutil.copyfile(_PINNED_MANIFEST, _STAMP_PATH)
+  def post_update_work(self):
+    # Update based on pinned manifest. This part can be as slow as 1-2 minutes
+    # regardless of whether it is a fresh install or an update.
+    logging.info('%s: Updating naclsdk using manifest.', self.name)
+    download_package_util.execute_subprocess([
+        './naclsdk', 'update', '-U',
+        'file://' + os.path.join(build_common.get_arc_root(),
+                                 _DEPS_FILE_PATH),
+        '--force', 'pepper_canary'], cwd=self.unpacked_linked_cache_path)
 
 
 def main(args):
@@ -127,13 +53,15 @@
                       'latest..')
   args = parser.parse_args(args)
   logging_util.setup(verbose=args.verbose)
-
   if args.roll:
-    _roll_forward_pinned_manifest()
+    roll_pinned_manifest_forward()
 
-  _ensure_naclsdk_downloaded()
-  _update_nacl_sdk()
-  _update_stamp()
+  NaClSDKFiles(
+      _DEPS_FILE_PATH,
+      'third_party/nacl_sdk',
+      url=_NACL_SDK_ZIP_URL,
+      link_subdir='nacl_sdk'
+  ).check_and_perform_update()
 
 
 if __name__ == '__main__':
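
Both the manifest roll and post_update_work lean on build_common.with_retry_on_exception to paper over transient server and network failures. Its implementation is not shown in this change; a plausible sketch, assuming a fixed small retry budget (the attempt count and delay are our guesses):

```python
import functools
import logging
import time

def with_retry_on_exception(func, _max_attempts=3, _delay_seconds=1):
    # Hypothetical sketch of build_common.with_retry_on_exception; the real
    # decorator's retry count, backoff, and exception filter may differ.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        for attempt in xrange(1, _max_attempts + 1):
            try:
                return func(*args, **kwargs)
            except Exception:
                if attempt == _max_attempts:
                    raise
                logging.exception('%s failed (attempt %d), retrying',
                                  func.__name__, attempt)
                time.sleep(_delay_seconds)
    return wrapper
```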
diff --git a/src/build/update_open_source_repo.py b/src/build/update_open_source_repo.py
index 17a6147..8332ad2 100755
--- a/src/build/update_open_source_repo.py
+++ b/src/build/update_open_source_repo.py
@@ -21,7 +21,8 @@
 def _update_submodules(dest):
   logging.info('Submodule update')
   subprocess.check_call(['git', 'submodule', 'sync'], cwd=dest)
-  subprocess.check_call(['git', 'submodule', 'update', '--init'], cwd=dest)
+  subprocess.check_call(['git', 'submodule', 'update', '--init', '--force'],
+                        cwd=dest)
 
 
 def _clone_repo_if_needed(dest):
@@ -97,9 +98,13 @@
 def _reset_and_clean_repo(dest):
   logging.info('Resetting local open source repository')
   subprocess.check_call(['git', 'reset', '--hard'], cwd=dest)
+  subprocess.check_call(['git', 'submodule', 'foreach',
+                         'git', 'reset', '--hard'], cwd=dest)
   logging.info('Clearing untracked files from repository')
   # -f -f is intentional, this will get rid of untracked modules left behind.
   subprocess.check_call(['git', 'clean', '-f', '-f', '-d'], cwd=dest)
+  subprocess.check_call(['git', 'submodule', 'foreach',
+                         'git', 'clean', '-f', '-f', '-d', '-x'], cwd=dest)
 
 
 def _validate_args(args):
diff --git a/src/build/util/download_package_util.py b/src/build/util/download_package_util.py
new file mode 100644
index 0000000..a2f1dc3
--- /dev/null
+++ b/src/build/util/download_package_util.py
@@ -0,0 +1,377 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Functions for downloading and unpacking archives, with caching."""
+
+import contextlib
+import hashlib
+import json
+import logging
+import os
+import platform
+import re
+import shutil
+import stat
+import subprocess
+import tempfile
+import time
+import urllib
+
+import build_common
+import file_util
+
+
+# TODO(lpique): Remove this code. The buildbot should be detected by a command
+# line flag so that we do not have to update the regular expression below as our
+# build infrastructure changes.
+def _is_running_on_buildbot():
+  return bool(re.match('(build[0-9]+-a75.*)|'
+                       '(slave[0-9]+-c[0-9]+.*)', platform.node()))
+
+
+# TODO(lpique): Remove this code. The path should be passed in by a command line
+# argument.
+_DEFAULT_CACHE_BASE_PATH = (
+    os.path.join(build_common.get_arc_root(), 'cache')
+    if not _is_running_on_buildbot() else
+    '/b/build/slave/cache_dir/arc_downloads')
+_DEFAULT_CACHE_HISTORY_SIZE = 3
+
+
+class CacheHistory(object):
+  """Interface for the working with the history of a particular package."""
+
+  def __init__(self, name, base_path, history_size, contents):
+    self._name = name
+    self._base_path = base_path
+    self._history_size = history_size
+    self._contents = contents
+
+  def clean_old(self):
+    """Cleans out the least-recently used entries, deleting cache paths."""
+    while len(self._contents) > self._history_size:
+      path = self._contents.pop(0)
+      assert path.startswith(self._base_path)
+      logging.info('%s: Cleaning old cache entry %s', self._name,
+                   os.path.basename(path))
+      shutil.rmtree(path, ignore_errors=True)
+
+  def ensure_recent(self, path):
+    """Ensures the path is moved to a recently-used position in the history."""
+    if path in self._contents:
+      self._contents.remove(path)
+    self._contents.append(path)
+
+
+@contextlib.contextmanager
+def _persisted_cache_history(name, base_path, history_size):
+  """Persists the cache history using a context."""
+
+  # Ensure we have a cache directory
+  file_util.makedirs_safely(base_path)
+  cache_contents_path = os.path.join(base_path, 'contents.json')
+
+  # Load in the existing cache content history.
+  cache_contents = {}
+  if os.path.exists(cache_contents_path):
+    with open(cache_contents_path) as cache_contents_file:
+      try:
+        cache_contents = json.load(cache_contents_file)
+      except ValueError:
+        pass
+
+  # Get the history for this particular download, and yield it for use by the
+  # caller.
+  history = CacheHistory(
+      name, base_path, history_size,
+      cache_contents.setdefault('cache', {}).setdefault(name, []))
+
+  # If the user of this contextmanager generates an exception, this yield
+  # will effectively reraise the exception, and the rest of this function will
+  # not be executed since we do not have anything like a try...finally block
+  # here.
+  yield history
+
+  history.clean_old()
+
+  # Save out the modified cache content history.
+  with open(cache_contents_path, 'w') as cache_contents_file:
+    json.dump(cache_contents, cache_contents_file, indent=2, sort_keys=True)
+
+
+def execute_subprocess(cmd, cwd=None):
+  """Executes a subprocess, logging its output.
+
+  Since logging.info() is used if the process runs normally, the subprocess is
+  run quietly. However, should the process exit with a non-zero error code, its
+  output will be logged at a higher error level, allowing problems to be
+  diagnosed.
+  """
+  try:
+    output = subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)
+    for line in output.splitlines():
+      logging.info(line)
+  except subprocess.CalledProcessError as e:
+    logging.error('While running %s%s', cmd, (' in ' + cwd) if cwd else '')
+    if e.output:
+      logging.error(e.output)
+    raise
+
+
+def default_download_url():
+  """Creates a closure for downloading a file given a standard URL for it."""
+  def _download(url, destination_path):
+    urllib.urlretrieve(url, destination_path)
+  return _download
+
+
+def gsutil_download_url():
+  """Creates a closure for downloading a Google Cloud Storage file."""
+  def _download(url, destination_path):
+    execute_subprocess([
+        build_common.get_gsutil_executable(), 'cp', url, destination_path])
+  return _download
+
+
+def unpack_zip_archive(extra_args=None):
+  """Creates a closure which performs a simple unzip of an archive file."""
+  def _unpack(archive_path, destination_path):
+    args = ['unzip']
+    if extra_args:
+      args.extend(extra_args)
+    args.extend(['-d', destination_path, archive_path])
+    execute_subprocess(args)
+  return _unpack
+
+
+def unpack_tar_archive(compression_program=None):
+  """Creates a closure which performs a simple untar of an archive file."""
+  def _unpack(archive_path, destination_path):
+    cmd = ['tar', '--extract']
+    if compression_program:
+      cmd.append('--use-compress-program=' + compression_program)
+    cmd.extend(['--directory=' + destination_path, '--strip-components=1',
+                '--file=' + archive_path])
+    execute_subprocess(cmd)
+  return _unpack
+
+
+def unpack_self_extracting_archive():
+  """Creates a closure which unpacks a self-extracting archive."""
+  def _unpack(archive_path, destination_path):
+    os.chmod(archive_path, stat.S_IRWXU)
+    execute_subprocess([archive_path], cwd=destination_path)
+  return _unpack
+
+
+class BasicCachedPackage(object):
+  """Handles downloading and extracting a package from a URL."""
+
+  def __init__(self, deps_file_path, unpacked_final_path, url=None,
+               link_subdir=None, download_method=None, unpack_method=None,
+               cache_base_path=None, cache_history_size=None):
+    """Sets up the basic configuration for this package.
+
+    |deps_file_path| is the relative path to the DEPS.XXXX file to use for this
+    package.
+    |unpacked_final_path| is the path the unpacked package should appear at.
+    |url| is the URL to use to retrieve the download. If not specified (the
+    typical case), the URL is taken from the first line of the DEPS file.
+    |link_subdir| is the subdirectory of the unpacked package from the cache
+    that should appear at the final location. This is useful if the archive
+    unpacks to a subdirectory.
+    |download_method| is a function to call taking a pair of arguments, (URL,
+    archive_path), which should retrieve the package given its URL, and write
+    the contents as a file to archive_path.
+    |unpack_method| is a function to call taking a pair of arguments,
+    (archive_path, destination_path), to extract the archive file to the
+    indicated destination.
+    |cache_base_path| allows a derived class to choose the cache path
+    explicitly, but is really only meant for the unittest.
+    |cache_history_size| allows a derived class to choose the cache history
+    size, but it is really only meant for the unittest.
+    """
+    self._name = os.path.basename(unpacked_final_path)
+    self._cache_base_path = cache_base_path or _DEFAULT_CACHE_BASE_PATH
+    self._cache_history_size = cache_history_size or _DEFAULT_CACHE_HISTORY_SIZE
+    self._deps_file_path = os.path.join(
+        build_common.get_arc_root(), deps_file_path)
+    self._unpacked_final_path = os.path.join(
+        build_common.get_arc_root(), unpacked_final_path)
+    self._link_subdir = link_subdir or '.'
+    self._download_method = download_method or default_download_url()
+    self._unpack_method = unpack_method or unpack_zip_archive()
+    self._deps_file_lines = file_util.read_metadata_file(self._deps_file_path)
+    self._url = url or self._deps_file_lines[0]
+    self._unpacked_cache_path = (
+        self._get_cache_entry_path(self._deps_file_lines))
+
+  @property
+  def name(self):
+    """The name to use to identify the package."""
+    return self._name
+
+  @property
+  def cache_base_path(self):
+    """The base path to use for the cache."""
+    return self._cache_base_path
+
+  @property
+  def unpacked_final_path(self):
+    """The path to the final location for the package."""
+    return self._unpacked_final_path
+
+  @property
+  def unpacked_cache_path(self):
+    """The path to the location in the cache to unpack the package to."""
+    return self._unpacked_cache_path
+
+  @property
+  def unpacked_linked_cache_path(self):
+    return os.path.abspath(os.path.join(
+        self._unpacked_cache_path, self._link_subdir))
+
+  def _get_stampfile_content(self):
+    return ','.join(self._deps_file_lines)
+
+  def post_update_work(self):
+    """Override in derived classes to perform additional work after downloading
+    and unpacking the download."""
+    pass
+
+  def _get_cache_entry_path(self, deps_file_lines):
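+    # For illustration (hypothetical values): a package named 'foo' (the
+    # basename of its final path) whose DEPS file contains the single line
+    # "http://example.com/foo-v1.zip" is cached at
+    # <cache_base>/foo.<first 7 hex digits of sha1 of that line>, so any
+    # change to the DEPS content selects a different cache entry.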
+    return os.path.join(self.cache_base_path, '%s.%s' % (
+        self.name,
+        hashlib.sha1(','.join(deps_file_lines)).hexdigest()[:7]))
+
+  @build_common.with_retry_on_exception
+  def _download_package_with_retries(self, url, download_package_path):
+    self._download_method(url, download_package_path)
+
+  def _fetch_and_cache_package(self):
+    """Downloads an update file to a temp directory, and manages replacing the
+    final directory with the stage directory contents."""
+    try:
+      # Clean out the cache unpack location.
+      logging.info('%s: Cleaning %s', self._name, self._unpacked_cache_path)
+      file_util.rmtree(self._unpacked_cache_path, ignore_errors=True)
+      file_util.makedirs_safely(self._unpacked_cache_path)
+
+      # Setup the temporary location for the download.
+      tmp_dir = tempfile.mkdtemp()
+      try:
+        downloaded_package_path = os.path.join(tmp_dir, self._name)
+
+        # Download the package.
+        logging.info('%s: Downloading %s', self._name,
+                     downloaded_package_path)
+        self._download_package_with_retries(self._url, downloaded_package_path)
+
+        # Unpack it.
+        logging.info('%s: Unpacking %s to %s', self._name,
+                     downloaded_package_path, self._unpacked_cache_path)
+        self._unpack_method(downloaded_package_path,
+                            self._unpacked_cache_path)
+      finally:
+        file_util.rmtree(tmp_dir, ignore_errors=True)
+    except:
+      file_util.rmtree(self._unpacked_cache_path, ignore_errors=True)
+      raise
+
+  def populate_final_directory(self):
+    """Sets up the final location for the download from the cache."""
+    logging.info('%s: Setting up %s from cache %s', self._name,
+                 self._unpacked_final_path, self.unpacked_linked_cache_path)
+
+    file_util.makedirs_safely(self._unpacked_final_path)
+
+    # We create a directory, and make symbolic links for the first level
+    # of contents for backwards compatibility with an older version of
+    # this code, which could only handle FINAL_DIR being a directory.
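+    # For illustration (hypothetical package contents): if the cache entry
+    # holds bin/ and lib/, the final directory ends up containing
+    #   <final>/bin -> <cache_entry>/<link_subdir>/bin
+    #   <final>/lib -> <cache_entry>/<link_subdir>/lib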
+    for child in os.listdir(self.unpacked_linked_cache_path):
+      file_util.create_link(
+          os.path.join(self._unpacked_final_path, child),
+          os.path.join(self.unpacked_linked_cache_path, child),
+          overwrite=True)
+
+  # TODO(2015-03-20): This is here for backwards compatibility with previous
+  # code which unpacked directly to the FINAL_DIR (no cache), and should be
+  # removable after this date. Worst case, the package has to be re-downloaded
+  # into the cache directory when this compatibility code is removed.
+  def _populate_cache_from_non_symlinked_files(self, history):
+    final_url_path = os.path.join(self._unpacked_final_path, 'URL')
+    # See if there is an existing URL file.
+    if not os.path.isfile(final_url_path):
+      return
+
+    # Read the content of the URL file in the final directory to figure out
+    # how to move it into the cache (the DEPS hash may not match!).
+    url_file_content = file_util.read_metadata_file(final_url_path)
+    cache_path = self._get_cache_entry_path(url_file_content)
+    cache_link = os.path.abspath(os.path.join(cache_path, self._link_subdir))
+
+    # Ensure that this cache path is in our history as the most recent entry.
+    history.ensure_recent(cache_path)
+
+    # If there appears to be something already cached, then we do not need
+    # to do anything.
+    if os.path.isdir(cache_path):
+      return
+
+    # Move the existing unpacked download into the cache directory.
+    file_util.makedirs_safely(os.path.dirname(cache_link))
+    os.rename(self._unpacked_final_path, cache_link)
+
+  def check_and_perform_update(self):
+    """Checks the current and dependency stamps, and performs the update if
+    they are different."""
+    start = time.time()
+
+    with _persisted_cache_history(self._name, self._cache_base_path,
+                                  self._cache_history_size) as history:
+      # Maintain a recent used history of entries for this path.
+      history.ensure_recent(self._unpacked_cache_path)
+
+      # Temporarily populate the cache path from the final path, if we do not
+      # already have the contents cached.
+      self._populate_cache_from_non_symlinked_files(history)
+
+      logging.info('%s: Checking %s', self._name, self._unpacked_final_path)
+      stamp_file = build_common.StampFile(
+          self._get_stampfile_content(),
+          os.path.join(self._unpacked_final_path, 'URL'))
+      if stamp_file.is_up_to_date():
+        logging.info('%s: %s is up to date', self._name,
+                     self._unpacked_final_path)
+        return
+
+      logging.info('%s: %s is out of date', self._name,
+                   self._unpacked_final_path)
+      file_util.rmtree(self._unpacked_final_path, ignore_errors=True)
+
+      cached_stamp_file = build_common.StampFile(
+          self._get_stampfile_content(),
+          os.path.join(self.unpacked_linked_cache_path, 'URL'))
+      if not cached_stamp_file.is_up_to_date():
+        self._fetch_and_cache_package()
+
+        # We do this now so that the post_update_work step can run out of
+        # FINAL_DIR if it wants to.
+        self.populate_final_directory()
+
+        # Do any extra work needed after unpacking the package.
+        self.post_update_work()
+
+        # Write out the updated stamp file.
+        cached_stamp_file.update()
+
+      # Ensure the final directory properly links to the cache.
+      self.populate_final_directory()
+
+    total_time = time.time() - start
+    if total_time > 1:
+      print '%s update took %0.3fs' % (
+          self._name[:-5] if self._name.endswith('Files') else self._name,
+          total_time)
+    logging.info('%s: Done. [%0.3fs]', self._name, total_time)
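+
+
+# A minimal usage sketch (the DEPS path, final path, and compression program
+# below are hypothetical). check_and_perform_update() is a no-op when the URL
+# stamp file in the final directory already matches the DEPS content:
+#
+#   package = BasicCachedPackage(
+#       'src/build/DEPS.example', 'out/example',
+#       unpack_method=unpack_tar_archive('pigz'))
+#   package.check_and_perform_update()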
diff --git a/src/build/util/download_package_util_test.py b/src/build/util/download_package_util_test.py
new file mode 100755
index 0000000..ee7ce32
--- /dev/null
+++ b/src/build/util/download_package_util_test.py
@@ -0,0 +1,247 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tests for download_package_util."""
+
+import logging
+import os
+import shutil
+import tempfile
+import unittest
+
+from util import download_package_util
+
+
+class NoVersionFileError(Exception):
+  pass
+
+
+class TestPackageStub(download_package_util.BasicCachedPackage):
+  """Records information about what functions were called."""
+  def __init__(self, mock, deps_path, final_path, cache_base_path,
+               url=None, link_subdir=None):
+    super(TestPackageStub, self).__init__(
+        deps_path, final_path, url=url, link_subdir=link_subdir,
+        unpack_method=self.unpack_update, cache_base_path=cache_base_path,
+        cache_history_size=3)
+    self.mock = mock
+
+  def _download_package_with_retries(self, url, download_package_path):
+    return self.mock.retrieve(url, download_package_path)
+
+  def unpack_update(self, download_file, unpack_path):
+    return self.mock.unpack_update(download_file, unpack_path)
+
+  def post_update_work(self):
+    return self.mock.post_update_work(self.unpacked_cache_path)
+
+
+class NoUpdateMock(object):
+  def __init__(self, test):
+    self._test = test
+
+  def retrieve(self, url, download_file):
+    self._test.fail('Unexpected call to retrieve()')
+
+  def unpack_update(self, download_file, unpack_path):
+    self._test.fail('Unexpected call to unpack_update()')
+
+  def post_update_work(self, cache_path):
+    self._test.fail('Unexpected call to post_update_work()')
+
+
+class DownloadFailedMock(object):
+  def __init__(self, test):
+    self._test = test
+
+  def retrieve(self, url, download_file):
+    assert False
+
+  def unpack_update(self, download_file, unpack_path):
+    self._test.fail('Unexpected call to unpack_update()')
+
+  def post_update_work(self, cache_path):
+    self._test.fail('Unexpected call to post_update_work()')
+
+
+class UpdateMock(object):
+  def __init__(self, test, version, url, link_subdir=None):
+    self._test = test
+    self.retrieved = False
+    self.unpacked = False
+    self.post_update = False
+    self.version = version or 'unknown'
+    self.link_subdir = link_subdir
+    self.url = url
+
+  def retrieve(self, url, download_file):
+    self._test.assertFalse(self.retrieved)
+    self._test.assertEqual(url, self.url)
+    self.retrieved = True
+
+  def unpack_update(self, download_file, unpack_path):
+    self._test.assertFalse(self.unpacked)
+    self.unpacked = True
+    os.makedirs(os.path.join(unpack_path, self.link_subdir))
+
+  def post_update_work(self, cache_path):
+    self._test.assertFalse(self.post_update)
+    self.post_update = True
+
+
+class DownloadPackageUtilTest(unittest.TestCase):
+  def setUp(self):
+    logging.basicConfig(level=logging.DEBUG)
+    self._deps_file = os.path.join(tempfile.mkdtemp(), 'DEPS.testing')
+    self._final_dir = tempfile.mkdtemp()
+    self._cache_base_path = tempfile.mkdtemp()
+
+  def tearDown(self):
+    # Clean up temporary files.
+    shutil.rmtree(os.path.dirname(self._deps_file), ignore_errors=True)
+    shutil.rmtree(self._final_dir, ignore_errors=True)
+    shutil.rmtree(self._cache_base_path, ignore_errors=True)
+
+  def _setup_deps(self, version):
+    with open(self._deps_file, 'w') as f:
+      f.write(version or 'unknown')
+
+  def _create_stub(self, mock, url=None, link_subdir=None):
+    self._stub = TestPackageStub(
+        mock, self._deps_file, self._final_dir, self._cache_base_path, url=url,
+        link_subdir=link_subdir)
+    return self._stub
+
+  def _setup_cache(self, version):
+    version = version or 'unknown'
+    cache_path = self._stub._get_cache_entry_path([version])
+    os.makedirs(os.path.join(cache_path, self._stub._link_subdir))
+    with open(os.path.join(cache_path, self._stub._link_subdir, 'URL'),
+              'w') as f:
+      f.write(version)
+
+  def _setup_final(self, version):
+    version = version or 'unknown'
+    with open(os.path.join(self._stub.unpacked_final_path, 'URL'), 'w') as f:
+      f.write(version)
+
+  def _check_cache(self, version):
+    version = version or 'unknown'
+    cache_path = self._stub._get_cache_entry_path([version])
+    url_path = os.path.join(cache_path, self._stub._link_subdir, 'URL')
+    if not os.path.isfile(url_path):
+      raise NoVersionFileError(url_path)
+    with open(url_path) as f:
+      return version == f.read().strip()
+
+  def _check_final(self, version):
+    version = version or 'unknown'
+    url_path = os.path.join(self._stub.unpacked_final_path, 'URL')
+    if not os.path.isfile(url_path):
+      raise NoVersionFileError(url_path)
+    with open(url_path) as f:
+      return version == f.read().strip()
+
+  def test_no_update_needed(self):
+    self._setup_deps('v1')
+    stub = self._create_stub(NoUpdateMock(self), link_subdir='sub')
+    self._setup_cache('v1')
+
+    stub.check_and_perform_update()
+
+  def test_cache_populated_from_final(self):
+    self._setup_deps('v1')
+    stub = self._create_stub(NoUpdateMock(self), link_subdir='sub')
+    self._setup_final('v1')
+
+    stub.check_and_perform_update()
+    self.assertTrue(self._check_cache('v1'))
+
+  def test_final_populated_from_cache(self):
+    self._setup_deps('v2')
+    stub = self._create_stub(NoUpdateMock(self), link_subdir='sub')
+    self._setup_final('v1')
+    self._setup_cache('v2')
+
+    stub.check_and_perform_update()
+    self.assertTrue(self._check_final('v2'))
+
+  def test_cache_and_final_not_populated_from_failed_download(self):
+    mock = DownloadFailedMock(self)
+    self._setup_deps('v2')
+    stub = self._create_stub(mock, link_subdir='sub')
+    self._setup_final('v1')
+
+    self.assertRaises(AssertionError, stub.check_and_perform_update)
+    self.assertRaises(NoVersionFileError, self._check_cache, 'v2')
+    self.assertRaises(NoVersionFileError, self._check_final, 'v2')
+    self.assertRaises(NoVersionFileError, self._check_final, 'v1')
+
+  def test_cache_and_final_populated_from_download(self):
+    url = 'http://example.com/test_download.zip'
+    mock = UpdateMock(self, 'v1', url, link_subdir='sub')
+    self._setup_deps('v1')
+    stub = self._create_stub(mock, url=url, link_subdir='sub')
+
+    stub.check_and_perform_update()
+
+    self.assertTrue(mock.retrieved)
+    self.assertTrue(mock.unpacked)
+    self.assertTrue(mock.post_update)
+    self.assertTrue(self._check_cache('v1'))
+    self.assertTrue(self._check_final('v1'))
+
+  def test_cache_files_limited_correctly(self):
+    def _rollTo(version):
+      mock = UpdateMock(self, version, version, link_subdir='sub')
+      self._setup_deps(version)
+      stub = self._create_stub(mock, link_subdir='sub')
+      stub.check_and_perform_update()
+
+    def _rollToCached(version):
+      mock = NoUpdateMock(self)
+      self._setup_deps(version)
+      stub = self._create_stub(mock, link_subdir='sub')
+      stub.check_and_perform_update()
+
+    _rollTo('v1')
+    self.assertTrue(self._check_cache('v1'))
+    self.assertTrue(self._check_final('v1'))
+
+    _rollTo('v2')
+    self.assertTrue(self._check_cache('v1'))
+    self.assertTrue(self._check_cache('v2'))
+    self.assertTrue(self._check_final('v2'))
+
+    _rollTo('v3')
+    self.assertTrue(self._check_cache('v1'))
+    self.assertTrue(self._check_cache('v2'))
+    self.assertTrue(self._check_cache('v3'))
+    self.assertTrue(self._check_final('v3'))
+
+    _rollTo('v4')
+    self.assertRaises(NoVersionFileError, self._check_cache, 'v1')
+    self.assertTrue(self._check_cache('v2'))
+    self.assertTrue(self._check_cache('v3'))
+    self.assertTrue(self._check_cache('v4'))
+    self.assertTrue(self._check_final('v4'))
+
+    _rollToCached('v2')
+    self.assertRaises(NoVersionFileError, self._check_cache, 'v1')
+    self.assertTrue(self._check_cache('v2'))
+    self.assertTrue(self._check_cache('v3'))
+    self.assertTrue(self._check_cache('v4'))
+    self.assertTrue(self._check_final('v2'))
+
+    _rollTo('v5')
+    self.assertRaises(NoVersionFileError, self._check_cache, 'v1')
+    self.assertTrue(self._check_cache('v2'))
+    self.assertRaises(NoVersionFileError, self._check_cache, 'v3')
+    self.assertTrue(self._check_cache('v4'))
+    self.assertTrue(self._check_cache('v5'))
+    self.assertTrue(self._check_final('v5'))
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/src/build/util/jdb_util.py b/src/build/util/jdb_util.py
index ffbfcc6..2a795dd 100644
--- a/src/build/util/jdb_util.py
+++ b/src/build/util/jdb_util.py
@@ -3,15 +3,23 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+import os
 import re
 import staging
 import subprocess
 import sys
-import time
 
 import eclipse_connector
 
 
+# The list of Java source file root paths.
+# TODO(crbug.com/470798): Find proper paths.
+_JAVA_SOURCE_PATHS = (
+    'android/libcore/luni/src/main/java',
+    'android/frameworks/base/core/java',
+)
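+# Each root above is expanded into three -sourcepath entries (the staging
+# path plus the corresponding ./mods and ./third_party paths), so the two
+# roots listed yield six components joined by ':' in the jdb command built
+# in _start_emacsclient_jdb.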
+
+
 def maybe_launch_jdb(jdb_port, jdb_type):
   # If jdb option is specified and jdb_port exists. Now it is time to
   # check which Java debugger to start.
@@ -23,7 +31,7 @@
 
 class JdbHandlerAdapter(object):
   _WAITING_JDB_CONNECTION_PATTERN = re.compile(
-      r'Waiting for JDWP connection on port (\d+)')
+      r'Hello ARC, start jdb please at port (\d+)')
 
   def __init__(self, base_handler, jdb_port, jdb_type):
     self._base_handler = base_handler
@@ -37,18 +45,19 @@
     self._base_handler.handle_stdout(line)
 
   def _start_emacsclient_jdb(self):
-    source_path = ':'.join([
-        staging.as_staging('android/frameworks/base/core/java/'),
-        # Add the real paths too to let emacs know these paths too are
-        # candidates for setting breakpoints etc.
-        './mods/android/frameworks/base/core/java/',
-        './third_party/android/frameworks/base/core/java/'])
-    command = ['emacsclient', '-e',
-               ('(jdb "jdb -attach localhost:%i '
-                '-sourcepath%s ")') % (self._jdb_port, source_path)]
-    # TODO(crbug.com/469037): Try to wait until JDWP port is
-    # really available. There should be a better way?
-    time.sleep(0.6)
+    source_paths = []
+    for path in _JAVA_SOURCE_PATHS:
+      source_paths.extend([
+          staging.as_staging(path),
+          # Add the real paths too to let emacs know these paths too are
+          # candidates for setting breakpoints etc.
+          os.path.join('./mods', path),
+          os.path.join('./third_party', path),
+      ])
+    command = [
+        'emacsclient', '-e',
+        '(jdb "jdb -attach localhost:%d -sourcepath%s")' %
+        (self._jdb_port, ':'.join(source_paths))]
     subprocess.Popen(command)
 
   def handle_stderr(self, line):
diff --git a/src/build/util/output_handler.py b/src/build/util/output_handler.py
index ffaddfd..e1db2b8 100644
--- a/src/build/util/output_handler.py
+++ b/src/build/util/output_handler.py
@@ -248,6 +248,16 @@
     return False
 
 
+_PRE_PLUGIN_PERF_MESSAGE_PATTERN = re.compile(
+    r'W/libplugin.*Time spent before plugin: '
+    r'(?P<pre_plugin>\d+)ms = (?P<pre_embed>\d+)ms \+ (?P<plugin_load>\d+)ms')
+
+_START_MESSAGE_PATTERN = re.compile(
+    r'\d+\.\d+s \+ (?P<on_resume>\d+\.\d+)s = \d+\.\d+s '
+    r'\(\+(?P<virt_mem>\d+)M virt, \+(?P<res_mem>\d+)M res.*\): '
+    r'Activity onResume .*')
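+
+# Illustrative log lines the patterns match (values hypothetical):
+#   W/libplugin( 1234): Time spent before plugin: 500ms = 200ms + 300ms
+#   0.80s + 0.45s = 1.25s (+120M virt, +40M res t=...): Activity onResume ...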
+
+
 class PerfTestHandler(object):
   def __init__(self, parsed_args, stats, chrome_process, cache_warming=False):
     self.parsed_args = parsed_args
@@ -308,10 +318,10 @@
 
   def handle_stderr(self, line):
     self._handle_line_common(line)
-    if self.stats.parse_pre_plugin_perf_message(line):
+    if self._parse_pre_plugin_perf_message(line):
       return
 
-    if self.stats.parse_app_start_message(line):
+    if self._parse_app_start_message(line):
       dash_line = '--------------------------------'
       sys.stderr.write(dash_line + '\n')
       sys.stderr.write(line)
@@ -329,6 +339,32 @@
         return
       self._finish()
 
+  def _parse_pre_plugin_perf_message(self, line):
+    # We use re.search instead of re.match to work around stdout mixing.
+    match = _PRE_PLUGIN_PERF_MESSAGE_PATTERN.search(line)
+    if not match:
+      return False
+
+    self.stats.pre_plugin_time_ms = int(match.group('pre_plugin'))
+    self.stats.pre_embed_time_ms = int(match.group('pre_embed'))
+    self.stats.plugin_load_time_ms = int(match.group('plugin_load'))
+    return True
+
+  def _parse_app_start_message(self, line):
+    if self.stats.on_resume_time_ms is not None:
+      # If the message is already found, ignore subsequent messages.
+      return False
+
+    # We use re.search instead of re.match to work around stdout mixing.
+    match = _START_MESSAGE_PATTERN.search(line)
+    if not match:
+      return False
+
+    self.stats.on_resume_time_ms = int(float(match.group('on_resume')) * 1000)
+    self.stats.app_virt_mem = int(match.group('virt_mem'))
+    self.stats.app_res_mem = int(match.group('res_mem'))
+    return True
+
   def _finish(self):
     if not self.any_errors:
       print '[  PASSED  ]'
diff --git a/src/build/util/startup_stats.py b/src/build/util/startup_stats.py
new file mode 100644
index 0000000..fdd1d75
--- /dev/null
+++ b/src/build/util/startup_stats.py
@@ -0,0 +1,89 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+
+from util import statistics
+
+_RAW_STAT_VARS = ['pre_plugin_time_ms',
+                  'pre_embed_time_ms',
+                  'plugin_load_time_ms',
+                  'on_resume_time_ms',
+                  'app_virt_mem',
+                  'app_res_mem']
+_DERIVED_STAT_VARS = ['boot_time_ms']
+_ALL_STAT_VARS = _RAW_STAT_VARS + _DERIVED_STAT_VARS
+
+
+class StartupStats(object):
+  def __init__(self):
+    for name in _RAW_STAT_VARS:
+      setattr(self, name, None)
+
+  def is_complete(self):
+    """Returns True when all variables are assigned."""
+    return all(getattr(self, name) is not None
+               for name in _RAW_STAT_VARS)
+
+  @property
+  def boot_time_ms(self):
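+    # Derived stat: time spent before the plugin started plus time until
+    # Activity onResume. For example (hypothetical timings),
+    # pre_plugin_time_ms=500 and on_resume_time_ms=700 give boot_time_ms=1200.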
+    return self.pre_plugin_time_ms + self.on_resume_time_ms
+
+
+def _build_raw_stats(stats_list):
+  """Builds a dict from stat key to a list of stat values."""
+  raw_stats = collections.defaultdict(list)
+  for stats in stats_list:
+    assert stats.is_complete()
+    for name in _ALL_STAT_VARS:
+      raw_stats[name].append(getattr(stats, name))
+  return raw_stats
+
+
+def print_raw_stats(stats):
+  """Prints the VRAWPERF= line of the given |stats|."""
+  print 'VRAWPERF=%s' % dict(_build_raw_stats([stats]))
+
+
+def print_aggregated_stats(stats_list):
+  """Prints the aggregated stats of given |stats_list|."""
+  # Skip incomplete stats (probably crashed during this run).  We collect
+  # enough runs to make up for an occasional missed run.
+  complete_stats = [stats for stats in stats_list if stats.is_complete()]
+
+  raw_stats = _build_raw_stats(complete_stats)
+
+  # Builds a dict from key to (median, 90-percentile).
+  aggregated_stats = {
+      key: statistics.compute_percentiles(value, (50, 90))
+      for key, value in raw_stats.iteritems()
+  }
+
+  # If there is more than one complete run, print the VPERF= and VRAWPERF=
+  # lines.
+  if len(complete_stats) > 1:
+    # Print VPERF= lines.
+    for name in _ALL_STAT_VARS:
+      unit = 'ms' if name.endswith('_ms') else 'MB'
+      median, p90 = aggregated_stats[name]
+      print 'VPERF=%(name)s: %(median).2f%(unit)s 90%%=%(p90).2f' % {
+          'name': name,
+          'unit': unit,
+          'median': median,
+          'p90': p90,
+      }
+
+    # Print VRAWPERF= line.
+    print 'VRAWPERF=%s' % dict(raw_stats)
+
+  # Note: since each value is the median for each data set, they are not
+  # guaranteed to add up.
+  print ('\nPERF=boot:%dms (preEmbed:%dms + pluginLoad:%dms + onResume:%dms),'
+         '\n     virt:%dMB, res:%dMB, runs:%d\n' % (
+             aggregated_stats['boot_time_ms'][0],
+             aggregated_stats['pre_embed_time_ms'][0],
+             aggregated_stats['plugin_load_time_ms'][0],
+             aggregated_stats['on_resume_time_ms'][0],
+             aggregated_stats['app_virt_mem'][0],
+             aggregated_stats['app_res_mem'][0],
+             len(complete_stats)))
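+
+
+# For illustration (hypothetical numbers), three complete runs could print:
+#   VPERF=boot_time_ms: 1200.00ms 90%=1350.00
+#   VRAWPERF={'boot_time_ms': [1150, 1200, 1350], ...}
+#   PERF=boot:1200ms (preEmbed:200ms + pluginLoad:300ms + onResume:700ms),
+#        virt:120MB, res:40MB, runs:3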
diff --git a/src/build/util/test/suite_results.py b/src/build/util/test/suite_results.py
index ad92292..d0aec2c 100644
--- a/src/build/util/test/suite_results.py
+++ b/src/build/util/test/suite_results.py
@@ -339,7 +339,7 @@
   def _write_list(self, writer, mode, label, tests):
     if len(tests):
       writer.header(mode, '%s (%d)' % (label, len(tests)))
-      writer.write(mode, '%s\n' % ('\n'.join(tests)))
+      writer.write(mode, '%s\n' % ('\n'.join(sorted(tests))))
 
   def _write_single_stat(self, writer, mode, value, label):
     mode = mode if value else _NORMAL
@@ -388,7 +388,8 @@
   def _write_results(self):
     if self._test_driver_list:
       self._writer.header(_INFO, 'Results')
-      for test_driver in self._test_driver_list:
+      for test_driver in sorted(self._test_driver_list,
+                                key=lambda driver: driver.name):
         sb = test_driver.scoreboard
         label = _pretty_label(test_driver.name)
         self._writer.write(_NORMAL, label)
@@ -437,7 +438,7 @@
     for key in _EXPECTED_STATUS_STRING:
       self._reverse_writer.write(
           _STATUS_MODE[key] if counts[key] else _NORMAL,
-          " % 4d %s " % (counts[key], _TERSE_STATUS[key]))
+          " % 5d %s " % (counts[key], _TERSE_STATUS[key]))
     self._writer.write(_STATUS_MODE[status],
                        "[%- 20s]" % _STATUS_STRING[status])
     self._writer.write(_NORMAL, " %s\n" % name)
diff --git a/src/common/irt_wrapper_util.h b/src/common/irt_wrapper_util.h
new file mode 100644
index 0000000..a4aa8f9
--- /dev/null
+++ b/src/common/irt_wrapper_util.h
@@ -0,0 +1,29 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Utility macros to create IRT wrappers.
+
+#ifndef COMMON_IRT_WRAPPER_UTIL_H_
+#define COMMON_IRT_WRAPPER_UTIL_H_
+
+// A macro to wrap an IRT function. Note that the macro does not wrap IRT
+// calls made by the Bionic loader. For example, wrapping mmap with DO_WRAP
+// does not hook the mmap IRT calls in phdr_table_load_segments() in
+// mods/android/bionic/linker/linker_phdr.c. This is because the loader has
+// its own set of IRT function pointers that are not visible from non-linker
+// code.
+#define DO_WRAP(name)                                   \
+  __nacl_irt_ ## name ## _real = __nacl_irt_ ## name;   \
+  __nacl_irt_ ## name  = __nacl_irt_ ## name ## _wrap
+
+// A macro to define an IRT wrapper and a function pointer to store
+// the real IRT function. Note that initializing __nacl_irt_<name>_real
+// with __nacl_irt_<name> by default is not a good idea because it requires
+// a static initializer.
+#define IRT_WRAPPER(name, ...)                              \
+  extern int (*__nacl_irt_ ## name)(__VA_ARGS__);           \
+  static int (*__nacl_irt_ ## name ## _real)(__VA_ARGS__);  \
+  int (__nacl_irt_ ## name ## _wrap)(__VA_ARGS__)
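+
+// For illustration, with a hypothetical wrapped function |close|,
+// IRT_WRAPPER(close, int fd) expands to roughly:
+//   extern int (*__nacl_irt_close)(int fd);
+//   static int (*__nacl_irt_close_real)(int fd);
+//   int (__nacl_irt_close_wrap)(int fd)
+// and DO_WRAP(close) then saves the original __nacl_irt_close pointer into
+// __nacl_irt_close_real before installing __nacl_irt_close_wrap.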
+
+#endif  // COMMON_IRT_WRAPPER_UTIL_H_
diff --git a/src/common/tests/nacl_getdents_wrapper.h b/src/common/tests/nacl_getdents_wrapper.h
new file mode 100644
index 0000000..11eb1b7
--- /dev/null
+++ b/src/common/tests/nacl_getdents_wrapper.h
@@ -0,0 +1,78 @@
+// ARC MOD TRACK "third_party/nacl-glibc/sysdeps/nacl/nacl_getdents_wrapper.c"
+// ARC MOD BEGIN
+// We changed the extension of this file from .c to .h so
+// NinjaGenerator does not find this file automatically, and made it
+// C++ so this can be included from unittest_irthook.cc.
+// ARC MOD END
+
+/* The purpose of this file is to be #included by generic readdir
+   implementations.  */
+
+static const int d_name_shift = offsetof (DIRENT_TYPE, d_name) -
+    offsetof (struct nacl_abi_dirent, nacl_abi_d_name);
+
+/* Calls __nacl_irt_getdents and converts resulting buffer to glibc abi.
+   This wrapper is required since glibc abi for DIRENT_TYPE differs from
+   struct nacl_abi_dirent. */
+// ARC MOD BEGIN
+// Do not use glibc specific macros.
+static ssize_t nacl_getdents_wrapper(int fd, char *buf, size_t buf_size)
+// ARC MOD END
+{
+  /* __nacl_irt_getdents fills buffer with overlapped structures
+     nacl_abi_dirent. Right after d_reclen bytes of one structure end the next
+     structure begins, and so on. For example if nacl_abi_dirent contains 14
+     bytes long string in d_name field then it will occupy 10+14 bytes in the
+     buffer. This wrapper fills buf so that every DIRENT_TYPE occupies in it
+     one byte more than corresponding nacl_abi_dirent in buffer filled by nacl
+     syscall. To avoid overwhelming of buf it is necessary to make nacl_buf
+     smaller. It is ok to make nacl_buf_size equal buf_size * 0.9 because
+     minimal size of nacl_abi_dirent is 12 bytes. */
+  int nacl_buf_size = buf_size - buf_size / 10 - 1;
+  char nacl_buf[nacl_buf_size];
+  size_t nbytes;
+  // ARC MOD BEGIN
+  // Add a cast and use __nacl_irt_getdents_real instead of
+  // __nacl_irt_getdents, which is hooked.
+  int rv = __nacl_irt_getdents_real(fd,
+                                    reinterpret_cast<struct dirent*>(nacl_buf),
+                                    nacl_buf_size, &nbytes);
+  // ARC MOD END
+  struct nacl_abi_dirent *nacl_dp;
+  DIRENT_TYPE *dp;
+  size_t nacl_offset = 0;
+  int offset = 0;
+  int d_name_len;
+
+  if (rv > 0)
+    {
+      // ARC MOD BEGIN UPSTREAM nacl-getdents-return
+      errno = rv;
+      return -1;
+      // ARC MOD END UPSTREAM
+    }
+  while (nacl_offset < nbytes)
+    {
+      nacl_dp = (struct nacl_abi_dirent *) (nacl_buf + nacl_offset);
+      dp = (DIRENT_TYPE *) (buf + offset);
+      // ARC MOD BEGIN
+      // Add a cast.
+      if (static_cast<size_t>(offset + nacl_dp->nacl_abi_d_reclen +
+                              d_name_shift) >= buf_size)
+      // ARC MOD END
+        {
+          errno = EINVAL;
+          return -1;
+        }
+      dp->d_ino = nacl_dp->nacl_abi_d_ino;
+      dp->d_off = nacl_dp->nacl_abi_d_off;
+      dp->d_reclen = nacl_dp->nacl_abi_d_reclen + d_name_shift;
+      dp->d_type = 0;
+      d_name_len =  nacl_dp->nacl_abi_d_reclen -
+          offsetof (struct nacl_abi_dirent, nacl_abi_d_name);
+      memcpy (dp->d_name, nacl_dp->nacl_abi_d_name, d_name_len);
+      offset += dp->d_reclen;
+      nacl_offset += nacl_dp->nacl_abi_d_reclen;
+    }
+  return offset;
+}
diff --git a/src/common/tests/unittest_irthook.cc b/src/common/tests/unittest_irthook.cc
new file mode 100644
index 0000000..baa11a8
--- /dev/null
+++ b/src/common/tests/unittest_irthook.cc
@@ -0,0 +1,79 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <dirent.h>
+#include <errno.h>
+#include <irt_syscalls.h>
+#include <nacl_dirent.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "common/alog.h"
+#include "common/irt_wrapper_util.h"
+
+#if defined(__native_client__)
+static ssize_t nacl_getdents_wrapper(int fd, char* buf, size_t buf_size);
+#else
+#include <sys/syscall.h>
+#endif
+
+IRT_WRAPPER(getdents, int fd, struct dirent* ent, size_t count,
+            size_t* nread) {
+#if defined(__native_client__)
+  // NaCl's dirent lacks d_type field, so our getdents implementation
+  // assumes __nacl_irt_getdents is hooked by posix_translation and
+  // returns Bionic's dirent, not NaCl's. See also
+  // bionic/libc/arch-nacl/syscalls/__getdents64.c.
+  //
+  // Due to this reason, our getdents implementation does not work for
+  // __nacl_irt_getdents provided by NaCl's supervisor (e.g., sel_ldr)
+  // for unittests. We convert NaCl's dirent to Bionic's by this
+  // IRT wrapper.
+  ssize_t result = nacl_getdents_wrapper(
+      fd, reinterpret_cast<char*>(ent), count);
+#else
+  // nonsfi_loader does not implement __nacl_irt_getdents, so we call
+  // it directly.
+  ssize_t result = syscall(__NR_getdents64, fd, ent, count);
+#endif
+  if (result < 0)
+    return errno;
+  *nread = result;
+  return 0;
+}
+
+// NaCl IRT does not support O_DIRECTORY. We emulate it by calling
+// fstat for unittests. Production ARC does not have this issue
+// because posix_translation does support O_DIRECTORY.
+IRT_WRAPPER(open, const char* pathname, int oflags, mode_t cmode, int* newfd) {
+  // Do not pass O_DIRECTORY bit. nonsfi_loader on ARM does not
+  // understand ARM's O_DIRECTORY which is different from
+  // NACL_ABI_O_DIRECTORY.
+  int result = __nacl_irt_open_real(pathname, oflags & ~O_DIRECTORY,
+                                    cmode, newfd);
+  if (!result && (oflags & O_DIRECTORY)) {
+    struct stat st;
+    if (fstat(*newfd, &st))
+      LOG_ALWAYS_FATAL("fstat unexpectedly failed");
+    if (!S_ISDIR(st.st_mode)) {
+      if (close(*newfd))
+        LOG_ALWAYS_FATAL("close unexpectedly failed");
+      return ENOTDIR;
+    }
+  }
+  return result;
+}
+
+#if defined(__native_client__)
+// This should be defined after IRT_WRAPPER(getdents) because
+// nacl_getdents_wrapper.h uses __nacl_irt_getdents_real.
+#define DIRENT_TYPE struct dirent
+#include "nacl_getdents_wrapper.h"  // NOLINT
+#endif
+
+void InjectIrtHooks() {
+  DO_WRAP(getdents);
+  DO_WRAP(open);
+}
diff --git a/src/common/tests/unittest_main.cc b/src/common/tests/unittest_main.cc
index 192b830..f4027eb 100644
--- a/src/common/tests/unittest_main.cc
+++ b/src/common/tests/unittest_main.cc
@@ -5,11 +5,15 @@
 #include "common/options.h"
 #include "gtest/gtest.h"
 
+void InjectIrtHooks();
+
 int main(int argc, char **argv) {
   ::testing::InitGoogleTest(&argc, argv);
 
   // Set logging verbosity for unit testing.
   arc::Options::GetInstance()->ParseMinStderrLogPriority("W");
 
+  InjectIrtHooks();
+
   return RUN_ALL_TESTS();
 }
diff --git a/src/posix_translation/file_wrap.cc b/src/posix_translation/file_wrap.cc
index 68c6320..6050679 100644
--- a/src/posix_translation/file_wrap.cc
+++ b/src/posix_translation/file_wrap.cc
@@ -39,6 +39,7 @@
 #include "common/dlfcn_injection.h"
 #include "common/export.h"
 #include "common/file_util.h"
+#include "common/irt_wrapper_util.h"
 #include "common/logd_write.h"
 #include "common/memory_state.h"
 #include "common/options.h"
@@ -48,25 +49,6 @@
 #include "posix_translation/virtual_file_system.h"
 #include "posix_translation/wrap.h"
 
-// A macro to wrap an IRT function. Note that the macro does not wrap IRT
-// calls made by the Bionic loader. For example, wrapping mmap with DO_WRAP
-// does not hook the mmap IRT calls in phdr_table_load_segments() in
-// mods/android/bionic/linker/linker_phdr.c. This is because the loader has
-// its own set of IRT function pointers that are not visible from non-linker
-// code.
-#define DO_WRAP(name)                                   \
-  __nacl_irt_ ## name ## _real = __nacl_irt_ ## name;   \
-  __nacl_irt_ ## name  = __nacl_irt_ ## name ## _wrap
-
-// A macro to define an IRT wrapper and a function pointer to store
-// the real IRT function. Note that initializing __nacl_irt_<name>_real
-// with __nacl_irt_<name> by default is not a good idea because it requires
-// a static initializer.
-#define IRT_WRAPPER(name, ...)                              \
-  extern int (*__nacl_irt_ ## name)(__VA_ARGS__);           \
-  static int (*__nacl_irt_ ## name ## _real)(__VA_ARGS__);  \
-  int (__nacl_irt_ ## name ## _wrap)(__VA_ARGS__)
-
 // A helper macro to show both DIR pointer and its file descriptor in
 // ARC strace.
 #define PRETIFY_DIRP(dirp) (dirp) ? dirfd(dirp) : -1, (dirp)
diff --git a/src/posix_translation/passthrough.cc b/src/posix_translation/passthrough.cc
index 65dce55..d391f88 100644
--- a/src/posix_translation/passthrough.cc
+++ b/src/posix_translation/passthrough.cc
@@ -4,6 +4,9 @@
 
 #include "posix_translation/passthrough.h"
 
+#include <poll.h>
+#include <unistd.h>
+
 #include <string>
 
 #include "common/alog.h"
@@ -152,11 +155,13 @@
 
 bool PassthroughStream::IsSelectReadReady() const {
   ALOG_ASSERT(native_fd_ >= 0);
-  return false;
+  // Let us pretend we can always read from stdin.
+  return (native_fd_ == STDIN_FILENO);
 }
 bool PassthroughStream::IsSelectWriteReady() const {
   ALOG_ASSERT(native_fd_ >= 0);
-  return false;
+  // Let us pretend we can always write to stdout and stderr.
+  return (native_fd_ == STDOUT_FILENO || native_fd_ == STDERR_FILENO);
 }
 bool PassthroughStream::IsSelectExceptionReady() const {
   ALOG_ASSERT(native_fd_ >= 0);
@@ -165,7 +170,9 @@
 
 int16_t PassthroughStream::GetPollEvents() const {
   ALOG_ASSERT(native_fd_ >= 0);
-  return 0;
+  return ((IsSelectReadReady() ? POLLIN : 0) |
+          (IsSelectWriteReady() ? POLLOUT : 0) |
+          (IsSelectExceptionReady() ? POLLERR : 0));
 }
 
 size_t PassthroughStream::GetSize() const {
diff --git a/src/posix_translation/passthrough_test.cc b/src/posix_translation/passthrough_test.cc
index 4ac41ff..d41e61a 100644
--- a/src/posix_translation/passthrough_test.cc
+++ b/src/posix_translation/passthrough_test.cc
@@ -4,6 +4,7 @@
 
 #include <elf.h>  // For ELFMAG
 #include <fcntl.h>
+#include <poll.h>
 #include <sys/ioctl.h>
 #include <sys/stat.h>
 #include <sys/types.h>
@@ -83,4 +84,51 @@
   EXPECT_EQ(new_flag, DoFcntl(stream, F_GETFL));
 }
 
+TEST_F(PassthroughTest, TestSelectStdin) {
+  PassthroughHandler handler;
+  scoped_refptr<FileStream> stream =
+      handler.open(STDIN_FILENO, "", O_RDONLY, 0);
+  EXPECT_TRUE(stream->IsSelectReadReady());
+  EXPECT_FALSE(stream->IsSelectWriteReady());
+  EXPECT_FALSE(stream->IsSelectExceptionReady());
+}
+
+TEST_F(PassthroughTest, TestSelectStdout) {
+  PassthroughHandler handler;
+  scoped_refptr<FileStream> stream =
+      handler.open(STDOUT_FILENO, "", O_RDONLY, 0);
+  EXPECT_FALSE(stream->IsSelectReadReady());
+  EXPECT_TRUE(stream->IsSelectWriteReady());
+  EXPECT_FALSE(stream->IsSelectExceptionReady());
+}
+
+TEST_F(PassthroughTest, TestSelectStderr) {
+  PassthroughHandler handler;
+  scoped_refptr<FileStream> stream =
+      handler.open(STDERR_FILENO, "", O_RDONLY, 0);
+  EXPECT_FALSE(stream->IsSelectReadReady());
+  EXPECT_TRUE(stream->IsSelectWriteReady());
+  EXPECT_FALSE(stream->IsSelectExceptionReady());
+}
+
+TEST_F(PassthroughTest, TestPollStdin) {
+  PassthroughHandler handler;
+  scoped_refptr<FileStream> stream =
+      handler.open(STDIN_FILENO, "", O_RDONLY, 0);
+  EXPECT_EQ(POLLIN, stream->GetPollEvents());
+}
+
+TEST_F(PassthroughTest, TestPollStdout) {
+  PassthroughHandler handler;
+  scoped_refptr<FileStream> stream =
+      handler.open(STDOUT_FILENO, "", O_RDONLY, 0);
+  EXPECT_EQ(POLLOUT, stream->GetPollEvents());
+}
+
+TEST_F(PassthroughTest, TestPollStderr) {
+  PassthroughHandler handler;
+  scoped_refptr<FileStream> stream =
+      handler.open(STDERR_FILENO, "", O_RDONLY, 0);
+  EXPECT_EQ(POLLOUT, stream->GetPollEvents());
+}
+
 }  // namespace posix_translation