Add Dr. Memory tool

This CL adds DrMemory-Windows-sfx.exe into tools/drmemory.
It also adds a set of scripts from chromium/src/tools/valgrind for
running Dr. Memory.  Now we can run pdfium tests with Dr. Memory:
$ .\tools\drmemory\scripts\pdfium_tests.bat -b out\Debug -t pdfium_unittests
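For example, a single test case should also be selectable via the
test:gtest_filter form accepted by pdfium_tests.py (test name below is
hypothetical):
$ .\tools\drmemory\scripts\pdfium_tests.bat -b out\Debug -t pdfium_unittests:FooTest.Bar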

More updates to the scripts are required, but this should be a good start.

Changes from chromium/src/tools/valgrind:
- rename chrome_tests.py to pdfium_tests.py
- update pdfium_tests.bat
- remove Chrome-specific tests from pdfium_tests.py
- remove Memcheck code from valgrind_test.py

R=thestig@chromium.org
BUG=pdfium:238

Review URL: https://codereview.chromium.org/1452293002 .
diff --git a/tools/drmemory/DrMemory-Windows-sfx.exe b/tools/drmemory/DrMemory-Windows-sfx.exe
new file mode 100644
index 0000000..ffe3e66
--- /dev/null
+++ b/tools/drmemory/DrMemory-Windows-sfx.exe
Binary files differ
diff --git a/tools/drmemory/scripts/common.py b/tools/drmemory/scripts/common.py
new file mode 100644
index 0000000..7e163e3
--- /dev/null
+++ b/tools/drmemory/scripts/common.py
@@ -0,0 +1,252 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import platform
+import os
+import signal
+import subprocess
+import sys
+import time
+
+
+class NotImplementedError(Exception):
+  pass
+
+
+class TimeoutError(Exception):
+  pass
+
+
+def RunSubprocessInBackground(proc):
+  """Runs a subprocess in the background. Returns a handle to the process."""
+  logging.info("running %s in the background" % " ".join(proc))
+  return subprocess.Popen(proc)
+
+
+def RunSubprocess(proc, timeout=0):
+  """ Runs a subprocess, until it finishes or |timeout| is exceeded and the
+  process is killed with taskkill.  A |timeout| <= 0  means no timeout.
+
+  Args:
+    proc: list of process components (exe + args)
+    timeout: how long to wait before killing, <= 0 means wait forever
+  """
+
+  logging.info("running %s, timeout %d sec" % (" ".join(proc), timeout))
+  sys.stdout.flush()
+  sys.stderr.flush()
+
+  # Manually read and print out stdout and stderr.
+  # By default, the subprocess is supposed to inherit these from its parent,
+  # however when run under buildbot, it seems unable to read data from a
+  # grandchild process, so we have to read the child and print the data as if
+  # it came from us for buildbot to read it.  We're not sure why this is
+  # necessary.
+  # TODO(erikkay): should we buffer stderr and stdout separately?
+  p = subprocess.Popen(proc, universal_newlines=True,
+                       bufsize=0,  # unbuffered
+                       stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+
+  logging.info("started subprocess")
+
+  did_timeout = False
+  if timeout > 0:
+    wait_until = time.time() + timeout
+  while p.poll() is None and not did_timeout:
+    # Have to use readline rather than readlines() or "for line in p.stdout:",
+    # otherwise we get buffered even with bufsize=0.
+    line = p.stdout.readline()
+    while line and not did_timeout:
+      sys.stdout.write(line)
+      sys.stdout.flush()
+      line = p.stdout.readline()
+      if timeout > 0:
+        did_timeout = time.time() > wait_until
+
+  if did_timeout:
+    logging.info("process timed out")
+  else:
+    logging.info("process ended, did not time out")
+
+  if did_timeout:
+    if IsWindows():
+      subprocess.call(["taskkill", "/T", "/F", "/PID", str(p.pid)])
+    else:
+      # Does this kill all children, too?
+      os.kill(p.pid, signal.SIGINT)
+    logging.error("KILLED %d" % p.pid)
+    # Give the process a chance to actually die before continuing
+    # so that cleanup can happen safely.
+    time.sleep(1.0)
+    logging.error("TIMEOUT waiting for %s" % proc[0])
+    raise TimeoutError(proc[0])
+  else:
+    for line in p.stdout:
+      sys.stdout.write(line)
+    if not IsMac():   # stdout flush fails on Mac
+      logging.info("flushing stdout")
+      sys.stdout.flush()
+
+  logging.info("collecting result code")
+  result = p.poll()
+  if result:
+    logging.error("%s exited with non-zero result code %d" % (proc[0], result))
+  return result
+
+
+def IsLinux():
+  return sys.platform.startswith('linux')
+
+
+def IsMac():
+  return sys.platform.startswith('darwin')
+
+
+def IsWindows():
+  return sys.platform == 'cygwin' or sys.platform.startswith('win')
+
+
+def WindowsVersionName():
+  """Returns the name of the Windows version if it is known, or None.
+
+  Possible return values are: xp, vista, 7, 8, or None
+  """
+  if sys.platform == 'cygwin':
+    # Windows version number is hiding in system name.  Looks like:
+    # CYGWIN_NT-6.1-WOW64
+    try:
+      version_str = platform.uname()[0].split('-')[1]
+    except:
+      return None
+  elif sys.platform.startswith('win'):
+    # Normal Windows version string.  Mine: 6.1.7601
+    version_str = platform.version()
+  else:
+    return None
+
+  parts = version_str.split('.')
+  try:
+    major = int(parts[0])
+    minor = int(parts[1])
+  except:
+    return None  # Can't parse, unknown version.
+
+  if major == 5:
+    return 'xp'
+  elif major == 6 and minor == 0:
+    return 'vista'
+  elif major == 6 and minor == 1:
+    return '7'
+  elif major == 6 and minor == 2:
+    return '8'  # Future proof.  ;)
+  return None
+
+
+def PlatformNames():
+  """Return an array of string to be used in paths for the platform
+  (e.g. suppressions, gtest filters, ignore files etc.)
+  The first element of the array describes the 'main' platform
+  """
+  if IsLinux():
+    return ['linux']
+  if IsMac():
+    return ['mac']
+  if IsWindows():
+    names = ['win32']
+    version_name = WindowsVersionName()
+    if version_name is not None:
+      names.append('win-%s' % version_name)
+    return names
+  raise NotImplementedError('Unknown platform "%s".' % sys.platform)
+
+
+def PutEnvAndLog(env_name, env_value):
+  os.putenv(env_name, env_value)
+  logging.info('export %s=%s', env_name, env_value)
+
+def BoringCallers(mangled, use_re_wildcards):
+  """Return a list of 'boring' function names (optinally mangled)
+  with */? wildcards (optionally .*/.).
+  Boring = we drop off the bottom of stack traces below such functions.
+  """
+
+  need_mangling = [
+    # Don't show our testing framework:
+    ("testing::Test::Run",     "_ZN7testing4Test3RunEv"),
+    ("testing::TestInfo::Run", "_ZN7testing8TestInfo3RunEv"),
+    ("testing::internal::Handle*ExceptionsInMethodIfSupported*",
+     "_ZN7testing8internal3?Handle*ExceptionsInMethodIfSupported*"),
+
+    # Depend on scheduling:
+    ("MessageLoop::Run",     "_ZN11MessageLoop3RunEv"),
+    ("MessageLoop::RunTask", "_ZN11MessageLoop7RunTask*"),
+    ("RunnableMethod*",      "_ZN14RunnableMethod*"),
+    ("DispatchToMethod*",    "_Z*16DispatchToMethod*"),
+    ("base::internal::Invoker*::DoInvoke*",
+     "_ZN4base8internal8Invoker*DoInvoke*"),  # Invoker{1,2,3}
+    ("base::internal::RunnableAdapter*::Run*",
+     "_ZN4base8internal15RunnableAdapter*Run*"),
+  ]
+
+  ret = []
+  for pair in need_mangling:
+    ret.append(pair[1 if mangled else 0])
+
+  ret += [
+    # Also don't show the internals of libc/pthread.
+    "start_thread",
+    "main",
+    "BaseThreadInitThunk",
+  ]
+
+  if use_re_wildcards:
+    for i in range(0, len(ret)):
+      ret[i] = ret[i].replace('*', '.*').replace('?', '.')
+
+  return ret
+
+def NormalizeWindowsPath(path):
+  """If we're using Cygwin Python, turn the path into a Windows path.
+
+  Don't turn forward slashes into backslashes for easier copy-pasting and
+  escaping.
+
+  TODO(rnk): If we ever want to cut out the subprocess invocation, we can use
+  _winreg to get the root Cygwin directory from the registry key:
+  HKEY_LOCAL_MACHINE\SOFTWARE\Cygwin\setup\rootdir.
+  """
+  if sys.platform.startswith("cygwin"):
+    p = subprocess.Popen(["cygpath", "-m", path],
+                         stdout=subprocess.PIPE,
+                         stderr=subprocess.PIPE)
+    (out, err) = p.communicate()
+    if err:
+      logging.warning("WARNING: cygpath error: %s", err)
+    return out.strip()
+  else:
+    return path
+
+############################
+# Common output format code
+
+def PrintUsedSuppressionsList(suppcounts):
+  """ Prints out the list of used suppressions in a format common to all the
+      memory tools. If the list is empty, prints nothing and returns False,
+      otherwise True.
+
+      suppcounts: a dictionary of used suppression counts,
+                  Key -> name, Value -> count.
+  """
+  if not suppcounts:
+    return False
+
+  print "-----------------------------------------------------"
+  print "Suppressions used:"
+  print "  count name"
+  for (name, count) in sorted(suppcounts.items(), key=lambda (k,v): (v,k)):
+    print "%7d %s" % (count, name)
+  print "-----------------------------------------------------"
+  sys.stdout.flush()
+  return True
diff --git a/tools/drmemory/scripts/drmemory_analyze.py b/tools/drmemory/scripts/drmemory_analyze.py
new file mode 100644
index 0000000..29fc0ed
--- /dev/null
+++ b/tools/drmemory/scripts/drmemory_analyze.py
@@ -0,0 +1,202 @@
+#!/usr/bin/env python
+# Copyright (c) 2011 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# drmemory_analyze.py
+
+''' Given a Dr. Memory output file, parses errors and uniques them.'''
+
+from collections import defaultdict
+import common
+import hashlib
+import logging
+import optparse
+import os
+import re
+import subprocess
+import sys
+import time
+
+class DrMemoryError:
+  def __init__(self, report, suppression, testcase):
+    self._report = report
+    self._testcase = testcase
+
+    # Chromium-specific transformations of the suppressions:
+    # Replace 'any_test.exe' and 'chrome.dll' with '*', then remove the
+    # Dr.Memory-generated error ids from the name= lines as they don't
+    # make sense in a multiprocess report.
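+    # e.g. "chrome.dll!Frame" -> "*!Frame", "foo_tests.exe!Frame" -> "*!Frame"
+    # (frame names here are illustrative).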
+    supp_lines = suppression.split("\n")
+    for l in xrange(len(supp_lines)):
+      if supp_lines[l].startswith("name="):
+        supp_lines[l] = "name=<insert_a_suppression_name_here>"
+      if supp_lines[l].startswith("chrome.dll!"):
+        supp_lines[l] = supp_lines[l].replace("chrome.dll!", "*!")
+      bang_index = supp_lines[l].find("!")
+      d_exe_index = supp_lines[l].find(".exe!")
+      if bang_index >= 4 and d_exe_index + 4 == bang_index:
+        supp_lines[l] = "*" + supp_lines[l][bang_index:]
+    self._suppression = "\n".join(supp_lines)
+
+  def __str__(self):
+    output = ""
+    output += "### BEGIN MEMORY TOOL REPORT (error hash=#%016X#)\n" % \
+        self.ErrorHash()
+    output += self._report + "\n"
+    if self._testcase:
+      output += "The report came from the `%s` test.\n" % self._testcase
+    output += "Suppression (error hash=#%016X#):\n" % self.ErrorHash()
+    output += ("  For more info on using suppressions see "
+        "http://dev.chromium.org/developers/how-tos/using-drmemory#TOC-Suppressing-error-reports-from-the-\n")
+    output += "{\n%s\n}\n" % self._suppression
+    output += "### END MEMORY TOOL REPORT (error hash=#%016X#)\n" % \
+        self.ErrorHash()
+    return output
+
+  # This is a device-independent hash identifying the suppression.
+  # By printing out this hash we can find duplicate reports between tests and
+  # different shards running on multiple buildbots
+  def ErrorHash(self):
+    return int(hashlib.md5(self._suppression).hexdigest()[:16], 16)
+
+  def __hash__(self):
+    return hash(self._suppression)
+
+  def __eq__(self, rhs):
+    return self._suppression == rhs
+
+
+class DrMemoryAnalyzer:
+  ''' Given a set of Dr.Memory output files, parse all the errors out of
+  them, unique them and output the results.'''
+
+  def __init__(self):
+    self.known_errors = set()
+    self.error_count = 0
+
+  def ReadLine(self):
+    self.line_ = self.cur_fd_.readline()
+
+  def ReadSection(self):
+    result = [self.line_]
+    self.ReadLine()
+    while len(self.line_.strip()) > 0:
+      result.append(self.line_)
+      self.ReadLine()
+    return result
+
+  def ParseReportFile(self, filename, testcase):
+    ret = []
+
+    # First, read the generated suppressions file so we can easily lookup a
+    # suppression for a given error.
+    supp_fd = open(filename.replace("results", "suppress"), 'r')
+    generated_suppressions = {}  # Key -> Error #, Value -> Suppression text.
+    for line in supp_fd:
+      # NOTE: this regexp looks fragile. Might break if the generated
+      # suppression format slightly changes.
+      m = re.search("# Suppression for Error #([0-9]+)", line.strip())
+      if not m:
+        continue
+      error_id = int(m.groups()[0])
+      assert error_id not in generated_suppressions
+      # OK, now read the next suppression:
+      cur_supp = ""
+      for supp_line in supp_fd:
+        if supp_line.startswith("#") or supp_line.strip() == "":
+          break
+        cur_supp += supp_line
+      generated_suppressions[error_id] = cur_supp.strip()
+    supp_fd.close()
+
+    self.cur_fd_ = open(filename, 'r')
+    while True:
+      self.ReadLine()
+      if (self.line_ == ''): break
+
+      match = re.search("^Error #([0-9]+): (.*)", self.line_)
+      if match:
+        error_id = int(match.groups()[0])
+        self.line_ = match.groups()[1].strip() + "\n"
+        report = "".join(self.ReadSection()).strip()
+        suppression = generated_suppressions[error_id]
+        ret.append(DrMemoryError(report, suppression, testcase))
+
+      if re.search("SUPPRESSIONS USED:", self.line_):
+        self.ReadLine()
+        while self.line_.strip() != "":
+          line = self.line_.strip()
+          (count, name) = re.match(" *([0-9\?]+)x(?: \(.*?\))?: (.*)",
+                                   line).groups()
+          if (count == "?"):
+            # Whole-module suppressions have no count available: assume 1.
+            count = 1
+          else:
+            count = int(count)
+          self.used_suppressions[name] += count
+          self.ReadLine()
+
+      if self.line_.startswith("ASSERT FAILURE"):
+        ret.append(self.line_.strip())
+
+    self.cur_fd_.close()
+    return ret
+
+  def Report(self, filenames, testcase, check_sanity):
+    sys.stdout.flush()
+    # TODO(timurrrr): support positive tests / check_sanity==True
+    self.used_suppressions = defaultdict(int)
+
+    to_report = []
+    reports_for_this_test = set()
+    for f in filenames:
+      cur_reports = self.ParseReportFile(f, testcase)
+
+      # Filter out the reports that were there in previous tests.
+      for r in cur_reports:
+        if r in reports_for_this_test:
+          # A similar report is about to be printed for this test.
+          pass
+        elif r in self.known_errors:
+          # A similar report has already been printed in one of the prev tests.
+          to_report.append("This error was already printed in some "
+                           "other test, see 'hash=#%016X#'" % r.ErrorHash())
+          reports_for_this_test.add(r)
+        else:
+          self.known_errors.add(r)
+          reports_for_this_test.add(r)
+          to_report.append(r)
+
+    common.PrintUsedSuppressionsList(self.used_suppressions)
+
+    if not to_report:
+      logging.info("PASS: No error reports found")
+      return 0
+
+    sys.stdout.flush()
+    sys.stderr.flush()
+    logging.info("Found %i error reports" % len(to_report))
+    for report in to_report:
+      self.error_count += 1
+      logging.info("Report #%d\n%s" % (self.error_count, report))
+    logging.info("Total: %i error reports" % len(to_report))
+    sys.stdout.flush()
+    return -1
+
+
+def main():
+  '''For testing only. The DrMemoryAnalyzer class should be imported instead.'''
+  parser = optparse.OptionParser("usage: %prog <files to analyze>")
+
+  (options, args) = parser.parse_args()
+  if len(args) == 0:
+    parser.error("no filename specified")
+  filenames = args
+
+  logging.getLogger().setLevel(logging.INFO)
+  return DrMemoryAnalyzer().Report(filenames, None, False)
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/tools/drmemory/scripts/logging_utils.py b/tools/drmemory/scripts/logging_utils.py
new file mode 100644
index 0000000..ef2d674
--- /dev/null
+++ b/tools/drmemory/scripts/logging_utils.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2011 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+''' Utility functions and objects for logging.
+'''
+
+import logging
+import sys
+
+class StdoutStderrHandler(logging.Handler):
+  ''' Subclass of logging.Handler which outputs to either stdout or stderr
+  based on a threshold level.
+  '''
+
+  def __init__(self, threshold=logging.WARNING, err=sys.stderr, out=sys.stdout):
+    ''' Args:
+          threshold: below this logging level messages are sent to stdout,
+            otherwise they are sent to stderr
+          err: a stream object that error messages are sent to, defaults to
+            sys.stderr
+          out: a stream object that non-error messages are sent to, defaults to
+            sys.stdout
+    '''
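+    # e.g. with the default threshold, INFO records go to stdout while
+    # WARNING and ERROR records go to stderr.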
+    logging.Handler.__init__(self)
+    self._err = logging.StreamHandler(err)
+    self._out = logging.StreamHandler(out)
+    self._threshold = threshold
+    self._last_was_err = False
+
+  def setLevel(self, lvl):
+    logging.Handler.setLevel(self, lvl)
+    self._err.setLevel(lvl)
+    self._out.setLevel(lvl)
+
+  def setFormatter(self, formatter):
+    logging.Handler.setFormatter(self, formatter)
+    self._err.setFormatter(formatter)
+    self._out.setFormatter(formatter)
+
+  def emit(self, record):
+    if record.levelno < self._threshold:
+      self._out.emit(record)
+      self._last_was_err = False
+    else:
+      self._err.emit(record)
+      self._last_was_err = True
+
+  def flush(self):
+    # preserve order on the flushing, the stalest stream gets flushed first
+    if self._last_was_err:
+      self._out.flush()
+      self._err.flush()
+    else:
+      self._err.flush()
+      self._out.flush()
+
+
+FORMAT = "%(asctime)s %(filename)s [%(levelname)s] %(message)s"
+DATEFMT = "%H:%M:%S"
+
+def config_root(level=logging.INFO, threshold=logging.WARNING, format=FORMAT,
+         datefmt=DATEFMT):
+  ''' Configure the root logger to use a StdoutStderrHandler and some default
+  formatting.
+    Args:
+      level: messages below this level are ignored
+      threshold: below this logging level messages are sent to stdout,
+        otherwise they are sent to stderr
+      format: format for log messages, see logger.Format
+      datefmt: format for date in log messages
+
+  '''
+  # to set the handler of the root logging object, we need to do setup
+  # manually rather than using basicConfig
+  root = logging.getLogger()
+  root.setLevel(level)
+  formatter = logging.Formatter(format, datefmt)
+  handler = StdoutStderrHandler(threshold=threshold)
+  handler.setLevel(level)
+  handler.setFormatter(formatter)
+  root.addHandler(handler)
diff --git a/tools/drmemory/scripts/path_utils.py b/tools/drmemory/scripts/path_utils.py
new file mode 100644
index 0000000..6ab4312
--- /dev/null
+++ b/tools/drmemory/scripts/path_utils.py
@@ -0,0 +1,84 @@
+# Copyright (c) 2011 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Some utility methods for getting and manipulating paths."""
+
+# TODO(pamg): Have the buildbot use these, too.
+
+
+import errno
+import os
+import sys
+
+class PathNotFound(Exception): pass
+
+def ScriptDir():
+  """Get the full path to the directory containing the current script."""
+  script_filename = os.path.abspath(sys.argv[0])
+  return os.path.dirname(script_filename)
+
+def FindAncestor(start_dir, ancestor):
+  """Finds an ancestor dir in a path.
+
+  For example, FindAncestor('c:\foo\bar\baz', 'bar') would return
+  'c:\foo\bar'.  Unlike FindUpward*, this only looks at direct path ancestors.
+  """
+  start_dir = os.path.abspath(start_dir)
+  path = start_dir
+  while True:
+    (parent, tail) = os.path.split(path)
+    if tail == ancestor:
+      return path
+    if not tail:
+      break
+    path = parent
+  raise PathNotFound("Unable to find ancestor %s in %s" % (ancestor, start_dir))
+
+def FindUpwardParent(start_dir, *desired_list):
+  """Finds the desired object's parent, searching upward from the start_dir.
+
+  Searches start_dir and all its parents looking for the desired directory
+  or file, which may be given in one or more path components. Returns the
+  first directory in which the top desired path component was found, or raises
+  PathNotFound if it wasn't.
+  """
+  desired_path = os.path.join(*desired_list)
+  last_dir = ''
+  cur_dir = start_dir
+  found_path = os.path.join(cur_dir, desired_path)
+  while not os.path.exists(found_path):
+    last_dir = cur_dir
+    cur_dir = os.path.dirname(cur_dir)
+    if last_dir == cur_dir:
+      raise PathNotFound('Unable to find %s above %s' %
+                         (desired_path, start_dir))
+    found_path = os.path.join(cur_dir, desired_path)
+  # Strip the entire original desired path from the end of the one found
+  # and remove a trailing path separator, if present.
+  found_path = found_path[:len(found_path) - len(desired_path)]
+  if found_path.endswith(os.sep):
+    found_path = found_path[:len(found_path) - 1]
+  return found_path
+
+
+def FindUpward(start_dir, *desired_list):
+  """Returns a path to the desired directory or file, searching upward.
+
+  Searches start_dir and all its parents looking for the desired directory
+  or file, which may be given in one or more path components. Returns the full
+  path to the desired object, or raises PathNotFound if it wasn't found.
+  """
+  parent = FindUpwardParent(start_dir, *desired_list)
+  return os.path.join(parent, *desired_list)
+
+
+def MaybeMakeDirectory(*path):
+  """Creates an entire path, if it doesn't already exist."""
+  file_path = os.path.join(*path)
+  try:
+    os.makedirs(file_path)
+  except OSError, e:
+    # errno.EEXIST is "File exists".  If we see another error, re-raise.
+    if e.errno != errno.EEXIST:
+      raise
diff --git a/tools/drmemory/scripts/pdfium_tests.bat b/tools/drmemory/scripts/pdfium_tests.bat
new file mode 100644
index 0000000..4618a0e
--- /dev/null
+++ b/tools/drmemory/scripts/pdfium_tests.bat
@@ -0,0 +1,24 @@
+@echo off
+:: Copyright (c) 2011 The Chromium Authors. All rights reserved.
+:: Use of this source code is governed by a BSD-style license that can be
+:: found in the LICENSE file.
+
+set THISDIR=%~dp0
+set TOOL_NAME="drmemory_full"
+
+:: Set up DRMEMORY_COMMAND to invoke Dr. Memory {{{1
+set DRMEMORY_PATH=%THISDIR%..
+set DRMEMORY_SFX=%DRMEMORY_PATH%\drmemory-windows-sfx.exe
+if EXIST %DRMEMORY_SFX% GOTO DRMEMORY_BINARY_OK
+echo "Can't find Dr. Memory executables."
+echo "See http://www.chromium.org/developers/how-tos/using-valgrind/dr-memory"
+echo "for the instructions on how to get them."
+exit /B 1
+
+:DRMEMORY_BINARY_OK
+%DRMEMORY_SFX% -o%DRMEMORY_PATH%\unpacked -y
+set DRMEMORY_COMMAND=%DRMEMORY_PATH%\unpacked\bin\drmemory.exe
+:: }}}
+
+:RUN_TESTS
+python %THISDIR%/pdfium_tests.py %*
diff --git a/tools/drmemory/scripts/pdfium_tests.py b/tools/drmemory/scripts/pdfium_tests.py
new file mode 100644
index 0000000..6cc7d57
--- /dev/null
+++ b/tools/drmemory/scripts/pdfium_tests.py
@@ -0,0 +1,370 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+''' Runs various PDFium tests through valgrind_test.py.'''
+
+import glob
+import logging
+import optparse
+import os
+import subprocess
+import sys
+
+import logging_utils
+import path_utils
+
+import common
+import valgrind_test
+
+class TestNotFound(Exception): pass
+
+class MultipleGTestFiltersSpecified(Exception): pass
+
+class BuildDirNotFound(Exception): pass
+
+class BuildDirAmbiguous(Exception): pass
+
+class ExecutableNotFound(Exception): pass
+
+class BadBinary(Exception): pass
+
+class ChromeTests:
+  SLOW_TOOLS = ["drmemory"]
+
+  def __init__(self, options, args, test):
+    if ':' in test:
+      (self._test, self._gtest_filter) = test.split(':', 1)
+    else:
+      self._test = test
+      self._gtest_filter = options.gtest_filter
+
+    if self._test not in self._test_list:
+      raise TestNotFound("Unknown test: %s" % test)
+
+    if options.gtest_filter and options.gtest_filter != self._gtest_filter:
+      raise MultipleGTestFiltersSpecified("Can not specify both --gtest_filter "
+                                          "and --test %s" % test)
+
+    self._options = options
+    self._args = args
+
+    script_dir = path_utils.ScriptDir()
+    # Compute the top of the tree (the "source dir") from the script dir (where
+    # this script lives).  We assume that the script dir is in tools/valgrind/
+    # relative to the top of the tree.
+    self._source_dir = os.path.dirname(os.path.dirname(script_dir))
+    # since this path is used for string matching, make sure it's always
+    # an absolute Unix-style path
+    self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
+    valgrind_test_script = os.path.join(script_dir, "valgrind_test.py")
+    self._command_preamble = ["--source-dir=%s" % (self._source_dir)]
+
+    if not self._options.build_dir:
+      dirs = [
+        os.path.join(self._source_dir, "xcodebuild", "Debug"),
+        os.path.join(self._source_dir, "out", "Debug"),
+        os.path.join(self._source_dir, "build", "Debug"),
+      ]
+      build_dir = [d for d in dirs if os.path.isdir(d)]
+      if len(build_dir) > 1:
+        raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
+                                "%s\nPlease specify just one "
+                                "using --build-dir" % ", ".join(build_dir))
+      elif build_dir:
+        self._options.build_dir = build_dir[0]
+      else:
+        self._options.build_dir = None
+
+    if self._options.build_dir:
+      build_dir = os.path.abspath(self._options.build_dir)
+      self._command_preamble += ["--build-dir=%s" % (self._options.build_dir)]
+
+  def _EnsureBuildDirFound(self):
+    if not self._options.build_dir:
+      raise BuildDirNotFound("Oops, couldn't find a build dir, please "
+                             "specify it manually using --build-dir")
+
+  def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
+    '''Generates the default command array that most tests will use.'''
+    if exe and common.IsWindows():
+      exe += '.exe'
+
+    cmd = list(self._command_preamble)
+
+    # Find all suppressions matching the following pattern:
+    # tools/drmemory/scripts/TOOL/suppressions[_PLATFORM].txt
+    # and list them with --suppressions= prefix.
+    script_dir = path_utils.ScriptDir()
+    tool_name = tool.ToolName()
+    suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")
+    if os.path.exists(suppression_file):
+      cmd.append("--suppressions=%s" % suppression_file)
+    # Platform-specific suppression
+    for platform in common.PlatformNames():
+      platform_suppression_file = \
+          os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)
+      if os.path.exists(platform_suppression_file):
+        cmd.append("--suppressions=%s" % platform_suppression_file)
+
+    if self._options.valgrind_tool_flags:
+      cmd += self._options.valgrind_tool_flags.split(" ")
+    if self._options.keep_logs:
+      cmd += ["--keep_logs"]
+    if valgrind_test_args != None:
+      for arg in valgrind_test_args:
+        cmd.append(arg)
+    if exe:
+      self._EnsureBuildDirFound()
+      exe_path = os.path.join(self._options.build_dir, exe)
+      if not os.path.exists(exe_path):
+        raise ExecutableNotFound("Couldn't find '%s'" % exe_path)
+
+      # Make sure we don't try to test ASan-built binaries
+      # with other dynamic instrumentation-based tools.
+      # TODO(timurrrr): also check TSan and MSan?
+      # `nm` might not be available, so use try-except.
+      try:
+        # Do not perform this check on OS X, as 'nm' on 10.6 can't handle
+        # binaries built with Clang 3.5+.
+        if not common.IsMac():
+          nm_output = subprocess.check_output(["nm", exe_path])
+          if nm_output.find("__asan_init") != -1:
+            raise BadBinary("You're trying to run an executable instrumented "
+                            "with AddressSanitizer under %s. Please provide "
+                            "an uninstrumented executable." % tool_name)
+      except OSError:
+        pass
+
+      cmd.append(exe_path)
+      # Valgrind runs tests slowly, so slow tests hurt more; show elapsed time
+      # so we can find the slowpokes.
+      cmd.append("--gtest_print_time")
+      # The built-in test launcher for gtest-based executables runs tests in
+      # multiple processes by default. Force single-process mode instead.
+      cmd.append("--single-process-tests")
+    if self._options.gtest_repeat:
+      cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
+    if self._options.gtest_shuffle:
+      cmd.append("--gtest_shuffle")
+    if self._options.gtest_break_on_failure:
+      cmd.append("--gtest_break_on_failure")
+    if self._options.test_launcher_bot_mode:
+      cmd.append("--test-launcher-bot-mode")
+    if self._options.test_launcher_total_shards is not None:
+      cmd.append("--test-launcher-total-shards=%d" % self._options.test_launcher_total_shards)
+    if self._options.test_launcher_shard_index is not None:
+      cmd.append("--test-launcher-shard-index=%d" % self._options.test_launcher_shard_index)
+    return cmd
+
+  def Run(self):
+    ''' Runs the test specified by command-line argument --test '''
+    logging.info("running test %s" % (self._test))
+    return self._test_list[self._test](self)
+
+  def _AppendGtestFilter(self, tool, name, cmd):
+    '''Append an appropriate --gtest_filter flag to the googletest binary
+       invocation.
+       If the user passed their own filter naming only one test, just use it.
+       Otherwise, filter out tests listed in the appropriate gtest_exclude files.
+    '''
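+    # e.g. an exclude-file entry "Foo.Bar" (hypothetical) ends up excluding
+    # Foo.Bar, Foo.FLAKY_Bar and Foo.FAILS_Bar via --gtest_filter.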
+    if (self._gtest_filter and
+        ":" not in self._gtest_filter and
+        "?" not in self._gtest_filter and
+        "*" not in self._gtest_filter):
+      cmd.append("--gtest_filter=%s" % self._gtest_filter)
+      return
+
+    filters = []
+    gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")
+
+    gtest_filter_files = [
+        os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
+    # Use ".gtest.txt" files only for slow tools, as they now contain
+    # Valgrind- and Dr.Memory-specific filters.
+    # TODO(glider): rename the files to ".gtest_slow.txt"
+    if tool.ToolName() in ChromeTests.SLOW_TOOLS:
+      gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
+    for platform_suffix in common.PlatformNames():
+      gtest_filter_files += [
+        os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
+        os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
+            (tool.ToolName(), platform_suffix))]
+    logging.info("Reading gtest exclude filter files:")
+    for filename in gtest_filter_files:
+      # strip the leading absolute path (may be very long on the bot)
+      # and the following / or \.
+      readable_filename = filename.replace("\\", "/")  # '\' on Windows
+      readable_filename = readable_filename.replace(self._source_dir, "")[1:]
+      if not os.path.exists(filename):
+        logging.info("  \"%s\" - not found" % readable_filename)
+        continue
+      logging.info("  \"%s\" - OK" % readable_filename)
+      f = open(filename, 'r')
+      for line in f.readlines():
+        if line.startswith("#") or line.startswith("//") or line.isspace():
+          continue
+        line = line.rstrip()
+        test_prefixes = ["FLAKY", "FAILS"]
+        for p in test_prefixes:
+          # Strip prefixes from the test names.
+          line = line.replace(".%s_" % p, ".")
+        # Exclude the original test name.
+        filters.append(line)
+        if line[-2:] != ".*":
+          # List all possible prefixes if line doesn't end with ".*".
+          for p in test_prefixes:
+            filters.append(line.replace(".", ".%s_" % p))
+    # Get rid of duplicates.
+    filters = set(filters)
+    gtest_filter = self._gtest_filter
+    if len(filters):
+      if gtest_filter:
+        gtest_filter += ":"
+        if gtest_filter.find("-") < 0:
+          gtest_filter += "-"
+      else:
+        gtest_filter = "-"
+      gtest_filter += ":".join(filters)
+    if gtest_filter:
+      cmd.append("--gtest_filter=%s" % gtest_filter)
+
+  @staticmethod
+  def ShowTests():
+    test_to_names = {}
+    for name, test_function in ChromeTests._test_list.iteritems():
+      test_to_names.setdefault(test_function, []).append(name)
+
+    name_to_aliases = {}
+    for names in test_to_names.itervalues():
+      names.sort(key=lambda name: len(name))
+      name_to_aliases[names[0]] = names[1:]
+
+    print
+    print "Available tests:"
+    print "----------------"
+    for name, aliases in sorted(name_to_aliases.iteritems()):
+      if aliases:
+        print "   {} (aka {})".format(name, ', '.join(aliases))
+      else:
+        print "   {}".format(name)
+
+  def SetupLdPath(self, requires_build_dir):
+    if requires_build_dir:
+      self._EnsureBuildDirFound()
+    elif not self._options.build_dir:
+      return
+
+    # Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
+    if (os.getenv("LD_LIBRARY_PATH")):
+      os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
+                                              self._options.build_dir))
+    else:
+      os.putenv("LD_LIBRARY_PATH", self._options.build_dir)
+
+  def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
+    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
+    cmd = self._DefaultCommand(tool, name, valgrind_test_args)
+    self._AppendGtestFilter(tool, name, cmd)
+    cmd.extend(['--test-tiny-timeout=1000'])
+    if cmd_args:
+      cmd.extend(cmd_args)
+
+    self.SetupLdPath(True)
+    return tool.Run(cmd, module)
+
+  def RunCmdLine(self):
+    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
+    cmd = self._DefaultCommand(tool, None, self._args)
+    self.SetupLdPath(False)
+    return tool.Run(cmd, None)
+
+  def TestPDFiumUnitTests(self):
+    return self.SimpleTest("pdfium_unittests", "pdfium_unittests")
+
+  def TestPDFiumEmbedderTests(self):
+    return self.SimpleTest("pdfium_embeddertests", "pdfium_embeddertests")
+
+  # The known list of tests.
+  _test_list = {
+    "cmdline" : RunCmdLine,
+    "pdfium_unittests": TestPDFiumUnitTests,
+    "pdfium_embeddertests": TestPDFiumEmbedderTests,
+  }
+
+
+def _main():
+  parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
+                                 "[-t <test> ...]")
+
+  parser.add_option("--help-tests", dest="help_tests", action="store_true",
+                    default=False, help="List all available tests")
+  parser.add_option("-b", "--build-dir",
+                    help="the location of the compiler output")
+  parser.add_option("--target", help="Debug or Release")
+  parser.add_option("-t", "--test", action="append", default=[],
+                    help="which test to run, supports test:gtest_filter format "
+                         "as well.")
+  parser.add_option("--gtest_filter",
+                    help="additional arguments to --gtest_filter")
+  parser.add_option("--gtest_repeat", help="argument for --gtest_repeat")
+  parser.add_option("--gtest_shuffle", action="store_true", default=False,
+                    help="Randomize tests' orders on every iteration.")
+  parser.add_option("--gtest_break_on_failure", action="store_true",
+                    default=False,
+                    help="Drop in to debugger on assertion failure. Also "
+                         "useful for forcing tests to exit with a stack dump "
+                         "on the first assertion failure when running with "
+                         "--gtest_repeat=-1")
+  parser.add_option("-v", "--verbose", action="store_true", default=False,
+                    help="verbose output - enable debug log messages")
+  parser.add_option("--tool", dest="valgrind_tool", default="drmemory_full",
+                    help="specify a valgrind tool to run the tests under")
+  parser.add_option("--tool_flags", dest="valgrind_tool_flags", default="",
+                    help="specify custom flags for the selected valgrind tool")
+  parser.add_option("--keep_logs", action="store_true", default=False,
+                    help="store memory tool logs in the <tool>.logs directory "
+                         "instead of /tmp.\nThis can be useful for tool "
+                         "developers/maintainers.\nPlease note that the <tool>"
+                         ".logs directory will be clobbered on tool startup.")
+  parser.add_option("--test-launcher-bot-mode", action="store_true",
+                    help="run the tests with --test-launcher-bot-mode")
+  parser.add_option("--test-launcher-total-shards", type=int,
+                    help="run the tests with --test-launcher-total-shards")
+  parser.add_option("--test-launcher-shard-index", type=int,
+                    help="run the tests with --test-launcher-shard-index")
+
+  options, args = parser.parse_args()
+
+  # Bake target into build_dir.
+  if options.target and options.build_dir:
+    assert (options.target !=
+            os.path.basename(os.path.dirname(options.build_dir)))
+    options.build_dir = os.path.join(os.path.abspath(options.build_dir),
+                                     options.target)
+
+  if options.verbose:
+    logging_utils.config_root(logging.DEBUG)
+  else:
+    logging_utils.config_root()
+
+  if options.help_tests:
+    ChromeTests.ShowTests()
+    return 0
+
+  if not options.test:
+    parser.error("--test not specified")
+
+  if len(options.test) != 1 and options.gtest_filter:
+    parser.error("--gtest_filter and multiple tests don't make sense together")
+
+  for t in options.test:
+    tests = ChromeTests(options, args, t)
+    ret = tests.Run()
+    if ret: return ret
+  return 0
+
+
+if __name__ == "__main__":
+  sys.exit(_main())
diff --git a/tools/drmemory/scripts/valgrind_test.py b/tools/drmemory/scripts/valgrind_test.py
new file mode 100644
index 0000000..bde3002
--- /dev/null
+++ b/tools/drmemory/scripts/valgrind_test.py
@@ -0,0 +1,770 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Runs an exe through Valgrind and puts the intermediate files in a
+directory.
+"""
+
+import datetime
+import glob
+import logging
+import optparse
+import os
+import re
+import shutil
+import stat
+import subprocess
+import sys
+import tempfile
+
+import common
+
+import drmemory_analyze
+
+class BaseTool(object):
+  """Abstract class for running dynamic error detection tools.
+
+  Always subclass this and implement ToolCommand with framework- and
+  tool-specific stuff.
+  """
+
+  def __init__(self):
+    temp_parent_dir = None
+    self.log_parent_dir = ""
+    if common.IsWindows():
+      # gpu process on Windows Vista+ runs at Low Integrity and can only
+      # write to certain directories (http://crbug.com/119131)
+      #
+      # TODO(bruening): if scripts die in middle and don't clean up temp
+      # dir, we'll accumulate files in profile dir.  should remove
+      # really old files automatically.
+      profile = os.getenv("USERPROFILE")
+      if profile:
+        self.log_parent_dir = profile + "\\AppData\\LocalLow\\"
+        if os.path.exists(self.log_parent_dir):
+          self.log_parent_dir = common.NormalizeWindowsPath(self.log_parent_dir)
+          temp_parent_dir = self.log_parent_dir
+    # Generated every time (even when overridden)
+    self.temp_dir = tempfile.mkdtemp(prefix="vg_logs_", dir=temp_parent_dir)
+    self.log_dir = self.temp_dir # overridable by --keep_logs
+    self.option_parser_hooks = []
+    # TODO(glider): we may not need some of the env vars on some of the
+    # platforms.
+    self._env = {
+      "G_SLICE" : "always-malloc",
+      "NSS_DISABLE_UNLOAD" : "1",
+      "NSS_DISABLE_ARENA_FREE_LIST" : "1",
+      "GTEST_DEATH_TEST_USE_FORK": "1",
+    }
+
+  def ToolName(self):
+    raise NotImplementedError, "This method should be implemented " \
+                               "in the tool-specific subclass"
+
+  def Analyze(self, check_sanity=False):
+    raise NotImplementedError, "This method should be implemented " \
+                               "in the tool-specific subclass"
+
+  def RegisterOptionParserHook(self, hook):
+    # Frameworks and tools can add their own flags to the parser.
+    self.option_parser_hooks.append(hook)
+
+  def CreateOptionParser(self):
+    # Defines Chromium-specific flags.
+    self._parser = optparse.OptionParser("usage: %prog [options] <program to "
+                                         "test>")
+    self._parser.disable_interspersed_args()
+    self._parser.add_option("-t", "--timeout",
+                      dest="timeout", metavar="TIMEOUT", default=10000,
+                      help="timeout in seconds for the run (default 10000)")
+    self._parser.add_option("", "--build-dir",
+                            help="the location of the compiler output")
+    self._parser.add_option("", "--source-dir",
+                            help="path to top of source tree for this build "
+                                 "(used to normalize source paths in baseline)")
+    self._parser.add_option("", "--gtest_filter", default="",
+                            help="which test case to run")
+    self._parser.add_option("", "--gtest_repeat",
+                            help="how many times to run each test")
+    self._parser.add_option("", "--gtest_print_time", action="store_true",
+                            default=False,
+                            help="show how long each test takes")
+    self._parser.add_option("", "--ignore_exit_code", action="store_true",
+                            default=False,
+                            help="ignore exit code of the test "
+                                 "(e.g. test failures)")
+    self._parser.add_option("", "--keep_logs", action="store_true",
+                            default=False,
+                            help="store memory tool logs in the <tool>.logs "
+                                 "directory instead of /tmp.\nThis can be "
+                                 "useful for tool developers/maintainers.\n"
+                                 "Please note that the <tool>.logs directory "
+                                 "will be clobbered on tool startup.")
+
+    # To add framework- or tool-specific flags, please add a hook using
+    # RegisterOptionParserHook in the corresponding subclass.
+    # See ValgrindTool for an example.
+    for hook in self.option_parser_hooks:
+      hook(self, self._parser)
+
+  def ParseArgv(self, args):
+    self.CreateOptionParser()
+
+    # self._tool_flags will store those tool flags which we don't parse
+    # manually in this script.
+    self._tool_flags = []
+    known_args = []
+
+    """ We assume that the first argument not starting with "-" is a program
+    name and all the following flags should be passed to the program.
+    TODO(timurrrr): customize optparse instead
+    """
+    while len(args) > 0 and args[0][:1] == "-":
+      arg = args[0]
+      if (arg == "--"):
+        break
+      if self._parser.has_option(arg.split("=")[0]):
+        known_args += [arg]
+      else:
+        self._tool_flags += [arg]
+      args = args[1:]
+
+    if len(args) > 0:
+      known_args += args
+
+    self._options, self._args = self._parser.parse_args(known_args)
+
+    self._timeout = int(self._options.timeout)
+    self._source_dir = self._options.source_dir
+    if self._options.keep_logs:
+      # log_parent_dir has trailing slash if non-empty
+      self.log_dir = self.log_parent_dir + "%s.logs" % self.ToolName()
+      if os.path.exists(self.log_dir):
+        shutil.rmtree(self.log_dir)
+      os.mkdir(self.log_dir)
+      logging.info("Logs are in " + self.log_dir)
+
+    self._ignore_exit_code = self._options.ignore_exit_code
+    if self._options.gtest_filter != "":
+      self._args.append("--gtest_filter=%s" % self._options.gtest_filter)
+    if self._options.gtest_repeat:
+      self._args.append("--gtest_repeat=%s" % self._options.gtest_repeat)
+    if self._options.gtest_print_time:
+      self._args.append("--gtest_print_time")
+
+    return True
+
+  def Setup(self, args):
+    return self.ParseArgv(args)
+
+  def ToolCommand(self):
+    raise NotImplementedError, "This method should be implemented " \
+                               "in the tool-specific subclass"
+
+  def Cleanup(self):
+    # You may override it in the tool-specific subclass
+    pass
+
+  def Execute(self):
+    """ Execute the app to be tested after successful instrumentation.
+    Full execution command-line provided by subclassers via proc."""
+    logging.info("starting execution...")
+    proc = self.ToolCommand()
+    for var in self._env:
+      common.PutEnvAndLog(var, self._env[var])
+    return common.RunSubprocess(proc, self._timeout)
+
+  def RunTestsAndAnalyze(self, check_sanity):
+    exec_retcode = self.Execute()
+    analyze_retcode = self.Analyze(check_sanity)
+
+    if analyze_retcode:
+      logging.error("Analyze failed.")
+      logging.info("Search the log for '[ERROR]' to see the error reports.")
+      return analyze_retcode
+
+    if exec_retcode:
+      if self._ignore_exit_code:
+        logging.info("Test execution failed, but the exit code is ignored.")
+      else:
+        logging.error("Test execution failed.")
+        return exec_retcode
+    else:
+      logging.info("Test execution completed successfully.")
+
+    if not analyze_retcode:
+      logging.info("Analysis completed successfully.")
+
+    return 0
+
+  def Main(self, args, check_sanity, min_runtime_in_seconds):
+    """Call this to run through the whole process: Setup, Execute, Analyze"""
+    start_time = datetime.datetime.now()
+    retcode = -1
+    if self.Setup(args):
+      retcode = self.RunTestsAndAnalyze(check_sanity)
+      shutil.rmtree(self.temp_dir, ignore_errors=True)
+      self.Cleanup()
+    else:
+      logging.error("Setup failed")
+    end_time = datetime.datetime.now()
+    runtime_in_seconds = (end_time - start_time).seconds
+    hours = runtime_in_seconds / 3600
+    seconds = runtime_in_seconds % 3600
+    minutes = seconds / 60
+    seconds = seconds % 60
+    logging.info("elapsed time: %02d:%02d:%02d" % (hours, minutes, seconds))
+    if (min_runtime_in_seconds > 0 and
+        runtime_in_seconds < min_runtime_in_seconds):
+      logging.error("Layout tests finished too quickly. "
+                    "It should have taken at least %d seconds. "
+                    "Something went wrong?" % min_runtime_in_seconds)
+      retcode = -1
+    return retcode
+
+  def Run(self, args, module, min_runtime_in_seconds=0):
+    MODULES_TO_SANITY_CHECK = ["base"]
+
+    check_sanity = module in MODULES_TO_SANITY_CHECK
+    return self.Main(args, check_sanity, min_runtime_in_seconds)
+
+
+class ValgrindTool(BaseTool):
+  """Abstract class for running Valgrind tools.
+
+  Always subclass this and implement ToolSpecificFlags() and
+  ExtendOptionParser() for tool-specific stuff.
+  """
+  def __init__(self):
+    super(ValgrindTool, self).__init__()
+    self.RegisterOptionParserHook(ValgrindTool.ExtendOptionParser)
+
+  def UseXML(self):
+    # Override if tool prefers nonxml output
+    return True
+
+  def ExtendOptionParser(self, parser):
+    parser.add_option("", "--suppressions", default=[],
+                            action="append",
+                            help="path to a valgrind suppression file")
+    parser.add_option("", "--indirect", action="store_true",
+                            default=False,
+                            help="set BROWSER_WRAPPER rather than "
+                                 "running valgrind directly")
+    parser.add_option("", "--indirect_webkit_layout", action="store_true",
+                            default=False,
+                            help="set --wrapper rather than running Dr. Memory "
+                                 "directly.")
+    parser.add_option("", "--trace_children", action="store_true",
+                            default=False,
+                            help="also trace child processes")
+    parser.add_option("", "--num-callers",
+                            dest="num_callers", default=30,
+                            help="number of callers to show in stack traces")
+    parser.add_option("", "--generate_dsym", action="store_true",
+                          default=False,
+                          help="Generate .dSYM file on Mac if needed. Slow!")
+
+  def Setup(self, args):
+    if not BaseTool.Setup(self, args):
+      return False
+    if common.IsMac():
+      self.PrepareForTestMac()
+    return True
+
+  def PrepareForTestMac(self):
+    """Runs dsymutil if needed.
+
+    Valgrind for Mac OS X requires that debugging information be in a .dSYM
+    bundle generated by dsymutil.  It is not currently able to chase DWARF
+    data into .o files like gdb does, so executables without .dSYM bundles or
+    with the Chromium-specific "fake_dsym" bundles generated by
+    build/mac/strip_save_dsym won't give source file and line number
+    information in valgrind.
+
+    This function will run dsymutil if the .dSYM bundle is missing or if
+    it looks like a fake_dsym.  A non-fake dsym that already exists is assumed
+    to be up-to-date.
+    """
+    test_command = self._args[0]
+    dsym_bundle = self._args[0] + '.dSYM'
+    dsym_file = os.path.join(dsym_bundle, 'Contents', 'Resources', 'DWARF',
+                             os.path.basename(test_command))
+    dsym_info_plist = os.path.join(dsym_bundle, 'Contents', 'Info.plist')
+
+    needs_dsymutil = True
+    saved_test_command = None
+
+    if os.path.exists(dsym_file) and os.path.exists(dsym_info_plist):
+      # Look for the special fake_dsym tag in dsym_info_plist.
+      dsym_info_plist_contents = open(dsym_info_plist).read()
+
+      if not re.search('^\s*<key>fake_dsym</key>$', dsym_info_plist_contents,
+                       re.MULTILINE):
+        # fake_dsym is not set, this is a real .dSYM bundle produced by
+        # dsymutil.  dsymutil does not need to be run again.
+        needs_dsymutil = False
+      else:
+        # fake_dsym is set.  dsym_file is a copy of the original test_command
+        # before it was stripped.  Copy it back to test_command so that
+        # dsymutil has unstripped input to work with.  Move the stripped
+        # test_command out of the way, it will be restored when this is
+        # done.
+        saved_test_command = test_command + '.stripped'
+        os.rename(test_command, saved_test_command)
+        shutil.copyfile(dsym_file, test_command)
+        shutil.copymode(saved_test_command, test_command)
+
+    if needs_dsymutil:
+      if self._options.generate_dsym:
+        # Remove the .dSYM bundle if it exists.
+        shutil.rmtree(dsym_bundle, True)
+
+        dsymutil_command = ['dsymutil', test_command]
+
+        # dsymutil is crazy slow.  Ideally we'd have a timeout here,
+        # but common.RunSubprocess' timeout is only checked
+        # after each line of output; dsymutil is silent
+        # until the end, and is then killed, which is silly.
+        common.RunSubprocess(dsymutil_command)
+
+        if saved_test_command:
+          os.rename(saved_test_command, test_command)
+      else:
+        logging.info("No real .dSYM for test_command.  Line numbers will "
+                     "not be shown.  Either tell xcode to generate .dSYM "
+                     "file, or use --generate_dsym option to this tool.")
+
+  def ToolCommand(self):
+    """Get the valgrind command to run."""
+    # Note that self._args begins with the exe to be run.
+    tool_name = self.ToolName()
+
+    # Construct the valgrind command.
+    if 'CHROME_VALGRIND' in os.environ:
+      path = os.path.join(os.environ['CHROME_VALGRIND'], "bin", "valgrind")
+    else:
+      path = "valgrind"
+    proc = [path, "--tool=%s" % tool_name]
+
+    proc += ["--num-callers=%i" % int(self._options.num_callers)]
+
+    if self._options.trace_children:
+      proc += ["--trace-children=yes"]
+      proc += ["--trace-children-skip='*dbus-daemon*'"]
+      proc += ["--trace-children-skip='*dbus-launch*'"]
+      proc += ["--trace-children-skip='*perl*'"]
+      proc += ["--trace-children-skip='*python*'"]
+      # This is really Python, but for some reason Valgrind follows it.
+      proc += ["--trace-children-skip='*lsb_release*'"]
+
+    proc += self.ToolSpecificFlags()
+    proc += self._tool_flags
+
+    suppression_count = 0
+    for suppression_file in self._options.suppressions:
+      if os.path.exists(suppression_file):
+        suppression_count += 1
+        proc += ["--suppressions=%s" % suppression_file]
+
+    if not suppression_count:
+      logging.warning("WARNING: NOT USING SUPPRESSIONS!")
+
+    logfilename = self.log_dir + ("/%s." % tool_name) + "%p"
+    if self.UseXML():
+      proc += ["--xml=yes", "--xml-file=" + logfilename]
+    else:
+      proc += ["--log-file=" + logfilename]
+
+    # The Valgrind command is constructed.
+
+    # Handle --indirect_webkit_layout separately.
+    if self._options.indirect_webkit_layout:
+      # Need to create the wrapper before modifying |proc|.
+      wrapper = self.CreateBrowserWrapper(proc, webkit=True)
+      proc = self._args
+      proc.append("--wrapper")
+      proc.append(wrapper)
+      return proc
+
+    if self._options.indirect:
+      wrapper = self.CreateBrowserWrapper(proc)
+      os.environ["BROWSER_WRAPPER"] = wrapper
+      logging.info('export BROWSER_WRAPPER=' + wrapper)
+      proc = []
+    proc += self._args
+    return proc
+
+  def ToolSpecificFlags(self):
+    raise NotImplementedError, "This method should be implemented " \
+                               "in the tool-specific subclass"
+
+  def CreateBrowserWrapper(self, proc, webkit=False):
+    """The program being run invokes Python or something else that can't stand
+    to be valgrinded, and also invokes the Chrome browser. In this case, use a
+    magic wrapper to only valgrind the Chrome browser. Build the wrapper here.
+    Returns the path to the wrapper. It's up to the caller to use the wrapper
+    appropriately.
+    """
+    command = " ".join(proc)
+    # Add the PID of the browser wrapper to the logfile names so we can
+    # separate log files for different UI tests at the analyze stage.
+    command = command.replace("%p", "$$.%p")
+
+    (fd, indirect_fname) = tempfile.mkstemp(dir=self.log_dir,
+                                            prefix="browser_wrapper.",
+                                            text=True)
+    f = os.fdopen(fd, "w")
+    f.write('#!/bin/bash\n'
+            'echo "Started Valgrind wrapper for this test, PID=$$" >&2\n')
+
+    f.write('DIR=`dirname $0`\n'
+            'TESTNAME_FILE=$DIR/testcase.$$.name\n\n')
+
+    if webkit:
+      # Webkit layout_tests pass the URL as the first line of stdin.
+      f.write('tee $TESTNAME_FILE | %s "$@"\n' % command)
+    else:
+      # Try to get the test case name by looking at the program arguments.
+      # i.e. Chromium ui_tests used --test-name arg.
+      # TODO(timurrrr): This doesn't handle "--test-name Test.Name"
+      # TODO(timurrrr): ui_tests are dead. Where do we use the non-webkit
+      # wrapper now? browser_tests? What do they do?
+      f.write('for arg in $@\ndo\n'
+              '  if [[ "$arg" =~ --test-name=(.*) ]]\n  then\n'
+              '    echo ${BASH_REMATCH[1]} >$TESTNAME_FILE\n'
+              '  fi\n'
+              'done\n\n'
+              '%s "$@"\n' % command)
+
+    f.close()
+    os.chmod(indirect_fname, stat.S_IRUSR|stat.S_IXUSR)
+    return indirect_fname
+
+  def CreateAnalyzer(self):
+    raise NotImplementedError("This method should be implemented "
+                              "in the tool-specific subclass")
+
+  def GetAnalyzeResults(self, check_sanity=False):
+    # Glob all the files in the log directory
+    filenames = glob.glob(self.log_dir + "/" + self.ToolName() + ".*")
+
+    # If we have a browser wrapper, the log files are named
+    # "toolname.wrapper_PID.valgrind_PID".
+    # Extract the set of wrapper_PIDs into ppids.
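+    # For example (PIDs are illustrative), "<log_dir>/<toolname>.1234.5678"
+    # contributes wrapper PID 1234 below, while "<log_dir>/<toolname>.1234",
+    # written when no wrapper was used, does not match the regexp.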
+    ppids = set([int(f.split(".")[-2])
+                 for f in filenames if re.search(r"\.[0-9]+\.[0-9]+$", f)])
+
+    analyzer = self.CreateAnalyzer()
+    if len(ppids) == 0:
+      # Fast path - no browser wrapper was set.
+      return analyzer.Report(filenames, None, check_sanity)
+
+    ret = 0
+    for ppid in ppids:
+      testcase_name = None
+      try:
+        f = open(self.log_dir + ("/testcase.%d.name" % ppid))
+        testcase_name = f.read().strip()
+        f.close()
+        wk_layout_prefix="third_party/WebKit/LayoutTests/"
+        wk_prefix_at = testcase_name.rfind(wk_layout_prefix)
+        if wk_prefix_at != -1:
+          testcase_name = testcase_name[wk_prefix_at + len(wk_layout_prefix):]
+      except IOError:
+        pass
+      print "====================================================="
+      print " Below is the report for valgrind wrapper PID=%d." % ppid
+      if testcase_name:
+        print " It was used while running the `%s` test." % testcase_name
+      else:
+        print " You can find the corresponding test"
+        print " by searching the above log for 'PID=%d'" % ppid
+      sys.stdout.flush()
+
+      ppid_filenames = [f for f in filenames
+                        if re.search(r"\.%d\.[0-9]+$" % ppid, f)]
+      # check_sanity won't work with browser wrappers
+      assert check_sanity == False
+      ret |= analyzer.Report(ppid_filenames, testcase_name)
+      print "====================================================="
+      sys.stdout.flush()
+
+    if ret != 0:
+      print ""
+      print "The Valgrind reports are grouped by test names."
+      print "Each test has its PID printed in the log when the test was run"
+      print "and at the beginning of its Valgrind report."
+      print "Hint: you can search for the reports by Ctrl+F -> `=#`"
+      sys.stdout.flush()
+
+    return ret
+
+class DrMemory(BaseTool):
+  """Dr.Memory
+  Dynamic memory error detector for Windows.
+
+  http://dev.chromium.org/developers/how-tos/using-drmemory
+  It is not very mature at the moment, some things might not work properly.
+  """
+
+  def __init__(self, full_mode, pattern_mode):
+    super(DrMemory, self).__init__()
+    self.full_mode = full_mode
+    self.pattern_mode = pattern_mode
+    self.RegisterOptionParserHook(DrMemory.ExtendOptionParser)
+
+  def ToolName(self):
+    return "drmemory"
+
+  def ExtendOptionParser(self, parser):
+    parser.add_option("", "--suppressions", default=[],
+                      action="append",
+                      help="path to a drmemory suppression file")
+    parser.add_option("", "--follow_python", action="store_true",
+                      default=False, dest="follow_python",
+                      help="Monitor python child processes.  If off, neither "
+                      "python children nor any children of python children "
+                      "will be monitored.")
+    parser.add_option("", "--indirect", action="store_true",
+                      default=False,
+                      help="set BROWSER_WRAPPER rather than "
+                           "running Dr. Memory directly on the harness")
+    parser.add_option("", "--indirect_webkit_layout", action="store_true",
+                      default=False,
+                      help="set --wrapper rather than running valgrind "
+                      "directly.")
+    parser.add_option("", "--use_debug", action="store_true",
+                      default=False, dest="use_debug",
+                      help="Run Dr. Memory debug build")
+    parser.add_option("", "--trace_children", action="store_true",
+                            default=True,
+                            help="TODO: default value differs from Valgrind")
+
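+  # These options are merged into the driver's option parser via
+  # RegisterOptionParserHook, so a run could (illustratively) pass flags such
+  # as "--suppressions path\to\drmemory_suppressions.txt --follow_python"
+  # through the test scripts down to this tool.
+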
+  def ToolCommand(self):
+    """Get the tool command to run."""
+    # WINHEAP is what Dr. Memory supports as there are issues w/ both
+    # jemalloc (https://github.com/DynamoRIO/drmemory/issues/320) and
+    # tcmalloc (https://github.com/DynamoRIO/drmemory/issues/314)
+    add_env = {
+      "CHROME_ALLOCATOR" : "WINHEAP",
+      "JSIMD_FORCEMMX"   : "1",  # https://github.com/DynamoRIO/drmemory/issues/540
+    }
+    for k,v in add_env.iteritems():
+      logging.info("export %s=%s", k, v)
+      os.putenv(k, v)
+
+    drmem_cmd = os.getenv("DRMEMORY_COMMAND")
+    if not drmem_cmd:
+      raise RuntimeError, "Please set DRMEMORY_COMMAND environment variable " \
+                          "with the path to drmemory.exe"
+    proc = drmem_cmd.split(" ")
+
+    # By default, don't run python (this will exclude python's children as well)
+    # to reduce runtime.  We're not really interested in spending time finding
+    # bugs in the python implementation.
+    # With file-based config we must update the file every time, and
+    # it will affect simultaneous drmem uses by this user.  While file-based
+    # config has many advantages, here we may want this-instance-only
+    # (https://github.com/DynamoRIO/drmemory/issues/334).
+    drconfig_cmd = [ proc[0].replace("drmemory.exe", "drconfig.exe") ]
+    drconfig_cmd += ["-quiet"] # suppress errors about no 64-bit libs
+    run_drconfig = True
+    if self._options.follow_python:
+      logging.info("Following python children")
+      # -unreg fails if not already registered so query for that first
+      query_cmd = drconfig_cmd + ["-isreg", "python.exe"]
+      query_proc = subprocess.Popen(query_cmd, stdout=subprocess.PIPE,
+                                    shell=True)
+      (query_out, query_err) = query_proc.communicate()
+      if re.search("exe not registered", query_out):
+        run_drconfig = False # all set
+      else:
+        drconfig_cmd += ["-unreg", "python.exe"]
+    else:
+      logging.info("Excluding python children")
+      drconfig_cmd += ["-reg", "python.exe", "-norun"]
+    if run_drconfig:
+      drconfig_retcode = common.RunSubprocess(drconfig_cmd, self._timeout)
+      if drconfig_retcode:
+        logging.error("Configuring whether to follow python children failed " \
+                      "with %d.", drconfig_retcode)
+        raise RuntimeError, "Configuring python children failed "
+
+    suppression_count = 0
+    supp_files = self._options.suppressions
+    if self.full_mode:
+      supp_files += [s.replace(".txt", "_full.txt") for s in supp_files]
+    for suppression_file in supp_files:
+      if os.path.exists(suppression_file):
+        suppression_count += 1
+        proc += ["-suppress", common.NormalizeWindowsPath(suppression_file)]
+
+    if not suppression_count:
+      logging.warning("WARNING: NOT USING SUPPRESSIONS!")
+
+    # Un-comment to dump Dr.Memory events on error
+    #proc += ["-dr_ops", "-dumpcore_mask", "-dr_ops", "0x8bff"]
+
+    # Un-comment the following lines (and comment out the active -stderr_mask
+    # line below) to debug Dr.Memory.
+    #proc += ["-dr_ops", "-no_hide"]
+    #proc += ["-dr_ops", "-msgbox_mask", "-dr_ops", "15"]
+    #proc += ["-dr_ops", "-stderr_mask", "-dr_ops", "15"]
+    # Ensure we see messages about Dr. Memory crashing!
+    proc += ["-dr_ops", "-stderr_mask", "-dr_ops", "12"]
+
+    if self._options.use_debug:
+      proc += ["-debug"]
+
+    proc += ["-logdir", common.NormalizeWindowsPath(self.log_dir)]
+
+    # Ensure symcache_dir is defined even when neither branch below sets it.
+    symcache_dir = None
+    if self.log_parent_dir:
+      # gpu process on Windows Vista+ runs at Low Integrity and can only
+      # write to certain directories (http://crbug.com/119131)
+      symcache_dir = os.path.join(self.log_parent_dir, "drmemory.symcache")
+    elif self._options.build_dir:
+      # The other case is only possible with -t cmdline.
+      # Anyway, if we omit -symcache_dir, the -logdir value is used, which
+      # should be fine.
+      symcache_dir = os.path.join(self._options.build_dir, "drmemory.symcache")
+    if symcache_dir:
+      if not os.path.exists(symcache_dir):
+        try:
+          os.mkdir(symcache_dir)
+        except OSError:
+          logging.warning("Can't create symcache dir?")
+      if os.path.exists(symcache_dir):
+        proc += ["-symcache_dir", common.NormalizeWindowsPath(symcache_dir)]
+
+    # Use -no_summary to suppress DrMemory's summary and init-time
+    # notifications.  We generate our own with drmemory_analyze.py.
+    proc += ["-batch", "-no_summary"]
+
+    # Un-comment to disable interleaved output.  Will also suppress error
+    # messages normally printed to stderr.
+    #proc += ["-quiet", "-no_results_to_stderr"]
+
+    proc += ["-callstack_max_frames", "40"]
+
+    # disable leak scan for now
+    proc += ["-no_count_leaks", "-no_leak_scan"]
+
+    # disable warnings about unaddressable prefetches
+    proc += ["-no_check_prefetch"]
+
+    # crbug.com/413215, no heap mismatch check for Windows release build binary
+    if common.IsWindows() and "Release" in self._options.build_dir:
+        proc += ["-no_check_delete_mismatch"]
+
+    # make callstacks easier to read
+    proc += ["-callstack_srcfile_prefix",
+             "build\\src,chromium\\src,crt_build\\self_x86"]
+    proc += ["-callstack_modname_hide",
+             "*drmemory*,chrome.dll"]
+
+    boring_callers = common.BoringCallers(mangled=False, use_re_wildcards=False)
+    # TODO(timurrrr): In fact, we want "starting from .." instead of "below .."
+    proc += ["-callstack_truncate_below", ",".join(boring_callers)]
+
+    if self.pattern_mode:
+      proc += ["-pattern", "0xf1fd", "-no_count_leaks", "-redzone_size", "0x20"]
+    elif not self.full_mode:
+      proc += ["-light"]
+
+    proc += self._tool_flags
+
+    # Dr.Memory requires -- to separate tool flags from the executable name.
+    proc += ["--"]
+
+    if self._options.indirect or self._options.indirect_webkit_layout:
+      wrapper_path = os.path.join(self._source_dir,
+                                  "tools", "valgrind", "browser_wrapper_win.py")
+      wrapper = " ".join(["python", wrapper_path] + proc)
+      self.CreateBrowserWrapper(wrapper)
+      logging.info("browser wrapper = " + " ".join(proc))
+      if self._options.indirect_webkit_layout:
+        proc = self._args
+        # Layout tests want forward slashes.
+        wrapper = wrapper.replace('\\', '/')
+        proc += ["--wrapper", wrapper]
+        return proc
+      else:
+        proc = []
+
+    # Note that self._args begins with the name of the exe to be run.
+    self._args[0] = common.NormalizeWindowsPath(self._args[0])
+    proc += self._args
+    return proc
+
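+  # Rough sketch of the command ToolCommand() can produce for the default
+  # ("light") tool, with paths shortened and most callstack flags omitted:
+  #   drmemory.exe -suppress <suppressions.txt> -dr_ops -stderr_mask -dr_ops 12
+  #       -logdir <log_dir> -batch -no_summary -callstack_max_frames 40
+  #       -no_count_leaks -no_leak_scan -no_check_prefetch -light
+  #       -- <test_binary.exe> <test args>
+  # DRMEMORY_COMMAND supplies the drmemory.exe path (plus any extra flags).
+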
+  def CreateBrowserWrapper(self, command):
+    os.putenv("BROWSER_WRAPPER", command)
+
+  def Analyze(self, check_sanity=False):
+    # Use one analyzer for all the log files to avoid printing duplicate reports
+    #
+    # TODO(timurrrr): unify this with Valgrind and other tools when we have
+    # https://github.com/DynamoRIO/drmemory/issues/684
+    analyzer = drmemory_analyze.DrMemoryAnalyzer()
+
+    ret = 0
+    if not self._options.indirect and not self._options.indirect_webkit_layout:
+      filenames = glob.glob(self.log_dir + "/*/results.txt")
+
+      ret = analyzer.Report(filenames, None, check_sanity)
+    else:
+      testcases = glob.glob(self.log_dir + "/testcase.*.logs")
+      # If we have a browser wrapper, the per-test log dirs are named
+      # "testcase.wrapper_PID.logs".
+      # Extract the set of wrapper_PIDs into ppids.
+      # NOTE: ppids may contain '_', i.e. they are not ints!
+      ppids = set([f.split(".")[-2] for f in testcases])
+
+      for ppid in ppids:
+        testcase_name = None
+        try:
+          f = open("%s/testcase.%s.name" % (self.log_dir, ppid))
+          testcase_name = f.read().strip()
+          f.close()
+        except IOError:
+          pass
+        print "====================================================="
+        print " Below is the report for drmemory wrapper PID=%s." % ppid
+        if testcase_name:
+          print " It was used while running the `%s` test." % testcase_name
+        else:
+          # TODO(timurrrr): hm, the PID line is suppressed on Windows...
+          print " You can find the corresponding test"
+          print " by searching the above log for 'PID=%s'" % ppid
+        sys.stdout.flush()
+        ppid_filenames = glob.glob("%s/testcase.%s.logs/*/results.txt" %
+                                   (self.log_dir, ppid))
+        ret |= analyzer.Report(ppid_filenames, testcase_name, False)
+        print "====================================================="
+        sys.stdout.flush()
+
+    logging.info("Please see http://dev.chromium.org/developers/how-tos/"
+                 "using-drmemory for the info on Dr. Memory")
+    return ret
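+
+  # Log layout handled above, for reference (names are illustrative):
+  #   <log_dir>/<run_dir>/results.txt                   - direct runs
+  #   <log_dir>/testcase.<ppid>.logs/<run>/results.txt  - BROWSER_WRAPPER runs
+  #   <log_dir>/testcase.<ppid>.name                    - test name per wrapper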
+
+
+class ToolFactory:
+  def Create(self, tool_name):
+    if tool_name == "drmemory" or tool_name == "drmemory_light":
+      # TODO(timurrrr): remove support for "drmemory" when buildbots are
+      # switched to drmemory_light OR make drmemory==drmemory_full the default
+      # mode when the tool is mature enough.
+      return DrMemory(False, False)
+    if tool_name == "drmemory_full":
+      return DrMemory(True, False)
+    if tool_name == "drmemory_pattern":
+      return DrMemory(False, True)
+    try:
+      platform_name = common.PlatformNames()[0]
+    except common.NotImplementedError:
+      platform_name = sys.platform + "(Unknown)"
+    raise RuntimeError, "Unknown tool (tool=%s, platform=%s)" % (tool_name,
+                                                                 platform_name)
+
+def CreateTool(tool):
+  return ToolFactory().Create(tool)
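+
+# Minimal usage sketch (assuming this file is the drmemory scripts' copy of
+# valgrind_test.py and is importable under that name, with options already
+# parsed by the driver script):
+#   import valgrind_test
+#   tool = valgrind_test.CreateTool("drmemory_light")
+#   print tool.ToolName()  # "drmemory"
+#   # The driver then builds the command with tool.ToolCommand() and later
+#   # calls tool.Analyze() on the collected logs.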