diff --git a/googlemock/README.md b/googlemock/README.md
index 55c4b832..183fdb81 100644
--- a/googlemock/README.md
+++ b/googlemock/README.md
@@ -1,37 +1,44 @@
 # Googletest Mocking (gMock) Framework
 
 ### Overview
 
 Google's framework for writing and using C++ mock classes. It can help you
 derive better designs of your system and write better tests.
 
 It is inspired by:
 
 *   [jMock](http://www.jmock.org/),
 *   [EasyMock](http://www.easymock.org/), and
 *   [Hamcrest](http://code.google.com/p/hamcrest/),
 
 and designed with C++'s specifics in mind.
 
 gMock:
 
 -   provides a declarative syntax for defining mocks,
 -   can define partial (hybrid) mocks, which are a cross of real and mock
     objects,
 -   handles functions of arbitrary types and overloaded functions,
 -   comes with a rich set of matchers for validating function arguments,
 -   uses an intuitive syntax for controlling the behavior of a mock,
 -   does automatic verification of expectations (no record-and-replay needed),
 -   allows arbitrary (partial) ordering constraints on function calls to be
     expressed,
 -   lets a user extend it by defining new matchers and actions,
 -   does not use exceptions, and
 -   is easy to learn and use.
 
+Details and examples can be found here:
+
+*   [gMock for Dummies](docs/for_dummies.md)
+*   [Legacy gMock FAQ](docs/gmock_faq.md)
+*   [gMock Cookbook](docs/cook_book.md)
+*   [gMock Cheat Sheet](docs/cheat_sheet.md)
+
 Please note that code under scripts/generator/ is from the [cppclean
 project](http://code.google.com/p/cppclean/) and is under the Apache
 License, which is different from Google Mock's license.
 
 Google Mock is a part of the
 [Google Test C++ testing framework](http://github.com/google/googletest/) and is
 subject to the same requirements.
diff --git a/googlemock/scripts/upload.py b/googlemock/scripts/upload.py
index 4b574732..95239dc2 100755
--- a/googlemock/scripts/upload.py
+++ b/googlemock/scripts/upload.py
@@ -1,1387 +1,1387 @@
 #!/usr/bin/env python
 #
 # Copyright 2007 Google Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
 #     http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
 """Tool for uploading diffs from a version control system to the codereview app.
 
 Usage summary: upload.py [options] [-- diff_options]
 
 Diff options are passed to the diff command of the underlying system.
 
 Supported version control systems:
   Git
   Mercurial
   Subversion
 
 It is important for Git/Mercurial users to specify a tree/node/branch to diff
 against by using the '--rev' option.
 """
 # This code is derived from appcfg.py in the App Engine SDK (open source),
 # and from ASPN recipe #146306.
 
 import cookielib
 import getpass
 import logging
 import md5
 import mimetypes
 import optparse
 import os
 import re
 import socket
 import subprocess
 import sys
 import urllib
 import urllib2
 import urlparse
 
 try:
   import readline
 except ImportError:
   pass
 
 # The logging verbosity:
 #  0: Errors only.
 #  1: Status messages.
 #  2: Info logs.
 #  3: Debug logs.
 verbosity = 1
 
 # Max size of patch or base file.
 MAX_UPLOAD_SIZE = 900 * 1024
 
 
 def GetEmail(prompt):
   """Prompts the user for their email address and returns it.
 
   The last used email address is saved to a file and offered up as a suggestion
   to the user. If the user presses enter without typing anything, the last
   used email address is used. If the user enters a new address, it is saved
   for next time we prompt.
 
   """
   last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
   last_email = ""
   if os.path.exists(last_email_file_name):
     try:
       last_email_file = open(last_email_file_name, "r")
       last_email = last_email_file.readline().strip("\n")
       last_email_file.close()
       prompt += " [%s]" % last_email
     except IOError, e:
       pass
   email = raw_input(prompt + ": ").strip()
   if email:
     try:
       last_email_file = open(last_email_file_name, "w")
       last_email_file.write(email)
       last_email_file.close()
     except IOError, e:
       pass
   else:
     email = last_email
   return email
 
 
 def StatusUpdate(msg):
   """Print a status message to stdout.
 
   If 'verbosity' is greater than 0, print the message.
 
   Args:
     msg: The string to print.
   """
   if verbosity > 0:
     print msg
 
 
 def ErrorExit(msg):
   """Print an error message to stderr and exit."""
   print >>sys.stderr, msg
   sys.exit(1)
 
 
 class ClientLoginError(urllib2.HTTPError):
   """Raised to indicate there was an error authenticating with ClientLogin."""
 
   def __init__(self, url, code, msg, headers, args):
     urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
     self.args = args
     self.reason = args["Error"]
 
 
 class AbstractRpcServer(object):
   """Provides a common interface for a simple RPC server."""
 
   def __init__(self, host, auth_function, host_override=None, extra_headers={},
                save_cookies=False):
     """Creates a new HttpRpcServer.
 
     Args:
       host: The host to send requests to.
       auth_function: A function that takes no arguments and returns an
         (email, password) tuple when called. Will be called if authentication
         is required.
       host_override: The host header to send to the server (defaults to host).
       extra_headers: A dict of extra headers to append to every request.
       save_cookies: If True, save the authentication cookies to local disk.
         If False, use an in-memory cookiejar instead.  Subclasses must
         implement this functionality.  Defaults to False.
     """
     self.host = host
     self.host_override = host_override
     self.auth_function = auth_function
     self.authenticated = False
     self.extra_headers = extra_headers
     self.save_cookies = save_cookies
     self.opener = self._GetOpener()
     if self.host_override:
       logging.info("Server: %s; Host: %s", self.host, self.host_override)
     else:
       logging.info("Server: %s", self.host)
 
   def _GetOpener(self):
     """Returns an OpenerDirector for making HTTP requests.
 
     Returns:
       A urllib2.OpenerDirector object.
     """
     raise NotImplementedError()
 
   def _CreateRequest(self, url, data=None):
     """Creates a new urllib request."""
     logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
     req = urllib2.Request(url, data=data)
     if self.host_override:
       req.add_header("Host", self.host_override)
     for key, value in self.extra_headers.iteritems():
       req.add_header(key, value)
     return req
 
   def _GetAuthToken(self, email, password):
     """Uses ClientLogin to authenticate the user, returning an auth token.
 
     Args:
       email:    The user's email address
       password: The user's password
 
     Raises:
       ClientLoginError: If there was an error authenticating with ClientLogin.
       HTTPError: If there was some other form of HTTP error.
 
     Returns:
       The authentication token returned by ClientLogin.
     """
     account_type = "GOOGLE"
     if self.host.endswith(".google.com"):
       # Needed for use inside Google.
       account_type = "HOSTED"
     req = self._CreateRequest(
         url="https://www.google.com/accounts/ClientLogin",
         data=urllib.urlencode({
             "Email": email,
             "Passwd": password,
             "service": "ah",
             "source": "rietveld-codereview-upload",
             "accountType": account_type,
         }),
     )
     try:
       response = self.opener.open(req)
       response_body = response.read()
       response_dict = dict(x.split("=")
                            for x in response_body.split("\n") if x)
       return response_dict["Auth"]
     except urllib2.HTTPError, e:
       if e.code == 403:
         body = e.read()
         response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
         raise ClientLoginError(req.get_full_url(), e.code, e.msg,
                                e.headers, response_dict)
       else:
         raise
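 
   # Illustrative note: the ClientLogin response body is a series of
   # "key=value" lines; the dict built above therefore maps names such as
   # "Auth" to their values, and only the "Auth" entry is returned.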
 
   def _GetAuthCookie(self, auth_token):
     """Fetches authentication cookies for an authentication token.
 
     Args:
       auth_token: The authentication token returned by ClientLogin.
 
     Raises:
       HTTPError: If there was an error fetching the authentication cookies.
     """
     # This is a dummy value to allow us to identify when we're successful.
     continue_location = "http://localhost/"
     args = {"continue": continue_location, "auth": auth_token}
     req = self._CreateRequest("http://%s/_ah/login?%s" %
                               (self.host, urllib.urlencode(args)))
     try:
       response = self.opener.open(req)
     except urllib2.HTTPError, e:
       response = e
     if (response.code != 302 or
         response.info()["location"] != continue_location):
       raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
                               response.headers, response.fp)
     self.authenticated = True
 
   def _Authenticate(self):
     """Authenticates the user.
 
     The authentication process works as follows:
      1) We get a username and password from the user
      2) We use ClientLogin to obtain an AUTH token for the user
         (see https://developers.google.com/identity/protocols/AuthForInstalledApps).
      3) We pass the auth token to /_ah/login on the server to obtain an
         authentication cookie. If login was successful, it tries to redirect
         us to the URL we provided.
 
     If we attempt to access the upload API without first obtaining an
     authentication cookie, it returns a 401 response and directs us to
     authenticate ourselves with ClientLogin.
     """
     for i in range(3):
       credentials = self.auth_function()
       try:
         auth_token = self._GetAuthToken(credentials[0], credentials[1])
       except ClientLoginError, e:
         if e.reason == "BadAuthentication":
           print >>sys.stderr, "Invalid username or password."
           continue
         if e.reason == "CaptchaRequired":
           print >>sys.stderr, (
               "Please go to\n"
               "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
               "and verify you are a human.  Then try again.")
           break
         if e.reason == "NotVerified":
           print >>sys.stderr, "Account not verified."
           break
         if e.reason == "TermsNotAgreed":
           print >>sys.stderr, "User has not agreed to TOS."
           break
         if e.reason == "AccountDeleted":
           print >>sys.stderr, "The user account has been deleted."
           break
         if e.reason == "AccountDisabled":
           print >>sys.stderr, "The user account has been disabled."
           break
         if e.reason == "ServiceDisabled":
           print >>sys.stderr, ("The user's access to the service has been "
                                "disabled.")
           break
         if e.reason == "ServiceUnavailable":
           print >>sys.stderr, "The service is not available; try again later."
           break
         raise
       self._GetAuthCookie(auth_token)
       return
 
   def Send(self, request_path, payload=None,
            content_type="application/octet-stream",
            timeout=None,
            **kwargs):
     """Sends an RPC and returns the response.
 
     Args:
       request_path: The path to send the request to, eg /api/appversion/create.
       payload: The body of the request, or None to send an empty request.
       content_type: The Content-Type header to use.
       timeout: timeout in seconds; default None i.e. no timeout.
         (Note: for large requests on OS X, the timeout doesn't work right.)
       kwargs: Any keyword arguments are converted into query string parameters.
 
     Returns:
       The response body, as a string.
     """
     # TODO: Don't require authentication.  Let the server say
     # whether it is necessary.
     if not self.authenticated:
       self._Authenticate()
 
     old_timeout = socket.getdefaulttimeout()
     socket.setdefaulttimeout(timeout)
     try:
       tries = 0
       while True:
         tries += 1
         args = dict(kwargs)
         url = "http://%s%s" % (self.host, request_path)
         if args:
           url += "?" + urllib.urlencode(args)
         req = self._CreateRequest(url=url, data=payload)
         req.add_header("Content-Type", content_type)
         try:
           f = self.opener.open(req)
           response = f.read()
           f.close()
           return response
         except urllib2.HTTPError, e:
           if tries > 3:
             raise
           elif e.code == 401:
             self._Authenticate()
 ##           elif e.code >= 500 and e.code < 600:
 ##             # Server Error - try again.
 ##             continue
           else:
             raise
     finally:
       socket.setdefaulttimeout(old_timeout)
 
 
 class HttpRpcServer(AbstractRpcServer):
   """Provides a simplified RPC-style interface for HTTP requests."""
 
   def _Authenticate(self):
     """Save the cookie jar after authentication."""
     super(HttpRpcServer, self)._Authenticate()
     if self.save_cookies:
       StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
       self.cookie_jar.save()
 
   def _GetOpener(self):
     """Returns an OpenerDirector that supports cookies and ignores redirects.
 
     Returns:
       A urllib2.OpenerDirector object.
     """
     opener = urllib2.OpenerDirector()
     opener.add_handler(urllib2.ProxyHandler())
     opener.add_handler(urllib2.UnknownHandler())
     opener.add_handler(urllib2.HTTPHandler())
     opener.add_handler(urllib2.HTTPDefaultErrorHandler())
     opener.add_handler(urllib2.HTTPSHandler())
     opener.add_handler(urllib2.HTTPErrorProcessor())
     if self.save_cookies:
       self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
       self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
       if os.path.exists(self.cookie_file):
         try:
           self.cookie_jar.load()
           self.authenticated = True
           StatusUpdate("Loaded authentication cookies from %s" %
                        self.cookie_file)
         except (cookielib.LoadError, IOError):
           # Failed to load cookies - just ignore them.
           pass
       else:
         # Create an empty cookie file with mode 600
         fd = os.open(self.cookie_file, os.O_CREAT, 0600)
         os.close(fd)
       # Always chmod the cookie file
       os.chmod(self.cookie_file, 0600)
     else:
       # Don't save cookies across runs of update.py.
       self.cookie_jar = cookielib.CookieJar()
     opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
     return opener
 
 
 parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
 parser.add_option("-y", "--assume_yes", action="store_true",
                   dest="assume_yes", default=False,
                   help="Assume that the answer to yes/no questions is 'yes'.")
 # Logging
 group = parser.add_option_group("Logging options")
 group.add_option("-q", "--quiet", action="store_const", const=0,
                  dest="verbose", help="Print errors only.")
 group.add_option("-v", "--verbose", action="store_const", const=2,
                  dest="verbose", default=1,
                  help="Print info level logs (default).")
 group.add_option("--noisy", action="store_const", const=3,
                  dest="verbose", help="Print all logs.")
 # Review server
 group = parser.add_option_group("Review server options")
 group.add_option("-s", "--server", action="store", dest="server",
                  default="codereview.appspot.com",
                  metavar="SERVER",
                  help=("The server to upload to. The format is host[:port]. "
                        "Defaults to 'codereview.appspot.com'."))
 group.add_option("-e", "--email", action="store", dest="email",
                  metavar="EMAIL", default=None,
                  help="The username to use. Will prompt if omitted.")
 group.add_option("-H", "--host", action="store", dest="host",
                  metavar="HOST", default=None,
                  help="Overrides the Host header sent with all RPCs.")
 group.add_option("--no_cookies", action="store_false",
                  dest="save_cookies", default=True,
                  help="Do not save authentication cookies to local disk.")
 # Issue
 group = parser.add_option_group("Issue options")
 group.add_option("-d", "--description", action="store", dest="description",
                  metavar="DESCRIPTION", default=None,
                  help="Optional description when creating an issue.")
 group.add_option("-f", "--description_file", action="store",
                  dest="description_file", metavar="DESCRIPTION_FILE",
                  default=None,
                  help="Optional path of a file that contains "
                       "the description when creating an issue.")
 group.add_option("-r", "--reviewers", action="store", dest="reviewers",
                  metavar="REVIEWERS", default=None,
                  help="Add reviewers (comma separated email addresses).")
 group.add_option("--cc", action="store", dest="cc",
                  metavar="CC", default=None,
                  help="Add CC (comma separated email addresses).")
 # Upload options
 group = parser.add_option_group("Patch options")
 group.add_option("-m", "--message", action="store", dest="message",
                  metavar="MESSAGE", default=None,
                  help="A message to identify the patch. "
                       "Will prompt if omitted.")
 group.add_option("-i", "--issue", type="int", action="store",
                  metavar="ISSUE", default=None,
                  help="Issue number to which to add. Defaults to new issue.")
 group.add_option("--download_base", action="store_true",
                  dest="download_base", default=False,
                  help="Base files will be downloaded by the server "
                  "(side-by-side diffs may not work on files with CRs).")
 group.add_option("--rev", action="store", dest="revision",
                  metavar="REV", default=None,
                  help="Branch/tree/revision to diff against (used by DVCS).")
 group.add_option("--send_mail", action="store_true",
                  dest="send_mail", default=False,
                  help="Send notification email to reviewers.")
 
 
 def GetRpcServer(options):
   """Returns an instance of an AbstractRpcServer.
 
   Returns:
     A new AbstractRpcServer, on which RPC calls can be made.
   """
 
   rpc_server_class = HttpRpcServer
 
   def GetUserCredentials():
     """Prompts the user for a username and password."""
     email = options.email
     if email is None:
       email = GetEmail("Email (login for uploading to %s)" % options.server)
     password = getpass.getpass("Password for %s: " % email)
     return (email, password)
 
   # If this is the dev_appserver, use fake authentication.
   host = (options.host or options.server).lower()
   if host == "localhost" or host.startswith("localhost:"):
     email = options.email
     if email is None:
       email = "test@example.com"
       logging.info("Using debug user %s.  Override with --email" % email)
     server = rpc_server_class(
         options.server,
         lambda: (email, "password"),
         host_override=options.host,
         extra_headers={"Cookie":
                        'dev_appserver_login="%s:False"' % email},
         save_cookies=options.save_cookies)
     # Don't try to talk to ClientLogin.
     server.authenticated = True
     return server
 
   return rpc_server_class(options.server, GetUserCredentials,
                           host_override=options.host,
                           save_cookies=options.save_cookies)
 
 
 def EncodeMultipartFormData(fields, files):
   """Encode form fields for multipart/form-data.
 
   Args:
     fields: A sequence of (name, value) elements for regular form fields.
     files: A sequence of (name, filename, value) elements for data to be
            uploaded as files.
   Returns:
     (content_type, body) ready for httplib.HTTP instance.
 
   Source:
     https://web.archive.org/web/20160116052001/code.activestate.com/recipes/146306
   """
   BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
   CRLF = '\r\n'
   lines = []
   for (key, value) in fields:
     lines.append('--' + BOUNDARY)
     lines.append('Content-Disposition: form-data; name="%s"' % key)
     lines.append('')
     lines.append(value)
   for (key, filename, value) in files:
     lines.append('--' + BOUNDARY)
     lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' %
                  (key, filename))
     lines.append('Content-Type: %s' % GetContentType(filename))
     lines.append('')
     lines.append(value)
   lines.append('--' + BOUNDARY + '--')
   lines.append('')
   body = CRLF.join(lines)
   content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
   return content_type, body
 
 
 def GetContentType(filename):
   """Helper to guess the content-type from the filename."""
   return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
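 
 
 # A minimal illustrative sketch (not part of the original upload.py flow)
 # showing how EncodeMultipartFormData() and GetContentType() fit together.
 # The field names, filename, and contents below are hypothetical.
 def _DemoEncodeMultipartFormData():
   fields = [("subject", "Fix crash in Foo"), ("content_upload", "1")]
   files = [("data", "data.diff", "Index: foo.cc\n...")]
   # GetContentType() is called internally to pick a Content-Type per file.
   ctype, body = EncodeMultipartFormData(fields, files)
   # ctype is 'multipart/form-data; boundary=...'; body is the CRLF-joined
   # multipart payload, ready to pass to AbstractRpcServer.Send().
   return ctype, body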
 
 
 # Use a shell for subcommands on Windows to get a PATH search.
 use_shell = sys.platform.startswith("win")
 
 def RunShellWithReturnCode(command, print_output=False,
                            universal_newlines=True):
   """Executes a command and returns the output from stdout and the return code.
 
   Args:
     command: Command to execute.
     print_output: If True, the output is printed to stdout.
                   If False, both stdout and stderr are ignored.
     universal_newlines: Use universal_newlines flag (default: True).
 
   Returns:
     Tuple (output, return code)
   """
   logging.info("Running %s", command)
   p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                        shell=use_shell, universal_newlines=universal_newlines)
   if print_output:
     output_array = []
     while True:
       line = p.stdout.readline()
       if not line:
         break
       print line.strip("\n")
       output_array.append(line)
     output = "".join(output_array)
   else:
     output = p.stdout.read()
   p.wait()
   errout = p.stderr.read()
   if print_output and errout:
     print >>sys.stderr, errout
   p.stdout.close()
   p.stderr.close()
   return output, p.returncode
 
 
 def RunShell(command, silent_ok=False, universal_newlines=True,
              print_output=False):
   data, retcode = RunShellWithReturnCode(command, print_output,
                                          universal_newlines)
   if retcode:
     ErrorExit("Got error status from %s:\n%s" % (command, data))
   if not silent_ok and not data:
     ErrorExit("No output from %s" % command)
   return data
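 
 
 # A minimal illustrative sketch (not part of the original upload.py flow) of
 # the shell helpers above; the command is only an example and assumes "svn"
 # is on PATH.
 def _DemoRunShell():
   # RunShellWithReturnCode() returns (stdout, returncode) and never exits;
   # RunShell() would instead call ErrorExit() on a non-zero status.
   output, returncode = RunShellWithReturnCode(["svn", "--version"])
   return output, returncode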
 
 
 class VersionControlSystem(object):
   """Abstract base class providing an interface to the VCS."""
 
   def __init__(self, options):
     """Constructor.
 
     Args:
       options: Command line options.
     """
     self.options = options
 
   def GenerateDiff(self, args):
     """Return the current diff as a string.
 
     Args:
       args: Extra arguments to pass to the diff command.
     """
     raise NotImplementedError(
         "abstract method -- subclass %s must override" % self.__class__)
 
   def GetUnknownFiles(self):
     """Return a list of files unknown to the VCS."""
     raise NotImplementedError(
         "abstract method -- subclass %s must override" % self.__class__)
 
   def CheckForUnknownFiles(self):
     """Show an "are you sure?" prompt if there are unknown files."""
     unknown_files = self.GetUnknownFiles()
     if unknown_files:
       print "The following files are not added to version control:"
       for line in unknown_files:
         print line
       prompt = "Are you sure you want to continue? (y/N) "
       answer = raw_input(prompt).strip()
       if answer != "y":
         ErrorExit("User aborted")
 
   def GetBaseFile(self, filename):
     """Get the content of the upstream version of a file.
 
     Returns:
       A tuple (base_content, new_content, is_binary, status)
         base_content: The contents of the base file.
         new_content: For text files, this is empty.  For binary files, this is
           the contents of the new file, since the diff output won't contain
           information to reconstruct the current file.
-        is_binary: True if the file is binary.
+        is_binary: True iff the file is binary.
         status: The status of the file.
     """
 
     raise NotImplementedError(
         "abstract method -- subclass %s must override" % self.__class__)
 
 
   def GetBaseFiles(self, diff):
     """Helper that calls GetBaseFile for each file in the patch.
 
     Returns:
       A dictionary that maps from filename to GetBaseFile's tuple.  Filenames
       are retrieved based on lines that start with "Index:" or
       "Property changes on:".
     """
     files = {}
     for line in diff.splitlines(True):
       if line.startswith('Index:') or line.startswith('Property changes on:'):
         unused, filename = line.split(':', 1)
         # On Windows if a file has property changes its filename uses '\'
         # instead of '/'.
         filename = filename.strip().replace('\\', '/')
         files[filename] = self.GetBaseFile(filename)
     return files
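 
   # Illustrative example: for a hypothetical diff touching foo.cc and bar.h,
   # GetBaseFiles() returns a dict shaped like
   #   {"foo.cc": (base_content, new_content, is_binary, status), ...}
   # with one entry per "Index:" / "Property changes on:" line in the diff.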
 
 
   def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
                       files):
     """Uploads the base files (and if necessary, the current ones as well)."""
 
     def UploadFile(filename, file_id, content, is_binary, status, is_base):
       """Uploads a file to the server."""
       file_too_large = False
       if is_base:
         type = "base"
       else:
         type = "current"
       if len(content) > MAX_UPLOAD_SIZE:
         print ("Not uploading the %s file for %s because it's too large." %
                (type, filename))
         file_too_large = True
         content = ""
       checksum = md5.new(content).hexdigest()
       if options.verbose > 0 and not file_too_large:
         print "Uploading %s file for %s" % (type, filename)
       url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
       form_fields = [("filename", filename),
                      ("status", status),
                      ("checksum", checksum),
                      ("is_binary", str(is_binary)),
                      ("is_current", str(not is_base)),
                     ]
       if file_too_large:
         form_fields.append(("file_too_large", "1"))
       if options.email:
         form_fields.append(("user", options.email))
       ctype, body = EncodeMultipartFormData(form_fields,
                                             [("data", filename, content)])
       response_body = rpc_server.Send(url, body,
                                       content_type=ctype)
       if not response_body.startswith("OK"):
         StatusUpdate("  --> %s" % response_body)
         sys.exit(1)
 
     patches = dict()
     [patches.setdefault(v, k) for k, v in patch_list]
     for filename in patches.keys():
       base_content, new_content, is_binary, status = files[filename]
       file_id_str = patches.get(filename)
       if file_id_str.find("nobase") != -1:
         base_content = None
         file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
       file_id = int(file_id_str)
       if base_content != None:
         UploadFile(filename, file_id, base_content, is_binary, status, True)
       if new_content != None:
         UploadFile(filename, file_id, new_content, is_binary, status, False)
 
   def IsImage(self, filename):
     """Returns true if the filename has an image extension."""
     mimetype = mimetypes.guess_type(filename)[0]
     if not mimetype:
       return False
     return mimetype.startswith("image/")
 
 
 class SubversionVCS(VersionControlSystem):
   """Implementation of the VersionControlSystem interface for Subversion."""
 
   def __init__(self, options):
     super(SubversionVCS, self).__init__(options)
     if self.options.revision:
       match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
       if not match:
         ErrorExit("Invalid Subversion revision %s." % self.options.revision)
       self.rev_start = match.group(1)
       self.rev_end = match.group(3)
     else:
       self.rev_start = self.rev_end = None
     # Cache output from "svn list -r REVNO dirname".
     # Keys: dirname, Values: 2-tuple (output for start rev and end rev).
     self.svnls_cache = {}
     # SVN base URL is required to fetch files deleted in an older revision.
     # Result is cached to not guess it over and over again in GetBaseFile().
     required = self.options.download_base or self.options.revision is not None
     self.svn_base = self._GuessBase(required)
 
   def GuessBase(self, required):
     """Wrapper for _GuessBase."""
     return self.svn_base
 
   def _GuessBase(self, required):
     """Returns the SVN base URL.
 
     Args:
       required: If True, exits with an error if the URL can't be guessed;
         otherwise None is returned.
     """
     info = RunShell(["svn", "info"])
     for line in info.splitlines():
       words = line.split()
       if len(words) == 2 and words[0] == "URL:":
         url = words[1]
         scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
         username, netloc = urllib.splituser(netloc)
         if username:
           logging.info("Removed username from base URL")
         if netloc.endswith("svn.python.org"):
           if netloc == "svn.python.org":
             if path.startswith("/projects/"):
               path = path[9:]
           elif netloc != "pythondev@svn.python.org":
             ErrorExit("Unrecognized Python URL: %s" % url)
           base = "http://svn.python.org/view/*checkout*%s/" % path
           logging.info("Guessed Python base = %s", base)
         elif netloc.endswith("svn.collab.net"):
           if path.startswith("/repos/"):
             path = path[6:]
           base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
           logging.info("Guessed CollabNet base = %s", base)
         elif netloc.endswith(".googlecode.com"):
           path = path + "/"
           base = urlparse.urlunparse(("http", netloc, path, params,
                                       query, fragment))
           logging.info("Guessed Google Code base = %s", base)
         else:
           path = path + "/"
           base = urlparse.urlunparse((scheme, netloc, path, params,
                                       query, fragment))
           logging.info("Guessed base = %s", base)
         return base
     if required:
       ErrorExit("Can't find URL in output from svn info")
     return None
 
   def GenerateDiff(self, args):
     cmd = ["svn", "diff"]
     if self.options.revision:
       cmd += ["-r", self.options.revision]
     cmd.extend(args)
     data = RunShell(cmd)
     count = 0
     for line in data.splitlines():
       if line.startswith("Index:") or line.startswith("Property changes on:"):
         count += 1
         logging.info(line)
     if not count:
       ErrorExit("No valid patches found in output from svn diff")
     return data
 
   def _CollapseKeywords(self, content, keyword_str):
     """Collapses SVN keywords."""
     # svn cat translates keywords but svn diff doesn't. As a result of this
     # behavior patching.PatchChunks() fails with a chunk mismatch error.
     # This part was originally written by the Review Board development team
     # who had the same problem (https://reviews.reviewboard.org/r/276/).
     # Mapping of keywords to known aliases
     svn_keywords = {
       # Standard keywords
       'Date':                ['Date', 'LastChangedDate'],
       'Revision':            ['Revision', 'LastChangedRevision', 'Rev'],
       'Author':              ['Author', 'LastChangedBy'],
       'HeadURL':             ['HeadURL', 'URL'],
       'Id':                  ['Id'],
 
       # Aliases
       'LastChangedDate':     ['LastChangedDate', 'Date'],
       'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
       'LastChangedBy':       ['LastChangedBy', 'Author'],
       'URL':                 ['URL', 'HeadURL'],
     }
 
     def repl(m):
        if m.group(2):
          return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
        return "$%s$" % m.group(1)
     keywords = [keyword
                 for name in keyword_str.split(" ")
                 for keyword in svn_keywords.get(name, [])]
     return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
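 
   # Illustrative example: with keyword_str "Revision", content containing
   # "$Revision: 1234 $" collapses to "$Revision$"; the fixed-width form
   # "$Revision:: 1234 $" keeps its width, becoming "$Revision::" padded with
   # spaces up to the closing "$", so the base text matches the unexpanded
   # form that "svn diff" works with.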
 
   def GetUnknownFiles(self):
     status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
     unknown_files = []
     for line in status.split("\n"):
       if line and line[0] == "?":
         unknown_files.append(line)
     return unknown_files
 
   def ReadFile(self, filename):
     """Returns the contents of a file."""
     file = open(filename, 'rb')
     result = ""
     try:
       result = file.read()
     finally:
       file.close()
     return result
 
   def GetStatus(self, filename):
     """Returns the status of a file."""
     if not self.options.revision:
       status = RunShell(["svn", "status", "--ignore-externals", filename])
       if not status:
         ErrorExit("svn status returned no output for %s" % filename)
       status_lines = status.splitlines()
       # If file is in a cl, the output will begin with
       # "\n--- Changelist 'cl_name':\n".  See
       # https://web.archive.org/web/20090918234815/svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
       if (len(status_lines) == 3 and
           not status_lines[0] and
           status_lines[1].startswith("--- Changelist")):
         status = status_lines[2]
       else:
         status = status_lines[0]
     # If we have a revision to diff against we need to run "svn list"
     # for the old and the new revision and compare the results to get
     # the correct status for a file.
     else:
       dirname, relfilename = os.path.split(filename)
       if dirname not in self.svnls_cache:
         cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
         out, returncode = RunShellWithReturnCode(cmd)
         if returncode:
           ErrorExit("Failed to get status for %s." % filename)
         old_files = out.splitlines()
         args = ["svn", "list"]
         if self.rev_end:
           args += ["-r", self.rev_end]
         cmd = args + [dirname or "."]
         out, returncode = RunShellWithReturnCode(cmd)
         if returncode:
           ErrorExit("Failed to run command %s" % cmd)
         self.svnls_cache[dirname] = (old_files, out.splitlines())
       old_files, new_files = self.svnls_cache[dirname]
       if relfilename in old_files and relfilename not in new_files:
         status = "D   "
       elif relfilename in old_files and relfilename in new_files:
         status = "M   "
       else:
         status = "A   "
     return status
 
   def GetBaseFile(self, filename):
     status = self.GetStatus(filename)
     base_content = None
     new_content = None
 
     # If a file is copied, its status will be "A  +", which signifies
     # "addition-with-history".  See "svn st" for more information.  We need to
     # upload the original file or else diff parsing will fail if the file was
     # edited.
     if status[0] == "A" and status[3] != "+":
       # We'll need to upload the new content if we're adding a binary file
       # since diff's output won't contain it.
       mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
                           silent_ok=True)
       base_content = ""
       is_binary = mimetype and not mimetype.startswith("text/")
       if is_binary and self.IsImage(filename):
         new_content = self.ReadFile(filename)
     elif (status[0] in ("M", "D", "R") or
           (status[0] == "A" and status[3] == "+") or  # Copied file.
           (status[0] == " " and status[1] == "M")):  # Property change.
       args = []
       if self.options.revision:
         url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
       else:
         # Don't change filename, it's needed later.
         url = filename
         args += ["-r", "BASE"]
       cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
       mimetype, returncode = RunShellWithReturnCode(cmd)
       if returncode:
         # File does not exist in the requested revision.
         # Reset mimetype, it contains an error message.
         mimetype = ""
       get_base = False
       is_binary = mimetype and not mimetype.startswith("text/")
       if status[0] == " ":
         # Empty base content just to force an upload.
         base_content = ""
       elif is_binary:
         if self.IsImage(filename):
           get_base = True
           if status[0] == "M":
             if not self.rev_end:
               new_content = self.ReadFile(filename)
             else:
               url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
               new_content = RunShell(["svn", "cat", url],
                                      universal_newlines=True, silent_ok=True)
         else:
           base_content = ""
       else:
         get_base = True
 
       if get_base:
         if is_binary:
           universal_newlines = False
         else:
           universal_newlines = True
         if self.rev_start:
           # "svn cat -r REV delete_file.txt" doesn't work. cat requires
           # the full URL with "@REV" appended instead of using "-r" option.
           url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
           base_content = RunShell(["svn", "cat", url],
                                   universal_newlines=universal_newlines,
                                   silent_ok=True)
         else:
           base_content = RunShell(["svn", "cat", filename],
                                   universal_newlines=universal_newlines,
                                   silent_ok=True)
         if not is_binary:
           args = []
           if self.rev_start:
             url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
           else:
             url = filename
             args += ["-r", "BASE"]
           cmd = ["svn"] + args + ["propget", "svn:keywords", url]
           keywords, returncode = RunShellWithReturnCode(cmd)
           if keywords and not returncode:
             base_content = self._CollapseKeywords(base_content, keywords)
     else:
       StatusUpdate("svn status returned unexpected output: %s" % status)
       sys.exit(1)
     return base_content, new_content, is_binary, status[0:5]
 
 
 class GitVCS(VersionControlSystem):
   """Implementation of the VersionControlSystem interface for Git."""
 
   def __init__(self, options):
     super(GitVCS, self).__init__(options)
     # Map of filename -> hash of base file.
     self.base_hashes = {}
 
   def GenerateDiff(self, extra_args):
     # This is more complicated than svn's GenerateDiff because we must convert
     # the diff output to include an svn-style "Index:" line as well as record
     # the hashes of the base files, so we can upload them along with our diff.
     if self.options.revision:
       extra_args = [self.options.revision] + extra_args
     gitdiff = RunShell(["git", "diff", "--full-index"] + extra_args)
     svndiff = []
     filecount = 0
     filename = None
     for line in gitdiff.splitlines():
       match = re.match(r"diff --git a/(.*) b/.*$", line)
       if match:
         filecount += 1
         filename = match.group(1)
         svndiff.append("Index: %s\n" % filename)
       else:
         # The "index" line in a git diff looks like this (long hashes elided):
         #   index 82c0d44..b2cee3f 100755
         # We want to save the left hash, as that identifies the base file.
         match = re.match(r"index (\w+)\.\.", line)
         if match:
           self.base_hashes[filename] = match.group(1)
       svndiff.append(line + "\n")
     if not filecount:
       ErrorExit("No valid patches found in output from git diff")
     return "".join(svndiff)
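 
   # Illustrative example: for a git diff header pair such as
   #   diff --git a/foo.cc b/foo.cc
   #   index 82c0d44..b2cee3f 100755
   # the loop above emits "Index: foo.cc" before the header and records
   # self.base_hashes["foo.cc"] = "82c0d44", which GetBaseFile() later uses
   # to fetch the base revision via "git show 82c0d44".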
 
   def GetUnknownFiles(self):
     status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
                       silent_ok=True)
     return status.splitlines()
 
   def GetBaseFile(self, filename):
     hash = self.base_hashes[filename]
     base_content = None
     new_content = None
     is_binary = False
     if hash == "0" * 40:  # All-zero hash indicates no base file.
       status = "A"
       base_content = ""
     else:
       status = "M"
       base_content, returncode = RunShellWithReturnCode(["git", "show", hash])
       if returncode:
         ErrorExit("Got error status from 'git show %s'" % hash)
     return (base_content, new_content, is_binary, status)
 
 
 class MercurialVCS(VersionControlSystem):
   """Implementation of the VersionControlSystem interface for Mercurial."""
 
   def __init__(self, options, repo_dir):
     super(MercurialVCS, self).__init__(options)
     # Absolute path to repository (we can be in a subdir)
     self.repo_dir = os.path.normpath(repo_dir)
     # Compute the subdir
     cwd = os.path.normpath(os.getcwd())
     assert cwd.startswith(self.repo_dir)
     self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
     if self.options.revision:
       self.base_rev = self.options.revision
     else:
       self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()
 
   def _GetRelPath(self, filename):
     """Get the path of a file relative to the current directory,
     given its logical path in the repo."""
     assert filename.startswith(self.subdir), filename
     return filename[len(self.subdir):].lstrip(r"\/")
 
   def GenerateDiff(self, extra_args):
     # If no file specified, restrict to the current subdir
     extra_args = extra_args or ["."]
     cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
     data = RunShell(cmd, silent_ok=True)
     svndiff = []
     filecount = 0
     for line in data.splitlines():
       m = re.match("diff --git a/(\S+) b/(\S+)", line)
       if m:
         # Modify the line to make it look as if it came from svn diff.
         # With this modification no changes on the server side are required
         # to make upload.py work with Mercurial repos.
         # NOTE: for proper handling of moved/copied files, we have to use
         # the second filename.
         filename = m.group(2)
         svndiff.append("Index: %s" % filename)
         svndiff.append("=" * 67)
         filecount += 1
         logging.info(line)
       else:
         svndiff.append(line)
     if not filecount:
       ErrorExit("No valid patches found in output from hg diff")
     return "\n".join(svndiff) + "\n"
 
   def GetUnknownFiles(self):
     """Return a list of files unknown to the VCS."""
     args = []
     status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
         silent_ok=True)
     unknown_files = []
     for line in status.splitlines():
       st, fn = line.split(" ", 1)
       if st == "?":
         unknown_files.append(fn)
     return unknown_files
 
   def GetBaseFile(self, filename):
     # "hg status" and "hg cat" both take a path relative to the current subdir
     # rather than to the repo root, but "hg diff" has given us the full path
     # to the repo root.
     base_content = ""
     new_content = None
     is_binary = False
     oldrelpath = relpath = self._GetRelPath(filename)
     # "hg status -C" returns two lines for moved/copied files, one otherwise
     out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
     out = out.splitlines()
     # HACK: strip error message about missing file/directory if it isn't in
     # the working copy
     if out[0].startswith('%s: ' % relpath):
       out = out[1:]
     if len(out) > 1:
       # Moved/copied => considered as modified, use old filename to
       # retrieve base contents
       oldrelpath = out[1].strip()
       status = "M"
     else:
       status, _ = out[0].split(' ', 1)
     if status != "A":
       base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
         silent_ok=True)
       is_binary = "\0" in base_content  # Mercurial's heuristic
     if status != "R":
       new_content = open(relpath, "rb").read()
       is_binary = is_binary or "\0" in new_content
     if is_binary and base_content:
       # Fetch again without converting newlines
       base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
         silent_ok=True, universal_newlines=False)
     if not is_binary or not self.IsImage(relpath):
       new_content = None
     return base_content, new_content, is_binary, status
 
 
 # NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
 def SplitPatch(data):
   """Splits a patch into separate pieces for each file.
 
   Args:
     data: A string containing the output of svn diff.
 
   Returns:
     A list of 2-tuple (filename, text) where text is the svn diff output
       pertaining to filename.
   """
   patches = []
   filename = None
   diff = []
   for line in data.splitlines(True):
     new_filename = None
     if line.startswith('Index:'):
       unused, new_filename = line.split(':', 1)
       new_filename = new_filename.strip()
     elif line.startswith('Property changes on:'):
       unused, temp_filename = line.split(':', 1)
       # When a file is modified, paths use '/' between directories; however,
       # when a property is modified, '\' is used on Windows.  Make them
       # consistent, otherwise the file shows up twice.
       temp_filename = temp_filename.strip().replace('\\', '/')
       if temp_filename != filename:
         # File has property changes but no modifications, create a new diff.
         new_filename = temp_filename
     if new_filename:
       if filename and diff:
         patches.append((filename, ''.join(diff)))
       filename = new_filename
       diff = [line]
       continue
     if diff is not None:
       diff.append(line)
   if filename and diff:
     patches.append((filename, ''.join(diff)))
   return patches
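 
 
 # A minimal illustrative sketch (not part of the original upload.py flow)
 # demonstrating the shape of SplitPatch()'s output. The filenames and hunks
 # below are hypothetical.
 def _DemoSplitPatch():
   data = ("Index: foo.cc\n--- foo.cc\n+++ foo.cc\n@@ -1 +1 @@\n-a\n+b\n"
           "Index: bar.h\n--- bar.h\n+++ bar.h\n@@ -1 +1 @@\n-x\n+y\n")
   patches = SplitPatch(data)
   # patches is [("foo.cc", <text>), ("bar.h", <text>)], where each text value
   # is the portion of the diff starting at its "Index:" line.
   return patches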
 
 
 def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
   """Uploads a separate patch for each file in the diff output.
 
   Returns a list of [patch_key, filename] for each file.
   """
   patches = SplitPatch(data)
   rv = []
   for patch in patches:
     if len(patch[1]) > MAX_UPLOAD_SIZE:
       print ("Not uploading the patch for " + patch[0] +
              " because the file is too large.")
       continue
     form_fields = [("filename", patch[0])]
     if not options.download_base:
       form_fields.append(("content_upload", "1"))
     files = [("data", "data.diff", patch[1])]
     ctype, body = EncodeMultipartFormData(form_fields, files)
     url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
     print "Uploading patch for " + patch[0]
     response_body = rpc_server.Send(url, body, content_type=ctype)
     lines = response_body.splitlines()
     if not lines or lines[0] != "OK":
       StatusUpdate("  --> %s" % response_body)
       sys.exit(1)
     rv.append([lines[1], patch[0]])
   return rv
 
 
 def GuessVCS(options):
   """Helper to guess the version control system.
 
   This examines the current directory, guesses which VersionControlSystem
   we're using, and returns an instance of the appropriate class.  Exit with an
   error if we can't figure it out.
 
   Returns:
     A VersionControlSystem instance. Exits if the VCS can't be guessed.
   """
   # Mercurial has a command to get the base directory of a repository
   # Try running it, but don't die if we don't have hg installed.
   # NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
   try:
     out, returncode = RunShellWithReturnCode(["hg", "root"])
     if returncode == 0:
       return MercurialVCS(options, out.strip())
   except OSError, (errno, message):
     if errno != 2:  # ENOENT -- they don't have hg installed.
       raise
 
   # Subversion has a .svn in all working directories.
   if os.path.isdir('.svn'):
     logging.info("Guessed VCS = Subversion")
     return SubversionVCS(options)
 
   # Git has a command to test if you're in a git tree.
   # Try running it, but don't die if we don't have git installed.
   try:
     out, returncode = RunShellWithReturnCode(["git", "rev-parse",
                                               "--is-inside-work-tree"])
     if returncode == 0:
       return GitVCS(options)
   except OSError, (errno, message):
     if errno != 2:  # ENOENT -- they don't have git installed.
       raise
 
   ErrorExit(("Could not guess version control system. "
              "Are you in a working copy directory?"))
 
 
 def RealMain(argv, data=None):
   """The real main function.
 
   Args:
     argv: Command line arguments.
     data: Diff contents. If None (default) the diff is generated by
       the VersionControlSystem implementation returned by GuessVCS().
 
   Returns:
     A 2-tuple (issue id, patchset id).
     The patchset id is None if the base files are not uploaded by this
     script (applies only to SVN checkouts).
   """
   logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
                               "%(lineno)s %(message)s "))
   os.environ['LC_ALL'] = 'C'
   options, args = parser.parse_args(argv[1:])
   global verbosity
   verbosity = options.verbose
   if verbosity >= 3:
     logging.getLogger().setLevel(logging.DEBUG)
   elif verbosity >= 2:
     logging.getLogger().setLevel(logging.INFO)
   vcs = GuessVCS(options)
   if isinstance(vcs, SubversionVCS):
     # base field is only allowed for Subversion.
     # Note: Fetching base files may become deprecated in future releases.
     base = vcs.GuessBase(options.download_base)
   else:
     base = None
   if not base and options.download_base:
     options.download_base = True
     logging.info("Enabled upload of base file")
   if not options.assume_yes:
     vcs.CheckForUnknownFiles()
   if data is None:
     data = vcs.GenerateDiff(args)
   files = vcs.GetBaseFiles(data)
   if verbosity >= 1:
     print "Upload server:", options.server, "(change with -s/--server)"
   if options.issue:
     prompt = "Message describing this patch set: "
   else:
     prompt = "New issue subject: "
   message = options.message or raw_input(prompt).strip()
   if not message:
     ErrorExit("A non-empty message is required")
   rpc_server = GetRpcServer(options)
   form_fields = [("subject", message)]
   if base:
     form_fields.append(("base", base))
   if options.issue:
     form_fields.append(("issue", str(options.issue)))
   if options.email:
     form_fields.append(("user", options.email))
   if options.reviewers:
     for reviewer in options.reviewers.split(','):
       if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1:
         ErrorExit("Invalid email address: %s" % reviewer)
     form_fields.append(("reviewers", options.reviewers))
   if options.cc:
     for cc in options.cc.split(','):
       if "@" in cc and not cc.split("@")[1].count(".") == 1:
         ErrorExit("Invalid email address: %s" % cc)
     form_fields.append(("cc", options.cc))
   description = options.description
   if options.description_file:
     if options.description:
       ErrorExit("Can't specify description and description_file")
     file = open(options.description_file, 'r')
     description = file.read()
     file.close()
   if description:
     form_fields.append(("description", description))
   # Send a hash of all the base file so the server can determine if a copy
   # already exists in an earlier patchset.
   base_hashes = ""
   for file, info in files.iteritems():
     if not info[0] is None:
       checksum = md5.new(info[0]).hexdigest()
       if base_hashes:
         base_hashes += "|"
       base_hashes += checksum + ":" + file
   form_fields.append(("base_hashes", base_hashes))
   # If we're uploading base files, don't send the email before the uploads, so
   # that it contains the file status.
   if options.send_mail and options.download_base:
     form_fields.append(("send_mail", "1"))
   if not options.download_base:
     form_fields.append(("content_upload", "1"))
   if len(data) > MAX_UPLOAD_SIZE:
     print "Patch is large, so uploading file patches separately."
     uploaded_diff_file = []
     form_fields.append(("separate_patches", "1"))
   else:
     uploaded_diff_file = [("data", "data.diff", data)]
   ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
   response_body = rpc_server.Send("/upload", body, content_type=ctype)
   patchset = None
   if not options.download_base or not uploaded_diff_file:
     lines = response_body.splitlines()
     if len(lines) >= 2:
       msg = lines[0]
       patchset = lines[1].strip()
       patches = [x.split(" ", 1) for x in lines[2:]]
     else:
       msg = response_body
   else:
     msg = response_body
   StatusUpdate(msg)
   if not response_body.startswith("Issue created.") and \
      not response_body.startswith("Issue updated."):
     sys.exit(0)
   issue = msg[msg.rfind("/")+1:]
 
   if not uploaded_diff_file:
     result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
     if not options.download_base:
       patches = result
 
   if not options.download_base:
     vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
     if options.send_mail:
       rpc_server.Send("/" + issue + "/mail", payload="")
   return issue, patchset
 
 
 def main():
   try:
     RealMain(sys.argv)
   except KeyboardInterrupt:
     print
     StatusUpdate("Interrupted.")
     sys.exit(1)
 
 
 if __name__ == "__main__":
   main()
diff --git a/googletest/scripts/gen_gtest_pred_impl.py b/googletest/scripts/gen_gtest_pred_impl.py
index b00830d7..b43efdf4 100755
--- a/googletest/scripts/gen_gtest_pred_impl.py
+++ b/googletest/scripts/gen_gtest_pred_impl.py
@@ -1,730 +1,730 @@
 #!/usr/bin/env python
 #
 # Copyright 2006, Google Inc.
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
 #
 #     * Redistributions of source code must retain the above copyright
 # notice, this list of conditions and the following disclaimer.
 #     * Redistributions in binary form must reproduce the above
 # copyright notice, this list of conditions and the following disclaimer
 # in the documentation and/or other materials provided with the
 # distribution.
 #     * Neither the name of Google Inc. nor the names of its
 # contributors may be used to endorse or promote products derived from
 # this software without specific prior written permission.
 #
 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 """gen_gtest_pred_impl.py v0.1
 
 Generates the implementation of Google Test predicate assertions and
 accompanying tests.
 
 Usage:
 
   gen_gtest_pred_impl.py MAX_ARITY
 
 where MAX_ARITY is a positive integer.
 
 The command generates the implementation of up to MAX_ARITY-ary
 predicate assertions, and writes it to file gtest_pred_impl.h in the
 directory where the script is.  It also generates the accompanying
 unit test in file gtest_pred_impl_unittest.cc.
 """
 
 __author__ = 'wan@google.com (Zhanyong Wan)'
 
 import os
 import sys
 import time
 
 # Where this script is.
 SCRIPT_DIR = os.path.dirname(sys.argv[0])
 
 # Where to store the generated header.
 HEADER = os.path.join(SCRIPT_DIR, '../include/gtest/gtest_pred_impl.h')
 
 # Where to store the generated unit test.
 UNIT_TEST = os.path.join(SCRIPT_DIR, '../test/gtest_pred_impl_unittest.cc')
 
 
 def HeaderPreamble(n):
   """Returns the preamble for the header file.
 
   Args:
     n:  the maximum arity of the predicate macros to be generated.
   """
 
   # A map that defines the values used in the preamble template.
   DEFS = {
     'today' : time.strftime('%m/%d/%Y'),
     'year' : time.strftime('%Y'),
     'command' : '%s %s' % (os.path.basename(sys.argv[0]), n),
     'n' : n
     }
 
   return (
 """// Copyright 2006, Google Inc.
 // All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 // notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 // copyright notice, this list of conditions and the following disclaimer
 // in the documentation and/or other materials provided with the
 // distribution.
 //     * Neither the name of Google Inc. nor the names of its
 // contributors may be used to endorse or promote products derived from
 // this software without specific prior written permission.
 //
 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // This file is AUTOMATICALLY GENERATED on %(today)s by command
 // '%(command)s'.  DO NOT EDIT BY HAND!
 //
 // Implements a family of generic predicate assertion macros.
 
 #ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
 #define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
 
 #include "gtest/gtest.h"
 
 namespace testing {
 
 // This header implements a family of generic predicate assertion
 // macros:
 //
 //   ASSERT_PRED_FORMAT1(pred_format, v1)
 //   ASSERT_PRED_FORMAT2(pred_format, v1, v2)
 //   ...
 //
 // where pred_format is a function or functor that takes n (in the
 // case of ASSERT_PRED_FORMATn) values and their source expression
 // text, and returns a testing::AssertionResult.  See the definition
 // of ASSERT_EQ in gtest.h for an example.
 //
 // If you don't care about formatting, you can use the more
 // restrictive version:
 //
 //   ASSERT_PRED1(pred, v1)
 //   ASSERT_PRED2(pred, v1, v2)
 //   ...
 //
 // where pred is an n-ary function or functor that returns bool,
 // and the values v1, v2, ..., must support the << operator for
 // streaming to std::ostream.
 //
 // We also define the EXPECT_* variations.
 //
 // For now we only support predicates whose arity is at most %(n)s.
 // Please email googletestframework@googlegroups.com if you need
 // support for higher arities.
 
 // GTEST_ASSERT_ is the basic statement to which all of the assertions
 // in this file reduce.  Don't use this in your code.
 
 #define GTEST_ASSERT_(expression, on_failure) \\
   GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\
   if (const ::testing::AssertionResult gtest_ar = (expression)) \\
     ; \\
   else \\
     on_failure(gtest_ar.failure_message())
 """ % DEFS)
 
 
 def Arity(n):
   """Returns the English name of the given arity."""
 
   if n < 0:
     return None
   elif n <= 3:
     return ['nullary', 'unary', 'binary', 'ternary'][n]
   else:
     return '%s-ary' % n
 
 
 def Title(word):
   """Returns the given word in title case.  The difference between
   this and string's title() method is that Title('4-ary') is '4-ary'
   while '4-ary'.title() is '4-Ary'."""
 
   return word[0].upper() + word[1:]
 
 
 def OneTo(n):
   """Returns the list [1, 2, 3, ..., n]."""
 
   return range(1, n + 1)
 
 
 def Iter(n, format, sep=''):
   """Given a positive integer n, a format string that contains 0 or
   more '%s' format specs, and optionally a separator string, returns
   the join of n strings, each produced by applying the format string to
   the integer i, for i ranging from 1 to n.
 
   Example:
 
   Iter(3, 'v%s', sep=', ') returns 'v1, v2, v3'.
   """
 
   # How many '%s' specs are in format?
   spec_count = len(format.split('%s')) - 1
   return sep.join([format % (spec_count * (i,)) for i in OneTo(n)])
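 # An additional illustrative case: when the format string holds several '%s'
 # specs, each spec in a given string is filled with the same index, e.g.
 # Iter(2, 'T%s v%s', sep=', ') returns 'T1 v1, T2 v2'.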
 
 
 def ImplementationForArity(n):
   """Returns the implementation of n-ary predicate assertions."""
 
   # A map that defines the values used in the implementation template.
   DEFS = {
     'n' : str(n),
     'vs' : Iter(n, 'v%s', sep=', '),
     'vts' : Iter(n, '#v%s', sep=', '),
     'arity' : Arity(n),
     'Arity' : Title(Arity(n))
     }
 
   impl = """
 
 // Helper function for implementing {EXPECT|ASSERT}_PRED%(n)s.  Don't use
 // this in your code.
 template <typename Pred""" % DEFS
 
   impl += Iter(n, """,
           typename T%s""")
 
   impl += """>
 AssertionResult AssertPred%(n)sHelper(const char* pred_text""" % DEFS
 
   impl += Iter(n, """,
                                   const char* e%s""")
 
   impl += """,
                                   Pred pred"""
 
   impl += Iter(n, """,
                                   const T%s& v%s""")
 
   impl += """) {
   if (pred(%(vs)s)) return AssertionSuccess();
 
 """ % DEFS
 
   impl += '  return AssertionFailure() << pred_text << "("'
 
   impl += Iter(n, """
                             << e%s""", sep=' << ", "')
 
   impl += ' << ") evaluates to false, where"'
 
   impl += Iter(n, """
                             << "\\n" << e%s << " evaluates to " << v%s""")
 
   impl += """;
 }
 
 // Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
 // Don't use this in your code.
 #define GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, on_failure)\\
   GTEST_ASSERT_(pred_format(%(vts)s, %(vs)s), \\
                 on_failure)
 
 // Internal macro for implementing {EXPECT|ASSERT}_PRED%(n)s.  Don't use
 // this in your code.
 #define GTEST_PRED%(n)s_(pred, %(vs)s, on_failure)\\
   GTEST_ASSERT_(::testing::AssertPred%(n)sHelper(#pred""" % DEFS
 
   impl += Iter(n, """, \\
                                              #v%s""")
 
   impl += """, \\
                                              pred"""
 
   impl += Iter(n, """, \\
                                              v%s""")
 
   impl += """), on_failure)
 
 // %(Arity)s predicate assertion macros.
 #define EXPECT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
   GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_NONFATAL_FAILURE_)
 #define EXPECT_PRED%(n)s(pred, %(vs)s) \\
   GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_NONFATAL_FAILURE_)
 #define ASSERT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
   GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_FATAL_FAILURE_)
 #define ASSERT_PRED%(n)s(pred, %(vs)s) \\
   GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_FATAL_FAILURE_)
 
 """ % DEFS
 
   return impl
 
 
 def HeaderPostamble():
   """Returns the postamble for the header file."""
 
   return """
 
 }  // namespace testing
 
 #endif  // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
 """
 
 
 def GenerateFile(path, content):
   """Given a file path and a content string
      overwrites it with the given content.
   """
   print 'Updating file %s . . .' % path
   f = file(path, 'w+')
   print >>f, content,
   f.close()
 
   print 'File %s has been updated.' % path
 
 
 def GenerateHeader(n):
   """Given the maximum arity n, updates the header file that implements
   the predicate assertions.
   """
   GenerateFile(HEADER,
                HeaderPreamble(n)
                + ''.join([ImplementationForArity(i) for i in OneTo(n)])
                + HeaderPostamble())
 
 
 def UnitTestPreamble():
   """Returns the preamble for the unit test file."""
 
   # A map that defines the values used in the preamble template.
   DEFS = {
     'today' : time.strftime('%m/%d/%Y'),
     'year' : time.strftime('%Y'),
     'command' : '%s %s' % (os.path.basename(sys.argv[0]), sys.argv[1]),
     }
 
   return (
 """// Copyright 2006, Google Inc.
 // All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 // notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 // copyright notice, this list of conditions and the following disclaimer
 // in the documentation and/or other materials provided with the
 // distribution.
 //     * Neither the name of Google Inc. nor the names of its
 // contributors may be used to endorse or promote products derived from
 // this software without specific prior written permission.
 //
 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // This file is AUTOMATICALLY GENERATED on %(today)s by command
 // '%(command)s'.  DO NOT EDIT BY HAND!
 
 // Regression test for gtest_pred_impl.h
 //
 // This file is generated by a script and quite long.  If you intend to
 // learn how Google Test works by reading its unit tests, read
 // gtest_unittest.cc instead.
 //
 // This is intended as a regression test for the Google Test predicate
 // assertions.  We compile it as part of the gtest_unittest target
 // only to keep the implementation tidy and compact, as it is quite
 // involved to set up the stage for testing Google Test using Google
 // Test itself.
 //
 // Currently, gtest_unittest takes ~11 seconds to run in the testing
 // daemon.  In the future, if it grows too large and needs much more
 // time to finish, we should consider separating this file into a
 // stand-alone regression test.
 
 #include <iostream>
 
 #include "gtest/gtest.h"
 #include "gtest/gtest-spi.h"
 
 // A user-defined data type.
 struct Bool {
   explicit Bool(int val) : value(val != 0) {}
 
   bool operator>(int n) const { return value > Bool(n).value; }
 
   Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); }
 
   bool operator==(const Bool& rhs) const { return value == rhs.value; }
 
   bool value;
 };
 
 // Enables Bool to be used in assertions.
 std::ostream& operator<<(std::ostream& os, const Bool& x) {
   return os << (x.value ? "true" : "false");
 }
 
 """ % DEFS)
 
 
 def TestsForArity(n):
   """Returns the tests for n-ary predicate assertions."""
 
   # A map that defines the values used in the template for the tests.
   DEFS = {
     'n' : n,
     'es' : Iter(n, 'e%s', sep=', '),
     'vs' : Iter(n, 'v%s', sep=', '),
     'vts' : Iter(n, '#v%s', sep=', '),
     'tvs' : Iter(n, 'T%s v%s', sep=', '),
     'int_vs' : Iter(n, 'int v%s', sep=', '),
     'Bool_vs' : Iter(n, 'Bool v%s', sep=', '),
     'types' : Iter(n, 'typename T%s', sep=', '),
     'v_sum' : Iter(n, 'v%s', sep=' + '),
     'arity' : Arity(n),
     'Arity' : Title(Arity(n)),
     }
 
   tests = (
 """// Sample functions/functors for testing %(arity)s predicate assertions.
 
 // A %(arity)s predicate function.
 template <%(types)s>
 bool PredFunction%(n)s(%(tvs)s) {
   return %(v_sum)s > 0;
 }
 
 // The following two functions are needed to circumvent a bug in
 // gcc 2.95.3, which sometimes has problems with the above template
 // function.
 bool PredFunction%(n)sInt(%(int_vs)s) {
   return %(v_sum)s > 0;
 }
 bool PredFunction%(n)sBool(%(Bool_vs)s) {
   return %(v_sum)s > 0;
 }
 """ % DEFS)
 
   tests += """
 // A %(arity)s predicate functor.
 struct PredFunctor%(n)s {
   template <%(types)s>
   bool operator()(""" % DEFS
 
   tests += Iter(n, 'const T%s& v%s', sep=""",
                   """)
 
   tests += """) {
     return %(v_sum)s > 0;
   }
 };
 """ % DEFS
 
   tests += """
 // A %(arity)s predicate-formatter function.
 template <%(types)s>
 testing::AssertionResult PredFormatFunction%(n)s(""" % DEFS
 
   tests += Iter(n, 'const char* e%s', sep=""",
                                              """)
 
   tests += Iter(n, """,
                                              const T%s& v%s""")
 
   tests += """) {
   if (PredFunction%(n)s(%(vs)s))
     return testing::AssertionSuccess();
 
   return testing::AssertionFailure()
       << """ % DEFS
 
   tests += Iter(n, 'e%s', sep=' << " + " << ')
 
   tests += """
       << " is expected to be positive, but evaluates to "
       << %(v_sum)s << ".";
 }
 """ % DEFS
 
   tests += """
 // A %(arity)s predicate-formatter functor.
 struct PredFormatFunctor%(n)s {
   template <%(types)s>
   testing::AssertionResult operator()(""" % DEFS
 
   tests += Iter(n, 'const char* e%s', sep=""",
                                       """)
 
   tests += Iter(n, """,
                                       const T%s& v%s""")
 
   tests += """) const {
     return PredFormatFunction%(n)s(%(es)s, %(vs)s);
   }
 };
 """ % DEFS
 
   tests += """
 // Tests for {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
 
 class Predicate%(n)sTest : public testing::Test {
  protected:
   virtual void SetUp() {
     expected_to_finish_ = true;
     finished_ = false;""" % DEFS
 
   tests += """
     """ + Iter(n, 'n%s_ = ') + """0;
   }
 """
 
   tests += """
   virtual void TearDown() {
     // Verifies that each of the predicate's arguments was evaluated
     // exactly once."""
 
   tests += ''.join(["""
     EXPECT_EQ(1, n%s_) <<
         "The predicate assertion didn't evaluate argument %s "
         "exactly once.";""" % (i, i + 1) for i in OneTo(n)])
 
   tests += """
 
     // Verifies that the control flow in the test function is expected.
     if (expected_to_finish_ && !finished_) {
       FAIL() << "The predicate assertion unexpactedly aborted the test.";
     } else if (!expected_to_finish_ && finished_) {
       FAIL() << "The failed predicate assertion didn't abort the test "
                 "as expected.";
     }
   }
 
-  // true if the test function is expected to run to finish.
+  // true iff the test function is expected to run to finish.
   static bool expected_to_finish_;
 
-  // true if the test function did run to finish.
+  // true iff the test function did run to finish.
   static bool finished_;
 """ % DEFS
 
   tests += Iter(n, """
   static int n%s_;""")
 
   tests += """
 };
 
 bool Predicate%(n)sTest::expected_to_finish_;
 bool Predicate%(n)sTest::finished_;
 """ % DEFS
 
   tests += Iter(n, """int Predicate%%(n)sTest::n%s_;
 """) % DEFS
 
   tests += """
 typedef Predicate%(n)sTest EXPECT_PRED_FORMAT%(n)sTest;
 typedef Predicate%(n)sTest ASSERT_PRED_FORMAT%(n)sTest;
 typedef Predicate%(n)sTest EXPECT_PRED%(n)sTest;
 typedef Predicate%(n)sTest ASSERT_PRED%(n)sTest;
 """ % DEFS
 
   def GenTest(use_format, use_assert, expect_failure,
               use_functor, use_user_type):
     """Returns the test for a predicate assertion macro.
 
     Args:
-      use_format:     true if the assertion is a *_PRED_FORMAT*.
-      use_assert:     true if the assertion is a ASSERT_*.
-      expect_failure: true if the assertion is expected to fail.
-      use_functor:    true if the first argument of the assertion is
+      use_format:     true iff the assertion is a *_PRED_FORMAT*.
+      use_assert:     true iff the assertion is a ASSERT_*.
+      expect_failure: true iff the assertion is expected to fail.
+      use_functor:    true iff the first argument of the assertion is
                       a functor (as opposed to a function)
-      use_user_type:  true if the predicate functor/function takes
+      use_user_type:  true iff the predicate functor/function takes
                       argument(s) of a user-defined type.
 
     Example:
 
       GenTest(1, 0, 0, 1, 0) returns a test that tests the behavior
       of a successful EXPECT_PRED_FORMATn() that takes a functor
       whose arguments have built-in types."""
 
     if use_assert:
       assrt = 'ASSERT'  # 'assert' is reserved, so we cannot use
                         # that identifier here.
     else:
       assrt = 'EXPECT'
 
     assertion = assrt + '_PRED'
 
     if use_format:
       pred_format = 'PredFormat'
       assertion += '_FORMAT'
     else:
       pred_format = 'Pred'
 
     assertion += '%(n)s' % DEFS
 
     if use_functor:
       pred_format_type = 'functor'
       pred_format += 'Functor%(n)s()'
     else:
       pred_format_type = 'function'
       pred_format += 'Function%(n)s'
       if not use_format:
         if use_user_type:
           pred_format += 'Bool'
         else:
           pred_format += 'Int'
 
     test_name = pred_format_type.title()
 
     if use_user_type:
       arg_type = 'user-defined type (Bool)'
       test_name += 'OnUserType'
       if expect_failure:
         arg = 'Bool(n%s_++)'
       else:
         arg = 'Bool(++n%s_)'
     else:
       arg_type = 'built-in type (int)'
       test_name += 'OnBuiltInType'
       if expect_failure:
         arg = 'n%s_++'
       else:
         arg = '++n%s_'
 
     if expect_failure:
       successful_or_failed = 'failed'
       expected_or_not = 'expected.'
       test_name +=  'Failure'
     else:
       successful_or_failed = 'successful'
       expected_or_not = 'UNEXPECTED!'
       test_name +=  'Success'
 
     # A map that defines the values used in the test template.
     defs = DEFS.copy()
     defs.update({
       'assert' : assrt,
       'assertion' : assertion,
       'test_name' : test_name,
       'pf_type' : pred_format_type,
       'pf' : pred_format,
       'arg_type' : arg_type,
       'arg' : arg,
       'successful' : successful_or_failed,
       'expected' : expected_or_not,
       })
 
     test = """
 // Tests a %(successful)s %(assertion)s where the
 // predicate-formatter is a %(pf_type)s on a %(arg_type)s.
 TEST_F(%(assertion)sTest, %(test_name)s) {""" % defs
 
     indent = (len(assertion) + 3)*' '
     extra_indent = ''
 
     if expect_failure:
       extra_indent = '  '
       if use_assert:
         test += """
   expected_to_finish_ = false;
   EXPECT_FATAL_FAILURE({  // NOLINT"""
       else:
         test += """
   EXPECT_NONFATAL_FAILURE({  // NOLINT"""
 
     test += '\n' + extra_indent + """  %(assertion)s(%(pf)s""" % defs
 
     test = test % defs
     test += Iter(n, ',\n' + indent + extra_indent + '%(arg)s' % defs)
     test += ');\n' + extra_indent + '  finished_ = true;\n'
 
     if expect_failure:
       test += '  }, "");\n'
 
     test += '}\n'
     return test
 
   # Generates tests for all 2**5 = 32 combinations of the five flags.
   tests += ''.join([GenTest(use_format, use_assert, expect_failure,
                             use_functor, use_user_type)
                     for use_format in [0, 1]
                     for use_assert in [0, 1]
                     for expect_failure in [0, 1]
                     for use_functor in [0, 1]
                     for use_user_type in [0, 1]
                     ])
 
   return tests
 
 
 def UnitTestPostamble():
   """Returns the postamble for the tests."""
 
   return ''
 
 
 def GenerateUnitTest(n):
   """Returns the tests for up-to n-ary predicate assertions."""
 
   GenerateFile(UNIT_TEST,
                UnitTestPreamble()
                + ''.join([TestsForArity(i) for i in OneTo(n)])
                + UnitTestPostamble())
 
 
 def _Main():
   """The entry point of the script.  Generates the header file and its
   unit test."""
 
   if len(sys.argv) != 2:
     print __doc__
     print 'Author: ' + __author__
     sys.exit(1)
 
   n = int(sys.argv[1])
   GenerateHeader(n)
   GenerateUnitTest(n)
 
 
 if __name__ == '__main__':
   _Main()
diff --git a/googletest/scripts/pump.py b/googletest/scripts/pump.py
index 7dfb87a4..5efb653c 100755
--- a/googletest/scripts/pump.py
+++ b/googletest/scripts/pump.py
@@ -1,855 +1,855 @@
 #!/usr/bin/env python
 #
 # Copyright 2008, Google Inc.
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
 #
 #     * Redistributions of source code must retain the above copyright
 # notice, this list of conditions and the following disclaimer.
 #     * Redistributions in binary form must reproduce the above
 # copyright notice, this list of conditions and the following disclaimer
 # in the documentation and/or other materials provided with the
 # distribution.
 #     * Neither the name of Google Inc. nor the names of its
 # contributors may be used to endorse or promote products derived from
 # this software without specific prior written permission.
 #
 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 """pump v0.2.0 - Pretty Useful for Meta Programming.
 
 A tool for preprocessor meta programming.  Useful for generating
 repetitive boilerplate code.  Especially useful for writing C++
 classes, functions, macros, and templates that need to work with
 a variable number of arguments.
 
 USAGE:
        pump.py SOURCE_FILE
 
 EXAMPLES:
        pump.py foo.cc.pump
          Converts foo.cc.pump to foo.cc.
 
 GRAMMAR:
        CODE ::= ATOMIC_CODE*
        ATOMIC_CODE ::= $var ID = EXPRESSION
            | $var ID = [[ CODE ]]
            | $range ID EXPRESSION..EXPRESSION
            | $for ID SEPARATOR [[ CODE ]]
            | $($)
            | $ID
            | $(EXPRESSION)
            | $if EXPRESSION [[ CODE ]] ELSE_BRANCH
            | [[ CODE ]]
            | RAW_CODE
        SEPARATOR ::= RAW_CODE | EMPTY
        ELSE_BRANCH ::= $else [[ CODE ]]
            | $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
            | EMPTY
        EXPRESSION has Python syntax.
 """
 
 __author__ = 'wan@google.com (Zhanyong Wan)'
 
 import os
 import re
 import sys
 
 
 TOKEN_TABLE = [
     (re.compile(r'\$var\s+'), '$var'),
     (re.compile(r'\$elif\s+'), '$elif'),
     (re.compile(r'\$else\s+'), '$else'),
     (re.compile(r'\$for\s+'), '$for'),
     (re.compile(r'\$if\s+'), '$if'),
     (re.compile(r'\$range\s+'), '$range'),
     (re.compile(r'\$[_A-Za-z]\w*'), '$id'),
     (re.compile(r'\$\(\$\)'), '$($)'),
     (re.compile(r'\$'), '$'),
     (re.compile(r'\[\[\n?'), '[['),
     (re.compile(r'\]\]\n?'), ']]'),
     ]
 
 
 class Cursor:
   """Represents a position (line and column) in a text file."""
 
   def __init__(self, line=-1, column=-1):
     self.line = line
     self.column = column
 
   def __eq__(self, rhs):
     return self.line == rhs.line and self.column == rhs.column
 
   def __ne__(self, rhs):
     return not self == rhs
 
   def __lt__(self, rhs):
     return self.line < rhs.line or (
         self.line == rhs.line and self.column < rhs.column)
 
   def __le__(self, rhs):
     return self < rhs or self == rhs
 
   def __gt__(self, rhs):
     return rhs < self
 
   def __ge__(self, rhs):
     return rhs <= self
 
   def __str__(self):
     if self == Eof():
       return 'EOF'
     else:
       return '%s(%s)' % (self.line + 1, self.column)
 
   def __add__(self, offset):
     return Cursor(self.line, self.column + offset)
 
   def __sub__(self, offset):
     return Cursor(self.line, self.column - offset)
 
   def Clone(self):
     """Returns a copy of self."""
 
     return Cursor(self.line, self.column)
 
 
 # Special cursor to indicate the end-of-file.
 def Eof():
   """Returns the special cursor to denote the end-of-file."""
   return Cursor(-1, -1)
 
 
 class Token:
   """Represents a token in a Pump source file."""
 
   def __init__(self, start=None, end=None, value=None, token_type=None):
     if start is None:
       self.start = Eof()
     else:
       self.start = start
     if end is None:
       self.end = Eof()
     else:
       self.end = end
     self.value = value
     self.token_type = token_type
 
   def __str__(self):
     return 'Token @%s: \'%s\' type=%s' % (
         self.start, self.value, self.token_type)
 
   def Clone(self):
     """Returns a copy of self."""
 
     return Token(self.start.Clone(), self.end.Clone(), self.value,
                  self.token_type)
 
 
 def StartsWith(lines, pos, string):
-  """Returns True if the given position in lines starts with 'string'."""
+  """Returns True iff the given position in lines starts with 'string'."""
 
   return lines[pos.line][pos.column:].startswith(string)
 
 
 def FindFirstInLine(line, token_table):
   best_match_start = -1
   for (regex, token_type) in token_table:
     m = regex.search(line)
     if m:
       # We found a token pattern match in this line.
       if best_match_start < 0 or m.start() < best_match_start:
         best_match_start = m.start()
         best_match_length = m.end() - m.start()
         best_match_token_type = token_type
 
   if best_match_start < 0:
     return None
 
   return (best_match_start, best_match_length, best_match_token_type)
 
 
 def FindFirst(lines, token_table, cursor):
   """Finds the first occurrence of any string in strings in lines."""
 
   start = cursor.Clone()
   cur_line_number = cursor.line
   for line in lines[start.line:]:
     if cur_line_number == start.line:
       line = line[start.column:]
     m = FindFirstInLine(line, token_table)
     if m:
       # We found a regex in line.
       (start_column, length, token_type) = m
       if cur_line_number == start.line:
         start_column += start.column
       found_start = Cursor(cur_line_number, start_column)
       found_end = found_start + length
       return MakeToken(lines, found_start, found_end, token_type)
     cur_line_number += 1
   # We did not find any token in the remaining lines.
   return None
 
 
 def SubString(lines, start, end):
   """Returns a substring in lines."""
 
   if end == Eof():
     end = Cursor(len(lines) - 1, len(lines[-1]))
 
   if start >= end:
     return ''
 
   if start.line == end.line:
     return lines[start.line][start.column:end.column]
 
   result_lines = ([lines[start.line][start.column:]] +
                   lines[start.line + 1:end.line] +
                   [lines[end.line][:end.column]])
   return ''.join(result_lines)
 
 
 def StripMetaComments(str):
   """Strip meta comments from each line in the given string."""
 
   # First, completely remove lines containing nothing but a meta
   # comment, including the trailing \n.
   str = re.sub(r'^\s*\$\$.*\n', '', str)
 
   # Then, remove meta comments from contentful lines.
   return re.sub(r'\s*\$\$.*', '', str)
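 # For example (illustrative input), StripMetaComments('int x;  $$ note\n')
 # returns 'int x;\n': the trailing meta comment is dropped while the code and
 # its newline are kept.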
 
 
 def MakeToken(lines, start, end, token_type):
   """Creates a new instance of Token."""
 
   return Token(start, end, SubString(lines, start, end), token_type)
 
 
 def ParseToken(lines, pos, regex, token_type):
   line = lines[pos.line][pos.column:]
   m = regex.search(line)
   if m and not m.start():
     return MakeToken(lines, pos, pos + m.end(), token_type)
   else:
     print 'ERROR: %s expected at %s.' % (token_type, pos)
     sys.exit(1)
 
 
 ID_REGEX = re.compile(r'[_A-Za-z]\w*')
 EQ_REGEX = re.compile(r'=')
 REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')
 OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
 WHITE_SPACE_REGEX = re.compile(r'\s')
 DOT_DOT_REGEX = re.compile(r'\.\.')
 
 
 def Skip(lines, pos, regex):
   line = lines[pos.line][pos.column:]
   m = re.search(regex, line)
   if m and not m.start():
     return pos + m.end()
   else:
     return pos
 
 
 def SkipUntil(lines, pos, regex, token_type):
   line = lines[pos.line][pos.column:]
   m = re.search(regex, line)
   if m:
     return pos + m.start()
   else:
     print ('ERROR: %s expected on line %s after column %s.' %
            (token_type, pos.line + 1, pos.column))
     sys.exit(1)
 
 
 def ParseExpTokenInParens(lines, pos):
   def ParseInParens(pos):
     pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
     pos = Skip(lines, pos, r'\(')
     pos = Parse(pos)
     pos = Skip(lines, pos, r'\)')
     return pos
 
   def Parse(pos):
     pos = SkipUntil(lines, pos, r'\(|\)', ')')
     if SubString(lines, pos, pos + 1) == '(':
       pos = Parse(pos + 1)
       pos = Skip(lines, pos, r'\)')
       return Parse(pos)
     else:
       return pos
 
   start = pos.Clone()
   pos = ParseInParens(pos)
   return MakeToken(lines, start, pos, 'exp')
 
 
 def RStripNewLineFromToken(token):
   if token.value.endswith('\n'):
     return Token(token.start, token.end, token.value[:-1], token.token_type)
   else:
     return token
 
 
 def TokenizeLines(lines, pos):
   while True:
     found = FindFirst(lines, TOKEN_TABLE, pos)
     if not found:
       yield MakeToken(lines, pos, Eof(), 'code')
       return
 
     if found.start == pos:
       prev_token = None
       prev_token_rstripped = None
     else:
       prev_token = MakeToken(lines, pos, found.start, 'code')
       prev_token_rstripped = RStripNewLineFromToken(prev_token)
 
     if found.token_type == '$var':
       if prev_token_rstripped:
         yield prev_token_rstripped
       yield found
       id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
       yield id_token
       pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
 
       eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
       yield eq_token
       pos = Skip(lines, eq_token.end, r'\s*')
 
       if SubString(lines, pos, pos + 2) != '[[':
         exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
         yield exp_token
         pos = Cursor(exp_token.end.line + 1, 0)
     elif found.token_type == '$for':
       if prev_token_rstripped:
         yield prev_token_rstripped
       yield found
       id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
       yield id_token
       pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
     elif found.token_type == '$range':
       if prev_token_rstripped:
         yield prev_token_rstripped
       yield found
       id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
       yield id_token
       pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
 
       dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
       yield MakeToken(lines, pos, dots_pos, 'exp')
       yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
       pos = dots_pos + 2
       new_pos = Cursor(pos.line + 1, 0)
       yield MakeToken(lines, pos, new_pos, 'exp')
       pos = new_pos
     elif found.token_type == '$':
       if prev_token:
         yield prev_token
       yield found
       exp_token = ParseExpTokenInParens(lines, found.end)
       yield exp_token
       pos = exp_token.end
     elif (found.token_type == ']]' or found.token_type == '$if' or
           found.token_type == '$elif' or found.token_type == '$else'):
       if prev_token_rstripped:
         yield prev_token_rstripped
       yield found
       pos = found.end
     else:
       if prev_token:
         yield prev_token
       yield found
       pos = found.end
 
 
 def Tokenize(s):
   """A generator that yields the tokens in the given string."""
   if s != '':
     lines = s.splitlines(True)
     for token in TokenizeLines(lines, Cursor(0, 0)):
       yield token
 
 
 class CodeNode:
   def __init__(self, atomic_code_list=None):
     self.atomic_code = atomic_code_list
 
 
 class VarNode:
   def __init__(self, identifier=None, atomic_code=None):
     self.identifier = identifier
     self.atomic_code = atomic_code
 
 
 class RangeNode:
   def __init__(self, identifier=None, exp1=None, exp2=None):
     self.identifier = identifier
     self.exp1 = exp1
     self.exp2 = exp2
 
 
 class ForNode:
   def __init__(self, identifier=None, sep=None, code=None):
     self.identifier = identifier
     self.sep = sep
     self.code = code
 
 
 class ElseNode:
   def __init__(self, else_branch=None):
     self.else_branch = else_branch
 
 
 class IfNode:
   def __init__(self, exp=None, then_branch=None, else_branch=None):
     self.exp = exp
     self.then_branch = then_branch
     self.else_branch = else_branch
 
 
 class RawCodeNode:
   def __init__(self, token=None):
     self.raw_code = token
 
 
 class LiteralDollarNode:
   def __init__(self, token):
     self.token = token
 
 
 class ExpNode:
   def __init__(self, token, python_exp):
     self.token = token
     self.python_exp = python_exp
 
 
 def PopFront(a_list):
   head = a_list[0]
   a_list[:1] = []
   return head
 
 
 def PushFront(a_list, elem):
   a_list[:0] = [elem]
 
 
 def PopToken(a_list, token_type=None):
   token = PopFront(a_list)
   if token_type is not None and token.token_type != token_type:
     print 'ERROR: %s expected at %s' % (token_type, token.start)
     print 'ERROR: %s found instead' % (token,)
     sys.exit(1)
 
   return token
 
 
 def PeekToken(a_list):
   if not a_list:
     return None
 
   return a_list[0]
 
 
 def ParseExpNode(token):
   python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
   return ExpNode(token, python_exp)
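 # For example (illustrative token value), an expression token whose value is
 # 'i + 1' becomes the Python expression 'self.GetValue("i") + 1', which
 # Env.EvalExp later evaluates against the current variable bindings.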
 
 
 def ParseElseNode(tokens):
   def Pop(token_type=None):
     return PopToken(tokens, token_type)
 
   next = PeekToken(tokens)
   if not next:
     return None
   if next.token_type == '$else':
     Pop('$else')
     Pop('[[')
     code_node = ParseCodeNode(tokens)
     Pop(']]')
     return code_node
   elif next.token_type == '$elif':
     Pop('$elif')
     exp = Pop('code')
     Pop('[[')
     code_node = ParseCodeNode(tokens)
     Pop(']]')
     inner_else_node = ParseElseNode(tokens)
     return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
   elif not next.value.strip():
     Pop('code')
     return ParseElseNode(tokens)
   else:
     return None
 
 
 def ParseAtomicCodeNode(tokens):
   def Pop(token_type=None):
     return PopToken(tokens, token_type)
 
   head = PopFront(tokens)
   t = head.token_type
   if t == 'code':
     return RawCodeNode(head)
   elif t == '$var':
     id_token = Pop('id')
     Pop('=')
     next = PeekToken(tokens)
     if next.token_type == 'exp':
       exp_token = Pop()
       return VarNode(id_token, ParseExpNode(exp_token))
     Pop('[[')
     code_node = ParseCodeNode(tokens)
     Pop(']]')
     return VarNode(id_token, code_node)
   elif t == '$for':
     id_token = Pop('id')
     next_token = PeekToken(tokens)
     if next_token.token_type == 'code':
       sep_token = next_token
       Pop('code')
     else:
       sep_token = None
     Pop('[[')
     code_node = ParseCodeNode(tokens)
     Pop(']]')
     return ForNode(id_token, sep_token, code_node)
   elif t == '$if':
     exp_token = Pop('code')
     Pop('[[')
     code_node = ParseCodeNode(tokens)
     Pop(']]')
     else_node = ParseElseNode(tokens)
     return IfNode(ParseExpNode(exp_token), code_node, else_node)
   elif t == '$range':
     id_token = Pop('id')
     exp1_token = Pop('exp')
     Pop('..')
     exp2_token = Pop('exp')
     return RangeNode(id_token, ParseExpNode(exp1_token),
                      ParseExpNode(exp2_token))
   elif t == '$id':
     return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
   elif t == '$($)':
     return LiteralDollarNode(head)
   elif t == '$':
     exp_token = Pop('exp')
     return ParseExpNode(exp_token)
   elif t == '[[':
     code_node = ParseCodeNode(tokens)
     Pop(']]')
     return code_node
   else:
     PushFront(tokens, head)
     return None
 
 
 def ParseCodeNode(tokens):
   atomic_code_list = []
   while True:
     if not tokens:
       break
     atomic_code_node = ParseAtomicCodeNode(tokens)
     if atomic_code_node:
       atomic_code_list.append(atomic_code_node)
     else:
       break
   return CodeNode(atomic_code_list)
 
 
 def ParseToAST(pump_src_text):
   """Convert the given Pump source text into an AST."""
   tokens = list(Tokenize(pump_src_text))
   code_node = ParseCodeNode(tokens)
   return code_node
 
 
 class Env:
   def __init__(self):
     self.variables = []
     self.ranges = []
 
   def Clone(self):
     clone = Env()
     clone.variables = self.variables[:]
     clone.ranges = self.ranges[:]
     return clone
 
   def PushVariable(self, var, value):
     # If value looks like an int, store it as an int.
     try:
       int_value = int(value)
       if ('%s' % int_value) == value:
         value = int_value
     except Exception:
       pass
     self.variables[:0] = [(var, value)]
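     # For example (illustrative values): PushVariable('n', '3') stores the
     # int 3, whereas PushVariable('n', '03') keeps the string '03', because
     # '%s' % 3 does not reproduce '03'.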
 
   def PopVariable(self):
     self.variables[:1] = []
 
   def PushRange(self, var, lower, upper):
     self.ranges[:0] = [(var, lower, upper)]
 
   def PopRange(self):
     self.ranges[:1] = []
 
   def GetValue(self, identifier):
     for (var, value) in self.variables:
       if identifier == var:
         return value
 
     print 'ERROR: meta variable %s is undefined.' % (identifier,)
     sys.exit(1)
 
   def EvalExp(self, exp):
     try:
       result = eval(exp.python_exp)
     except Exception, e:
       print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e)
       print ('ERROR: failed to evaluate meta expression %s at %s' %
              (exp.python_exp, exp.token.start))
       sys.exit(1)
     return result
 
   def GetRange(self, identifier):
     for (var, lower, upper) in self.ranges:
       if identifier == var:
         return (lower, upper)
 
     print 'ERROR: range %s is undefined.' % (identifier,)
     sys.exit(1)
 
 
 class Output:
   def __init__(self):
     self.string = ''
 
   def GetLastLine(self):
     index = self.string.rfind('\n')
     if index < 0:
       return ''
 
     return self.string[index + 1:]
 
   def Append(self, s):
     self.string += s
 
 
 def RunAtomicCode(env, node, output):
   if isinstance(node, VarNode):
     identifier = node.identifier.value.strip()
     result = Output()
     RunAtomicCode(env.Clone(), node.atomic_code, result)
     value = result.string
     env.PushVariable(identifier, value)
   elif isinstance(node, RangeNode):
     identifier = node.identifier.value.strip()
     lower = int(env.EvalExp(node.exp1))
     upper = int(env.EvalExp(node.exp2))
     env.PushRange(identifier, lower, upper)
   elif isinstance(node, ForNode):
     identifier = node.identifier.value.strip()
     if node.sep is None:
       sep = ''
     else:
       sep = node.sep.value
     (lower, upper) = env.GetRange(identifier)
     for i in range(lower, upper + 1):
       new_env = env.Clone()
       new_env.PushVariable(identifier, i)
       RunCode(new_env, node.code, output)
       if i != upper:
         output.Append(sep)
   elif isinstance(node, RawCodeNode):
     output.Append(node.raw_code.value)
   elif isinstance(node, IfNode):
     cond = env.EvalExp(node.exp)
     if cond:
       RunCode(env.Clone(), node.then_branch, output)
     elif node.else_branch is not None:
       RunCode(env.Clone(), node.else_branch, output)
   elif isinstance(node, ExpNode):
     value = env.EvalExp(node)
     output.Append('%s' % (value,))
   elif isinstance(node, LiteralDollarNode):
     output.Append('$')
   elif isinstance(node, CodeNode):
     RunCode(env.Clone(), node, output)
   else:
     print 'BAD'
     print node
     sys.exit(1)
 
 
 def RunCode(env, code_node, output):
   for atomic_code in code_node.atomic_code:
     RunAtomicCode(env, atomic_code, output)
 
 
 def IsSingleLineComment(cur_line):
   return '//' in cur_line
 
 
 def IsInPreprocessorDirective(prev_lines, cur_line):
   if cur_line.lstrip().startswith('#'):
     return True
   return prev_lines and prev_lines[-1].endswith('\\')
 
 
 def WrapComment(line, output):
   loc = line.find('//')
   before_comment = line[:loc].rstrip()
   if before_comment == '':
     indent = loc
   else:
     output.append(before_comment)
     indent = len(before_comment) - len(before_comment.lstrip())
   prefix = indent*' ' + '// '
   max_len = 80 - len(prefix)
   comment = line[loc + 2:].strip()
   segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != '']
   cur_line = ''
   for seg in segs:
     if len((cur_line + seg).rstrip()) < max_len:
       cur_line += seg
     else:
       if cur_line.strip() != '':
         output.append(prefix + cur_line.rstrip())
       cur_line = seg.lstrip()
   if cur_line.strip() != '':
     output.append(prefix + cur_line.strip())
 
 
 def WrapCode(line, line_concat, output):
   indent = len(line) - len(line.lstrip())
   prefix = indent*' '  # Prefix of the current line
   max_len = 80 - indent - len(line_concat)  # Maximum length of the current line
   new_prefix = prefix + 4*' '  # Prefix of a continuation line
   new_max_len = max_len - 4  # Maximum length of a continuation line
   # Prefers to wrap a line after a ',' or ';'.
   segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
   cur_line = ''  # The current line without leading spaces.
   for seg in segs:
     # If the line is still too long, wrap at a space.
     while cur_line == '' and len(seg.strip()) > max_len:
       seg = seg.lstrip()
       split_at = seg.rfind(' ', 0, max_len)
       output.append(prefix + seg[:split_at].strip() + line_concat)
       seg = seg[split_at + 1:]
       prefix = new_prefix
       max_len = new_max_len
 
     if len((cur_line + seg).rstrip()) < max_len:
       cur_line = (cur_line + seg).lstrip()
     else:
       output.append(prefix + cur_line.rstrip() + line_concat)
       prefix = new_prefix
       max_len = new_max_len
       cur_line = seg.lstrip()
   if cur_line.strip() != '':
     output.append(prefix + cur_line.strip())
 
 
 def WrapPreprocessorDirective(line, output):
   WrapCode(line, ' \\', output)
 
 
 def WrapPlainCode(line, output):
   WrapCode(line, '', output)
 
 
 def IsMultiLineIWYUPragma(line):
   return re.search(r'/\* IWYU pragma: ', line)
 
 
 def IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
   return (re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line) or
           re.match(r'^#include\s', line) or
           # Don't break IWYU pragmas, either; that causes iwyu.py problems.
           re.search(r'// IWYU pragma: ', line))
 
 
 def WrapLongLine(line, output):
   line = line.rstrip()
   if len(line) <= 80:
     output.append(line)
   elif IsSingleLineComment(line):
     if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
       # The style guide made an exception to allow long header guard lines,
       # includes and IWYU pragmas.
       output.append(line)
     else:
       WrapComment(line, output)
   elif IsInPreprocessorDirective(output, line):
     if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
       # The style guide made an exception to allow long header guard lines,
       # includes and IWYU pragmas.
       output.append(line)
     else:
       WrapPreprocessorDirective(line, output)
   elif IsMultiLineIWYUPragma(line):
     output.append(line)
   else:
     WrapPlainCode(line, output)
 
 
 def BeautifyCode(string):
   lines = string.splitlines()
   output = []
   for line in lines:
     WrapLongLine(line, output)
   output2 = [line.rstrip() for line in output]
   return '\n'.join(output2) + '\n'
 
 
 def ConvertFromPumpSource(src_text):
   """Return the text generated from the given Pump source text."""
   ast = ParseToAST(StripMetaComments(src_text))
   output = Output()
   RunCode(Env(), ast, output)
   return BeautifyCode(output.string)
 
 
 def main(argv):
   if len(argv) == 1:
     print __doc__
     sys.exit(1)
 
   file_path = argv[-1]
   output_str = ConvertFromPumpSource(file(file_path, 'r').read())
   if file_path.endswith('.pump'):
     output_file_path = file_path[:-5]
   else:
     output_file_path = '-'
   if output_file_path == '-':
     print output_str,
   else:
     output_file = file(output_file_path, 'w')
     output_file.write('// This file was GENERATED by command:\n')
     output_file.write('//     %s %s\n' %
                       (os.path.basename(__file__), os.path.basename(file_path)))
     output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
     output_file.write(output_str)
     output_file.close()
 
 
 if __name__ == '__main__':
   main(sys.argv)
diff --git a/googletest/scripts/upload.py b/googletest/scripts/upload.py
index 8563e5f4..c852e4c9 100755
--- a/googletest/scripts/upload.py
+++ b/googletest/scripts/upload.py
@@ -1,1387 +1,1387 @@
 #!/usr/bin/env python
 #
 # Copyright 2007 Google Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
 #     http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
 """Tool for uploading diffs from a version control system to the codereview app.
 
 Usage summary: upload.py [options] [-- diff_options]
 
 Diff options are passed to the diff command of the underlying system.
 
 Supported version control systems:
   Git
   Mercurial
   Subversion
 
 It is important for Git/Mercurial users to specify a tree/node/branch to diff
 against by using the '--rev' option.
 """
 # This code is derived from appcfg.py in the App Engine SDK (open source),
 # and from ASPN recipe #146306.
 
 import cookielib
 import getpass
 import logging
 import md5
 import mimetypes
 import optparse
 import os
 import re
 import socket
 import subprocess
 import sys
 import urllib
 import urllib2
 import urlparse
 
 try:
   import readline
 except ImportError:
   pass
 
 # The logging verbosity:
 #  0: Errors only.
 #  1: Status messages.
 #  2: Info logs.
 #  3: Debug logs.
 verbosity = 1
 
 # Max size of patch or base file.
 MAX_UPLOAD_SIZE = 900 * 1024
 
 
 def GetEmail(prompt):
   """Prompts the user for their email address and returns it.
 
   The last used email address is saved to a file and offered up as a suggestion
   to the user. If the user presses enter without typing anything, the last
   used email address is used. If the user enters a new address, it is saved
   for next time we prompt.
 
   """
   last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
   last_email = ""
   if os.path.exists(last_email_file_name):
     try:
       last_email_file = open(last_email_file_name, "r")
       last_email = last_email_file.readline().strip("\n")
       last_email_file.close()
       prompt += " [%s]" % last_email
     except IOError, e:
       pass
   email = raw_input(prompt + ": ").strip()
   if email:
     try:
       last_email_file = open(last_email_file_name, "w")
       last_email_file.write(email)
       last_email_file.close()
     except IOError, e:
       pass
   else:
     email = last_email
   return email
 
 
 def StatusUpdate(msg):
   """Print a status message to stdout.
 
   If 'verbosity' is greater than 0, print the message.
 
   Args:
     msg: The string to print.
   """
   if verbosity > 0:
     print msg
 
 
 def ErrorExit(msg):
   """Print an error message to stderr and exit."""
   print >>sys.stderr, msg
   sys.exit(1)
 
 
 class ClientLoginError(urllib2.HTTPError):
   """Raised to indicate there was an error authenticating with ClientLogin."""
 
   def __init__(self, url, code, msg, headers, args):
     urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
     self.args = args
     self.reason = args["Error"]
 
 
 class AbstractRpcServer(object):
   """Provides a common interface for a simple RPC server."""
 
   def __init__(self, host, auth_function, host_override=None, extra_headers={},
                save_cookies=False):
     """Creates a new HttpRpcServer.
 
     Args:
       host: The host to send requests to.
       auth_function: A function that takes no arguments and returns an
         (email, password) tuple when called. Will be called if authentication
         is required.
       host_override: The host header to send to the server (defaults to host).
       extra_headers: A dict of extra headers to append to every request.
       save_cookies: If True, save the authentication cookies to local disk.
         If False, use an in-memory cookiejar instead.  Subclasses must
         implement this functionality.  Defaults to False.
     """
     self.host = host
     self.host_override = host_override
     self.auth_function = auth_function
     self.authenticated = False
     self.extra_headers = extra_headers
     self.save_cookies = save_cookies
     self.opener = self._GetOpener()
     if self.host_override:
       logging.info("Server: %s; Host: %s", self.host, self.host_override)
     else:
       logging.info("Server: %s", self.host)
 
   def _GetOpener(self):
     """Returns an OpenerDirector for making HTTP requests.
 
     Returns:
       A urllib2.OpenerDirector object.
     """
     raise NotImplementedError()
 
   def _CreateRequest(self, url, data=None):
     """Creates a new urllib request."""
     logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
     req = urllib2.Request(url, data=data)
     if self.host_override:
       req.add_header("Host", self.host_override)
     for key, value in self.extra_headers.iteritems():
       req.add_header(key, value)
     return req
 
   def _GetAuthToken(self, email, password):
     """Uses ClientLogin to authenticate the user, returning an auth token.
 
     Args:
       email:    The user's email address
       password: The user's password
 
     Raises:
       ClientLoginError: If there was an error authenticating with ClientLogin.
       HTTPError: If there was some other form of HTTP error.
 
     Returns:
       The authentication token returned by ClientLogin.
     """
     account_type = "GOOGLE"
     if self.host.endswith(".google.com"):
       # Needed for use inside Google.
       account_type = "HOSTED"
     req = self._CreateRequest(
         url="https://www.google.com/accounts/ClientLogin",
         data=urllib.urlencode({
             "Email": email,
             "Passwd": password,
             "service": "ah",
             "source": "rietveld-codereview-upload",
             "accountType": account_type,
         }),
     )
     try:
       response = self.opener.open(req)
       response_body = response.read()
       response_dict = dict(x.split("=")
                            for x in response_body.split("\n") if x)
       return response_dict["Auth"]
     except urllib2.HTTPError, e:
       if e.code == 403:
         body = e.read()
         response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
         raise ClientLoginError(req.get_full_url(), e.code, e.msg,
                                e.headers, response_dict)
       else:
         raise
 
   def _GetAuthCookie(self, auth_token):
     """Fetches authentication cookies for an authentication token.
 
     Args:
       auth_token: The authentication token returned by ClientLogin.
 
     Raises:
       HTTPError: If there was an error fetching the authentication cookies.
     """
     # This is a dummy value to allow us to identify when we're successful.
     continue_location = "http://localhost/"
     args = {"continue": continue_location, "auth": auth_token}
     req = self._CreateRequest("http://%s/_ah/login?%s" %
                               (self.host, urllib.urlencode(args)))
     try:
       response = self.opener.open(req)
     except urllib2.HTTPError, e:
       response = e
     if (response.code != 302 or
         response.info()["location"] != continue_location):
       raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
                               response.headers, response.fp)
     self.authenticated = True
 
   def _Authenticate(self):
     """Authenticates the user.
 
     The authentication process works as follows:
      1) We get a username and password from the user
      2) We use ClientLogin to obtain an AUTH token for the user
         (see https://developers.google.com/identity/protocols/AuthForInstalledApps).
      3) We pass the auth token to /_ah/login on the server to obtain an
         authentication cookie. If login was successful, it tries to redirect
         us to the URL we provided.
 
     If we attempt to access the upload API without first obtaining an
     authentication cookie, it returns a 401 response and directs us to
     authenticate ourselves with ClientLogin.
     """
     for i in range(3):
       credentials = self.auth_function()
       try:
         auth_token = self._GetAuthToken(credentials[0], credentials[1])
       except ClientLoginError, e:
         if e.reason == "BadAuthentication":
           print >>sys.stderr, "Invalid username or password."
           continue
         if e.reason == "CaptchaRequired":
           print >>sys.stderr, (
               "Please go to\n"
               "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
               "and verify you are a human.  Then try again.")
           break
         if e.reason == "NotVerified":
           print >>sys.stderr, "Account not verified."
           break
         if e.reason == "TermsNotAgreed":
           print >>sys.stderr, "User has not agreed to TOS."
           break
         if e.reason == "AccountDeleted":
           print >>sys.stderr, "The user account has been deleted."
           break
         if e.reason == "AccountDisabled":
           print >>sys.stderr, "The user account has been disabled."
           break
         if e.reason == "ServiceDisabled":
           print >>sys.stderr, ("The user's access to the service has been "
                                "disabled.")
           break
         if e.reason == "ServiceUnavailable":
           print >>sys.stderr, "The service is not available; try again later."
           break
         raise
       self._GetAuthCookie(auth_token)
       return
 
   def Send(self, request_path, payload=None,
            content_type="application/octet-stream",
            timeout=None,
            **kwargs):
     """Sends an RPC and returns the response.
 
     Args:
       request_path: The path to send the request to, e.g. /api/appversion/create.
       payload: The body of the request, or None to send an empty request.
       content_type: The Content-Type header to use.
       timeout: timeout in seconds; default None i.e. no timeout.
         (Note: for large requests on OS X, the timeout doesn't work right.)
       kwargs: Any keyword arguments are converted into query string parameters.
 
     Returns:
       The response body, as a string.
     """
     # TODO: Don't require authentication.  Let the server say
     # whether it is necessary.
     if not self.authenticated:
       self._Authenticate()
 
     old_timeout = socket.getdefaulttimeout()
     socket.setdefaulttimeout(timeout)
     try:
       tries = 0
       while True:
         tries += 1
         args = dict(kwargs)
         url = "http://%s%s" % (self.host, request_path)
         if args:
           url += "?" + urllib.urlencode(args)
         req = self._CreateRequest(url=url, data=payload)
         req.add_header("Content-Type", content_type)
         try:
           f = self.opener.open(req)
           response = f.read()
           f.close()
           return response
         except urllib2.HTTPError, e:
           if tries > 3:
             raise
           elif e.code == 401:
             self._Authenticate()
 ##           elif e.code >= 500 and e.code < 600:
 ##             # Server Error - try again.
 ##             continue
           else:
             raise
     finally:
       socket.setdefaulttimeout(old_timeout)
 
 
 class HttpRpcServer(AbstractRpcServer):
   """Provides a simplified RPC-style interface for HTTP requests."""
 
   def _Authenticate(self):
     """Save the cookie jar after authentication."""
     super(HttpRpcServer, self)._Authenticate()
     if self.save_cookies:
       StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
       self.cookie_jar.save()
 
   def _GetOpener(self):
     """Returns an OpenerDirector that supports cookies and ignores redirects.
 
     Returns:
       A urllib2.OpenerDirector object.
     """
     opener = urllib2.OpenerDirector()
     opener.add_handler(urllib2.ProxyHandler())
     opener.add_handler(urllib2.UnknownHandler())
     opener.add_handler(urllib2.HTTPHandler())
     opener.add_handler(urllib2.HTTPDefaultErrorHandler())
     opener.add_handler(urllib2.HTTPSHandler())
     opener.add_handler(urllib2.HTTPErrorProcessor())
     if self.save_cookies:
       self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
       self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
       if os.path.exists(self.cookie_file):
         try:
           self.cookie_jar.load()
           self.authenticated = True
           StatusUpdate("Loaded authentication cookies from %s" %
                        self.cookie_file)
         except (cookielib.LoadError, IOError):
           # Failed to load cookies - just ignore them.
           pass
       else:
         # Create an empty cookie file with mode 600
         fd = os.open(self.cookie_file, os.O_CREAT, 0600)
         os.close(fd)
       # Always chmod the cookie file
       os.chmod(self.cookie_file, 0600)
     else:
       # Don't save cookies across runs of upload.py.
       self.cookie_jar = cookielib.CookieJar()
     opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
     return opener
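 
 # Hedged usage sketch (illustration only, not executed by this script): how a
 # caller typically drives the RPC layer defined above.  The hostname, path,
 # and credentials below are placeholders.
 #
 #   server = HttpRpcServer("codereview.appspot.com",
 #                          lambda: ("user@example.com", "password"),
 #                          save_cookies=False)
 #   body = server.Send("/some/path", payload=None, timeout=30)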
 
 
 parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
 parser.add_option("-y", "--assume_yes", action="store_true",
                   dest="assume_yes", default=False,
                   help="Assume that the answer to yes/no questions is 'yes'.")
 # Logging
 group = parser.add_option_group("Logging options")
 group.add_option("-q", "--quiet", action="store_const", const=0,
                  dest="verbose", help="Print errors only.")
 group.add_option("-v", "--verbose", action="store_const", const=2,
                  dest="verbose", default=1,
                  help="Print info level logs (default).")
 group.add_option("--noisy", action="store_const", const=3,
                  dest="verbose", help="Print all logs.")
 # Review server
 group = parser.add_option_group("Review server options")
 group.add_option("-s", "--server", action="store", dest="server",
                  default="codereview.appspot.com",
                  metavar="SERVER",
                  help=("The server to upload to. The format is host[:port]. "
                        "Defaults to 'codereview.appspot.com'."))
 group.add_option("-e", "--email", action="store", dest="email",
                  metavar="EMAIL", default=None,
                  help="The username to use. Will prompt if omitted.")
 group.add_option("-H", "--host", action="store", dest="host",
                  metavar="HOST", default=None,
                  help="Overrides the Host header sent with all RPCs.")
 group.add_option("--no_cookies", action="store_false",
                  dest="save_cookies", default=True,
                  help="Do not save authentication cookies to local disk.")
 # Issue
 group = parser.add_option_group("Issue options")
 group.add_option("-d", "--description", action="store", dest="description",
                  metavar="DESCRIPTION", default=None,
                  help="Optional description when creating an issue.")
 group.add_option("-f", "--description_file", action="store",
                  dest="description_file", metavar="DESCRIPTION_FILE",
                  default=None,
                  help="Optional path of a file that contains "
                       "the description when creating an issue.")
 group.add_option("-r", "--reviewers", action="store", dest="reviewers",
                  metavar="REVIEWERS", default=None,
                  help="Add reviewers (comma separated email addresses).")
 group.add_option("--cc", action="store", dest="cc",
                  metavar="CC", default=None,
                  help="Add CC (comma separated email addresses).")
 # Upload options
 group = parser.add_option_group("Patch options")
 group.add_option("-m", "--message", action="store", dest="message",
                  metavar="MESSAGE", default=None,
                  help="A message to identify the patch. "
                       "Will prompt if omitted.")
 group.add_option("-i", "--issue", type="int", action="store",
                  metavar="ISSUE", default=None,
                  help="Issue number to which to add. Defaults to new issue.")
 group.add_option("--download_base", action="store_true",
                  dest="download_base", default=False,
                  help="Base files will be downloaded by the server "
                  "(side-by-side diffs may not work on files with CRs).")
 group.add_option("--rev", action="store", dest="revision",
                  metavar="REV", default=None,
                  help="Branch/tree/revision to diff against (used by DVCS).")
 group.add_option("--send_mail", action="store_true",
                  dest="send_mail", default=False,
                  help="Send notification email to reviewers.")
 
 
 def GetRpcServer(options):
   """Returns an instance of an AbstractRpcServer.
 
   Returns:
     A new AbstractRpcServer, on which RPC calls can be made.
   """
 
   rpc_server_class = HttpRpcServer
 
   def GetUserCredentials():
     """Prompts the user for a username and password."""
     email = options.email
     if email is None:
       email = GetEmail("Email (login for uploading to %s)" % options.server)
     password = getpass.getpass("Password for %s: " % email)
     return (email, password)
 
   # If this is the dev_appserver, use fake authentication.
   host = (options.host or options.server).lower()
   if host == "localhost" or host.startswith("localhost:"):
     email = options.email
     if email is None:
       email = "test@example.com"
       logging.info("Using debug user %s.  Override with --email" % email)
     server = rpc_server_class(
         options.server,
         lambda: (email, "password"),
         host_override=options.host,
         extra_headers={"Cookie":
                        'dev_appserver_login="%s:False"' % email},
         save_cookies=options.save_cookies)
     # Don't try to talk to ClientLogin.
     server.authenticated = True
     return server
 
   return rpc_server_class(options.server, GetUserCredentials,
                           host_override=options.host,
                           save_cookies=options.save_cookies)
 
 
 def EncodeMultipartFormData(fields, files):
   """Encode form fields for multipart/form-data.
 
   Args:
     fields: A sequence of (name, value) elements for regular form fields.
     files: A sequence of (name, filename, value) elements for data to be
            uploaded as files.
   Returns:
     (content_type, body) ready for httplib.HTTP instance.
 
   Source:
     https://web.archive.org/web/20160116052001/code.activestate.com/recipes/146306
   """
   BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
   CRLF = '\r\n'
   lines = []
   for (key, value) in fields:
     lines.append('--' + BOUNDARY)
     lines.append('Content-Disposition: form-data; name="%s"' % key)
     lines.append('')
     lines.append(value)
   for (key, filename, value) in files:
     lines.append('--' + BOUNDARY)
     lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' %
                  (key, filename))
     lines.append('Content-Type: %s' % GetContentType(filename))
     lines.append('')
     lines.append(value)
   lines.append('--' + BOUNDARY + '--')
   lines.append('')
   body = CRLF.join(lines)
   content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
   return content_type, body
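 
 # Worked example (illustration only): with the hypothetical inputs
 #   fields = [("subject", "demo")]
 #   files  = [("data", "data.diff", "Index: foo.cc\n")]
 # the generated body is, roughly, the following lines joined with CRLF:
 #   ---M-A-G-I-C---B-O-U-N-D-A-R-Y-
 #   Content-Disposition: form-data; name="subject"
 #
 #   demo
 #   ---M-A-G-I-C---B-O-U-N-D-A-R-Y-
 #   Content-Disposition: form-data; name="data"; filename="data.diff"
 #   Content-Type: application/octet-stream   (or whatever GetContentType guesses)
 #
 #   Index: foo.cc
 #
 #   ---M-A-G-I-C---B-O-U-N-D-A-R-Y---
 # and the returned content type is
 #   multipart/form-data; boundary=-M-A-G-I-C---B-O-U-N-D-A-R-Y-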
 
 
 def GetContentType(filename):
   """Helper to guess the content-type from the filename."""
   return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
 
 
 # Use a shell for subcommands on Windows to get a PATH search.
 use_shell = sys.platform.startswith("win")
 
 def RunShellWithReturnCode(command, print_output=False,
                            universal_newlines=True):
   """Executes a command and returns the output from stdout and the return code.
 
   Args:
     command: Command to execute.
     print_output: If True, the output is also printed to stdout as it is
                   produced.  If False, output is still captured and returned,
                   but stderr is discarded.
     universal_newlines: Use universal_newlines flag (default: True).
 
   Returns:
     Tuple (output, return code)
   """
   logging.info("Running %s", command)
   p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                        shell=use_shell, universal_newlines=universal_newlines)
   if print_output:
     output_array = []
     while True:
       line = p.stdout.readline()
       if not line:
         break
       print line.strip("\n")
       output_array.append(line)
     output = "".join(output_array)
   else:
     output = p.stdout.read()
   p.wait()
   errout = p.stderr.read()
   if print_output and errout:
     print >>sys.stderr, errout
   p.stdout.close()
   p.stderr.close()
   return output, p.returncode
 
 
 def RunShell(command, silent_ok=False, universal_newlines=True,
              print_output=False):
   data, retcode = RunShellWithReturnCode(command, print_output,
                                          universal_newlines)
   if retcode:
     ErrorExit("Got error status from %s:\n%s" % (command, data))
   if not silent_ok and not data:
     ErrorExit("No output from %s" % command)
   return data
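 
 # Hedged usage sketch (commands are examples only): both helpers take an
 # argv-style list, e.g.
 #   out = RunShell(["svn", "info"])                     # exits on failure
 #   out, code = RunShellWithReturnCode(["hg", "root"])  # caller checks code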
 
 
 class VersionControlSystem(object):
   """Abstract base class providing an interface to the VCS."""
 
   def __init__(self, options):
     """Constructor.
 
     Args:
       options: Command line options.
     """
     self.options = options
 
   def GenerateDiff(self, args):
     """Return the current diff as a string.
 
     Args:
       args: Extra arguments to pass to the diff command.
     """
     raise NotImplementedError(
         "abstract method -- subclass %s must override" % self.__class__)
 
   def GetUnknownFiles(self):
     """Return a list of files unknown to the VCS."""
     raise NotImplementedError(
         "abstract method -- subclass %s must override" % self.__class__)
 
   def CheckForUnknownFiles(self):
     """Show an "are you sure?" prompt if there are unknown files."""
     unknown_files = self.GetUnknownFiles()
     if unknown_files:
       print "The following files are not added to version control:"
       for line in unknown_files:
         print line
       prompt = "Are you sure to continue?(y/N) "
       answer = raw_input(prompt).strip()
       if answer != "y":
         ErrorExit("User aborted")
 
   def GetBaseFile(self, filename):
     """Get the content of the upstream version of a file.
 
     Returns:
       A tuple (base_content, new_content, is_binary, status)
         base_content: The contents of the base file.
         new_content: For text files, this is empty.  For binary files, this is
           the contents of the new file, since the diff output won't contain
           information to reconstruct the current file.
-        is_binary: True if the file is binary.
+        is_binary: True iff the file is binary.
         status: The status of the file.
     """
 
     raise NotImplementedError(
         "abstract method -- subclass %s must override" % self.__class__)
 
 
   def GetBaseFiles(self, diff):
     """Helper that calls GetBase file for each file in the patch.
 
     Returns:
       A dictionary that maps from filename to GetBaseFile's tuple.  Filenames
       are retrieved based on lines that start with "Index:" or
       "Property changes on:".
     """
     files = {}
     for line in diff.splitlines(True):
       if line.startswith('Index:') or line.startswith('Property changes on:'):
         unused, filename = line.split(':', 1)
         # On Windows if a file has property changes its filename uses '\'
         # instead of '/'.
         filename = filename.strip().replace('\\', '/')
         files[filename] = self.GetBaseFile(filename)
     return files
 
 
   def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
                       files):
     """Uploads the base files (and if necessary, the current ones as well)."""
 
     def UploadFile(filename, file_id, content, is_binary, status, is_base):
       """Uploads a file to the server."""
       file_too_large = False
       if is_base:
         type = "base"
       else:
         type = "current"
       if len(content) > MAX_UPLOAD_SIZE:
         print ("Not uploading the %s file for %s because it's too large." %
                (type, filename))
         file_too_large = True
         content = ""
       checksum = md5.new(content).hexdigest()
       if options.verbose > 0 and not file_too_large:
         print "Uploading %s file for %s" % (type, filename)
       url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
       form_fields = [("filename", filename),
                      ("status", status),
                      ("checksum", checksum),
                      ("is_binary", str(is_binary)),
                      ("is_current", str(not is_base)),
                     ]
       if file_too_large:
         form_fields.append(("file_too_large", "1"))
       if options.email:
         form_fields.append(("user", options.email))
       ctype, body = EncodeMultipartFormData(form_fields,
                                             [("data", filename, content)])
       response_body = rpc_server.Send(url, body,
                                       content_type=ctype)
       if not response_body.startswith("OK"):
         StatusUpdate("  --> %s" % response_body)
         sys.exit(1)
 
     patches = dict()
     for patch_key, filename in patch_list:
       patches.setdefault(filename, patch_key)
     for filename in patches.keys():
       base_content, new_content, is_binary, status = files[filename]
       file_id_str = patches.get(filename)
       if file_id_str.find("nobase") != -1:
         base_content = None
         file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
       file_id = int(file_id_str)
       if base_content is not None:
         UploadFile(filename, file_id, base_content, is_binary, status, True)
       if new_content is not None:
         UploadFile(filename, file_id, new_content, is_binary, status, False)
 
   def IsImage(self, filename):
     """Returns true if the filename has an image extension."""
     mimetype = mimetypes.guess_type(filename)[0]
     if not mimetype:
       return False
     return mimetype.startswith("image/")
 
 
 class SubversionVCS(VersionControlSystem):
   """Implementation of the VersionControlSystem interface for Subversion."""
 
   def __init__(self, options):
     super(SubversionVCS, self).__init__(options)
     if self.options.revision:
       match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
       if not match:
         ErrorExit("Invalid Subversion revision %s." % self.options.revision)
       self.rev_start = match.group(1)
       self.rev_end = match.group(3)
     else:
       self.rev_start = self.rev_end = None
     # Cache output from "svn list -r REVNO dirname".
     # Keys: dirname, Values: 2-tuple (output for start rev and end rev).
     self.svnls_cache = {}
     # SVN base URL is required to fetch files deleted in an older revision.
     # Result is cached to not guess it over and over again in GetBaseFile().
     required = self.options.download_base or self.options.revision is not None
     self.svn_base = self._GuessBase(required)
 
   def GuessBase(self, required):
     """Wrapper for _GuessBase."""
     return self.svn_base
 
   def _GuessBase(self, required):
     """Returns the SVN base URL.
 
     Args:
       required: If True, exit with an error when the URL can't be guessed;
         otherwise return None in that case.
     """
     info = RunShell(["svn", "info"])
     for line in info.splitlines():
       words = line.split()
       if len(words) == 2 and words[0] == "URL:":
         url = words[1]
         scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
         username, netloc = urllib.splituser(netloc)
         if username:
           logging.info("Removed username from base URL")
         if netloc.endswith("svn.python.org"):
           if netloc == "svn.python.org":
             if path.startswith("/projects/"):
               path = path[9:]
           elif netloc != "pythondev@svn.python.org":
             ErrorExit("Unrecognized Python URL: %s" % url)
           base = "http://svn.python.org/view/*checkout*%s/" % path
           logging.info("Guessed Python base = %s", base)
         elif netloc.endswith("svn.collab.net"):
           if path.startswith("/repos/"):
             path = path[6:]
           base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
           logging.info("Guessed CollabNet base = %s", base)
         elif netloc.endswith(".googlecode.com"):
           path = path + "/"
           base = urlparse.urlunparse(("http", netloc, path, params,
                                       query, fragment))
           logging.info("Guessed Google Code base = %s", base)
         else:
           path = path + "/"
           base = urlparse.urlunparse((scheme, netloc, path, params,
                                       query, fragment))
           logging.info("Guessed base = %s", base)
         return base
     if required:
       ErrorExit("Can't find URL in output from svn info")
     return None
 
   def GenerateDiff(self, args):
     cmd = ["svn", "diff"]
     if self.options.revision:
       cmd += ["-r", self.options.revision]
     cmd.extend(args)
     data = RunShell(cmd)
     count = 0
     for line in data.splitlines():
       if line.startswith("Index:") or line.startswith("Property changes on:"):
         count += 1
         logging.info(line)
     if not count:
       ErrorExit("No valid patches found in output from svn diff")
     return data
 
   def _CollapseKeywords(self, content, keyword_str):
     """Collapses SVN keywords."""
     # svn cat translates keywords but svn diff doesn't. As a result of this
     # behavior patching.PatchChunks() fails with a chunk mismatch error.
     # This part was originally written by the Review Board development team
     # who had the same problem (https://reviews.reviewboard.org/r/276/).
     # Mapping of keywords to known aliases
     svn_keywords = {
       # Standard keywords
       'Date':                ['Date', 'LastChangedDate'],
       'Revision':            ['Revision', 'LastChangedRevision', 'Rev'],
       'Author':              ['Author', 'LastChangedBy'],
       'HeadURL':             ['HeadURL', 'URL'],
       'Id':                  ['Id'],
 
       # Aliases
       'LastChangedDate':     ['LastChangedDate', 'Date'],
       'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
       'LastChangedBy':       ['LastChangedBy', 'Author'],
       'URL':                 ['URL', 'HeadURL'],
     }
 
     def repl(m):
       if m.group(2):
         return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
       return "$%s$" % m.group(1)
     keywords = [keyword
                 for name in keyword_str.split(" ")
                 for keyword in svn_keywords.get(name, [])]
     return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
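 
   # Worked example (illustration only): with keyword_str "Id", a base-file
   # line such as
   #   $Id: foo.cc 1234 2009-01-01 someone $
   # collapses back to
   #   $Id$
   # matching what "svn diff" produced for the working copy.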
 
   def GetUnknownFiles(self):
     status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
     unknown_files = []
     for line in status.split("\n"):
       if line and line[0] == "?":
         unknown_files.append(line)
     return unknown_files
 
   def ReadFile(self, filename):
     """Returns the contents of a file."""
     file = open(filename, 'rb')
     result = ""
     try:
       result = file.read()
     finally:
       file.close()
     return result
 
   def GetStatus(self, filename):
     """Returns the status of a file."""
     if not self.options.revision:
       status = RunShell(["svn", "status", "--ignore-externals", filename])
       if not status:
         ErrorExit("svn status returned no output for %s" % filename)
       status_lines = status.splitlines()
       # If file is in a cl, the output will begin with
       # "\n--- Changelist 'cl_name':\n".  See
       # https://web.archive.org/web/20090918234815/svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
       if (len(status_lines) == 3 and
           not status_lines[0] and
           status_lines[1].startswith("--- Changelist")):
         status = status_lines[2]
       else:
         status = status_lines[0]
     # If we have a revision to diff against we need to run "svn list"
     # for the old and the new revision and compare the results to get
     # the correct status for a file.
     else:
       dirname, relfilename = os.path.split(filename)
       if dirname not in self.svnls_cache:
         cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
         out, returncode = RunShellWithReturnCode(cmd)
         if returncode:
           ErrorExit("Failed to get status for %s." % filename)
         old_files = out.splitlines()
         args = ["svn", "list"]
         if self.rev_end:
           args += ["-r", self.rev_end]
         cmd = args + [dirname or "."]
         out, returncode = RunShellWithReturnCode(cmd)
         if returncode:
           ErrorExit("Failed to run command %s" % cmd)
         self.svnls_cache[dirname] = (old_files, out.splitlines())
       old_files, new_files = self.svnls_cache[dirname]
       if relfilename in old_files and relfilename not in new_files:
         status = "D   "
       elif relfilename in old_files and relfilename in new_files:
         status = "M   "
       else:
         status = "A   "
     return status
 
   def GetBaseFile(self, filename):
     status = self.GetStatus(filename)
     base_content = None
     new_content = None
 
     # If a file is copied its status will be "A  +", which signifies
     # "addition-with-history".  See "svn st" for more information.  We need to
     # upload the original file or else diff parsing will fail if the file was
     # edited.
     if status[0] == "A" and status[3] != "+":
       # We'll need to upload the new content if we're adding a binary file
       # since diff's output won't contain it.
       mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
                           silent_ok=True)
       base_content = ""
       is_binary = mimetype and not mimetype.startswith("text/")
       if is_binary and self.IsImage(filename):
         new_content = self.ReadFile(filename)
     elif (status[0] in ("M", "D", "R") or
           (status[0] == "A" and status[3] == "+") or  # Copied file.
           (status[0] == " " and status[1] == "M")):  # Property change.
       args = []
       if self.options.revision:
         url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
       else:
         # Don't change filename, it's needed later.
         url = filename
         args += ["-r", "BASE"]
       cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
       mimetype, returncode = RunShellWithReturnCode(cmd)
       if returncode:
         # File does not exist in the requested revision.
         # Reset mimetype, it contains an error message.
         mimetype = ""
       get_base = False
       is_binary = mimetype and not mimetype.startswith("text/")
       if status[0] == " ":
         # Empty base content just to force an upload.
         base_content = ""
       elif is_binary:
         if self.IsImage(filename):
           get_base = True
           if status[0] == "M":
             if not self.rev_end:
               new_content = self.ReadFile(filename)
             else:
               url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
               new_content = RunShell(["svn", "cat", url],
                                      universal_newlines=True, silent_ok=True)
         else:
           base_content = ""
       else:
         get_base = True
 
       if get_base:
         if is_binary:
           universal_newlines = False
         else:
           universal_newlines = True
         if self.rev_start:
           # "svn cat -r REV delete_file.txt" doesn't work. cat requires
           # the full URL with "@REV" appended instead of using "-r" option.
           url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
           base_content = RunShell(["svn", "cat", url],
                                   universal_newlines=universal_newlines,
                                   silent_ok=True)
         else:
           base_content = RunShell(["svn", "cat", filename],
                                   universal_newlines=universal_newlines,
                                   silent_ok=True)
         if not is_binary:
           args = []
           if self.rev_start:
             url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
           else:
             url = filename
             args += ["-r", "BASE"]
           cmd = ["svn"] + args + ["propget", "svn:keywords", url]
           keywords, returncode = RunShellWithReturnCode(cmd)
           if keywords and not returncode:
             base_content = self._CollapseKeywords(base_content, keywords)
     else:
       StatusUpdate("svn status returned unexpected output: %s" % status)
       sys.exit(1)
     return base_content, new_content, is_binary, status[0:5]
 
 
 class GitVCS(VersionControlSystem):
   """Implementation of the VersionControlSystem interface for Git."""
 
   def __init__(self, options):
     super(GitVCS, self).__init__(options)
     # Map of filename -> hash of base file.
     self.base_hashes = {}
 
   def GenerateDiff(self, extra_args):
     # This is more complicated than svn's GenerateDiff because we must convert
     # the diff output to include an svn-style "Index:" line as well as record
     # the hashes of the base files, so we can upload them along with our diff.
     if self.options.revision:
       extra_args = [self.options.revision] + extra_args
     gitdiff = RunShell(["git", "diff", "--full-index"] + extra_args)
     svndiff = []
     filecount = 0
     filename = None
     for line in gitdiff.splitlines():
       match = re.match(r"diff --git a/(.*) b/.*$", line)
       if match:
         filecount += 1
         filename = match.group(1)
         svndiff.append("Index: %s\n" % filename)
       else:
         # The "index" line in a git diff looks like this (long hashes elided):
         #   index 82c0d44..b2cee3f 100755
         # We want to save the left hash, as that identifies the base file.
         match = re.match(r"index (\w+)\.\.", line)
         if match:
           self.base_hashes[filename] = match.group(1)
       svndiff.append(line + "\n")
     if not filecount:
       ErrorExit("No valid patches found in output from git diff")
     return "".join(svndiff)
 
   def GetUnknownFiles(self):
     status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
                       silent_ok=True)
     return status.splitlines()
 
   def GetBaseFile(self, filename):
     hash = self.base_hashes[filename]
     base_content = None
     new_content = None
     is_binary = False
     if hash == "0" * 40:  # All-zero hash indicates no base file.
       status = "A"
       base_content = ""
     else:
       status = "M"
       base_content, returncode = RunShellWithReturnCode(["git", "show", hash])
       if returncode:
         ErrorExit("Got error status from 'git show %s'" % hash)
     return (base_content, new_content, is_binary, status)
 
 
 class MercurialVCS(VersionControlSystem):
   """Implementation of the VersionControlSystem interface for Mercurial."""
 
   def __init__(self, options, repo_dir):
     super(MercurialVCS, self).__init__(options)
     # Absolute path to repository (we can be in a subdir)
     self.repo_dir = os.path.normpath(repo_dir)
     # Compute the subdir
     cwd = os.path.normpath(os.getcwd())
     assert cwd.startswith(self.repo_dir)
     self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
     if self.options.revision:
       self.base_rev = self.options.revision
     else:
       self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()
 
   def _GetRelPath(self, filename):
     """Get relative path of a file according to the current directory,
     given its logical path in the repo."""
     assert filename.startswith(self.subdir), filename
     return filename[len(self.subdir):].lstrip(r"\/")
 
   def GenerateDiff(self, extra_args):
     # If no file specified, restrict to the current subdir
     extra_args = extra_args or ["."]
     cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
     data = RunShell(cmd, silent_ok=True)
     svndiff = []
     filecount = 0
     for line in data.splitlines():
       m = re.match("diff --git a/(\S+) b/(\S+)", line)
       if m:
         # Modify the line to make it look as if it comes from svn diff.
         # With this modification no changes on the server side are required
         # to make upload.py work with Mercurial repos.
         # NOTE: for proper handling of moved/copied files, we have to use
         # the second filename.
         filename = m.group(2)
         svndiff.append("Index: %s" % filename)
         svndiff.append("=" * 67)
         filecount += 1
         logging.info(line)
       else:
         svndiff.append(line)
     if not filecount:
       ErrorExit("No valid patches found in output from hg diff")
     return "\n".join(svndiff) + "\n"
 
   def GetUnknownFiles(self):
     """Return a list of files unknown to the VCS."""
     args = []
     status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
         silent_ok=True)
     unknown_files = []
     for line in status.splitlines():
       st, fn = line.split(" ", 1)
       if st == "?":
         unknown_files.append(fn)
     return unknown_files
 
   def GetBaseFile(self, filename):
     # "hg status" and "hg cat" both take a path relative to the current subdir
     # rather than to the repo root, but "hg diff" has given us the full path
     # to the repo root.
     base_content = ""
     new_content = None
     is_binary = False
     oldrelpath = relpath = self._GetRelPath(filename)
     # "hg status -C" returns two lines for moved/copied files, one otherwise
     out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
     out = out.splitlines()
     # HACK: strip error message about missing file/directory if it isn't in
     # the working copy
     if out[0].startswith('%s: ' % relpath):
       out = out[1:]
     if len(out) > 1:
       # Moved/copied => considered as modified, use old filename to
       # retrieve base contents
       oldrelpath = out[1].strip()
       status = "M"
     else:
       status, _ = out[0].split(' ', 1)
     if status != "A":
       base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
         silent_ok=True)
       is_binary = "\0" in base_content  # Mercurial's heuristic
     if status != "R":
       new_content = open(relpath, "rb").read()
       is_binary = is_binary or "\0" in new_content
     if is_binary and base_content:
       # Fetch again without converting newlines
       base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
         silent_ok=True, universal_newlines=False)
     if not is_binary or not self.IsImage(relpath):
       new_content = None
     return base_content, new_content, is_binary, status
 
 
 # NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
 def SplitPatch(data):
   """Splits a patch into separate pieces for each file.
 
   Args:
     data: A string containing the output of svn diff.
 
   Returns:
     A list of 2-tuple (filename, text) where text is the svn diff output
       pertaining to filename.
   """
   patches = []
   filename = None
   diff = []
   for line in data.splitlines(True):
     new_filename = None
     if line.startswith('Index:'):
       unused, new_filename = line.split(':', 1)
       new_filename = new_filename.strip()
     elif line.startswith('Property changes on:'):
       unused, temp_filename = line.split(':', 1)
       # When a file is modified, paths use '/' between directories; however,
       # when a property is modified, '\' is used on Windows.  Normalize them,
       # otherwise the file shows up twice.
       temp_filename = temp_filename.strip().replace('\\', '/')
       if temp_filename != filename:
         # File has property changes but no modifications, create a new diff.
         new_filename = temp_filename
     if new_filename:
       if filename and diff:
         patches.append((filename, ''.join(diff)))
       filename = new_filename
       diff = [line]
       continue
     if diff is not None:
       diff.append(line)
   if filename and diff:
     patches.append((filename, ''.join(diff)))
   return patches
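 
 # Worked example (illustration only): for diff output containing two files,
 #   Index: a.cc
 #   <hunks for a.cc>
 #   Index: b.cc
 #   <hunks for b.cc>
 # SplitPatch returns [("a.cc", <a.cc section>), ("b.cc", <b.cc section>)],
 # where each section starts at its "Index:" line.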
 
 
 def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
   """Uploads a separate patch for each file in the diff output.
 
   Returns a list of [patch_key, filename] for each file.
   """
   patches = SplitPatch(data)
   rv = []
   for patch in patches:
     if len(patch[1]) > MAX_UPLOAD_SIZE:
       print ("Not uploading the patch for " + patch[0] +
              " because the file is too large.")
       continue
     form_fields = [("filename", patch[0])]
     if not options.download_base:
       form_fields.append(("content_upload", "1"))
     files = [("data", "data.diff", patch[1])]
     ctype, body = EncodeMultipartFormData(form_fields, files)
     url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
     print "Uploading patch for " + patch[0]
     response_body = rpc_server.Send(url, body, content_type=ctype)
     lines = response_body.splitlines()
     if not lines or lines[0] != "OK":
       StatusUpdate("  --> %s" % response_body)
       sys.exit(1)
     rv.append([lines[1], patch[0]])
   return rv
 
 
 def GuessVCS(options):
   """Helper to guess the version control system.
 
   This examines the current directory, guesses which VersionControlSystem
   we're using, and returns an instance of the appropriate class.  Exit with an
   error if we can't figure it out.
 
   Returns:
     A VersionControlSystem instance. Exits if the VCS can't be guessed.
   """
   # Mercurial has a command to get the base directory of a repository
   # Try running it, but don't die if we don't have hg installed.
   # NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
   try:
     out, returncode = RunShellWithReturnCode(["hg", "root"])
     if returncode == 0:
       return MercurialVCS(options, out.strip())
   except OSError, (errno, message):
     if errno != 2:  # ENOENT -- they don't have hg installed.
       raise
 
   # Subversion has a .svn in all working directories.
   if os.path.isdir('.svn'):
     logging.info("Guessed VCS = Subversion")
     return SubversionVCS(options)
 
   # Git has a command to test if you're in a git tree.
   # Try running it, but don't die if we don't have git installed.
   try:
     out, returncode = RunShellWithReturnCode(["git", "rev-parse",
                                               "--is-inside-work-tree"])
     if returncode == 0:
       return GitVCS(options)
   except OSError, (errno, message):
     if errno != 2:  # ENOENT -- they don't have git installed.
       raise
 
   ErrorExit(("Could not guess version control system. "
              "Are you in a working copy directory?"))
 
 
 def RealMain(argv, data=None):
   """The real main function.
 
   Args:
     argv: Command line arguments.
     data: Diff contents. If None (default) the diff is generated by
       the VersionControlSystem implementation returned by GuessVCS().
 
   Returns:
     A 2-tuple (issue id, patchset id).
     The patchset id is None if the base files are not uploaded by this
     script (applies only to SVN checkouts).
   """
   logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
                               "%(lineno)s %(message)s "))
   os.environ['LC_ALL'] = 'C'
   options, args = parser.parse_args(argv[1:])
   global verbosity
   verbosity = options.verbose
   if verbosity >= 3:
     logging.getLogger().setLevel(logging.DEBUG)
   elif verbosity >= 2:
     logging.getLogger().setLevel(logging.INFO)
   vcs = GuessVCS(options)
   if isinstance(vcs, SubversionVCS):
     # base field is only allowed for Subversion.
     # Note: Fetching base files may become deprecated in future releases.
     base = vcs.GuessBase(options.download_base)
   else:
     base = None
   if not base and options.download_base:
     # Without a base URL the server can't fetch the base files itself, so
     # fall back to uploading them from this script.
     options.download_base = False
     logging.info("Enabled upload of base file")
   if not options.assume_yes:
     vcs.CheckForUnknownFiles()
   if data is None:
     data = vcs.GenerateDiff(args)
   files = vcs.GetBaseFiles(data)
   if verbosity >= 1:
     print "Upload server:", options.server, "(change with -s/--server)"
   if options.issue:
     prompt = "Message describing this patch set: "
   else:
     prompt = "New issue subject: "
   message = options.message or raw_input(prompt).strip()
   if not message:
     ErrorExit("A non-empty message is required")
   rpc_server = GetRpcServer(options)
   form_fields = [("subject", message)]
   if base:
     form_fields.append(("base", base))
   if options.issue:
     form_fields.append(("issue", str(options.issue)))
   if options.email:
     form_fields.append(("user", options.email))
   if options.reviewers:
     for reviewer in options.reviewers.split(','):
       if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1:
         ErrorExit("Invalid email address: %s" % reviewer)
     form_fields.append(("reviewers", options.reviewers))
   if options.cc:
     for cc in options.cc.split(','):
       if "@" in cc and not cc.split("@")[1].count(".") == 1:
         ErrorExit("Invalid email address: %s" % cc)
     form_fields.append(("cc", options.cc))
   description = options.description
   if options.description_file:
     if options.description:
       ErrorExit("Can't specify description and description_file")
     file = open(options.description_file, 'r')
     description = file.read()
     file.close()
   if description:
     form_fields.append(("description", description))
   # Send a hash of each base file so the server can determine whether a copy
   # already exists in an earlier patchset.
   base_hashes = ""
   for file, info in files.iteritems():
     if info[0] is not None:
       checksum = md5.new(info[0]).hexdigest()
       if base_hashes:
         base_hashes += "|"
       base_hashes += checksum + ":" + file
   form_fields.append(("base_hashes", base_hashes))
   # If we're uploading base files, don't send the email before the uploads, so
   # that it contains the file status.
   if options.send_mail and options.download_base:
     form_fields.append(("send_mail", "1"))
   if not options.download_base:
     form_fields.append(("content_upload", "1"))
   if len(data) > MAX_UPLOAD_SIZE:
     print "Patch is large, so uploading file patches separately."
     uploaded_diff_file = []
     form_fields.append(("separate_patches", "1"))
   else:
     uploaded_diff_file = [("data", "data.diff", data)]
   ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
   response_body = rpc_server.Send("/upload", body, content_type=ctype)
   patchset = None
   if not options.download_base or not uploaded_diff_file:
     lines = response_body.splitlines()
     if len(lines) >= 2:
       msg = lines[0]
       patchset = lines[1].strip()
       patches = [x.split(" ", 1) for x in lines[2:]]
     else:
       msg = response_body
   else:
     msg = response_body
   StatusUpdate(msg)
   if not (response_body.startswith("Issue created.") or
           response_body.startswith("Issue updated.")):
     sys.exit(0)
   issue = msg[msg.rfind("/")+1:]
 
   if not uploaded_diff_file:
     result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
     if not options.download_base:
       patches = result
 
   if not options.download_base:
     vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
     if options.send_mail:
       rpc_server.Send("/" + issue + "/mail", payload="")
   return issue, patchset
 
 
 def main():
   try:
     RealMain(sys.argv)
   except KeyboardInterrupt:
     print
     StatusUpdate("Interrupted.")
     sys.exit(1)
 
 
 if __name__ == "__main__":
   main()