diff --git a/modules/webstyle/lib/webinterface_handler.py b/modules/webstyle/lib/webinterface_handler.py index dac024b79..6c71c1904 100644 --- a/modules/webstyle/lib/webinterface_handler.py +++ b/modules/webstyle/lib/webinterface_handler.py @@ -1,496 +1,502 @@ # -*- coding: utf-8 -*- ## This file is part of Invenio. ## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """ Apache request handler mechanism. It gives the tools to map url to functions, handles the legacy url scheme (/search.py queries), HTTP/HTTPS switching, language specification,... """ __revision__ = "$Id$" import urlparse import cgi import sys import re import os import gc from invenio import webinterface_handler_config as apache from invenio.config import CFG_SITE_URL, CFG_SITE_SECURE_URL, CFG_TMPDIR from invenio.access_control_config import CFG_EXTERNAL_AUTH_USING_SSO from invenio.messages import wash_language from invenio.urlutils import redirect_to_url from invenio.errorlib import register_exception from invenio.webuser import get_preferred_user_language, isGuestUser, \ getUid, loginUser, update_Uid, isUserSuperAdmin, collect_user_info from invenio.webinterface_handler_wsgi_utils import StringField ## The following variable is True if the installation make any difference ## between HTTP Vs. HTTPS connections. CFG_HAS_HTTPS_SUPPORT = CFG_SITE_URL != CFG_SITE_SECURE_URL ## Set this to True in order to log some more information. DEBUG = False # List of URIs for which the 'ln' argument must not be added # automatically CFG_NO_LANG_RECOGNITION_URIS = ['/rss', '/oai2d', '/journal'] RE_SLASHES = re.compile('/+') RE_SPECIAL_URI = re.compile('^/record/\d+|^/collection/.+') +_RE_BAD_MSIE = re.compile("MSIE\s+(\d+\.\d+)") def _debug(req, msg): """ Log the message. @param req: the request. @param msg: the message. @type msg: string """ if DEBUG: req.log_error(msg) def _check_result(req, result): """ Check that a page handler actually wrote something, and properly finish the apache request. @param req: the request. @param result: the produced output. @type result: string @return: an apache error code @rtype: int @raise apache.SERVER_RETURN: in case of a HEAD request. @note: that this function actually takes care of writing the result to the client. """ if result or req.bytes_sent > 0: if result is None: result = "" else: result = str(result) # unless content_type was manually set, we will attempt # to guess it if not req.content_type_set_p: # make an attempt to guess content-type if result[:100].strip()[:6].lower() == '' \ or result.find(' 0: req.content_type = 'text/html' else: req.content_type = 'text/plain' if req.header_only: if req.status in (apache.HTTP_NOT_FOUND, ): raise apache.SERVER_RETURN, req.status else: req.write(result) return apache.OK else: req.log_error("publisher: %s returned nothing." 
% `object`) return apache.HTTP_INTERNAL_SERVER_ERROR class TraversalError(Exception): """ Exception raised in case of an error in parsing the URL of the request. """ pass class WebInterfaceDirectory(object): """ A directory groups web pages, and can delegate dispatching of requests to the actual handler. This has been heavily borrowed from Quixote's dispatching mechanism, with specific adaptations. """ # Lists the valid URLs contained in this directory. _exports = [] # Set this to True in order to redirect queries over HTTPS _force_https = False def _translate(self, component): """(component : string) -> string | None Translate a path component into a Python identifier. Returning None signifies that the component does not exist. """ if component in self._exports: if component == '': return 'index' # implicit mapping else: return component else: # check for an explicit external to internal mapping for value in self._exports: if isinstance(value, tuple): if value[0] == component: return value[1] else: return None def _lookup(self, component, path): """ Override this method if you need to map dynamic URLs. It can eat up as much of the remaining path as needed, and return the remaining parts, so that the traversal can continue. """ return None, path def _traverse(self, req, path, do_head=False, guest_p=True): """ Locate the handler of an URI by traversing the elements of the path.""" _debug(req, 'traversing %r' % path) component, path = path[0], path[1:] name = self._translate(component) if name is None: obj, path = self._lookup(component, path) else: obj = getattr(self, name) if obj is None: _debug(req, 'could not resolve %s' % repr((component, path))) raise TraversalError() # We have found the next segment. If we know that from this # point our subpages are over HTTPS, do the switch. if CFG_HAS_HTTPS_SUPPORT and self._force_https and not req.is_https(): # We need to isolate the part of the URI that is after # CFG_SITE_URL, and append that to our CFG_SITE_SECURE_URL. original_parts = urlparse.urlparse(req.unparsed_uri) plain_prefix_parts = urlparse.urlparse(CFG_SITE_URL) secure_prefix_parts = urlparse.urlparse(CFG_SITE_SECURE_URL) # Compute the new path plain_path = original_parts[2] plain_path = secure_prefix_parts[2] + \ plain_path[len(plain_prefix_parts[2]):] # ...and recompose the complete URL final_parts = list(secure_prefix_parts) final_parts[2] = plain_path final_parts[-3:] = original_parts[-3:] target = urlparse.urlunparse(final_parts) redirect_to_url(req, target) if CFG_EXTERNAL_AUTH_USING_SSO and req.is_https() and guest_p: (iden, p_un, dummy, dummy) = loginUser(req, '', '', CFG_EXTERNAL_AUTH_USING_SSO) if len(iden)>0: update_Uid(req, p_un) guest_p = False # Continue the traversal. If there is a path, continue # resolving, otherwise call the method as it is our final # renderer. We even pass it the parsed form arguments. 
if path: if hasattr(obj, '_traverse'): return obj._traverse(req, path, do_head, guest_p) else: raise apache.SERVER_RETURN, apache.HTTP_NOT_FOUND if do_head: req.content_type = "text/html; charset=UTF-8" raise apache.SERVER_RETURN, apache.DONE form = req.form if 'ln' not in form and \ req.uri not in CFG_NO_LANG_RECOGNITION_URIS: ln = get_preferred_user_language(req) form.add_field('ln', ln) result = _check_result(req, obj(req, form)) return result def __call__(self, req, form): """ Maybe resolve the final / of a directory """ # When this method is called, we either are a directory which # has an 'index' method, and we redirect to it, or we don't # have such a method, in which case it is a traversal error. if "" in self._exports: if not form: # Fix missing trailing slash as a convenience, unless # we are processing a form (in which case it is better # to fix the form posting). redirect_to_url(req, req.uri + "/", apache.HTTP_MOVED_PERMANENTLY) _debug(req, 'directory %r is not callable' % self) raise TraversalError() def create_handler(root): """ Return a handler function that will dispatch apache requests through the URL layout passed in parameter.""" def _profiler(req): """ This handler wrap the default handler with a profiler. Profiling data is written into CFG_TMPDIR/invenio-profile-stats-datetime.raw, and is displayed at the bottom of the webpage. To use add profile=1 to your url. To change sorting algorithm you can provide profile=algorithm_name. You can add more than one profile requirement like ?profile=time&profile=cumulative. The list of available algorithm is displayed at the end of the profile. """ args = {} if req.args: args = cgi.parse_qs(req.args) if 'profile' in args: if not isUserSuperAdmin(collect_user_info(req)): return _handler(req) if 'memory' in args['profile']: gc.set_debug(gc.DEBUG_LEAK) ret = _handler(req) req.write("\n
<pre>%s</pre>
" % gc.garbage) gc.collect() req.write("\n
<pre>%s</pre>
" % gc.garbage) gc.set_debug(0) return ret from cStringIO import StringIO try: import pstats except ImportError: ret = _handler(req) req.write("
<pre>%s</pre>
" % "The Python Profiler is not installed!") return ret import datetime date = datetime.datetime.now().strftime('%Y%m%d%H%M%S') filename = '%s/invenio-profile-stats-%s.raw' % (CFG_TMPDIR, date) existing_sorts = pstats.Stats.sort_arg_dict_default.keys() required_sorts = [] profile_dump = [] for sort in args['profile']: if sort not in existing_sorts: sort = 'cumulative' if sort not in required_sorts: required_sorts.append(sort) if sys.hexversion < 0x02050000: import hotshot import hotshot.stats pr = hotshot.Profile(filename) ret = pr.runcall(_handler, req) for sort_type in required_sorts: tmp_out = sys.stdout sys.stdout = StringIO() hotshot.stats.load(filename).strip_dirs().sort_stats(sort_type).print_stats() # pylint: disable=E1103 # This is a hack. sys.stdout was replaced by a StringIO. profile_dump.append(sys.stdout.getvalue()) # pylint: enable=E1103 sys.stdout = tmp_out else: import cProfile pr = cProfile.Profile() ret = pr.runcall(_handler, req) pr.dump_stats(filename) for sort_type in required_sorts: strstream = StringIO() pstats.Stats(filename, stream=strstream).strip_dirs().sort_stats(sort_type).print_stats() profile_dump.append(strstream.getvalue()) profile_dump = '\n'.join(profile_dump) profile_dump += '\nYou can use profile=%s or profile=memory' % existing_sorts req.write("\n
<pre>%s</pre>
" % profile_dump) return ret else: return _handler(req) def _handler(req): """ This handler is invoked by mod_python with the apache request.""" try: allowed_methods = ("GET", "POST", "HEAD", "OPTIONS") req.allow_methods(allowed_methods, 1) if req.method not in allowed_methods: raise apache.SERVER_RETURN, apache.HTTP_METHOD_NOT_ALLOWED if req.method == 'OPTIONS': ## OPTIONS is used to now which method are allowed req.headers_out['Allow'] = ', '.join(allowed_methods) raise apache.SERVER_RETURN, apache.OK # Set user agent for fckeditor.py, which needs it here os.environ["HTTP_USER_AGENT"] = req.headers_in.get('User-Agent', '') guest_p = isGuestUser(getUid(req)) uri = req.uri if uri == '/': path = [''] else: ## Let's collapse multiple slashes into a single / uri = RE_SLASHES.sub('/', uri) path = uri[1:].split('/') + g = _RE_BAD_MSIE.search(req.headers_in.get('User-Agent', "MSIE 6.0")) + bad_msie = g and float(g.group(1)) < 9.0 if uri.startswith('/yours') or not guest_p: ## Private/personalized request should not be cached - req.headers_out['Cache-Control'] = 'private, no-cache, no-store, max-age=0, must-revalidate' - req.headers_out['Pragma'] = 'no-cache' - req.headers_out['Vary'] = '*' - else: + if bad_msie and req.is_https(): + req.headers_out['Cache-Control'] = 'private, max-age=0, must-revalidate' + else: + req.headers_out['Cache-Control'] = 'private, no-cache, no-store, max-age=0, must-revalidate' + req.headers_out['Pragma'] = 'no-cache' + req.headers_out['Vary'] = '*' + elif not (bad_msie and req.is_https()): req.headers_out['Cache-Control'] = 'public, max-age=3600' req.headers_out['Vary'] = 'Cookie, ETag, Cache-Control' try: if req.header_only and not RE_SPECIAL_URI.match(req.uri): return root._traverse(req, path, True, guest_p) else: ## bibdocfile have a special treatment for HEAD return root._traverse(req, path, False, guest_p) except TraversalError: raise apache.SERVER_RETURN, apache.HTTP_NOT_FOUND except apache.SERVER_RETURN: ## This is one of mod_python way of communicating raise except IOError, exc: if 'Write failed, client closed connection' not in "%s" % exc: ## Workaround for considering as false positive exceptions ## rised by mod_python when the user close the connection ## or in some other rare and not well identified cases. register_exception(req=req, alert_admin=True) raise except Exception: register_exception(req=req, alert_admin=True) raise # Serve an error by default. raise apache.SERVER_RETURN, apache.HTTP_NOT_FOUND finally: if hasattr(req, '_session'): ## The session handler saves for caching a request_wrapper ## in req. ## This saves req as an attribute, creating a circular ## reference. ## Since we have have reached the end of the request handler ## we can safely drop the request_wrapper so to avoid ## memory leaks. delattr(req, '_session') if hasattr(req, '_user_info'): ## For the same reason we can delete the user_info. delattr(req, '_user_info') ## as suggested in ## del gc.garbage[:] return _profiler def wash_urlargd(form, content): """ Wash the complete form based on the specification in content. Content is a dictionary containing the field names as a key, and a tuple (type, default) as value. 'type' can be list, str, invenio.webinterface_handler_wsgi_utils.StringField, int, tuple, or invenio.webinterface_handler_wsgi_utils.Field (for file uploads). The specification automatically includes the 'ln' field, which is common to all queries. Arguments that are not defined in 'content' are discarded. 
Note that in case {list,tuple} were asked for, we assume that {list,tuple} of strings is to be returned. Therefore beware when you want to use wash_urlargd() for multiple file upload forms. @Return: argd dictionary that can be used for passing function parameters by keywords. """ result = {} content['ln'] = (str, '') for k, (dst_type, default) in content.items(): try: value = form[k] except KeyError: result[k] = default continue src_type = type(value) # First, handle the case where we want all the results. In # this case, we need to ensure all the elements are strings, # and not Field instances. if src_type in (list, tuple): if dst_type is list: result[k] = [str(x) for x in value] continue if dst_type is tuple: result[k] = tuple([str(x) for x in value]) continue # in all the other cases, we are only interested in the # first value. value = value[0] # Maybe we already have what is expected? Then don't change # anything. if isinstance(value, dst_type): if isinstance(value, StringField): result[k] = str(value) else: result[k] = value continue # Since we got here, 'value' is sure to be a single symbol, # not a list kind of structure anymore. if dst_type in (str, int): try: result[k] = dst_type(value) except: result[k] = default elif dst_type is tuple: result[k] = (str(value), ) elif dst_type is list: result[k] = [str(value)] else: raise ValueError('cannot cast form value %s of type %r into type %r' % (value, src_type, dst_type)) result['ln'] = wash_language(result['ln']) return result diff --git a/modules/webstyle/lib/webinterface_handler_wsgi_utils.py b/modules/webstyle/lib/webinterface_handler_wsgi_utils.py index c2277aaee..44cfcefb9 100644 --- a/modules/webstyle/lib/webinterface_handler_wsgi_utils.py +++ b/modules/webstyle/lib/webinterface_handler_wsgi_utils.py @@ -1,885 +1,889 @@ # -*- coding: utf-8 -*- ## This file is part of Invenio. ## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """ mod_python->WSGI Framework utilities This code has been taken from mod_python original source code and rearranged here to easying the migration from mod_python to wsgi. The code taken from mod_python is under the following License. """ # Copyright 2004 Apache Software Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you # may not use this file except in compliance with the License. You # may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. # # Originally developed by Gregory Trubetskoy. 
# # $Id: apache.py 468216 2006-10-27 00:54:12Z grahamd $ try: import threading except: import dummy_threading as threading from wsgiref.headers import Headers import time import re import os import cgi import cStringIO import tempfile from types import TypeType, ClassType, BuiltinFunctionType, MethodType, ListType from invenio.config import CFG_TMPDIR from invenio.webinterface_handler_config import \ SERVER_RETURN, \ HTTP_LENGTH_REQUIRED, \ HTTP_BAD_REQUEST, \ InvenioWebInterfaceWSGIContentLenghtError, \ InvenioWebInterfaceWSGIContentTypeError, \ InvenioWebInterfaceWSGIContentMD5Error # Cache for values of PythonPath that have been seen already. _path_cache = {} _path_cache_lock = threading.Lock() class table(Headers): add = Headers.add_header iteritems = Headers.items def __getitem__(self, name): ret = Headers.__getitem__(self, name) if ret is None: return '' else: return str(ret) ## Some functions made public exists_config_define = lambda dummy: True ## Some constants class metaCookie(type): def __new__(cls, clsname, bases, clsdict): _valid_attr = ( "version", "path", "domain", "secure", "comment", "expires", "max_age", # RFC 2965 "commentURL", "discard", "port", # Microsoft Extension "httponly" ) # _valid_attr + property values # (note __slots__ is a new Python feature, it # prevents any other attribute from being set) __slots__ = _valid_attr + ("name", "value", "_value", "_expires", "__data__") clsdict["_valid_attr"] = _valid_attr clsdict["__slots__"] = __slots__ def set_expires(self, value): if type(value) == type(""): # if it's a string, it should be # valid format as per Netscape spec try: t = time.strptime(value, "%a, %d-%b-%Y %H:%M:%S GMT") except ValueError: raise ValueError, "Invalid expires time: %s" % value t = time.mktime(t) else: # otherwise assume it's a number # representing time as from time.time() t = value value = time.strftime("%a, %d-%b-%Y %H:%M:%S GMT", time.gmtime(t)) self._expires = "%s" % value def get_expires(self): return self._expires clsdict["expires"] = property(fget=get_expires, fset=set_expires) return type.__new__(cls, clsname, bases, clsdict) class Cookie(object): """ This class implements the basic Cookie functionality. Note that unlike the Python Standard Library Cookie class, this class represents a single cookie (not a list of Morsels). """ __metaclass__ = metaCookie DOWNGRADE = 0 IGNORE = 1 EXCEPTION = 3 def parse(Class, str, **kw): """ Parse a Cookie or Set-Cookie header value, and return a dict of Cookies. Note: the string should NOT include the header name, only the value. """ dict = _parse_cookie(str, Class, **kw) return dict parse = classmethod(parse) def __init__(self, name, value, **kw): """ This constructor takes at least a name and value as the arguments, as well as optionally any of allowed cookie attributes as defined in the existing cookie standards. """ self.name, self.value = name, value for k in kw: setattr(self, k.lower(), kw[k]) # subclasses can use this for internal stuff self.__data__ = {} def __str__(self): """ Provides the string representation of the Cookie suitable for sending to the browser. Note that the actual header name will not be part of the string. This method makes no attempt to automatically double-quote strings that contain special characters, even though the RFC's dictate this. This is because doing so seems to confuse most browsers out there. """ result = ["%s=%s" % (self.name, self.value)] # pylint: disable=E1101 # The attribute _valid_attr is provided by the metaclass 'metaCookie'. 
for name in self._valid_attr: if hasattr(self, name): if name in ("secure", "discard", "httponly"): result.append(name) else: result.append("%s=%s" % (name, getattr(self, name))) # pylint: enable=E1101 return "; ".join(result) def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, str(self)) # This is a simplified and in some places corrected # (at least I think it is) pattern from standard lib Cookie.py _cookiePattern = re.compile( r"(?x)" # Verbose pattern r"[,\ ]*" # space/comma (RFC2616 4.2) before attr-val is eaten r"(?P" # Start of group 'key' r"[^;\ =]+" # anything but ';', ' ' or '=' r")" # End of group 'key' r"\ *(=\ *)?" # a space, then may be "=", more space r"(?P" # Start of group 'val' r'"(?:[^\\"]|\\.)*"' # a doublequoted string r"|" # or r"[^;]*" # any word or empty string r")" # End of group 'val' r"\s*;?" # probably ending in a semi-colon ) def _parse_cookie(str, Class, names=None): # XXX problem is we should allow duplicate # strings result = {} matchIter = _cookiePattern.finditer(str) for match in matchIter: key, val = match.group("key"), match.group("val") # We just ditch the cookies names which start with a dollar sign since # those are in fact RFC2965 cookies attributes. See bug [#MODPYTHON-3]. if key[0] != '$' and names is None or key in names: result[key] = Class(key, val) return result +_RE_BAD_MSIE = re.compile("MSIE\s+(\d+\.\d+)") def add_cookie(req, cookie, value="", **kw): """ Sets a cookie in outgoing headers and adds a cache directive so that caches don't cache the cookie. """ # is this a cookie? if not isinstance(cookie, Cookie): # make a cookie cookie = Cookie(cookie, value, **kw) if not req.headers_out.has_key("Set-Cookie"): - req.headers_out.add("Cache-Control", 'no-cache="set-cookie"') + g = _RE_BAD_MSIE.search(req.headers_in.get('User-Agent', "MSIE 6.0")) + bad_msie = g and float(g.group(1)) < 9.0 + if not (bad_msie and req.is_https()): + req.headers_out.add("Cache-Control", 'no-cache="set-cookie"') req.headers_out.add("Set-Cookie", str(cookie)) def get_cookies(req, Class=Cookie, **kw): """ A shorthand for retrieveing and parsing cookies given a Cookie class. The class must be one of the classes from this module. """ if not req.headers_in.has_key("cookie"): return {} cookies = req.headers_in["cookie"] if type(cookies) == type([]): cookies = '; '.join(cookies) return Class.parse(cookies, **kw) def get_cookie(req, name, Class=Cookie, **kw): cookies = get_cookies(req, Class, names=[name], **kw) if cookies.has_key(name): return cookies[name] parse_qs = cgi.parse_qs parse_qsl = cgi.parse_qsl # Maximum line length for reading. (64KB) # Fixes memory error when upload large files such as 700+MB ISOs. readBlockSize = 65368 """ The classes below are a (almost) a drop-in replacement for the standard cgi.py FieldStorage class. They should have pretty much the same functionality. These classes differ in that unlike cgi.FieldStorage, they are not recursive. The class FieldStorage contains a list of instances of Field class. Field class is incapable of storing anything in it. These objects should be considerably faster than the ones in cgi.py because they do not expect CGI environment, and are optimized specifically for Apache and mod_python. """ class Field: def __init__(self, name, *args, **kwargs): self.name = name # Some third party packages such as Trac create # instances of the Field object and insert it # directly into the list of form fields. 
To # maintain backward compatibility check for # where more than just a field name is supplied # and invoke an additional initialisation step # to process the arguments. Ideally, third party # code should use the add_field() method of the # form, but if they need to maintain backward # compatibility with older versions of mod_python # they will not have a choice but to use old # way of doing things and thus we need this code # for the forseeable future to cope with that. if args or kwargs: self.__bc_init__(*args, **kwargs) def __bc_init__(self, file, ctype, type_options, disp, disp_options, headers = {}): self.file = file self.type = ctype self.type_options = type_options self.disposition = disp self.disposition_options = disp_options if disp_options.has_key("filename"): self.filename = disp_options["filename"] else: self.filename = None self.headers = headers def __repr__(self): """Return printable representation.""" return "Field(%s, %s)" % (`self.name`, `self.value`) def __getattr__(self, name): if name != 'value': raise AttributeError, name if self.file: self.file.seek(0) value = self.file.read() self.file.seek(0) else: value = None return value def __del__(self): self.file.close() class StringField(str): """ This class is basically a string with added attributes for compatibility with std lib cgi.py. Basically, this works the opposite of Field, as it stores its data in a string, but creates a file on demand. Field creates a value on demand and stores data in a file. """ filename = None headers = {} ctype = "text/plain" type_options = {} disposition = None disp_options = None # I wanted __init__(name, value) but that does not work (apparently, you # cannot subclass str with a constructor that takes >1 argument) def __init__(self, value): '''Create StringField instance. You'll have to set name yourself.''' str.__init__(self, value) self.value = value def __getattr__(self, name): if name != 'file': raise AttributeError, name self.file = cStringIO.StringIO(self.value) return self.file def __repr__(self): """Return printable representation (to pass unit tests).""" return "Field(%s, %s)" % (`self.name`, `self.value`) class FieldList(list): def __init__(self): self.__table = None list.__init__(self) def table(self): if self.__table is None: self.__table = {} for item in self: if item.name in self.__table: self.__table[item.name].append(item) else: self.__table[item.name] = [item] return self.__table def __delitem__(self, *args): self.__table = None return list.__delitem__(self, *args) def __delslice__(self, *args): self.__table = None return list.__delslice__(self, *args) def __iadd__(self, *args): self.__table = None return list.__iadd__(self, *args) def __imul__(self, *args): self.__table = None return list.__imul__(self, *args) def __setitem__(self, *args): self.__table = None return list.__setitem__(self, *args) def __setslice__(self, *args): self.__table = None return list.__setslice__(self, *args) def append(self, *args): self.__table = None return list.append(self, *args) def extend(self, *args): self.__table = None return list.extend(self, *args) def insert(self, *args): self.__table = None return list.insert(self, *args) def pop(self, *args): self.__table = None return list.pop(self, *args) def remove(self, *args): self.__table = None return list.remove(self, *args) class FieldStorage: def __init__(self, req, keep_blank_values=0, strict_parsing=0, file_callback=None, field_callback=None): # # Whenever readline is called ALWAYS use the max size EVEN when # not expecting a long line. 
- this helps protect against # malformed content from exhausting memory. # self.list = FieldList() self.wsgi_input_consumed = True # always process GET-style parameters if req.args: pairs = parse_qsl(req.args, keep_blank_values) for pair in pairs: self.add_field(pair[0], pair[1]) if req.method != "POST": return try: clen = int(req.headers_in["content-length"]) except (KeyError, ValueError): # absent content-length is not acceptable raise SERVER_RETURN, HTTP_LENGTH_REQUIRED self.clen = clen self.count = 0 if not req.headers_in.has_key("content-type"): ctype = "application/x-www-form-urlencoded" else: ctype = req.headers_in["content-type"] if ctype.startswith("application/x-www-form-urlencoded"): pairs = parse_qsl(req.read(clen), keep_blank_values) for pair in pairs: self.add_field(pair[0], pair[1]) return if not ctype.startswith("multipart/"): # we don't understand this content-type self.wsgi_input_consumed = False return # figure out boundary try: i = ctype.lower().rindex("boundary=") boundary = ctype[i+9:] if len(boundary) >= 2 and boundary[0] == boundary[-1] == '"': boundary = boundary[1:-1] boundary = re.compile("--" + re.escape(boundary) + "(--)?\r?\n") except ValueError: raise SERVER_RETURN, HTTP_BAD_REQUEST # read until boundary self.read_to_boundary(req, boundary, None) end_of_stream = False while not end_of_stream and not self.eof(): # jjj JIM BEGIN WHILE ## parse headers ctype, type_options = "text/plain", {} disp, disp_options = None, {} headers = table([]) line = req.readline(readBlockSize) self.count += len(line) if self.eof(): end_of_stream = True match = boundary.match(line) if (not line) or match: # we stop if we reached the end of the stream or a stop # boundary (which means '--' after the boundary) we # continue to the next part if we reached a simple # boundary in either case this would mean the entity is # malformed, but we're tolerating it anyway. end_of_stream = (not line) or (match.group(1) is not None) continue skip_this_part = False while line not in ('\r','\r\n'): nextline = req.readline(readBlockSize) self.count += len(nextline) if self.eof(): end_of_stream = True while nextline and nextline[0] in [ ' ', '\t']: line = line + nextline nextline = req.readline(readBlockSize) self.count += len(nextline) if self.eof(): end_of_stream = True # we read the headers until we reach an empty line # NOTE : a single \n would mean the entity is malformed, but # we're tolerating it anyway h, v = line.split(":", 1) headers.add(h, v) h = h.lower() if h == "content-disposition": disp, disp_options = parse_header(v) elif h == "content-type": ctype, type_options = parse_header(v) # # NOTE: FIX up binary rubbish sent as content type # from Microsoft IE 6.0 when sending a file which # does not have a suffix. # if ctype.find('/') == -1: ctype = 'application/octet-stream' line = nextline match = boundary.match(line) if (not line) or match: # we stop if we reached the end of the stream or a # stop boundary (which means '--' after the # boundary) we continue to the next part if we # reached a simple boundary in either case this # would mean the entity is malformed, but we're # tolerating it anyway. skip_this_part = True end_of_stream = (not line) or (match.group(1) is not None) break if skip_this_part: continue if disp_options.has_key("name"): name = disp_options["name"] else: name = None # create a file object # is this a file? 
if disp_options.has_key("filename"): if file_callback and callable(file_callback): file = file_callback(disp_options["filename"]) else: file = tempfile.TemporaryFile("w+b") else: if field_callback and callable(field_callback): file = field_callback() else: file = cStringIO.StringIO() # read it in self.read_to_boundary(req, boundary, file) if self.eof(): end_of_stream = True file.seek(0) # make a Field if disp_options.has_key("filename"): field = Field(name) field.filename = disp_options["filename"] else: field = StringField(file.read()) field.name = name field.file = file field.type = ctype field.type_options = type_options field.disposition = disp field.disposition_options = disp_options field.headers = headers self.list.append(field) def add_field(self, key, value): """Insert a field as key/value pair""" item = StringField(value) item.name = key self.list.append(item) def __setitem__(self, key, value): table = self.list.table() if table.has_key(key): items = table[key] for item in items: self.list.remove(item) item = StringField(value) item.name = key self.list.append(item) def read_to_boundary(self, req, boundary, file): previous_delimiter = None while not self.eof(): line = req.readline(readBlockSize) self.count += len(line) if not line: # end of stream if file is not None and previous_delimiter is not None: file.write(previous_delimiter) return True match = boundary.match(line) if match: # the line is the boundary, so we bail out # if the two last chars are '--' it is the end of the entity return match.group(1) is not None if line[-2:] == '\r\n': # the line ends with a \r\n, which COULD be part # of the next boundary. We write the previous line delimiter # then we write the line without \r\n and save it for the next # iteration if it was not part of the boundary if file is not None: if previous_delimiter is not None: file.write(previous_delimiter) file.write(line[:-2]) previous_delimiter = '\r\n' elif line[-1:] == '\r': # the line ends with \r, which is only possible if # readBlockSize bytes have been read. 
In that case the # \r COULD be part of the next boundary, so we save it # for the next iteration assert len(line) == readBlockSize if file is not None: if previous_delimiter is not None: file.write(previous_delimiter) file.write(line[:-1]) previous_delimiter = '\r' elif line == '\n' and previous_delimiter == '\r': # the line us a single \n and we were in the middle of a \r\n, # so we complete the delimiter previous_delimiter = '\r\n' else: if file is not None: if previous_delimiter is not None: file.write(previous_delimiter) file.write(line) previous_delimiter = None def eof(self): return self.clen <= self.count def __getitem__(self, key): """Dictionary style indexing.""" found = self.list.table()[key] if len(found) == 1: return found[0] else: return found def get(self, key, default): try: return self.__getitem__(key) except (TypeError, KeyError): return default def keys(self): """Dictionary style keys() method.""" return self.list.table().keys() def __iter__(self): return iter(self.keys()) def __repr__(self): return repr(self.list.table()) def has_key(self, key): """Dictionary style has_key() method.""" return (key in self.list.table()) __contains__ = has_key def __len__(self): """Dictionary style len(x) support.""" return len(self.list.table()) def getfirst(self, key, default=None): """ return the first value received """ try: return self.list.table()[key][0] except KeyError: return default def getlist(self, key): """ return a list of received values """ try: return self.list.table()[key] except KeyError: return [] def items(self): """Dictionary-style items(), except that items are returned in the same order as they were supplied in the form.""" return [(item.name, item) for item in self.list] def __delitem__(self, key): table = self.list.table() values = table[key] for value in values: self.list.remove(value) def clear(self): self.list = FieldList() def parse_header(line): """Parse a Content-type like header. Return the main content-type and a dictionary of options. """ plist = map(lambda a: a.strip(), line.split(';')) key = plist[0].lower() del plist[0] pdict = {} for p in plist: i = p.find('=') if i >= 0: name = p[:i].strip().lower() value = p[i+1:].strip() if len(value) >= 2 and value[0] == value[-1] == '"': value = value[1:-1] pdict[name] = value return key, pdict def apply_fs_data(object, fs, **args): """ Apply FieldStorage data to an object - the object must be callable. Examine the args, and match then with fs data, then call the object, return the result. """ # we need to weed out unexpected keyword arguments # and for that we need to get a list of them. 
There # are a few options for callable objects here: fc = None expected = [] if hasattr(object, "func_code"): # function fc = object.func_code expected = fc.co_varnames[0:fc.co_argcount] elif hasattr(object, 'im_func'): # method fc = object.im_func.func_code expected = fc.co_varnames[1:fc.co_argcount] elif type(object) in (TypeType,ClassType): # class fc = object.__init__.im_func.func_code expected = fc.co_varnames[1:fc.co_argcount] elif type(object) is BuiltinFunctionType: # builtin fc = None expected = [] elif hasattr(object, '__call__'): # callable object if type(object.__call__) is MethodType: fc = object.__call__.im_func.func_code expected = fc.co_varnames[1:fc.co_argcount] else: # abuse of objects to create hierarchy return apply_fs_data(object.__call__, fs, **args) # add form data to args for field in fs.list: if field.filename: val = field else: val = field.value args.setdefault(field.name, []).append(val) # replace lists with single values for arg in args: if ((type(args[arg]) is ListType) and (len(args[arg]) == 1)): args[arg] = args[arg][0] # remove unexpected args unless co_flags & 0x08, # meaning function accepts **kw syntax if fc is None: args = {} elif not (fc.co_flags & 0x08): for name in args.keys(): if name not in expected: del args[name] return object(**args) RE_CDISPOSITION_FILENAME = re.compile(r'filename=(?P[\w\.]*)') def handle_file_post(req, allowed_mimetypes=None): """ Handle the POST of a file. @return: the a tuple with th full path to the file saved on disk, and it's mimetype as provided by the request. @rtype: (string, string) """ from invenio.bibdocfile import decompose_file, md5 ## We retrieve the length clen = req.headers_in["Content-Length"] if clen is None: raise InvenioWebInterfaceWSGIContentLenghtError("Content-Length header is missing") try: clen = int(clen) assert (clen > 1) except (ValueError, AssertionError): raise InvenioWebInterfaceWSGIContentLenghtError("Content-Length header should contain a positive integer") ## Let's take the content type ctype = req.headers_in["Content-Type"] if allowed_mimetypes and ctype not in allowed_mimetypes: raise InvenioWebInterfaceWSGIContentTypeError("Content-Type not in allowed list of content types: %s" % allowed_mimetypes) ## Let's optionally accept a suggested filename suffix = prefix = '' g = RE_CDISPOSITION_FILENAME.search(req.headers_in.get("Content-Disposition", "")) if g: dummy, prefix, suffix = decompose_file(g.group("filename")) ## Let's optionally accept an MD5 hash (and use it later for comparison) cmd5 = req.headers_in["Content-MD5"] if cmd5: the_md5 = md5() ## Ok. We can initialize the file fd, path = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=CFG_TMPDIR) the_file = os.fdopen(fd, 'w') ## Let's read the file while True: chunk = req.read(max(10240, clen)) if len(chunk) < clen: ## We expected to read at least clen (which is different than 0) ## but chunk was shorter! Gosh! Error! Panic! the_file.close() os.close(fd) os.remove(path) raise InvenioWebInterfaceWSGIContentLenghtError("File shorter than what specified in Content-Length") if cmd5: ## MD5 was in the header let's compute it the_md5.update(chunk) ## And let's definitively write the content to disk :-) the_file.write(chunk) clen -= len(chunk) if clen == 0: ## That's it. Everything was read. 
break if cmd5 and the_md5.hexdigest().lower() != cmd5.strip().lower(): ## Let's check the MD5 the_file.close() os.close(fd) os.remove(path) raise InvenioWebInterfaceWSGIContentMD5Error("MD5 checksum does not match") ## Let's clean everything up the_file.close() return (path, ctype)
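
Reviewer note (not part of the patch): the heart of this change is the _RE_BAD_MSIE check that relaxes the Cache-Control/Pragma headers for MSIE versions below 9.0 over HTTPS, apparently to avoid the old IE behaviour of refusing to save files served over SSL when no-cache/no-store is present. The standalone sketch below mirrors the header-selection logic of _handler() outside the request object so it can be exercised directly; the helper names is_bad_msie and cache_headers_for are illustrative only and do not exist in Invenio.

import re

# Same pattern the patch adds as _RE_BAD_MSIE in both modules.
_RE_BAD_MSIE = re.compile(r"MSIE\s+(\d+\.\d+)")

def is_bad_msie(user_agent):
    """Return True for MSIE versions below 9.0 (hypothetical helper)."""
    # A missing User-Agent is treated as old MSIE, as in the patch.
    g = _RE_BAD_MSIE.search(user_agent or "MSIE 6.0")
    return bool(g) and float(g.group(1)) < 9.0

def cache_headers_for(user_agent, is_https, personalized):
    """Reproduce the header selection done in _handler(), for illustration."""
    bad_msie = is_bad_msie(user_agent)
    headers = {}
    if personalized:
        if bad_msie and is_https:
            # Old IE over SSL: keep only the milder directives, since
            # no-cache/no-store would break file downloads there.
            headers['Cache-Control'] = 'private, max-age=0, must-revalidate'
        else:
            headers['Cache-Control'] = ('private, no-cache, no-store, '
                                        'max-age=0, must-revalidate')
            headers['Pragma'] = 'no-cache'
            headers['Vary'] = '*'
    elif not (bad_msie and is_https):
        headers['Cache-Control'] = 'public, max-age=3600'
        headers['Vary'] = 'Cookie, ETag, Cache-Control'
    return headers

if __name__ == '__main__':
    ie8 = 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)'
    ff = 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0'
    print(cache_headers_for(ie8, True, True))  # relaxed headers for IE 8 on HTTPS
    print(cache_headers_for(ff, True, True))   # strict no-cache headers otherwise

Under these assumptions, an IE 8 user agent on HTTPS receives only 'private, max-age=0, must-revalidate', while every other client keeps the strict no-cache headers; the add_cookie() hunk applies the same criterion to skip the 'no-cache="set-cookie"' directive.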