diff --git a/modules/bibrank/lib/bibrank_regression_tests.py b/modules/bibrank/lib/bibrank_regression_tests.py
index 3818569f6..506c17c72 100644
--- a/modules/bibrank/lib/bibrank_regression_tests.py
+++ b/modules/bibrank/lib/bibrank_regression_tests.py
@@ -1,168 +1,169 @@
 # -*- coding: utf-8 -*-
 ##
 ## $Id$
 ##
 ## This file is part of CDS Invenio.
-## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
+## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2012 CERN.
 ##
 ## CDS Invenio is free software; you can redistribute it and/or
 ## modify it under the terms of the GNU General Public License as
 ## published by the Free Software Foundation; either version 2 of the
 ## License, or (at your option) any later version.
 ##
 ## CDS Invenio is distributed in the hope that it will be useful, but
 ## WITHOUT ANY WARRANTY; without even the implied warranty of
 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 ## General Public License for more details.
 ##
 ## You should have received a copy of the GNU General Public License
 ## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
 ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 
 """BibRank Regression Test Suite."""
 
 __revision__ = "$Id$"
 
 import unittest
 
 from invenio.config import CFG_SITE_URL
 from invenio.dbquery import run_sql
 from invenio.testutils import make_test_suite, run_test_suite, \
                               test_web_page_content, merge_error_messages
 
 class BibRankWebPagesAvailabilityTest(unittest.TestCase):
     """Check BibRank web pages whether they are up or not."""
 
     def test_rank_by_word_similarity_pages_availability(self):
         """bibrank - availability of ranking search results pages"""
 
         baseurl = CFG_SITE_URL + '/search'
 
         _exports = ['?p=ellis&r=wrd']
 
         error_messages = []
         for url in [baseurl + page for page in _exports]:
             error_messages.extend(test_web_page_content(url))
         if error_messages:
             self.fail(merge_error_messages(error_messages))
         return
 
     def test_similar_records_pages_availability(self):
         """bibrank - availability of similar records results pages"""
 
         baseurl = CFG_SITE_URL + '/search'
 
         _exports = ['?p=recid%3A18&rm=wrd']
 
         error_messages = []
         for url in [baseurl + page for page in _exports]:
             error_messages.extend(test_web_page_content(url))
         if error_messages:
             self.fail(merge_error_messages(error_messages))
         return
 
 class BibRankIntlMethodNames(unittest.TestCase):
     """Check BibRank I18N ranking method names."""
 
     def test_i18n_ranking_method_names(self):
         """bibrank - I18N ranking method names"""
         self.assertEqual([],
                          test_web_page_content(CFG_SITE_URL + '/collection/Articles%20%26%20Preprints?as=1',
                                                expected_text="times cited"))
         self.assertEqual([],
                          test_web_page_content(CFG_SITE_URL + '/collection/Articles%20%26%20Preprints?as=1',
                                                expected_text="journal impact factor"))
 
 class BibRankWordSimilarityRankingTest(unittest.TestCase):
     """Check BibRank word similarity ranking tools."""
 
     def test_search_results_ranked_by_similarity(self):
         """bibrank - search results ranked by word similarity"""
         self.assertEqual([],
                          test_web_page_content(CFG_SITE_URL + '/search?p=ellis&rm=wrd&of=id',
                                                expected_text="[8, 10, 11, 12, 47, 17, 13, 16, 18, 9, 14, 15]"))
 
     def test_similar_records_link(self):
         """bibrank - 'Similar records' link"""
         self.assertEqual([],
                          test_web_page_content(CFG_SITE_URL + '/search?p=recid%3A77&rm=wrd&of=id',
                                                expected_text="[84, 95, 85, 77]"))
 
 class BibRankCitationRankingTest(unittest.TestCase):
     """Check BibRank citation ranking tools."""
 
     def test_search_results_ranked_by_citations(self):
         """bibrank - search results ranked by number of citations"""
         self.assertEqual([],
                          test_web_page_content(CFG_SITE_URL + '/search?cc=Articles+%26+Preprints&p=Klebanov&rm=citation&of=id',
                                                expected_text="[85, 77, 84]"))
 
     def test_search_results_ranked_by_citations_verbose(self):
         """bibrank - search results ranked by number of citations, verbose output"""
         self.assertEqual([],
                          test_web_page_content(CFG_SITE_URL + '/search?cc=Articles+%26+Preprints&p=Klebanov&rm=citation&verbose=2',
+                                               username="admin",
                                                expected_text="find_citations retlist [[85, 0], [77, 2], [84, 3]]"))
 
 class BibRankExtCitesTest(unittest.TestCase):
     """Check BibRank citation ranking tools with respect to the external cites."""
 
     def _detect_extcite_info(self, extcitepubinfo):
         """
         Helper function to return the list of recIDs citing the given
         extcitepubinfo.  Could be moved to the business logic if it is
         interesting for other callers.
         """
         res = run_sql("""SELECT id_bibrec FROM rnkCITATIONDATAEXT
                           WHERE extcitepubinfo=%s""",
                       (extcitepubinfo,))
         return [int(x[0]) for x in res]
 
     def test_extcite_via_report_number(self):
         """bibrank - external cites, via report number"""
         # The external paper hep-th/0112258 is cited by 9 demo
         # records: you can find them via 999:"hep-th/0112258", and we
         # could eventually automate this query, but it is probably
         # safer to leave it manual in case the query fails for some
         # reason.
         test_case_repno = "hep-th/0112258"
         test_case_repno_cited_by = [77, 78, 81, 82, 85, 86, 88, 90, 91]
         self.assertEqual(self._detect_extcite_info(test_case_repno),
                          test_case_repno_cited_by)
 
     def test_extcite_via_publication_reference(self):
         """bibrank - external cites, via publication reference"""
         # The external paper "J. Math. Phys. 4 (1963) 915" does not
         # have any report number, and is cited by 1 demo record.
         test_case_pubinfo = "J. Math. Phys. 4 (1963) 915"
         test_case_pubinfo_cited_by = [90]
         self.assertEqual(self._detect_extcite_info(test_case_pubinfo),
                          test_case_pubinfo_cited_by)
 
     def test_intcite_via_report_number(self):
         """bibrank - external cites, no internal papers via report number"""
         # The internal paper hep-th/9809057 is cited by 2 demo
         # records, but it also exists as a demo record, so it should
         # not be found in the extcite table.
         test_case_repno = "hep-th/9809057"
         test_case_repno_cited_by = []
         self.assertEqual(self._detect_extcite_info(test_case_repno),
                          test_case_repno_cited_by)
 
     def test_intcite_via_publication_reference(self):
         """bibrank - external cites, no internal papers via publication reference"""
         # The internal paper #18 has only pubinfo, no repno, and is
         # cited by internal paper #96 via its pubinfo, so it should
         # not be present in the extcite list:
         test_case_repno = "Phys. Lett., B 151 (1985) 357"
         test_case_repno_cited_by = []
         self.assertEqual(self._detect_extcite_info(test_case_repno),
                          test_case_repno_cited_by)
 
 TEST_SUITE = make_test_suite(BibRankWebPagesAvailabilityTest,
                              BibRankIntlMethodNames,
                              BibRankWordSimilarityRankingTest,
                              BibRankCitationRankingTest,
                              BibRankExtCitesTest)
 
 if __name__ == "__main__":
     run_test_suite(TEST_SUITE, warn_user=True)
diff --git a/modules/websearch/lib/websearch_external_collections.py b/modules/websearch/lib/websearch_external_collections.py
index 7735a726a..e7a0f1158 100644
--- a/modules/websearch/lib/websearch_external_collections.py
+++ b/modules/websearch/lib/websearch_external_collections.py
@@ -1,304 +1,304 @@
 # -*- coding: utf-8 -*-
 ## $Id$
 
 ## This file is part of CDS Invenio.
-## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
+## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2012 CERN.
 ##
 ## CDS Invenio is free software; you can redistribute it and/or
 ## modify it under the terms of the GNU General Public License as
 ## published by the Free Software Foundation; either version 2 of the
 ## License, or (at your option) any later version.
 ##
 ## CDS Invenio is distributed in the hope that it will be useful, but
 ## WITHOUT ANY WARRANTY; without even the implied warranty of
 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 ## General Public License for more details.
 ##
 ## You should have received a copy of the GNU General Public License
 ## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
 ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 
 """External collection 'core' file.
     Perform search, database access."""
 
 __revision__ = "$Id$"
 
 import cgi
 from copy import copy
 from sets import Set
 
 from invenio.config import CFG_SITE_LANG
 from invenio.dbquery import run_sql, OperationalError
 from invenio.messages import gettext_set_language
 
 from invenio.websearch_external_collections_config import CFG_EXTERNAL_COLLECTION_TIMEOUT
 from invenio.websearch_external_collections_searcher import external_collections_dictionary
 from invenio.websearch_external_collections_getter import HTTPAsyncPageGetter, async_download
 from invenio.websearch_external_collections_templates import print_results, print_timeout
 from invenio.websearch_external_collections_utils import get_collection_id, get_collection_descendants, \
     warning, get_verbose_print
 
 import invenio.template
 template = invenio.template.load('websearch_external_collections')
 
 #dico_collection_external_searches = {}
 #dico_collection_seealso = {}
 
 def print_external_results_overview(req, current_collection, pattern_list, field,
         external_collection, verbosity_level=0, lang=CFG_SITE_LANG):
     """Print the external collection overview box. Return the selected external collections and parsed query"""
     from invenio.search_engine import create_basic_search_units
     assert req
     vprint = get_verbose_print(req, 'External collection (print_external_results_overview): ', verbosity_level)
 
     pattern = bind_patterns(pattern_list)
-    vprint(3, 'pattern = ' + pattern)
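+    # escape user-supplied values before echoing them into the verbose HTML output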
+    vprint(3, 'pattern = %s' % cgi.escape(pattern))
 
     if not pattern:
         return (None, None, None, None)
 
     basic_search_units = create_basic_search_units(None, pattern, field)
-    vprint(3, 'basic_search_units = ' + str(basic_search_units))
+    vprint(3, 'basic_search_units = %s' % cgi.escape(repr(basic_search_units)))
 
     (search_engines, seealso_engines) = select_external_engines(current_collection, external_collection)
     vprint(3, 'search_engines = ' + str(search_engines))
     vprint(3, 'seealso_engines = ' + str(seealso_engines))
 
     search_engines_list = external_collection_sort_engine_by_name(search_engines)
     vprint(3, 'search_engines_list (sorted) : ' + str(search_engines_list))
     html = template.external_collection_overview(lang, search_engines_list)
     req.write(html)
 
     return (search_engines, seealso_engines, pattern, basic_search_units)
 
 def perform_external_collection_search(req, current_collection, pattern_list, field,
         external_collection, verbosity_level=0, lang=CFG_SITE_LANG, selected_external_collections_infos=None):
     """Search external collection and print the seealso box."""
 
     vprint = get_verbose_print(req, 'External collection: ', verbosity_level)
 
     if selected_external_collections_infos:
         (search_engines, seealso_engines, pattern, basic_search_units) = selected_external_collections_infos
     else:
         (search_engines, seealso_engines, pattern, basic_search_units) = print_external_results_overview(req,
             current_collection, pattern_list, field, external_collection, verbosity_level, lang)
 
     if not pattern:
         return
 
     do_external_search(req, lang, vprint, basic_search_units, search_engines)
     create_seealso_box(req, lang, vprint, basic_search_units, seealso_engines, pattern)
     vprint(3, 'end')
 
 def bind_patterns(pattern_list):
     """Combine a list of patterns in an unique pattern.
     pattern_list[0] should be the standart search pattern,
     pattern_list[1:] are advanced search patterns."""
     if pattern_list[0]:
         return pattern_list[0]
 
     pattern = ""
     for pattern_part in pattern_list[1:]:
         if pattern_part:
             pattern += " " + pattern_part
 
     return pattern.strip()
 
 # See also box
 def create_seealso_box(req, lang, vprint, basic_search_units=None, seealso_engines=None, query=''):
     "Create the box that proposes links to other useful search engines like Google."
 
     vprint(3, 'Create seealso box')
     seealso_engines_list = external_collection_sort_engine_by_name(seealso_engines)
     vprint(3, 'seealso_engines_list = ' + str(seealso_engines_list))
     links = build_seealso_links(basic_search_units, seealso_engines_list, lang, query)
     html = template.external_collection_seealso_box(lang, links)
     req.write(html)
 
 def build_seealso_links(basic_search_units, seealso_engines, lang, query):
     """Build the links for the see also box."""
     _ = gettext_set_language(lang)
 
     links = []
     for engine in seealso_engines:
         url = engine.build_search_url(basic_search_units, lang)
         if url:
             links.append('<a class="google" href="%(url)s">%(query)s %(text_in)s %(name)s</a>' % \
                 {'url': cgi.escape(url),
                  'query': cgi.escape(query),
                  'text_in': _('in'),
                  'name': _(engine.name)})
     return links
 
 # Selection
 def select_external_engines(collection_name, selected_external_searches):
     """Build a tuple of two sets. The first one is the list of engine to use for an external search and the
     second one is for the seealso box."""
 
     collection_id = get_collection_id(collection_name)
     if not collection_id:
         return (None, None)
 
     if not type(selected_external_searches) is list:
         selected_external_searches = [selected_external_searches]
 
     seealso_engines = Set()
     search_engines = Set()
 
     if dico_collection_seealso.has_key(collection_id):
         seealso_engines = copy(dico_collection_seealso[collection_id])
 
     if dico_collection_external_searches.has_key(collection_id):
         seealso_engines = seealso_engines.union(dico_collection_external_searches[collection_id])
 
     for ext_search_name in selected_external_searches:
         if external_collections_dictionary.has_key(ext_search_name):
             engine = external_collections_dictionary[ext_search_name]
             if engine.parser:
                 search_engines.add(engine)
         else:
             warning('select_external_engines: %(ext_search_name)s unknown.' % locals())
 
     seealso_engines = seealso_engines.difference(search_engines)
 
     return (search_engines, seealso_engines)
 
 # Search
 def do_external_search(req, lang, vprint, basic_search_units, search_engines):
     """Make the external search."""
     _ = gettext_set_language(lang)
     vprint(3, 'beginning external search')
     engines_list = []
 
     for engine in search_engines:
         url = engine.build_search_url(basic_search_units, lang)
         if url:
             engines_list.append([url, engine])
 
     pagegetters_list = [HTTPAsyncPageGetter(engine[0]) for engine in engines_list]
 
     def finished(pagegetter, data, current_time):
         """Function called, each time the download of a web page finish.
         Will parse and print the results of this page."""
         print_results(req, lang, pagegetter, data, current_time)
 
     finished_list = async_download(pagegetters_list, finished, engines_list, CFG_EXTERNAL_COLLECTION_TIMEOUT)
 
     for (finished, engine) in zip(finished_list, engines_list):
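+        # downloads that did not finish within the timeout get a timeout notice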
         if not finished:
             url = engine[0]
             name = engine[1].name
             print_timeout(req, lang, engine[1], name, url)
 
 # Database management
 def external_collection_load_states():
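+    """Load the external-collection search and see-also configuration from the database into the module-level dictionaries."""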
     global external_collections_state, dico_collection_external_searches, dico_collection_seealso
 
     external_collections_state = {}
     dico_collection_external_searches = {}
     dico_collection_seealso = {}
 
     query = "SELECT collection_externalcollection.id_collection, collection_externalcollection.type, externalcollection.name FROM collection_externalcollection, externalcollection WHERE collection_externalcollection.id_externalcollection = externalcollection.id;"
     try:
         results = run_sql(query)
     except OperationalError:
         results = None
     if results:
         for result in results:
             collection_id = int(result[0])
             search_type = int(result[1])
             engine_name = result[2]
 
             if not external_collections_dictionary.has_key(engine_name):
                 warning("No search engine : " + engine_name)
                 continue
 
             engine = external_collections_dictionary[engine_name]
 
             if not external_collections_state.has_key(collection_id):
                 external_collections_state[collection_id] = {}
             col_states = external_collections_state[collection_id]
 
             col_states[engine] = search_type
 
             dictionary = None
 
             if search_type == 1:
                 dictionary = dico_collection_seealso
 
             if search_type in [2, 3]:
                 dictionary = dico_collection_external_searches
 
             if dictionary is None:
                 continue
 
             if not dictionary.has_key(collection_id):
                 dictionary[collection_id] = Set()
             engine_set = dictionary[collection_id]
             engine_set.add(engine)
 
 def external_collection_init():
     """Load db infos if it's not already done."""
     if not external_collection_init.done:
         external_collection_load_states()
         external_collection_init.done = True
 external_collection_init.done = False
 
 def external_collection_get_state(external_collection, collection_id):
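+    """Return the configured search type for this engine and collection, or 0 if not set."""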
     external_collection_load_states()
     if not external_collections_state.has_key(collection_id):
         return 0
     col_states = external_collections_state[collection_id]
     if not col_states.has_key(external_collection):
         return 0
     return col_states[external_collection]
 
 def external_collection_get_update_state_list(external_collection, collection_id, state, recurse=False):
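+    """Return the SQL value tuples describing the state change for this collection, recursing into its descendants if requested."""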
     changes = []
 
     if external_collection_get_state(external_collection, collection_id) != state:
         changes = ['(%(collection_id)d, %(id_externalcollection)d, %(state)d)' %
             {'collection_id': collection_id, 'id_externalcollection': external_collection_getid(external_collection), 'state': state}]
 
     if not recurse:
         return changes
 
     for descendant_id in get_collection_descendants(collection_id):
         changes += external_collection_get_update_state_list(external_collection, descendant_id, state)
 
     return changes
 
 def external_collection_apply_changes(changes_list):
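+    """Insert or update the given (collection, engine, type) rows in the collection_externalcollection table."""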
     if not changes_list:
         return
 
     sql_values = ", ".join(changes_list)
     sql = 'INSERT INTO collection_externalcollection (id_collection, id_externalcollection, type) VALUES ' + sql_values + ' ON DUPLICATE KEY UPDATE type=VALUES(type);'
     run_sql(sql)
 
 # Misc functions
 def external_collection_sort_engine_by_name(engines_set):
     """Return a list of sorted (by name) search engines."""
     engines_list = [engine for engine in engines_set]
     engines_list.sort(lambda x, y: cmp(x.name, y.name))
     return engines_list
 
 # External search ID
 def external_collection_getid(external_collection):
     """Return the id of an external_collection. Will create a new entry in DB if needed."""
 
     if external_collection.__dict__.has_key('id'):
         return external_collection.id
 
     # use bound query arguments to avoid SQL injection via the engine name
     results = run_sql('SELECT id FROM externalcollection WHERE name=%s',
                       (external_collection.name,))
     if not results:
         run_sql('INSERT INTO externalcollection (name) VALUES (%s)',
                 (external_collection.name,))
         return external_collection_getid(external_collection)
 
     external_collection.id = results[0][0]
     return external_collection.id
 
 external_collection_init()
 
diff --git a/modules/websearch/lib/websearch_webinterface.py b/modules/websearch/lib/websearch_webinterface.py
index 95a9dd188..7c6747aa4 100644
--- a/modules/websearch/lib/websearch_webinterface.py
+++ b/modules/websearch/lib/websearch_webinterface.py
@@ -1,1002 +1,1021 @@
 ## $Id$
 ##
 ## This file is part of CDS Invenio.
-## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
+## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2012 CERN.
 ##
 ## CDS Invenio is free software; you can redistribute it and/or
 ## modify it under the terms of the GNU General Public License as
 ## published by the Free Software Foundation; either version 2 of the
 ## License, or (at your option) any later version.
 ##
 ## CDS Invenio is distributed in the hope that it will be useful, but
 ## WITHOUT ANY WARRANTY; without even the implied warranty of
 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 ## General Public License for more details.
 ##
 ## You should have received a copy of the GNU General Public License
 ## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
 ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 
 """WebSearch URL handler."""
 
 __revision__ = "$Id$"
 
 import cgi
 import os
 import datetime
 from urllib import quote
 from mod_python import apache
 
 # maximum number of collaborating authors etc. shown in the GUI
 MAX_COLLAB_LIST = 10
 MAX_KEYWORD_LIST = 10
 MAX_VENUE_LIST = 10
 # tag constants
 AUTHOR_TAG = "100__a"
 COAUTHOR_TAG = "700_a"
 AUTHOR_INST_TAG = "100__u"
 VENUE_TAG = "909C4p"
 KEYWORD_TAG = "6531_a"
 
 try:
     Set = set
 except NameError:
     from sets import Set
 
 from invenio.config import \
      CFG_SITE_URL, \
      CFG_SITE_NAME, \
      CFG_CACHEDIR, \
      CFG_SITE_LANG, \
      CFG_SITE_ADMIN_EMAIL, \
      CFG_SITE_SECURE_URL, \
      CFG_WEBSEARCH_INSTANT_BROWSE_RSS, \
      CFG_WEBSEARCH_RSS_TTL, \
      CFG_WEBSEARCH_RSS_MAX_CACHED_REQUESTS, \
      CFG_WEBSEARCH_DEFAULT_SEARCH_INTERFACE, \
      CFG_WEBSEARCH_ENABLED_SEARCH_INTERFACES, \
      CFG_WEBDIR
 from invenio.dbquery import Error
 from invenio.webinterface_handler import wash_urlargd, WebInterfaceDirectory
 from invenio.urlutils import redirect_to_url, make_canonical_urlargd, drop_default_urlargd, create_html_link
 from invenio.webuser import getUid, page_not_authorized, get_user_preferences, \
-    collect_user_info, http_check_credentials, logoutUser
+    collect_user_info, http_check_credentials, logoutUser, isUserSuperAdmin
 from invenio import search_engine
 from invenio.websubmit_webinterface import WebInterfaceFilesPages
 from invenio.webcomment_webinterface import WebInterfaceCommentsPages
 from invenio.webpage import page, create_error_box
 from invenio.messages import gettext_set_language
 from invenio.search_engine import get_colID, get_coll_i18nname, \
     check_user_can_view_record, collection_restricted_p, restricted_collection_cache, \
     get_fieldvalues
 from invenio.access_control_engine import acc_authorize_action
 from invenio.access_control_config import VIEWRESTRCOLL
 from invenio.access_control_mailcookie import mail_cookie_create_authorize_action
 from invenio.bibformat import format_records
 from invenio.bibformat_engine import get_output_formats
 from invenio.websearch_webcoll import mymkdir, get_collection
 from invenio.intbitset import intbitset
 from invenio.bibupload import find_record_from_sysno
 from invenio.bibrank_citation_searcher import get_author_cited_by, get_cited_by_list
 from invenio.bibrank_downloads_indexer import get_download_weight_total
 from invenio.search_engine_summarizer import summarize_records
 
 import invenio.template
 websearch_templates = invenio.template.load('websearch')
 
 search_results_default_urlargd = websearch_templates.search_results_default_urlargd
 search_interface_default_urlargd = websearch_templates.search_interface_default_urlargd
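+# gather the list of known output format codes, falling back to a static list if the format attributes are incomplete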
 try:
     output_formats = [output_format['attrs']['code'].lower() for output_format in \
                       get_output_formats(with_attributes=True).values()]
 except KeyError:
     output_formats = ['xd', 'xm', 'hd', 'hb', 'hs', 'hx']
 output_formats.extend(['hm', 't', 'h'])
 
 def wash_search_urlargd(form):
     """
     Create canonical search arguments from those passed via web form.
     """
 
     argd = wash_urlargd(form, search_results_default_urlargd)
 
     # Sometimes, users pass ot=245,700 instead of
     # ot=245&ot=700. Normalize that.
     ots = []
     for ot in argd['ot']:
         ots += ot.split(',')
     argd['ot'] = ots
 
     # We can either get the mode of function as
     # action=<browse|search>, or by setting action_browse or
     # action_search.
     if argd['action_browse']:
         argd['action'] = 'browse'
     elif argd['action_search']:
         argd['action'] = 'search'
     else:
         if argd['action'] not in ('browse', 'search'):
             argd['action'] = 'search'
 
     del argd['action_browse']
     del argd['action_search']
 
     return argd
 
 
 class WebInterfaceAuthorPages(WebInterfaceDirectory):
     """ Handle /author/Doe%2C+John etc set of pages."""
 
     _exports = ['author']
 
     def __init__(self, authorname=''):
         """Constructor."""
         self.authorname = authorname
 
     def _lookup(self, component, path):
         """This handler parses dynamic URLs (/author/John+Doe)."""
         return WebInterfaceAuthorPages(component), path
 
 
     def __call__(self, req, form):
         """Serve the page in the given language."""
         argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
         ln = argd['ln']
         req.argd = argd  # needed by perform_request_search
 
         # start page
         req.content_type = "text/html"
         req.send_http_header()
         uid = getUid(req)
 
         search_engine.page_start(req, "hb", "", "", ln, uid)
 
         # normalize the author name; it is displayed even when there are no results
         self.authorname = self.authorname.replace("+"," ")
 
         if not self.authorname:
             return websearch_templates.tmpl_author_information(req, {}, self.authorname,
                                                                0, {},
                                                                {}, {}, {}, {}, ln)
 
         citelist = get_author_cited_by(self.authorname)
         # search the publications by this author
         pubs = search_engine.perform_request_search(req=req, p=self.authorname, f="author")
         # get the most frequent first authors of these pubs
         authors = search_engine.get_most_popular_values_for_code(pubs, AUTHOR_TAG)
         # and the most frequent co-authors
         collabs = search_engine.get_most_popular_values_for_code(pubs, COAUTHOR_TAG)
         # and publication venues
         venuedict = search_engine.get_values_for_code_dict(pubs, VENUE_TAG)
         # and keywords
         kwdict = search_engine.get_values_for_code_dict(pubs, KEYWORD_TAG)
 
         # construct a list of tuples for the keywords that appear more
         # than once, limited to at most MAX_KEYWORD_LIST entries
         kwtuples = []
         for k in kwdict.keys():
             if kwdict[k] > 1:
                 mytuple = (kwdict[k], k)
                 kwtuples.append(mytuple)
         # sort by frequency, descending
         kwtuples.sort()
         kwtuples.reverse()
         kwtuples = kwtuples[0:MAX_KEYWORD_LIST]
 
         # same for venues
         vtuples = []
 
         for k in venuedict.keys():
             if venuedict[k] > 1:
                 mytuple = (venuedict[k], k)
                 vtuples.append(mytuple)
         # sort by frequency, descending
         vtuples.sort()
         vtuples.reverse()
         vtuples = vtuples[0:MAX_VENUE_LIST]
 
 
         authors.extend(collabs)  # join first authors and co-authors
         # remove the author in question from authors: the rest are associates
         if (authors.count(self.authorname) > 0):
             authors.remove(self.authorname)
 
         authors = authors[0:MAX_COLLAB_LIST] #cut extra
 
         # a dict with affiliations as keys and lists of publications as values
         author_aff_pubs = self.get_institute_pub_dict(pubs)
         authoraffs = author_aff_pubs.keys()
 
         # find out how many times these records have been downloaded
         recsloads = {}
         recsloads = get_download_weight_total(recsloads, pubs)
         # sum up
         totaldownloads = 0
         for k in recsloads.keys():
             totaldownloads = totaldownloads + recsloads[k]
 
         # get the cited-by lists
         citedbylist = get_cited_by_list(pubs)
         # finally, everything is gathered: call the template
         websearch_templates.tmpl_author_information(req, pubs, self.authorname,
                                                     totaldownloads, author_aff_pubs,
                                                     citedbylist, kwtuples, authors, vtuples, ln)
 
         # cited-by summary
         out = summarize_records(intbitset(pubs), 'hcs', ln, self.authorname, 'author', req)
         req.write(out)
 
         simauthbox = search_engine.create_similarly_named_authors_link_box(self.authorname)
         req.write(simauthbox)
 
         return search_engine.page_end(req, 'hb', ln)
 
     def get_institute_pub_dict(self, recids):
         # return a dictionary mapping institute -> list of publications
         affus = []  # list of institutes from the record
         author_aff_pubs = {}  # the dict to be built
         for recid in recids:
             # iterate over all records so that we get the first author's
             # institute when this author is the first author, or "their"
             # institute when they are an affiliated co-author
             mainauthors = get_fieldvalues(recid, AUTHOR_TAG)
             mainauthor = " "
             if mainauthors:
                 mainauthor = mainauthors[0]
             if (mainauthor == self.authorname):
                 affus = get_fieldvalues(recid, AUTHOR_INST_TAG)
             # if this is empty, add a dummy " " value
             if (affus == []):
                 affus = [" "]
             for a in affus:
                 # add to author_aff_pubs
                 if (author_aff_pubs.has_key(a)):
                     tmp = author_aff_pubs[a]
                     tmp.append(recid)
                     author_aff_pubs[a] = tmp
                 else:
                     author_aff_pubs[a] = [recid]
         return author_aff_pubs
 
     index = __call__
 
 
 class WebInterfaceRecordPages(WebInterfaceDirectory):
     """ Handling of a /record/<recid> URL fragment """
 
     _exports = ['', 'files', 'reviews', 'comments', 'usage',
                 'references', 'export', 'citations']
 
     #_exports.extend(output_formats)
 
     def __init__(self, recid, tab, format=None):
         self.recid = recid
         self.tab = tab
         self.format = format
 
         self.export = self
         self.files = WebInterfaceFilesPages(self.recid)
         self.reviews = WebInterfaceCommentsPages(self.recid, reviews=1)
         self.comments = WebInterfaceCommentsPages(self.recid)
         self.usage = self
         self.references = self
         self.citations = self
         self.export = WebInterfaceRecordExport(self.recid, self.format)
 
         return
 
     def __call__(self, req, form):
         argd = wash_search_urlargd(form)
         argd['recid'] = self.recid
         argd['tab'] = self.tab
 
         if self.format is not None:
             argd['of'] = self.format
         req.argd = argd
         uid = getUid(req)
         if uid == -1:
             return page_not_authorized(req, "../",
                 text="You are not authorized to view this record.",
                                        navmenuid='search')
         elif uid > 0:
             pref = get_user_preferences(uid)
             try:
                 if not form.has_key('rg'):
                     # fetch user rg preference only if not overridden via URL
                     argd['rg'] = int(pref['websearch_group_records'])
             except (KeyError, ValueError):
                 pass
 
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = check_user_can_view_record(user_info, self.recid)
 
         if auth_code and user_info['email'] == 'guest' and not user_info['apache_user']:
             cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : search_engine.guess_primary_collection_of_a_record(self.recid)})
             target = CFG_SITE_SECURE_URL + '/youraccount/login' + \
                     make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : CFG_SITE_URL + req.unparsed_uri}, {})
             return redirect_to_url(req, target)
         elif auth_code:
             return page_not_authorized(req, "../", \
                 text = auth_msg,\
                 navmenuid='search')
 
+        # only superadmins can use verbose parameter for obtaining debug information
+        if not isUserSuperAdmin(user_info):
+            argd['verbose'] = 0
+
         # mod_python does not like to return [] in case when of=id:
         out = search_engine.perform_request_search(req, **argd)
         if out == []:
             return str(out)
         else:
             return out
 
     # Return the same page whether we ask for /record/123 or /record/123/
     index = __call__
 
 class WebInterfaceRecordRestrictedPages(WebInterfaceDirectory):
     """ Handling of a /record-restricted/<recid> URL fragment """
 
     _exports = ['', 'files', 'reviews', 'comments', 'usage',
                 'references', 'export', 'citations']
 
     #_exports.extend(output_formats)
 
     def __init__(self, recid, tab, format=None):
         self.recid = recid
         self.tab = tab
         self.format = format
 
         self.files = WebInterfaceFilesPages(self.recid)
         self.reviews = WebInterfaceCommentsPages(self.recid, reviews=1)
         self.comments = WebInterfaceCommentsPages(self.recid)
         self.usage = self
         self.references = self
         self.citations = self
         self.export = WebInterfaceRecordExport(self.recid, self.format)
 
         return
 
     def __call__(self, req, form):
         argd = wash_search_urlargd(form)
         argd['recid'] = self.recid
         if self.format is not None:
             argd['of'] = self.format
 
         req.argd = argd
 
         uid = getUid(req)
         user_info = collect_user_info(req)
         if uid == -1:
             return page_not_authorized(req, "../",
                 text="You are not authorized to view this record.",
                                        navmenuid='search')
         elif uid > 0:
             pref = get_user_preferences(uid)
             try:
                 if not form.has_key('rg'):
                     # fetch user rg preference only if not overridden via URL
                     argd['rg'] = int(pref['websearch_group_records'])
             except (KeyError, ValueError):
                 pass
 
         record_primary_collection = search_engine.guess_primary_collection_of_a_record(self.recid)
 
         if collection_restricted_p(record_primary_collection):
             (auth_code, dummy) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=record_primary_collection)
             if auth_code:
                 return page_not_authorized(req, "../",
                     text="You are not authorized to view this record.",
                     navmenuid='search')
 
+        # only superadmins can use verbose parameter for obtaining debug information
+        if not isUserSuperAdmin(user_info):
+            argd['verbose'] = 0
+
         # Keep all the arguments; they might be reused in the
         # record page itself to derive other queries
         req.argd = argd
 
         # mod_python does not like to return [] in case when of=id:
         out = search_engine.perform_request_search(req, **argd)
         if out == []:
             return str(out)
         else:
             return out
 
     # Return the same page whether we ask for /record/123 or /record/123/
     index = __call__
 
 class WebInterfaceSearchResultsPages(WebInterfaceDirectory):
     """ Handling of the /search URL and its sub-pages. """
 
     _exports = ['', 'authenticate', 'cache', 'log']
 
     def __call__(self, req, form):
         """ Perform a search. """
         argd = wash_search_urlargd(form)
 
         _ = gettext_set_language(argd['ln'])
 
         if req.method == 'POST':
             raise apache.SERVER_RETURN, apache.HTTP_METHOD_NOT_ALLOWED
 
         uid = getUid(req)
         user_info = collect_user_info(req)
         if uid == -1:
             return page_not_authorized(req, "../",
                 text = _("You are not authorized to view this area."),
                                        navmenuid='search')
         elif uid > 0:
             pref = get_user_preferences(uid)
             try:
                 if not form.has_key('rg'):
                     # fetch user rg preference only if not overridden via URL
                     argd['rg'] = int(pref['websearch_group_records'])
             except (KeyError, ValueError):
                 pass
 
         involved_collections = Set()
         involved_collections.update(argd['c'])
         involved_collections.add(argd['cc'])
 
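+        # translate the legacy id/idb/sysno/sysnb arguments into recid/recidb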
         if argd['id'] > 0:
             argd['recid'] = argd['id']
         if argd['idb'] > 0:
             argd['recidb'] = argd['idb']
         if argd['sysno']:
             tmp_recid = find_record_from_sysno(argd['sysno'])
             if tmp_recid:
                 argd['recid'] = tmp_recid
         if argd['sysnb']:
             tmp_recid = find_record_from_sysno(argd['sysnb'])
             if tmp_recid:
                 argd['recidb'] = tmp_recid
 
         if argd['recid'] > 0:
             if argd['recidb'] > argd['recid']:
                 # Hack to check whether at least one record of the
                 # range belongs to a restricted collection, and
                 # whether the user is not authorized for that
                 # collection.
                 recids = intbitset(xrange(argd['recid'], argd['recidb']))
                 restricted_colls = restricted_collection_cache.get_cache()
                 for collname in restricted_colls:
                     (auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=collname)
                     if auth_code:
                         coll_recids = get_collection(collname).reclist
                         if coll_recids & recids:
                             cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : collname})
                             target = CFG_SITE_SECURE_URL + '/youraccount/login' + \
                             make_canonical_urlargd({'action' : cookie, 'ln' : argd['ln'], 'referer' : CFG_SITE_URL + req.unparsed_uri}, {})
                             return redirect_to_url(req, target)
             else:
                 involved_collections.add(search_engine.guess_primary_collection_of_a_record(argd['recid']))
 
         # If any of the collection requires authentication, redirect
         # to the authentication form.
         for coll in involved_collections:
             if collection_restricted_p(coll):
                 (auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=coll)
                 if auth_code:
                     cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : coll})
                     target = CFG_SITE_SECURE_URL + '/youraccount/login' + \
                     make_canonical_urlargd({'action' : cookie, 'ln' : argd['ln'], 'referer' : CFG_SITE_URL + req.unparsed_uri}, {})
                     return redirect_to_url(req, target)
 
+        # only superadmins can use verbose parameter for obtaining debug information
+        if not isUserSuperAdmin(user_info):
+            argd['verbose'] = 0
+
         # Keep all the arguments; they might be reused in the
         # search_engine itself to derive other queries
         req.argd = argd
 
         # mod_python does not like to return [] in case when of=id:
         out = search_engine.perform_request_search(req, **argd)
         if out == []:
             return str(out)
         else:
             return out
 
     def cache(self, req, form):
         """Search cache page."""
         argd = wash_urlargd(form, {'action': (str, 'show')})
         return search_engine.perform_request_cache(req, action=argd['action'])
 
     def log(self, req, form):
         """Search log page."""
         argd = wash_urlargd(form, {'date': (str, '')})
         return search_engine.perform_request_log(req, date=argd['date'])
 
     def authenticate(self, req, form):
         """Restricted search results pages."""
 
         argd = wash_search_urlargd(form)
 
         user_info = collect_user_info(req)
         for coll in argd['c'] + [argd['cc']]:
             if collection_restricted_p(coll):
                 (auth_code, dummy) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=coll)
                 if auth_code:
                     cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : coll})
                     target = CFG_SITE_SECURE_URL + '/youraccount/login' + \
                     make_canonical_urlargd({'action' : cookie, 'ln' : argd['ln'], 'referer' : CFG_SITE_URL + req.unparsed_uri}, {})
                     return redirect_to_url(req, target)
 
         # Keep all the arguments; they might be reused in the
         # search_engine itself to derive other queries
         req.argd = argd
 
         uid = getUid(req)
         if uid > 0:
             pref = get_user_preferences(uid)
             try:
                 if not form.has_key('rg'):
                     # fetch user rg preference only if not overridden via URL
                     argd['rg'] = int(pref['websearch_group_records'])
             except (KeyError, ValueError):
                 pass
 
+        # only superadmins can use verbose parameter for obtaining debug information
+        if not isUserSuperAdmin(user_info):
+            argd['verbose'] = 0
 
         # mod_python does not like to return [] in case when of=id:
         out = search_engine.perform_request_search(req, **argd)
         if out == []:
             return str(out)
         else:
             return out
 
     index = __call__
 
 # Parameters for the legacy URLs, of the form /?c=ALEPH
 legacy_collection_default_urlargd = {
     'as': (int, CFG_WEBSEARCH_DEFAULT_SEARCH_INTERFACE),
     'verbose': (int, 0),
     'c': (str, CFG_SITE_NAME)}
 
 class WebInterfaceSearchInterfacePages(WebInterfaceDirectory):
 
     """ Handling of collection navigation."""
 
     _exports = [('index.py', 'legacy_collection'),
                 ('', 'legacy_collection'),
                 ('search.py', 'legacy_search'),
                 'search', 'openurl', 'testsso',
                 'logout_SSO_hook']
 
     search = WebInterfaceSearchResultsPages()
 
     def testsso(self, req, form):
         """ For testing single sign-on """
         req.add_common_vars()
         sso_env = {}
         for var, value in req.subprocess_env.iteritems():
             if var.startswith('HTTP_ADFS_'):
                 sso_env[var] = value
         out = "<HTML><HEAD><TITLE>SSO test</TITLE</HEAD>"
         out += "<BODY><TABLE>"
         for var, value in sso_env.iteritems():
             out += "<TR><TD><STRONG>%s</STRONG></TD><TD>%s</TD></TR>" % (var, value)
         out += "</TABLE></BODY></HTML>"
         return out
 
     def logout_SSO_hook(self, req, form):
         """Script triggered by the display of the centralized SSO logout
         dialog. It logs the user out of CDS Invenio and streams back the
         expected picture."""
         logoutUser(req)
         req.content_type = 'image/gif'
         req.encoding = None
         req.filename = 'wsignout.gif'
         req.headers_out["Content-Disposition"] = "inline; filename=wsignout.gif"
         req.set_content_length(os.path.getsize('%s/img/wsignout.gif' % CFG_WEBDIR))
         req.send_http_header()
         req.sendfile('%s/img/wsignout.gif' % CFG_WEBDIR)
 
     def _lookup(self, component, path):
         """ This handler is invoked for the dynamic URLs (for
         collections and records)"""
 
         if component == 'collection':
             c = '/'.join(path)
 
             def answer(req, form):
                 """Accessing collections cached pages."""
                 # Accessing collections: this is for accessing the
                 # cached page on top of each collection.
 
                 argd = wash_urlargd(form, search_interface_default_urlargd)
 
                 # We simply return the cached page of the collection
                 argd['c'] = c
 
                 if not argd['c']:
                     # collection argument not present; display
                     # home collection by default
                     argd['c'] = CFG_SITE_NAME
                 return display_collection(req, **argd)
 
             return answer, []
 
 
         elif component == 'record' or component == 'record-restricted':
             try:
                 recid = int(path[0])
             except IndexError:
                 # display record #1 for URL /record without a number
                 recid = 1
             except ValueError:
                 if path[0] == '':
                     # display record #1 for URL /record/ without a number
                     recid = 1
                 else:
                     # display page not found for URLs like /record/foo
                     return None, []
 
             if recid <= 0:
                 # display page not found for URLs like /record/-5 or /record/0
                 return None, []
 
             format = None
             tab = ''
             try:
                 if path[1] in ['', 'files', 'reviews', 'comments',
                                'usage', 'references', 'citations']:
                     tab = path[1]
                 elif path[1] == 'export':
                     tab = ''
                     format = path[2]
 #                    format = None
 #                elif path[1] in output_formats:
 #                    tab = ''
 #                    format = path[1]
                 else:
                     # display page not found for URLs like /record/references
                     # for a collection where the 'references' tab is not visible
                     return None, []
 
             except IndexError:
                 # Keep the normal URL if no tab is specified
                 pass
 
             #if component == 'record-restricted':
                 #return WebInterfaceRecordRestrictedPages(recid, tab, format), path[1:]
             #else:
             return WebInterfaceRecordPages(recid, tab, format), path[1:]
 
         return None, []
 
     def openurl(self, req, form):
         """ OpenURL Handler."""
         argd = wash_urlargd(form, websearch_templates.tmpl_openurl_accepted_args)
         ret_url = websearch_templates.tmpl_openurl2invenio(argd)
         if ret_url:
             return redirect_to_url(req, ret_url)
         else:
             return redirect_to_url(req, CFG_SITE_URL)
 
     def legacy_collection(self, req, form):
         """Collection URL backward compatibility handling."""
         accepted_args = dict(legacy_collection_default_urlargd)
         accepted_args.update({'referer' : (str, ''),
              'realm' : (str, '')})
         argd = wash_urlargd(form, accepted_args)
 
         # Apache authentication stuff
         if argd['realm']:
             http_check_credentials(req, argd['realm'])
             return redirect_to_url(req, argd['referer'] or '%s/youraccount/youradminactivities' % CFG_SITE_SECURE_URL)
 
         del argd['referer']
         del argd['realm']
 
         # If we specify no collection, then we don't need to redirect
         # the user, so that accessing <http://yoursite/> returns the
         # default collection.
         if not form.has_key('c'):
             return display_collection(req, **argd)
 
         # make the collection an element of the path, and keep the
         # other query elements as is. If the collection is CFG_SITE_NAME,
         # however, redirect to the main URL.
         c = argd['c']
         del argd['c']
 
         if c == CFG_SITE_NAME:
             target = '/'
         else:
             target = '/collection/' + quote(c)
 
         target += make_canonical_urlargd(argd, legacy_collection_default_urlargd)
         return redirect_to_url(req, target)
 
 
     def legacy_search(self, req, form):
         """Search URL backward compatibility handling."""
         argd = wash_search_urlargd(form)
 
         # We either jump into the generic search form, or the specific
         # /record/... display if a recid is requested
         if argd['recid'] != -1:
             target = '/record/%d' % argd['recid']
             del argd['recid']
 
         else:
             target = '/search'
 
         target += make_canonical_urlargd(argd, search_results_default_urlargd)
         return redirect_to_url(req, target)
 
 
 def display_collection(req, c, as, verbose, ln):
     """Display search interface page for collection c by looking
     in the collection cache."""
     _ = gettext_set_language(ln)
 
     req.argd = drop_default_urlargd({'as': as, 'verbose': verbose, 'ln': ln},
                                     search_interface_default_urlargd)
 
     # get user ID:
     try:
         uid = getUid(req)
         user_preferences = {}
         if uid == -1:
             return page_not_authorized(req, "../",
                 text="You are not authorized to view this collection",
                                        navmenuid='search')
         elif uid > 0:
             user_preferences = get_user_preferences(uid)
     except Error:
         return page(title=_("Internal Error"),
                     body = create_error_box(req, verbose=verbose, ln=ln),
                     description="%s - Internal Error" % CFG_SITE_NAME,
                     keywords="%s, Internal Error" % CFG_SITE_NAME,
                     language=ln,
                     req=req,
                     navmenuid='search')
     # start display:
     req.content_type = "text/html"
     req.send_http_header()
     # deduce collection id:
     colID = get_colID(c)
     if type(colID) is not int:
         page_body = '<p>' + (_("Sorry, collection %s does not seem to exist.") % ('<strong>' + str(c) + '</strong>')) + '</p>'
         page_body = '<p>' + (_("You may want to start browsing from %s.") % ('<a href="' + CFG_SITE_URL + '?ln=' + ln + '">' + get_coll_i18nname(CFG_SITE_NAME, ln) + '</a>')) + '</p>'
         return page(title=_("Collection %s Not Found") % cgi.escape(c),
                     body=page_body,
                     description=(CFG_SITE_NAME + ' - ' + _("Not found") + ': ' + cgi.escape(str(c))),
                     keywords="%s" % CFG_SITE_NAME,
                     uid=uid,
                     language=ln,
                     req=req,
                     navmenuid='search')
     # display collection interface page:
     try:
         filedesc = open("%s/collections/%d/navtrail-as=%d-ln=%s.html" % (CFG_CACHEDIR, colID, as, ln), "r")
         c_navtrail = filedesc.read()
         filedesc.close()
         filedesc = open("%s/collections/%d/body-as=%d-ln=%s.html" % (CFG_CACHEDIR, colID, as, ln), "r")
         c_body = filedesc.read()
         filedesc.close()
         filedesc = open("%s/collections/%d/portalbox-tp-ln=%s.html" % (CFG_CACHEDIR, colID, ln), "r")
         c_portalbox_tp = filedesc.read()
         filedesc.close()
         filedesc = open("%s/collections/%d/portalbox-te-ln=%s.html" % (CFG_CACHEDIR, colID, ln), "r")
         c_portalbox_te = filedesc.read()
         filedesc.close()
         filedesc = open("%s/collections/%d/portalbox-lt-ln=%s.html" % (CFG_CACHEDIR, colID, ln), "r")
         c_portalbox_lt = filedesc.read()
         filedesc.close()
         # show help boxes (usually located in "tr", "top right")
         # if users have not banned them in their preferences:
         c_portalbox_rt = ""
         if user_preferences.get('websearch_helpbox', 1) > 0:
             filedesc = open("%s/collections/%d/portalbox-rt-ln=%s.html" % (CFG_CACHEDIR, colID, ln), "r")
             c_portalbox_rt = filedesc.read()
             filedesc.close()
         filedesc = open("%s/collections/%d/last-updated-ln=%s.html" % (CFG_CACHEDIR, colID, ln), "r")
         c_last_updated = filedesc.read()
         filedesc.close()
 
         title = get_coll_i18nname(c, ln)
         # if there is only one collection defined, do not print its
         # title on the page as it would be displayed repetitively.
         if len(search_engine.collection_reclist_cache.keys()) == 1:
             title = ""
 
         rssurl = CFG_SITE_URL + '/rss'
         if c != CFG_SITE_NAME:
             rssurl += '?cc=' + quote(c)
 
         return page(title=title,
                     body=c_body,
                     navtrail=c_navtrail,
                     description="%s - %s" % (CFG_SITE_NAME, c),
                     keywords="%s, %s" % (CFG_SITE_NAME, c),
                     uid=uid,
                     language=ln,
                     req=req,
                     cdspageboxlefttopadd=c_portalbox_lt,
                     cdspageboxrighttopadd=c_portalbox_rt,
                     titleprologue=c_portalbox_tp,
                     titleepilogue=c_portalbox_te,
                     lastupdated=c_last_updated,
                     navmenuid='search',
                     rssurl=rssurl,
                     show_title_p=-1 not in CFG_WEBSEARCH_ENABLED_SEARCH_INTERFACES)
     except:
         if verbose >= 9:
             req.write("<br />c=%s" % c)
             req.write("<br />as=%s" % as)
             req.write("<br />ln=%s" % ln)
             req.write("<br />colID=%s" % colID)
             req.write("<br />uid=%s" % uid)
         return page(title=_("Internal Error"),
                     body = create_error_box(req, ln=ln),
                     description="%s - Internal Error" % CFG_SITE_NAME,
                     keywords="%s, Internal Error" % CFG_SITE_NAME,
                     uid=uid,
                     language=ln,
                     req=req,
                     navmenuid='search')
 
     return "\n"
 
 class WebInterfaceRSSFeedServicePages(WebInterfaceDirectory):
     """RSS 2.0 feed service pages."""
 
     def __call__(self, req, form):
         """RSS 2.0 feed service."""
 
         # Keep only interesting parameters for the search
         # take a copy so that updating it does not mutate the shared template dict
         default_params = dict(websearch_templates.rss_default_urlargd)
         # We need to keep 'jrec' and 'rg' here in order to have
         # 'multi-page' RSS. These parameters are not kept by default
         # as we don't want to consider them when building RSS links
         # from search and browse pages.
         default_params.update({'jrec':(int, 1),
                                'rg': (int, CFG_WEBSEARCH_INSTANT_BROWSE_RSS)})
         argd = wash_urlargd(form, default_params)
 
         for coll in argd['c'] + [argd['cc']]:
             if collection_restricted_p(coll):
                 user_info = collect_user_info(req)
                 (auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=coll)
                 if auth_code:
                     cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : coll})
                     target = CFG_SITE_SECURE_URL + '/youraccount/login' + \
                     make_canonical_urlargd({'action' : cookie, 'ln' : argd['ln'], 'referer' : CFG_SITE_URL + req.unparsed_uri}, {})
                     return redirect_to_url(req, target)
 
         # Create a standard filename with these parameters
         current_url = websearch_templates.build_rss_url(argd)
         cache_filename = current_url.split('/')[-1]
 
         # as previously, 'jrec' and 'rg' are part of the cache filename
 
         req.content_type = "application/rss+xml"
         req.send_http_header()
         try:
             # Try to read from cache
             path = "%s/rss/%s.xml" % (CFG_CACHEDIR, cache_filename)
             # Check if cache needs refresh
             filedesc = open(path, "r")
             last_update_time = datetime.datetime.fromtimestamp(os.stat(os.path.abspath(path)).st_mtime)
             assert(datetime.datetime.now() < last_update_time + datetime.timedelta(minutes=CFG_WEBSEARCH_RSS_TTL))
             c_rss = filedesc.read()
             filedesc.close()
             req.write(c_rss)
             return
         except Exception, e:
             # do it live and cache
 
             previous_url = None
             if argd['jrec'] > 1:
                 prev_jrec = argd['jrec'] - argd['rg']
                 if prev_jrec < 1:
                     prev_jrec = 1
                 previous_url = websearch_templates.build_rss_url(argd,
                                                                  jrec=prev_jrec)
 
             recIDs = search_engine.perform_request_search(req, of="id",
                                                           c=argd['c'], cc=argd['cc'],
                                                           p=argd['p'], f=argd['f'],
                                                           p1=argd['p1'], f1=argd['f1'],
                                                           m1=argd['m1'], op1=argd['op1'],
                                                           p2=argd['p2'], f2=argd['f2'],
                                                           m2=argd['m2'], op2=argd['op2'],
                                                           p3=argd['p3'], f3=argd['f3'],
                                                           m3=argd['m3'])
             next_url = None
             if len(recIDs) >= argd['jrec'] + argd['rg']:
                 next_url = websearch_templates.build_rss_url(argd,
                                                              jrec=(argd['jrec'] + argd['rg']))
 
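+            # slice out the rg records belonging to this page, in reverse order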
             recIDs = recIDs[-argd['jrec']:(-argd['rg']-argd['jrec']):-1]
 
             rss_prologue = '<?xml version="1.0" encoding="UTF-8"?>\n' + \
             websearch_templates.tmpl_xml_rss_prologue(current_url=current_url,
                                                       previous_url=previous_url,
                                                       next_url=next_url) + '\n'
             req.write(rss_prologue)
             rss_body = format_records(recIDs,
                                       of='xr',
                                       record_separator="\n",
                                       req=req, epilogue="\n")
             rss_epilogue = websearch_templates.tmpl_xml_rss_epilogue() + '\n'
             req.write(rss_epilogue)
 
             # update cache
             dirname = "%s/rss" % (CFG_CACHEDIR)
             mymkdir(dirname)
             fullfilename = "%s/rss/%s.xml" % (CFG_CACHEDIR, cache_filename)
             try:
                 # Remove the file just in case it already existed
                 # so that a bit of space is created
                 os.remove(fullfilename)
             except OSError:
                 pass
 
             # Check if there's enough space to cache the request.
             if len(os.listdir(dirname)) < CFG_WEBSEARCH_RSS_MAX_CACHED_REQUESTS:
                 try:
                     os.umask(022)
                     f = open(fullfilename, "w")
                     f.write(rss_prologue + rss_body + rss_epilogue)
                     f.close()
                 except IOError, v:
                     if v[0] == 36:
                     # filename built from the URL was too long; never mind, don't cache
                         pass
                     else:
                         raise
 
     index = __call__
 
 
 class WebInterfaceRecordExport(WebInterfaceDirectory):
     """ Handling of a /record/<recid>/export/<format> URL fragment """
 
     _exports = output_formats
 
     def __init__(self, recid, format=None):
         self.recid = recid
         self.format = format
 
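+        # expose every output format code as an attribute so /record/<recid>/export/<format> URLs resolve to this handler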
         for output_format in output_formats:
             self.__dict__[output_format] = self
 
         return
 
     def __call__(self, req, form):
         argd = wash_search_urlargd(form)
         argd['recid'] = self.recid
 
         if self.format is not None:
             argd['of'] = self.format
         req.argd = argd
         uid = getUid(req)
         if uid == -1:
             return page_not_authorized(req, "../",
                 text="You are not authorized to view this record.",
                                        navmenuid='search')
         elif uid > 0:
             pref = get_user_preferences(uid)
             try:
                 if not form.has_key('rg'):
                     # fetch user rg preference only if not overridden via URL
                     argd['rg'] = int(pref['websearch_group_records'])
             except (KeyError, ValueError):
                 pass
 
         # Check if the record belongs to a restricted primary
         # collection.  If yes, redirect to the authenticated URL.
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = check_user_can_view_record(user_info, self.recid)
         if auth_code and user_info['email'] == 'guest' and not user_info['apache_user']:
             cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : search_engine.guess_primary_collection_of_a_record(self.recid)})
             target = CFG_SITE_SECURE_URL + '/youraccount/login' + \
                     make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : CFG_SITE_URL + req.unparsed_uri}, {})
             return redirect_to_url(req, target)
         elif auth_code:
             return page_not_authorized(req, "../", \
                 text = auth_msg,\
                 navmenuid='search')
 
+        # only superadmins can use verbose parameter for obtaining debug information
+        if not isUserSuperAdmin(user_info):
+            argd['verbose'] = 0
+
         # mod_python does not like to return [] in case when of=id:
         out = search_engine.perform_request_search(req, **argd)
         if out == []:
             return str(out)
         else:
             return out
 
     # Return the same page whether we ask for /record/123/export/xm or /record/123/export/xm/
     index = __call__