diff --git a/config/invenio.conf b/config/invenio.conf
index 0ba9d5323..671f84bf4 100644
--- a/config/invenio.conf
+++ b/config/invenio.conf
@@ -1,1503 +1,1512 @@
 ## This file is part of Invenio.
 ## Copyright (C) 2008, 2009, 2010, 2011 CERN.
 ##
 ## Invenio is free software; you can redistribute it and/or
 ## modify it under the terms of the GNU General Public License as
 ## published by the Free Software Foundation; either version 2 of the
 ## License, or (at your option) any later version.
 ##
 ## Invenio is distributed in the hope that it will be useful, but
 ## WITHOUT ANY WARRANTY; without even the implied warranty of
 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 ## General Public License for more details.
 ##
 ## You should have received a copy of the GNU General Public License
 ## along with Invenio; if not, write to the Free Software Foundation, Inc.,
 ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 
 ###################################################
 ## About 'invenio.conf' and 'invenio-local.conf' ##
 ###################################################
 
 ## The 'invenio.conf' file contains the vanilla default configuration
 ## parameters of an Invenio installation, as coming out of the
 ## distribution.  The file should be self-explanatory.  Once installed
 ## in its usual location (usually /opt/invenio/etc), you could in
 ## principle go ahead and change the values according to your local
 ## needs, but this is not advised.
 ##
 ## If you would like to customize some of these parameters, you should
 ## rather create a file named 'invenio-local.conf' in the same
 ## directory where 'invenio.conf' lives and you should write there
 ## only the customizations that you want to be different from the
 ## vanilla defaults.
 ##
 ## Here is a realistic, minimalist, yet production-ready example of
 ## what you would typically put there:
 ##
 ##    $ cat /opt/invenio/etc/invenio-local.conf
 ##    [Invenio]
 ##    CFG_SITE_NAME = John Doe's Document Server
 ##    CFG_SITE_NAME_INTL_fr = Serveur des Documents de John Doe
 ##    CFG_SITE_URL = http://your.site.com
 ##    CFG_SITE_SECURE_URL = https://your.site.com
 ##    CFG_SITE_ADMIN_EMAIL = john.doe@your.site.com
 ##    CFG_SITE_SUPPORT_EMAIL = john.doe@your.site.com
 ##    CFG_WEBALERT_ALERT_ENGINE_EMAIL = john.doe@your.site.com
 ##    CFG_WEBCOMMENT_ALERT_ENGINE_EMAIL = john.doe@your.site.com
 ##    CFG_WEBCOMMENT_DEFAULT_MODERATOR = john.doe@your.site.com
 ##    CFG_DATABASE_HOST = localhost
 ##    CFG_DATABASE_NAME = invenio
 ##    CFG_DATABASE_USER = invenio
 ##    CFG_DATABASE_PASS = my123p$ss
 ##
 ## You should override at least the parameters mentioned above and the
 ## parameters mentioned in the `Part 1: Essential parameters' below in
 ## order to define some very essential runtime parameters such as the
 ## name of your document server (CFG_SITE_NAME and
 ## CFG_SITE_NAME_INTL_*), the visible URL of your document server
 ## (CFG_SITE_URL and CFG_SITE_SECURE_URL), the email address of the
 ## local Invenio administrator, comment moderator, and alert engine
 ## (CFG_SITE_SUPPORT_EMAIL, CFG_SITE_ADMIN_EMAIL, etc), and last but
 ## not least your database credentials (CFG_DATABASE_*).
 ##
 ## The Invenio system will then read both the default invenio.conf
 ## file and your customized invenio-local.conf file and it will
 ## override any default options with the ones you have specified in
 ## your local file.  This cascading of configuration parameters will
 ## ease your future upgrades.
 
 [Invenio]
 
 ###################################
 ## Part 1:  Essential parameters ##
 ###################################
 
 ## This part defines essential Invenio internal parameters that
 ## everybody should override, like the name of the server or the email
 ## address of the local Invenio administrator.
 
 ## CFG_DATABASE_* - specify which MySQL server to use, the name of the
 ## database to use, and the database access credentials.
 CFG_DATABASE_HOST = localhost
 CFG_DATABASE_PORT = 3306
 CFG_DATABASE_NAME = invenio
 CFG_DATABASE_USER = invenio
 CFG_DATABASE_PASS = my123p$ss
 
 ## CFG_SITE_URL - specify URL under which your installation will be
 ## visible.  For example, use "http://your.site.com".  Do not leave
 ## trailing slash.
 CFG_SITE_URL = http://localhost
 
 ## CFG_SITE_SECURE_URL - specify secure URL under which your
 ## installation secure pages such as login or registration will be
 ## visible.  For example, use "https://your.site.com".  Do not leave
 ## trailing slash.  If you don't plan on using HTTPS, then you may
 ## leave this empty.
 CFG_SITE_SECURE_URL = https://localhost
 
 ## CFG_SITE_NAME -- the visible name of your Invenio installation.
 CFG_SITE_NAME = Atlantis Institute of Fictive Science
 
 ## CFG_SITE_NAME_INTL -- the international versions of CFG_SITE_NAME
 ## in various languages.  (See also CFG_SITE_LANGS below.)
 CFG_SITE_NAME_INTL_en = Atlantis Institute of Fictive Science
 CFG_SITE_NAME_INTL_fr = Atlantis Institut des Sciences Fictives
 CFG_SITE_NAME_INTL_de = Atlantis Institut der fiktiven Wissenschaft
 CFG_SITE_NAME_INTL_es = Atlantis Instituto de la Ciencia Fictive
 CFG_SITE_NAME_INTL_ca = Institut Atlantis de Ciència Fictícia
 CFG_SITE_NAME_INTL_pt = Instituto Atlantis de Ciência Fictícia
 CFG_SITE_NAME_INTL_it = Atlantis Istituto di Scienza Fittizia
 CFG_SITE_NAME_INTL_ru = Институт Фиктивных Наук Атлантиды
 CFG_SITE_NAME_INTL_sk = Atlantis Inštitút Fiktívnych Vied
 CFG_SITE_NAME_INTL_cs = Atlantis Institut Fiktivních Věd
 CFG_SITE_NAME_INTL_no = Atlantis Institutt for Fiktiv Vitenskap
 CFG_SITE_NAME_INTL_sv = Atlantis Institut för Fiktiv Vetenskap
 CFG_SITE_NAME_INTL_el = Ινστιτούτο Φανταστικών Επιστημών Ατλαντίδος
 CFG_SITE_NAME_INTL_uk = Інститут вигаданих наук в Атлантісі
 CFG_SITE_NAME_INTL_ja = Fictive 科学のAtlantis の協会
 CFG_SITE_NAME_INTL_pl = Instytut Fikcyjnej Nauki Atlantis
 CFG_SITE_NAME_INTL_bg = Институт за фиктивни науки Атлантис
 CFG_SITE_NAME_INTL_hr = Institut Fiktivnih Znanosti Atlantis
 CFG_SITE_NAME_INTL_zh_CN = 阿特兰提斯虚拟科学学院
 CFG_SITE_NAME_INTL_zh_TW = 阿特蘭提斯虛擬科學學院
 CFG_SITE_NAME_INTL_hu = Kitalált Tudományok Atlantiszi Intézete
 CFG_SITE_NAME_INTL_af = Atlantis Instituut van Fiktiewe Wetenskap
 CFG_SITE_NAME_INTL_gl = Instituto Atlantis de Ciencia Fictive
 CFG_SITE_NAME_INTL_ro = Institutul Atlantis al Ştiinţelor Fictive
 CFG_SITE_NAME_INTL_rw = Atlantis Ishuri Rikuru Ry'ubuhanga
 CFG_SITE_NAME_INTL_ka = ატლანტიდის ფიქტიური მეცნიერების ინსტიტუტი
 CFG_SITE_NAME_INTL_lt = Fiktyvių Mokslų Institutas Atlantis
 CFG_SITE_NAME_INTL_ar = معهد أطلنطيس للعلوم الافتراضية
 
 ## CFG_SITE_LANG -- the default language of the interface.
 CFG_SITE_LANG = en
 
 ## CFG_SITE_LANGS -- list of all languages the user interface should
 ## be available in, separated by commas.  The order specified below
 ## will be respected on the interface pages.  A good default would be
 ## to use the alphabetical order.  Currently supported languages
 ## include Afrikaans, Arabic, Bulgarian, Catalan, Czech, German, Georgian,
 ## Greek, English, Spanish, French, Croatian, Hungarian, Galician,
 ## Italian, Japanese, Kinyarwanda, Lithuanian, Norwegian, Polish,
 ## Portuguese, Romanian, Russian, Slovak, Swedish, Ukrainian, Chinese
 ## (China), Chinese (Taiwan), so that the eventual maximum you can
 ## currently select is
 ## "af,ar,bg,ca,cs,de,el,en,es,fr,hr,gl,ka,it,rw,lt,hu,ja,no,pl,pt,ro,ru,sk,sv,uk,zh_CN,zh_TW".
 CFG_SITE_LANGS = af,ar,bg,ca,cs,de,el,en,es,fr,hr,gl,ka,it,rw,lt,hu,ja,no,pl,pt,ro,ru,sk,sv,uk,zh_CN,zh_TW
 
 ## CFG_SITE_SUPPORT_EMAIL -- the email address of the support team for
 ## this installation:
 CFG_SITE_SUPPORT_EMAIL = info@invenio-software.org
 
 ## CFG_SITE_ADMIN_EMAIL -- the email address of the 'superuser' for
 ## this installation.  Enter your email address below and login with
 ## this address when using Invenio administration modules.  You
 ## will then be automatically recognized as superuser of the system.
 CFG_SITE_ADMIN_EMAIL = info@invenio-software.org
 
 ## CFG_SITE_EMERGENCY_EMAIL_ADDRESSES -- list of email addresses to
 ## which an email should be sent in case of emergency (e.g. bibsched
 ## queue has been stopped because of an error).  Configuration
 ## dictionary allows for different recipients based on weekday and
 ## time-of-day. Example:
 ##
 ## CFG_SITE_EMERGENCY_EMAIL_ADDRESSES = {
 ##    'Sunday 22:00-06:00': '0041761111111@email2sms.foo.com',
 ##    '06:00-18:00': 'team-in-europe@foo.com,0041762222222@email2sms.foo.com',
 ##    '18:00-06:00': 'team-in-usa@foo.com',
 ##    '*': 'john.doe.phone@foo.com'}
 ##
 ## If you want the emergency email notifications to always go to the
 ## same address, just use the wildcard line in the above example.
 CFG_SITE_EMERGENCY_EMAIL_ADDRESSES = {}
 
 ## CFG_SITE_ADMIN_EMAIL_EXCEPTIONS -- set this to 0 if you do not want
 ## to receive any captured exception via email to CFG_SITE_ADMIN_EMAIL
 ## address.  Captured exceptions will still be available in
 ## var/log/invenio.err file.  Set this to 1 if you want to receive
 ## some of the captured exceptions (this depends on the actual place
 ## where the exception is captured).  Set this to 2 if you want to
 ## receive all captured exceptions.
 CFG_SITE_ADMIN_EMAIL_EXCEPTIONS = 1
 
 ## CFG_SITE_RECORD -- what is the URI part representing detailed
 ## record pages?  We recommend to leave the default value `record'
 ## unchanged.
 CFG_SITE_RECORD = record
 
 ## CFG_ERRORLIB_RESET_EXCEPTION_NOTIFICATION_COUNTER_AFTER -- set this to
 ## the number of seconds after which to reset the exception notification
 ## counter. A given repetitive exception is notified via email with a
 ## logarithmic strategy: the first time it is seen it is sent via email,
 ## then the second time, then the fourth, then the eighth and so forth.
 ## If the number of seconds elapsed since the last time it was notified
 ## is greater than CFG_ERRORLIB_RESET_EXCEPTION_NOTIFICATION_COUNTER_AFTER
 ## then the internal counter is reset in order not to have exception
 ## notification become more and more rare.
 CFG_ERRORLIB_RESET_EXCEPTION_NOTIFICATION_COUNTER_AFTER = 14400
 
 ## CFG_CERN_SITE -- do we want to enable CERN-specific code?
 ## Put "1" for "yes" and "0" for "no".
 CFG_CERN_SITE = 0
 
 ## CFG_INSPIRE_SITE -- do we want to enable INSPIRE-specific code?
 ## Put "1" for "yes" and "0" for "no".
 CFG_INSPIRE_SITE = 0
 
 ## CFG_ADS_SITE -- do we want to enable ADS-specific code?
 ## Put "1" for "yes" and "0" for "no".
 CFG_ADS_SITE = 0
 
 ## CFG_OPENAIRE_SITE -- do we want to enable OpenAIRE-specific code?
 ## Put "1" for "yes" and "0" for "no".
 CFG_OPENAIRE_SITE = 0
 
 ## CFG_DEVEL_SITE -- is this a development site? If it is, you might
 ## prefer that it does not do certain things. For example, you might
 ## not want WebSubmit to send certain emails or trigger certain
 ## processes on a development site.
 ## Put "1" for "yes" (this is a development site) or "0" for "no"
 ## (this isn't a development site.)
 CFG_DEVEL_SITE = 0
 
 ################################
 ## Part 2: Web page style     ##
 ################################
 
 ## The variables affecting the page style.  The most important one is
 ## the 'template skin' you would like to use and the obfuscation mode
 ## for your email addresses.  Please refer to the WebStyle Admin Guide
 ## for more explanation.  The other variables are listed here mostly
 ## for backwards compatibility purposes only.
 
 ## CFG_WEBSTYLE_TEMPLATE_SKIN -- what template skin do you want to
 ## use?
 CFG_WEBSTYLE_TEMPLATE_SKIN = default
 
 ## CFG_WEBSTYLE_EMAIL_ADDRESSES_OBFUSCATION_MODE. How do we "protect"
 ## email addresses from undesired automated email harvesters?  This
 ## setting will not affect 'support' and 'admin' emails.
 ## NOTE: there is no ultimate solution to protect against email
 ## harvesting. All have drawbacks and can more or less be
 ## circumvented. Choose your preferred mode ([t] means "transparent"
 ## for the user):
 ##    -1: hide all emails.
 ## [t] 0 : no protection, email returned as is.
 ##           foo@example.com => foo@example.com
 ##     1 : basic email munging: replaces @ by [at] and . by [dot]
 ##           foo@example.com => foo [at] example [dot] com
 ## [t] 2 : transparent name mangling: characters are replaced by
 ##         equivalent HTML entities.
 ##           foo@example.com => foo@example.com
 ## [t] 3 : javascript insertion. Requires Javascript enabled on client
 ##         side.
 ##     4 : replaces @ and . characters by gif equivalents.
 ##             foo@example.com => foo<img src="at.gif" alt=" [at] ">example<img src="dot.gif" alt=" [dot] ">com
 CFG_WEBSTYLE_EMAIL_ADDRESSES_OBFUSCATION_MODE = 2
 
 ## CFG_WEBSTYLE_INSPECT_TEMPLATES -- Do we want to debug all template
 ## functions so that they would return HTML results wrapped in
 ## comments indicating which part of HTML page was created by which
 ## template function?  Useful only for debugging Pythonic HTML
 ## template.  See WebStyle Admin Guide for more information.
 CFG_WEBSTYLE_INSPECT_TEMPLATES = 0
 
 ## (deprecated) CFG_WEBSTYLE_CDSPAGEBOXLEFTTOP -- eventual global HTML
 ## left top box:
 CFG_WEBSTYLE_CDSPAGEBOXLEFTTOP =
 
 ## (deprecated) CFG_WEBSTYLE_CDSPAGEBOXLEFTBOTTOM -- eventual global
 ## HTML left bottom box:
 CFG_WEBSTYLE_CDSPAGEBOXLEFTBOTTOM =
 
 ## (deprecated) CFG_WEBSTYLE_CDSPAGEBOXRIGHTTOP -- eventual global
 ## HTML right top box:
 CFG_WEBSTYLE_CDSPAGEBOXRIGHTTOP =
 
 ## (deprecated) CFG_WEBSTYLE_CDSPAGEBOXRIGHTBOTTOM -- eventual global
 ## HTML right bottom box:
 CFG_WEBSTYLE_CDSPAGEBOXRIGHTBOTTOM =
 
 ## CFG_WEBSTYLE_HTTP_STATUS_ALERT_LIST -- when certain HTTP status
 ## codes are raised to the WSGI handler, the corresponding exceptions
 ## and error messages can be sent to the system administrator for
 ## inspecting.  This is useful to detect and correct errors.  The
 ## variable represents a comma-separated list of HTTP statuses that
 ## should alert admin.  Wildcards are possible. If the status is
 ## followed by an "r", it means that a referer is required to exist
 ## (useful to distinguish broken known links from URL typos when 404
 ## errors are raised).
 CFG_WEBSTYLE_HTTP_STATUS_ALERT_LIST = 404r,400,5*,41*
 
 ## CFG_WEBSTYLE_HTTP_USE_COMPRESSION -- whether to enable deflate
 ## compression of your HTTP/HTTPS connections. This will affect the Apache
 ## configuration snippets created by inveniocfg --create-apache-conf and
 ## the OAI-PMH Identify response.
 CFG_WEBSTYLE_HTTP_USE_COMPRESSION = 0
 
 ##################################
 ## Part 3: WebSearch parameters ##
 ##################################
 
 ## This section contains some configuration parameters for WebSearch
 ## module.  Please note that WebSearch is mostly configured on
 ## run-time via its WebSearch Admin web interface.  The parameters
 ## below are the ones that you do not probably want to modify very
 ## often during the runtime.  (Note that you may modify them
 ## afterwards too, though.)
 
 ## CFG_WEBSEARCH_SEARCH_CACHE_SIZE -- how many queries we want to
 ## cache in memory per one Apache httpd process?  This cache is used
 ## mainly for "next/previous page" functionality, but it caches also
 ## "popular" user queries if more than one user happen to search for
 ## the same thing.  Note that large numbers may lead to great memory
 ## consumption.  We recommend a value not greater than 100.
 CFG_WEBSEARCH_SEARCH_CACHE_SIZE = 0
 
 ## CFG_WEBSEARCH_FIELDS_CONVERT -- if you migrate from an older
 ## system, you may want to map field codes of your old system (such as
 ## 'ti') to Invenio/MySQL ("title").  Use Python dictionary syntax
 ## for the translation table, e.g. {'wau':'author', 'wti':'title'}.
 ## Usually you don't want to do that, and you would use empty dict {}.
 CFG_WEBSEARCH_FIELDS_CONVERT = {}
 
 ## CFG_WEBSEARCH_LIGHTSEARCH_PATTERN_BOX_WIDTH -- width of the
 ## search pattern window in the light search interface, in
 ## characters.  CFG_WEBSEARCH_LIGHTSEARCH_PATTERN_BOX_WIDTH = 60
 CFG_WEBSEARCH_LIGHTSEARCH_PATTERN_BOX_WIDTH = 60
 
 ## CFG_WEBSEARCH_SIMPLESEARCH_PATTERN_BOX_WIDTH -- width of the search
 ## pattern window in the simple search interface, in characters.
 CFG_WEBSEARCH_SIMPLESEARCH_PATTERN_BOX_WIDTH = 40
 
 ## CFG_WEBSEARCH_ADVANCEDSEARCH_PATTERN_BOX_WIDTH -- width of the
 ## search pattern window in the advanced search interface, in
 ## characters.
 CFG_WEBSEARCH_ADVANCEDSEARCH_PATTERN_BOX_WIDTH = 30
 
 ## CFG_WEBSEARCH_NB_RECORDS_TO_SORT -- how many records do we still
 ## want to sort?  For higher numbers we print only a warning and won't
 ## perform any sorting other than default 'latest records first', as
 ## sorting would be very time consuming then.  We recommend a value of
 ## not more than a couple of thousands.
 CFG_WEBSEARCH_NB_RECORDS_TO_SORT = 1000
 
 ## CFG_WEBSEARCH_CALL_BIBFORMAT -- if a record is being displayed but
 ## it was not preformatted in the "HTML brief" format, do we want to
 ## call BibFormatting on the fly?  Put "1" for "yes" and "0" for "no".
 ## Note that "1" will display the record exactly as if it were fully
 ## preformatted, but it may be slow due to on-the-fly processing; "0"
 ## will display a default format very fast, but it may not have all
 ## the fields as in the fully preformatted HTML brief format.  Note
 ## also that this option is active only for old (PHP) formats; the new
 ## (Python) formats are called on the fly by default anyway, since
 ## they are much faster.  When unsure, please set "0" here.
 CFG_WEBSEARCH_CALL_BIBFORMAT = 0
 
 ## CFG_WEBSEARCH_USE_ALEPH_SYSNOS -- do we want to make old SYSNOs
 ## visible rather than MySQL's record IDs?  You may use this if you
 ## migrate from a different e-doc system, and you store your old
 ## system numbers into 970__a.  Put "1" for "yes" and "0" for
 ## "no". Usually you don't want to do that, though.
 CFG_WEBSEARCH_USE_ALEPH_SYSNOS = 0
 
 ## CFG_WEBSEARCH_I18N_LATEST_ADDITIONS -- Put "1" if you want the
 ## "Latest Additions" in the web collection pages to show
 ## internationalized records.  Useful only if your brief BibFormat
 ## templates contains internationalized strings. Otherwise put "0" in
 ## order not to slow down the creation of latest additions by WebColl.
 CFG_WEBSEARCH_I18N_LATEST_ADDITIONS = 0
 
 ## CFG_WEBSEARCH_INSTANT_BROWSE -- the number of records to display
 ## under 'Latest Additions' in the web collection pages.
 CFG_WEBSEARCH_INSTANT_BROWSE = 10
 
 ## CFG_WEBSEARCH_INSTANT_BROWSE_RSS -- the number of records to
 ## display in the RSS feed.
 CFG_WEBSEARCH_INSTANT_BROWSE_RSS = 25
 
 ## CFG_WEBSEARCH_RSS_I18N_COLLECTIONS -- comma-separated list of
 ## collections that feature an internationalized RSS feed on their
 ## main search interface page created by webcoll.  Other collections
 ## will have RSS feed using CFG_SITE_LANG.
 CFG_WEBSEARCH_RSS_I18N_COLLECTIONS =
 
 ## CFG_WEBSEARCH_RSS_TTL -- number of minutes that indicates how long
 ## a feed cache is valid.
 CFG_WEBSEARCH_RSS_TTL = 360
 
 ## CFG_WEBSEARCH_RSS_MAX_CACHED_REQUESTS -- maximum number of request kept
 ## in cache. If the cache is filled, following request are not cached.
 CFG_WEBSEARCH_RSS_MAX_CACHED_REQUESTS = 1000
 
 ## CFG_WEBSEARCH_AUTHOR_ET_AL_THRESHOLD -- up to how many author names
 ## to print explicitly; for more print "et al".  Note that this is
 ## used in default formatting that is seldom used, as usually
 ## BibFormat defines all the format.  The value below is only used
 ## when BibFormat fails, for example.
 CFG_WEBSEARCH_AUTHOR_ET_AL_THRESHOLD = 3
 
 ## CFG_WEBSEARCH_NARROW_SEARCH_SHOW_GRANDSONS -- whether to show or
 ## not collection grandsons in Narrow Search boxes (sons are shown by
 ## default, grandsons are configurable here).  Use 0 for no and 1 for
 ## yes.
 CFG_WEBSEARCH_NARROW_SEARCH_SHOW_GRANDSONS = 1
 
 ## CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX -- shall we
 ## create help links for Ellis, Nick or Ellis, Nicholas and friends
 ## when Ellis, N was searched for?  Useful if you have one author
 ## stored in the database under several name formats, namely surname
 ## comma firstname and surname comma initial cataloging policy.  Use 0
 ## for no and 1 for yes.
 CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX = 1
 
 ## CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS -- MathJax is a JavaScript
 ## library that renders (La)TeX mathematical formulas in the client
 ## browser.  This parameter must contain a comma-separated list of
 ## output formats for which to apply the MathJax rendering, for example
 ## "hb,hd".  If the list is empty, MathJax is disabled.
 CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS =
 
 ## CFG_WEBSEARCH_EXTERNAL_COLLECTION_SEARCH_TIMEOUT -- when searching
 ## external collections (e.g. SPIRES, CiteSeer, etc), how many seconds
 ## do we wait for reply before abandoning?
 CFG_WEBSEARCH_EXTERNAL_COLLECTION_SEARCH_TIMEOUT = 5
 
 ## CFG_WEBSEARCH_EXTERNAL_COLLECTION_SEARCH_MAXRESULTS -- how many
 ## results do we fetch?
 CFG_WEBSEARCH_EXTERNAL_COLLECTION_SEARCH_MAXRESULTS = 10
 
 ## CFG_WEBSEARCH_SPLIT_BY_COLLECTION -- do we want to split the search
 ## results by collection or not?  Use 0 for not, 1 for yes.
 CFG_WEBSEARCH_SPLIT_BY_COLLECTION = 1
 
 ## CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS -- the default number of
 ## records to display per page in the search results pages.
 CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS = 10
 
 ## CFG_WEBSEARCH_MAX_RECORDS_IN_GROUPS -- in order to limit denial of
 ## service attacks the total number of records per group displayed as a
 ## result of a search query will be limited to this number. Only the superuser
 ## queries will not be affected by this limit.
 CFG_WEBSEARCH_MAX_RECORDS_IN_GROUPS = 200
 
 ## CFG_WEBSEARCH_PERMITTED_RESTRICTED_COLLECTIONS_LEVEL -- logged in users
 ## might have rights to access some restricted collections. This variable
 ## tweaks the kind of support the system will automatically provide to the
 ## user with respect to searching into these restricted collections.
 ## Set this to 0 in order to have the user to explicitly activate restricted
 ## collections in order to search into them. Set this to 1 in order to
 ## propose to the user the list of restricted collections to which he/she has
 ## rights (note: this is not yet implemented). Set this to 2 in order to
 ## silently add all the restricted collections to which the user has rights to
 ## to any query.
 ## Note: the system will discover which restricted collections a user has
 ## rights to, at login time. The time complexity of this procedure is
 ## proportional to the number of restricted collections. E.g. for a system
 ## with ~50 restricted collections, you might expect ~1s of delay in the
 ## login time, when this variable is set to a value higher than 0.
 CFG_WEBSEARCH_PERMITTED_RESTRICTED_COLLECTIONS_LEVEL = 0
 
 ## CFG_WEBSEARCH_SHOW_COMMENT_COUNT -- do we want to show the 'N comments'
 ## links on the search engine pages?  (useful only when you have allowed
 ## commenting)
 CFG_WEBSEARCH_SHOW_COMMENT_COUNT = 1
 
 ## CFG_WEBSEARCH_SHOW_REVIEW_COUNT -- do we want to show the 'N reviews'
 ## links on the search engine pages?  (useful only when you have allowed
 ## reviewing)
 CFG_WEBSEARCH_SHOW_REVIEW_COUNT = 1
 
 ## CFG_WEBSEARCH_FULLTEXT_SNIPPETS -- how many full-text snippets to
 ## display for full-text searches?
 CFG_WEBSEARCH_FULLTEXT_SNIPPETS = 4
 
 ## CFG_WEBSEARCH_FULLTEXT_SNIPPETS_WORDS -- how many context words
 ## to display around the pattern in the snippet?
 CFG_WEBSEARCH_FULLTEXT_SNIPPETS_WORDS = 4
 
 ## CFG_WEBSEARCH_WILDCARD_LIMIT -- some of the queries, wildcard
 ## queries in particular (ex: cern*, a*), but also regular expressions
 ## (ex: [a-z]+), may take a long time to respond due to the high
 ## number of hits. You can limit the number of terms matched by a
 ## wildcard by setting this variable.  A negative value or zero means
 ## that none of the queries will be limited (which may be wanted but is
 ## also prone to denial-of-service kinds of attacks).
 CFG_WEBSEARCH_WILDCARD_LIMIT = 50000
 
 ## CFG_WEBSEARCH_SYNONYM_KBRS -- defines which knowledge bases are to
 ## be used for which index in order to provide runtime synonym lookup
 ## of user-supplied terms, and what massaging function should be used
 ## upon search pattern before performing the KB lookup.  (Can be one
 ## of `exact', 'leading_to_comma', `leading_to_number'.)
 CFG_WEBSEARCH_SYNONYM_KBRS = {
     'journal': ['SEARCH-SYNONYM-JOURNAL', 'leading_to_number'],
     }
 
 ## CFG_SOLR_URL -- optionally, you may use Solr to serve full-text
 ## queries.  If so, please specify the URL of your Solr instance.
 ## (example: http://localhost:8080/solr)
 CFG_SOLR_URL =
 
 ## CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT -- specify the limit when
 ## the previous/next/back hit links are to be displayed on detailed record pages.
 ## In order to speed up list manipulations, if a search returns lots of hits,
 ## more than this limit, then do not lose time calculating next/previous/back
 ## hits at all, but display page directly without these.
 ## Note also that Invenio installations that do not like
 ## to have the next/previous hit link functionality would be able to set this
 ## variable to zero and not see anything.
 CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT = 1000
 
 #######################################
 ## Part 4: BibHarvest OAI parameters ##
 #######################################
 
 ## This part defines parameters for the Invenio OAI gateway.
 ## Useful if you are running Invenio as OAI data provider.
 
 ## CFG_OAI_ID_FIELD -- OAI identifier MARC field:
 CFG_OAI_ID_FIELD = 909COo
 
 ## CFG_OAI_SET_FIELD -- OAI set MARC field:
 CFG_OAI_SET_FIELD = 909COp
 
 ## CFG_OAI_DELETED_POLICY -- OAI deletedrecordspolicy
 ## (no/transient/persistent).
 CFG_OAI_DELETED_POLICY = no
 
 ## CFG_OAI_ID_PREFIX -- OAI identifier prefix:
 CFG_OAI_ID_PREFIX = atlantis.cern.ch
 
 ## CFG_OAI_SAMPLE_IDENTIFIER -- OAI sample identifier:
 CFG_OAI_SAMPLE_IDENTIFIER = oai:atlantis.cern.ch:CERN-TH-4036
 
 ## CFG_OAI_IDENTIFY_DESCRIPTION -- description for the OAI Identify verb:
 CFG_OAI_IDENTIFY_DESCRIPTION = <description>
    <oai-identifier xmlns="http://www.openarchives.org/OAI/2.0/oai-identifier"
                    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
                    xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai-identifier
                                        http://www.openarchives.org/OAI/2.0/oai-identifier.xsd">
       <scheme>oai</scheme>
       <repositoryIdentifier>atlantis.cern.ch</repositoryIdentifier>
       <delimiter>:</delimiter>
       <sampleIdentifier>oai:atlantis.cern.ch:CERN-TH-4036</sampleIdentifier>
    </oai-identifier>
  </description>
  <description>
   <eprints xmlns="http://www.openarchives.org/OAI/1.1/eprints"
            xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
            xsi:schemaLocation="http://www.openarchives.org/OAI/1.1/eprints
                                http://www.openarchives.org/OAI/1.1/eprints.xsd">
       <content>
        <URL>http://atlantis.cern.ch/</URL>
       </content>
       <metadataPolicy>
        <text>Free and unlimited use by anybody with obligation to refer to original record</text>
       </metadataPolicy>
       <dataPolicy>
        <text>Full content, i.e. preprints may not be harvested by robots</text>
       </dataPolicy>
       <submissionPolicy>
        <text>Submission restricted. Submitted documents are subject of approval by OAI repository admins.</text>
       </submissionPolicy>
   </eprints>
  </description>
 
 ## CFG_OAI_LOAD -- OAI number of records in a response:
 CFG_OAI_LOAD = 1000
 
 ## CFG_OAI_EXPIRE -- OAI resumptionToken expiration time:
 CFG_OAI_EXPIRE = 90000
 
 ## CFG_OAI_SLEEP -- service unavailable between two consecutive
 ## requests for CFG_OAI_SLEEP seconds:
 CFG_OAI_SLEEP = 10
 
 ##################################
 ## Part 5: WebSubmit parameters ##
 ##################################
 
 ## This section contains some configuration parameters for WebSubmit
 ## module.  Please note that WebSubmit is mostly configured on
 ## run-time via its WebSubmit Admin web interface.  The parameters
 ## below are the ones that you do not probably want to modify during
 ## the runtime.
 
 ## CFG_WEBSUBMIT_FILESYSTEM_BIBDOC_GROUP_LIMIT -- the fulltext
 ## documents are stored under "/opt/invenio/var/data/files/gX/Y"
 ## directories where X is 0,1,... and Y stands for bibdoc ID.  Thusly
 ## documents Y are grouped into directories X and this variable
 ## indicates the maximum number of documents Y stored in each
 ## directory X.  This limit is imposed solely for filesystem
 ## performance reasons in order not to have too many subdirectories in
 ## a given directory.
 CFG_WEBSUBMIT_FILESYSTEM_BIBDOC_GROUP_LIMIT = 5000
 
 ## CFG_WEBSUBMIT_ADDITIONAL_KNOWN_FILE_EXTENSIONS -- a comma-separated
 ## list of document extensions not listed in Python standard mimetype
 ## library that should be recognized by Invenio.
 CFG_WEBSUBMIT_ADDITIONAL_KNOWN_FILE_EXTENSIONS = hpg,link,lis,llb,mat,mpp,msg,docx,docm,xlsx,xlsm,xlsb,pptx,pptm,ppsx,ppsm
 
 ## CFG_BIBDOCFILE_USE_XSENDFILE -- if your web server supports
 ## XSendfile header, you may want to enable this feature in order for
 ## Invenio to tell the web server to stream files for download (after
 ## proper authorization checks) by web server's means.  This helps to
 ## liberate Invenio worker processes from being busy with sending big
 ## files to clients.  The web server will take care of that.  Note:
 ## this feature is still somewhat experimental.  Note: when enabled
 ## (set to 1), then you have to also regenerate Apache vhost conf
 ## snippets (inveniocfg --update-config-py --create-apache-conf).
 CFG_BIBDOCFILE_USE_XSENDFILE = 0
 
 ## CFG_BIBDOCFILE_MD5_CHECK_PROBABILITY -- a number between 0 and
 ## 1 that indicates probability with which MD5 checksum will be
 ## verified when streaming bibdocfile-managed files.  (0.1 will cause
 ## the check to be performed once for every 10 downloads)
 CFG_BIBDOCFILE_MD5_CHECK_PROBABILITY = 0.1
 
 ## CFG_OPENOFFICE_SERVER_HOST -- the host where an OpenOffice Server is
 ## listening to. If localhost an OpenOffice server will be started
 ## automatically if it is not already running.
 ## Note: if you set this to an empty value this will disable the usage of
 ## OpenOffice for converting documents.
 ## If you set this to something different than localhost you'll have to take
 ## care to have an OpenOffice server running on the corresponding host and
 ## to install the same OpenOffice release both on the client and on the server
 ## side.
 ## In order to launch an OpenOffice server on a remote machine, just start
 ## the usual 'soffice' executable in this way:
 ## $> soffice -headless -nologo -nodefault -norestore -nofirststartwizard \
 ## .. -accept=socket,host=HOST,port=PORT;urp;StarOffice.ComponentContext
 CFG_OPENOFFICE_SERVER_HOST = localhost
 
 ## CFG_OPENOFFICE_SERVER_PORT -- the port where an OpenOffice Server is
 ## listening to.
 CFG_OPENOFFICE_SERVER_PORT = 2002
 
 ## CFG_OPENOFFICE_USER -- the user that will be used to launch the OpenOffice
 ## client. It is recommended to set this to a user who doesn't own files, like
 ## e.g. 'nobody'. You should also authorize your Apache server user to be
 ## able to become this user, e.g. by adding to your /etc/sudoers the following
 ## line:
 ## "apache  ALL=(nobody) NOPASSWD: ALL"
 ## provided that apache is the username corresponding to the Apache user.
 ## On some machines this might be apache2 or www-data.
 CFG_OPENOFFICE_USER = nobody
 
 #################################
 ## Part 6: BibIndex parameters ##
 #################################
 
 ## This section contains some configuration parameters for BibIndex
 ## module.  Please note that BibIndex is mostly configured on run-time
 ## via its BibIndex Admin web interface.  The parameters below are the
 ## ones that you do not probably want to modify very often during the
 ## runtime.
 
 ## CFG_BIBINDEX_FULLTEXT_INDEX_LOCAL_FILES_ONLY -- when fulltext indexing, do
 ## you want to index locally stored files only, or also external URLs?
 ## Use "0" to say "no" and "1" to say "yes".
 CFG_BIBINDEX_FULLTEXT_INDEX_LOCAL_FILES_ONLY = 1
 
 ## CFG_BIBINDEX_REMOVE_STOPWORDS -- when indexing, do we want to remove
 ## stopwords?  Use "0" to say "no" and "1" to say "yes".
 CFG_BIBINDEX_REMOVE_STOPWORDS = 0
 
 ## CFG_BIBINDEX_CHARS_ALPHANUMERIC_SEPARATORS -- characters considered as
 ## alphanumeric separators of word-blocks inside words.  You probably
 ## don't want to change this.
 CFG_BIBINDEX_CHARS_ALPHANUMERIC_SEPARATORS = \!\"\#\$\%\&\'\(\)\*\+\,\-\.\/\:\;\<\=\>\?\@\[\\\]\^\_\`\{\|\}\~
 
 ## CFG_BIBINDEX_CHARS_PUNCTUATION -- characters considered as punctuation
 ## between word-blocks inside words.  You probably don't want to
 ## change this.
 CFG_BIBINDEX_CHARS_PUNCTUATION = \.\,\:\;\?\!\"
 
 ## CFG_BIBINDEX_REMOVE_HTML_MARKUP -- should we attempt to remove HTML markup
 ## before indexing?  Use 1 if you have HTML markup inside metadata
 ## (e.g. in abstracts), use 0 otherwise.
 CFG_BIBINDEX_REMOVE_HTML_MARKUP = 0
 
 ## CFG_BIBINDEX_REMOVE_LATEX_MARKUP -- should we attempt to remove LATEX markup
 ## before indexing?  Use 1 if you have LATEX markup inside metadata
 ## (e.g. in abstracts), use 0 otherwise.
 CFG_BIBINDEX_REMOVE_LATEX_MARKUP = 0
 
 ## CFG_BIBINDEX_MIN_WORD_LENGTH -- minimum word length allowed to be added to
 ## index.  The terms smaller than this amount will be discarded.
 ## Useful to keep the database clean, however you can safely leave
 ## this value on 0 for up to 1,000,000 documents.
 CFG_BIBINDEX_MIN_WORD_LENGTH = 0
 
 ## CFG_BIBINDEX_URLOPENER_USERNAME and CFG_BIBINDEX_URLOPENER_PASSWORD --
 ## access credentials to access restricted URLs, interesting only if
 ## you are fulltext-indexing files located on a remote server that is
 ## only available via username/password.  But it's probably better to
 ## handle this case via IP or some convention; the current scheme is
 ## mostly there for demo only.
 CFG_BIBINDEX_URLOPENER_USERNAME = mysuperuser
 CFG_BIBINDEX_URLOPENER_PASSWORD = mysuperpass
 
 ## CFG_INTBITSET_ENABLE_SANITY_CHECKS --
 ## Enable sanity checks for integers passed to the intbitset data
 ## structures. It is good to enable this during debugging
 ## and to disable this value for speed improvements.
 CFG_INTBITSET_ENABLE_SANITY_CHECKS = False
 
 ## CFG_BIBINDEX_PERFORM_OCR_ON_DOCNAMES -- regular expression that matches
 ## docnames for which OCR is desired (set this to .* in order to enable
 ## OCR in general, set this to empty in order to disable it.)
 CFG_BIBINDEX_PERFORM_OCR_ON_DOCNAMES = scan-.*
 
 ## CFG_BIBINDEX_SPLASH_PAGES -- key-value mapping where the key corresponds
 ## to a regular expression that matches the URLs of the splash pages of
 ## a given service and the value is a regular expression of the set of URLs
 ## referenced via <a> tags in the HTML content of the splash pages that are
 ## referring to documents that need to be indexed.
 ## NOTE: for backward compatibility reasons you can set this to a simple
 ## regular expression that will directly be used as the unique key of the
 ## map, with corresponding value set to ".*" (in order to match any URL)
 CFG_BIBINDEX_SPLASH_PAGES = {
     "http://documents\.cern\.ch/setlink\?.*": ".*",
     "http://ilcagenda\.linearcollider\.org/subContributionDisplay\.py\?.*|http://ilcagenda\.linearcollider\.org/contributionDisplay\.py\?.*": "http://ilcagenda\.linearcollider\.org/getFile\.py/access\?.*|http://ilcagenda\.linearcollider\.org/materialDisplay\.py\?.*",
     }
 
 ## CFG_BIBINDEX_AUTHOR_WORD_INDEX_EXCLUDE_FIRST_NAMES -- do we want
 ## the author word index to exclude first names to keep only last
 ## names?  If set to True, then for the author `Bernard, Denis', only
 ## `Bernard' will be indexed in the word index, not `Denis'.  Note
 ## that if you change this variable, you have to re-index the author
 ## index via `bibindex -w author -R'.
 CFG_BIBINDEX_AUTHOR_WORD_INDEX_EXCLUDE_FIRST_NAMES = False
 
 ## CFG_BIBINDEX_SYNONYM_KBRS -- defines which knowledge bases are to
 ## be used for which index in order to provide index-time synonym
 ## lookup, and what massaging function should be used upon search
 ## pattern before performing the KB lookup.  (Can be one of `exact',
 ## 'leading_to_comma', `leading_to_number'.)
 CFG_BIBINDEX_SYNONYM_KBRS = {
     'global': ['INDEX-SYNONYM-TITLE', 'exact'],
     'title': ['INDEX-SYNONYM-TITLE', 'exact'],
     }
 
 #######################################
 ## Part 7: Access control parameters ##
 #######################################
 
 ## This section contains some configuration parameters for the access
 ## control system.  Please note that WebAccess is mostly configured on
 ## run-time via its WebAccess Admin web interface.  The parameters
 ## below are the ones that you do not probably want to modify very
 ## often during the runtime.  (If you do want to modify them during
 ## runtime, for example to deny access temporarily because of backups,
 ## you can edit access_control_config.py directly, no need to get back
 ## here and no need to redo the make process.)
 
 ## CFG_ACCESS_CONTROL_LEVEL_SITE -- defines how open this site is.
 ## Use 0 for normal operation of the site, 1 for read-only site (all
 ## write operations temporarily closed), 2 for site fully closed,
 ## 3 for also disabling any database connection.
 ## Useful for site maintenance.
 CFG_ACCESS_CONTROL_LEVEL_SITE = 0
 
 ## CFG_ACCESS_CONTROL_LEVEL_GUESTS -- guest users access policy.  Use
 ## 0 to allow guest users, 1 not to allow them (all users must login).
 CFG_ACCESS_CONTROL_LEVEL_GUESTS = 0
 
 ## CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS -- account registration and
 ## activation policy.  When 0, users can register and accounts are
 ## automatically activated.  When 1, users can register but admin must
 ## activate the accounts.  When 2, users cannot register nor update
 ## their email address, only admin can register accounts.  When 3,
 ## users cannot register nor update email address nor password, only
 ## admin can register accounts.  When 4, the same as 3 applies, plus
 ## the user cannot change his login method.  When 5, the same as 4
 ## applies, plus info about how to get an account is hidden from the
 ## login page.
 CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS = 0
 
 ## CFG_ACCESS_CONTROL_LIMIT_REGISTRATION_TO_DOMAIN -- limit account
 ## registration to certain email addresses?  If wanted, give domain
 ## name below, e.g. "cern.ch".  If not wanted, leave it empty.
 CFG_ACCESS_CONTROL_LIMIT_REGISTRATION_TO_DOMAIN =
 
 ## CFG_ACCESS_CONTROL_NOTIFY_ADMIN_ABOUT_NEW_ACCOUNTS -- send a
 ## notification email to the administrator when a new account is
 ## created?  Use 0 for no, 1 for yes.
 CFG_ACCESS_CONTROL_NOTIFY_ADMIN_ABOUT_NEW_ACCOUNTS = 0
 
 ## CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_NEW_ACCOUNT -- send a
 ## notification email to the user when a new account is created in order
 ## to verify the validity of the provided email address?  Use
 ## 0 for no, 1 for yes.
 CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_NEW_ACCOUNT = 1
 
 ## CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_ACTIVATION -- send a
 ## notification email to the user when a new account is activated?
 ## Use 0 for no, 1 for yes.
 CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_ACTIVATION = 0
 
 ## CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_DELETION -- send a
 ## notification email to the user when a new account is deleted or
 ## account demand rejected?  Use 0 for no, 1 for yes.
 CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_DELETION = 0
 
 ## CFG_APACHE_PASSWORD_FILE -- the file where Apache user credentials
 ## are stored.  Must be an absolute pathname.  If the value does not
 ## start by a slash, it is considered to be the filename of a file
 ## located under prefix/var/tmp directory.  This is useful for the
 ## demo site testing purposes.  For the production site, if you plan
 ## to restrict access to some collections based on the Apache user
 ## authentication mechanism, you should put here an absolute path to
 ## your Apache password file.
 CFG_APACHE_PASSWORD_FILE = demo-site-apache-user-passwords
 
 ## CFG_APACHE_GROUP_FILE -- the file where Apache user groups are
 ## defined.  See the documentation of the preceding config variable.
 CFG_APACHE_GROUP_FILE = demo-site-apache-user-groups
 
 ###################################
 ## Part 8: WebSession parameters ##
 ###################################
 
 ## This section contains some configuration parameters for tweaking
 ## session handling.
 
 ## CFG_WEBSESSION_EXPIRY_LIMIT_DEFAULT -- number of days after which a session
 ## and the corresponding cookie is considered expired.
 CFG_WEBSESSION_EXPIRY_LIMIT_DEFAULT = 2
 
 ## CFG_WEBSESSION_EXPIRY_LIMIT_REMEMBER -- number of days after which a session
 ## and the corresponding cookie is considered expired, when the user has
 ## requested to permanently stay logged in.
 CFG_WEBSESSION_EXPIRY_LIMIT_REMEMBER = 365
 
 ## CFG_WEBSESSION_RESET_PASSWORD_EXPIRE_IN_DAYS -- when user requested
 ## a password reset, for how many days is the URL valid?
 CFG_WEBSESSION_RESET_PASSWORD_EXPIRE_IN_DAYS = 3
 
 ## CFG_WEBSESSION_ADDRESS_ACTIVATION_EXPIRE_IN_DAYS -- when an account
 ## activation email was sent, for how many days is the URL valid?
 CFG_WEBSESSION_ADDRESS_ACTIVATION_EXPIRE_IN_DAYS = 3
 
 ## CFG_WEBSESSION_NOT_CONFIRMED_EMAIL_ADDRESS_EXPIRE_IN_DAYS -- if a
 ## user does not confirm his email address and complete the
 ## registration, after how many days will it expire?
 CFG_WEBSESSION_NOT_CONFIRMED_EMAIL_ADDRESS_EXPIRE_IN_DAYS = 10
 
 ## CFG_WEBSESSION_DIFFERENTIATE_BETWEEN_GUESTS -- when set to 1, the session
 ## system allocates the same uid=0 to all guests users regardless of where they
 ## come from. 0 allocates a unique uid to each guest.
 CFG_WEBSESSION_DIFFERENTIATE_BETWEEN_GUESTS = 0
 
 ################################
 ## Part 9: BibRank parameters ##
 ################################
 
 ## This section contains some configuration parameters for the ranking
 ## system.
 
 ## CFG_BIBRANK_SHOW_READING_STATS -- do we want to show reading
 ## similarity stats?  ('People who viewed this page also viewed')
 CFG_BIBRANK_SHOW_READING_STATS = 1
 
 ## CFG_BIBRANK_SHOW_DOWNLOAD_STATS -- do we want to show the download
 ## similarity stats?  ('People who downloaded this document also
 ## downloaded')
 CFG_BIBRANK_SHOW_DOWNLOAD_STATS = 1
 
 ## CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS -- do we want to show download
 ## history graph? (0=no | 1=classic/gnuplot | 2=flot)
 CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS = 1
 
 ## CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS_CLIENT_IP_DISTRIBUTION -- do we
 ## want to show a graph representing the distribution of client IPs
 ## downloading given document?
 CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS_CLIENT_IP_DISTRIBUTION = 0
 
 ## CFG_BIBRANK_SHOW_CITATION_LINKS -- do we want to show the 'Cited
 ## by' links?  (useful only when you have citations in the metadata)
 CFG_BIBRANK_SHOW_CITATION_LINKS = 1
 
 ## CFG_BIBRANK_SHOW_CITATION_STATS -- do we want to show citation
 ## stats?  ('Cited by M records', 'Co-cited with N records')
 CFG_BIBRANK_SHOW_CITATION_STATS = 1
 
 ## CFG_BIBRANK_SHOW_CITATION_GRAPHS -- do we want to show citation
 ## history graph?  (0=no | 1=classic/gnuplot | 2=flot)
 CFG_BIBRANK_SHOW_CITATION_GRAPHS = 1
 
 ####################################
 ## Part 10: WebComment parameters ##
 ####################################
 
 ## This section contains some configuration parameters for the
 ## commenting and reviewing facilities.
 
 ## CFG_WEBCOMMENT_ALLOW_COMMENTS -- do we want to allow users to write
 ## public comments on records?
 CFG_WEBCOMMENT_ALLOW_COMMENTS = 1
 
 ## CFG_WEBCOMMENT_ALLOW_REVIEWS -- do we want to allow users to write
 ## public reviews of records?
 CFG_WEBCOMMENT_ALLOW_REVIEWS = 1
 
 ## CFG_WEBCOMMENT_ALLOW_SHORT_REVIEWS -- do we want to allow short
 ## reviews, that is just the attribution of stars without submitting
 ## detailed review text?
 CFG_WEBCOMMENT_ALLOW_SHORT_REVIEWS = 0
 
 ## CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN -- if users
 ## report a comment to be abusive, how many they have to be before the
 ## site admin is alerted?
 CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN = 5
 
 ## CFG_WEBCOMMENT_NB_COMMENTS_IN_DETAILED_VIEW -- how many comments do
 ## we display in the detailed record page upon welcome?
 CFG_WEBCOMMENT_NB_COMMENTS_IN_DETAILED_VIEW = 1
 
 ## CFG_WEBCOMMENT_NB_REVIEWS_IN_DETAILED_VIEW -- how many reviews do
 ## we display in the detailed record page upon welcome?
 CFG_WEBCOMMENT_NB_REVIEWS_IN_DETAILED_VIEW = 1
 
 ## CFG_WEBCOMMENT_ADMIN_NOTIFICATION_LEVEL -- do we notify the site
 ## admin after every comment?
 CFG_WEBCOMMENT_ADMIN_NOTIFICATION_LEVEL = 1
 
 ## CFG_WEBCOMMENT_TIMELIMIT_PROCESSING_COMMENTS_IN_SECONDS -- how many
 ## elapsed seconds do we consider enough when checking for possible
 ## multiple comment submissions by a user?
 CFG_WEBCOMMENT_TIMELIMIT_PROCESSING_COMMENTS_IN_SECONDS = 20
 
 ## CFG_WEBCOMMENT_TIMELIMIT_PROCESSING_REVIEWS_IN_SECONDS -- how many
 ## elapsed seconds do we consider enough when checking for possible
 ## multiple review submissions by a user?
 CFG_WEBCOMMENT_TIMELIMIT_PROCESSING_REVIEWS_IN_SECONDS = 20
 
 ## CFG_WEBCOMMENT_USE_RICH_TEXT_EDITOR -- enable the WYSIWYG
 ## Javascript-based editor when user edits comments?
 CFG_WEBCOMMENT_USE_RICH_TEXT_EDITOR = False
 
 ## CFG_WEBCOMMENT_ALERT_ENGINE_EMAIL -- the email address from which the
 ## alert emails will appear to be sent:
 CFG_WEBCOMMENT_ALERT_ENGINE_EMAIL = info@invenio-software.org
 
 ## CFG_WEBCOMMENT_DEFAULT_MODERATOR -- if no rules are
 ## specified to indicate who is the comment moderator of
 ## a collection, this person will be used as default
 CFG_WEBCOMMENT_DEFAULT_MODERATOR = info@invenio-software.org
 
 ## CFG_WEBCOMMENT_USE_MATHJAX_IN_COMMENTS -- do we want to allow the use
 ## of MathJax plugin to render latex input in comments?
 CFG_WEBCOMMENT_USE_MATHJAX_IN_COMMENTS = 1
 
 ## CFG_WEBCOMMENT_AUTHOR_DELETE_COMMENT_OPTION -- allow comment author to
 ## delete its own comment?
 CFG_WEBCOMMENT_AUTHOR_DELETE_COMMENT_OPTION = 1
 
 # CFG_WEBCOMMENT_EMAIL_REPLIES_TO -- which field of the record define
 # email addresses that should be notified of newly submitted comments,
 # and for which collection. Use collection names as keys, and list of
 # tags as values
 CFG_WEBCOMMENT_EMAIL_REPLIES_TO = {
     'Articles': ['506__d', '506__m'],
     }
 
 # CFG_WEBCOMMENT_RESTRICTION_DATAFIELD -- which field of the record
 # define the restriction (must be linked to WebAccess
 # 'viewrestrcomment') to apply to newly submitted comments, and for
 # which collection. Use collection names as keys, and one tag as value
 CFG_WEBCOMMENT_RESTRICTION_DATAFIELD = {
     'Articles': '5061_a',
     'Pictures': '5061_a',
     'Theses': '5061_a',
     }
 
 # CFG_WEBCOMMENT_ROUND_DATAFIELD -- which field of the record define
 # the current round of comment for which collection. Use collection
 # name as key, and one tag as value
 CFG_WEBCOMMENT_ROUND_DATAFIELD = {
     'Articles': '562__c',
     'Pictures': '562__c',
     }
 
 # CFG_WEBCOMMENT_MAX_ATTACHMENT_SIZE -- max file size per attached
 # file, in bytes.  Choose 0 if you don't want to limit the size
 CFG_WEBCOMMENT_MAX_ATTACHMENT_SIZE = 5242880
 
 # CFG_WEBCOMMENT_MAX_ATTACHED_FILES -- maximum number of files that can
 # be attached per comment.  Choose 0 if you don't want to limit the
 # number of files.  File uploads can be restricted with action
 # "attachcommentfile".
 CFG_WEBCOMMENT_MAX_ATTACHED_FILES = 5
 
 # CFG_WEBCOMMENT_MAX_COMMENT_THREAD_DEPTH -- how many levels of
 # indentation discussions can be.  This can be used to ensure that
 # discussions will not go into deep levels of nesting if users don't
 # understand the difference between "reply to comment" and "add
 # comment". When the depth is reached, any "reply to comment" is
 # conceptually converted to a "reply to thread" (i.e. reply to this
 # parent's comment). Use -1 for no limit, 0 for unthreaded (flat)
 # discussions.
 CFG_WEBCOMMENT_MAX_COMMENT_THREAD_DEPTH = 1
 
 ##################################
 ## Part 11: BibSched parameters ##
 ##################################
 
 ## This section contains some configuration parameters for the
 ## bibliographic task scheduler.
 
 ## CFG_BIBSCHED_REFRESHTIME -- how often do we want to refresh
 ## bibsched monitor? (in seconds)
 CFG_BIBSCHED_REFRESHTIME = 5
 
 ## CFG_BIBSCHED_LOG_PAGER -- what pager to use to view bibsched task
 ## logs?
 CFG_BIBSCHED_LOG_PAGER = /bin/more
 
 ## CFG_BIBSCHED_GC_TASKS_OLDER_THAN -- after how many days to perform the
 ## garbage collection of the BibSched queue (i.e. removing/moving task to archive).
 CFG_BIBSCHED_GC_TASKS_OLDER_THAN = 30
 
 ## CFG_BIBSCHED_GC_TASKS_TO_REMOVE -- list of BibTask that can be safely
 ## removed from the BibSched queue once they are DONE.
 CFG_BIBSCHED_GC_TASKS_TO_REMOVE = bibindex,bibreformat,webcoll,bibrank,inveniogc
 
 ## CFG_BIBSCHED_GC_TASKS_TO_ARCHIVE -- list of BibTasks that should be safely
 ## archived out of the BibSched queue once they are DONE.
 CFG_BIBSCHED_GC_TASKS_TO_ARCHIVE = bibupload,oaiarchive
 
 ## CFG_BIBSCHED_MAX_NUMBER_CONCURRENT_TASKS -- maximum number of BibTasks
 ## that can run concurrently.
 ## NOTE: concurrent tasks are still considered as an experimental
 ## feature. Please keep this value set to 1 on production environments.
 CFG_BIBSCHED_MAX_NUMBER_CONCURRENT_TASKS = 1
 
 ## CFG_BIBSCHED_PROCESS_USER -- bibsched and bibtask processes must
 ## usually run under the same identity as the Apache web server
 ## process in order to share proper file read/write privileges.  If
 ## you want to force some other bibsched/bibtask user, e.g. because
 ## you are using a local `invenio' user that belongs to your
 ## `www-data' Apache user group and so shares writing rights with your
 ## Apache web server process in this way, then please set its username
 ## identity here.  Otherwise we shall check whether your
 ## bibsched/bibtask processes are run under the same identity as your
 ## Apache web server process (in which case you can leave the default
 ## empty value here).
 CFG_BIBSCHED_PROCESS_USER =
 
 ## CFG_BIBSCHED_NODE_TASKS -- specific nodes may be configured to
 ## run only specific tasks; if you want this, then this variable is a
 ## dictionary of the form {'hostname1': ['task1', 'task2']}.  The
 ## default is that any node can run any task.
 CFG_BIBSCHED_NODE_TASKS = {}
 
 ###################################
 ## Part 12: WebBasket parameters ##
 ###################################
 
 ## CFG_WEBBASKET_MAX_NUMBER_OF_DISPLAYED_BASKETS -- a safety limit for
 ## a maximum number of displayed baskets
 CFG_WEBBASKET_MAX_NUMBER_OF_DISPLAYED_BASKETS = 20
 
 ## CFG_WEBBASKET_USE_RICH_TEXT_EDITOR -- enable the WYSIWYG
 ## Javascript-based editor when user edits comments in WebBasket?
 CFG_WEBBASKET_USE_RICH_TEXT_EDITOR = False
 
 ##################################
 ## Part 13: WebAlert parameters ##
 ##################################
 
 ## This section contains some configuration parameters for the
 ## automatic email notification alert system.
 
 ## CFG_WEBALERT_ALERT_ENGINE_EMAIL -- the email address from which the
 ## alert emails will appear to be sent:
 CFG_WEBALERT_ALERT_ENGINE_EMAIL = info@invenio-software.org
 
 ## CFG_WEBALERT_MAX_NUM_OF_RECORDS_IN_ALERT_EMAIL -- how many records
 ## at most do we send in an outgoing alert email?
 CFG_WEBALERT_MAX_NUM_OF_RECORDS_IN_ALERT_EMAIL = 20
 
 ## CFG_WEBALERT_MAX_NUM_OF_CHARS_PER_LINE_IN_ALERT_EMAIL -- number of
 ## chars per line in an outgoing alert email?
 CFG_WEBALERT_MAX_NUM_OF_CHARS_PER_LINE_IN_ALERT_EMAIL = 72
 
 ## CFG_WEBALERT_SEND_EMAIL_NUMBER_OF_TRIES -- when sending alert
 ## emails fails, how many times we retry?
 CFG_WEBALERT_SEND_EMAIL_NUMBER_OF_TRIES = 3
 
 ## CFG_WEBALERT_SEND_EMAIL_SLEEPTIME_BETWEEN_TRIES -- when sending
 ## alert emails fails, what is the sleeptime between tries? (in
 ## seconds)
 CFG_WEBALERT_SEND_EMAIL_SLEEPTIME_BETWEEN_TRIES = 300
 
 ####################################
 ## Part 14: WebMessage parameters ##
 ####################################
 
 ## CFG_WEBMESSAGE_MAX_SIZE_OF_MESSAGE -- how large web messages do we
 ## allow?
 CFG_WEBMESSAGE_MAX_SIZE_OF_MESSAGE = 20000
 
 ## CFG_WEBMESSAGE_MAX_NB_OF_MESSAGES -- how many messages for a
 ## regular user do we allow in its inbox?
 CFG_WEBMESSAGE_MAX_NB_OF_MESSAGES = 30
 
 ## CFG_WEBMESSAGE_DAYS_BEFORE_DELETE_ORPHANS -- how many days before
 ## we delete orphaned messages?
 CFG_WEBMESSAGE_DAYS_BEFORE_DELETE_ORPHANS = 60
 
 ##################################
 ## Part 15: MiscUtil parameters ##
 ##################################
 
 ## CFG_MISCUTIL_SQL_USE_SQLALCHEMY -- whether to use SQLAlchemy.pool
 ## in the DB engine of Invenio.  It is okay to enable this flag
 ## even if you have not installed SQLAlchemy.  Note that Invenio will
 ## lose some performance if this option is enabled.
 CFG_MISCUTIL_SQL_USE_SQLALCHEMY = False
 
 ## CFG_MISCUTIL_SQL_RUN_SQL_MANY_LIMIT -- how many queries can we run
 ## inside run_sql_many() in one SQL statement?  The limit value
 ## depends on MySQL's max_allowed_packet configuration.
 CFG_MISCUTIL_SQL_RUN_SQL_MANY_LIMIT = 10000
 
 ## CFG_MISCUTIL_SMTP_HOST -- which server to use as outgoing mail server to
 ## send outgoing emails generated by the system, for example concerning
 ## submissions or email notification alerts.
 CFG_MISCUTIL_SMTP_HOST = localhost
 
 ## CFG_MISCUTIL_SMTP_PORT -- which port to use on the outgoing mail server
 ## defined in the previous step.
 CFG_MISCUTIL_SMTP_PORT = 25
 
 ## CFG_MISCUTIL_DEFAULT_PROCESS_TIMEOUT -- the default number of seconds after
 ## which a process launched through shellutils.run_process_with_timeout will
 ## be killed. This is useful to catch runaway processes.
 CFG_MISCUTIL_DEFAULT_PROCESS_TIMEOUT = 300
 
 ## CFG_MATHJAX_HOSTING -- if you plan to use MathJax to display TeX
 ## formulas on HTML web pages, you can specify whether you wish to use
 ## 'local' hosting or 'cdn' hosting of MathJax libraries.  (If set to
 ## 'local', you have to run 'make install-mathjax-plugin' as described
 ## in the INSTALL guide.)  If set to 'local', users will use your site
 ## to download MathJax sources.  If set to 'cdn', users will use
 ## centralized MathJax CDN servers instead.  Please note that using
 ## CDN is suitable only for small institutions or for MathJax
 ## sponsors; see the MathJax website for more details.  (Also, please
 ## note that if you plan to use MathJax on your site, you have to
 ## adapt CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS and
 ## CFG_WEBCOMMENT_USE_MATHJAX_IN_COMMENTS configuration variables
 ## elsewhere in this file.)
 CFG_MATHJAX_HOSTING = local
 
 #################################
 ## Part 16: BibEdit parameters ##
 #################################
 
 ## CFG_BIBEDIT_TIMEOUT -- when a user edits a record, this record is
 ## locked to prevent other users to edit it at the same time.
 ## How many seconds of inactivity before the locked record again will be free
 ## for other people to edit?
 CFG_BIBEDIT_TIMEOUT = 3600
 
 ## CFG_BIBEDIT_LOCKLEVEL -- when a user tries to edit a record which there
 ## is a pending bibupload task for in the queue, this shouldn't be permitted.
 ## The lock level determines how thoroughly the queue should be investigated
 ## to determine if this is the case.
 ## Level 0 - always permits editing, doesn't look at the queue
 ##           (unsafe, use only if you know what you are doing)
 ## Level 1 - permits editing if there are no queued bibedit tasks for this record
 ##           (safe with respect to bibedit, but not for other bibupload maintenance jobs)
 ## Level 2 - permits editing if there are no queued bibupload tasks of any sort
 ##           (safe, but may lock more than necessary if many cataloguers around)
 ## Level 3 - permits editing if no queued bibupload task concerns given record
 ##           (safe, most precise locking, but slow,
 ##            checks for 001/EXTERNAL_SYSNO_TAG/EXTERNAL_OAIID_TAG)
 ## The recommended level is 3 (default) or 2 (if you use maintenance jobs often).
 CFG_BIBEDIT_LOCKLEVEL = 3
 
 ## CFG_BIBEDIT_PROTECTED_FIELDS -- a comma-separated list of fields that BibEdit
 ## will not allow to be added, edited or deleted. Wildcards are not supported,
 ## but conceptually a wildcard is added at the end of every field specification.
 ## Examples:
 ## 500A   - protect all MARC fields with tag 500 and first indicator A
 ## 5      - protect all MARC fields in the 500-series.
 ## 909C_a - protect subfield a in tag 909 with first indicator C and empty
 ##          second indicator
 ## Note that 001 is protected by default, but if protection of other
 ## identifiers or automated fields is a requirement, they should be added to
 ## this list.
 CFG_BIBEDIT_PROTECTED_FIELDS =
 
 ## CFG_BIBEDIT_QUEUE_CHECK_METHOD -- how do we want to check for
 ## possible queue locking situations to prevent cataloguers from
 ## editing a record that may be waiting in the queue?  Use 'bibrecord'
 ## for exact checking (always works, but may be slow), use 'regexp'
 ## for regular expression based checking (very fast, but may be
 ## inaccurate).  When unsure, use 'bibrecord'.
 CFG_BIBEDIT_QUEUE_CHECK_METHOD = bibrecord
 
 ## CFG_BIBEDITMULTI_LIMIT_INSTANT_PROCESSING -- maximum number of records
 ## that can be modified instantly using the multi-record editor. Above
 ## this limit, modifications will only be executed in limited hours.
 CFG_BIBEDITMULTI_LIMIT_INSTANT_PROCESSING = 2000
 
 ## CFG_BIBEDITMULTI_LIMIT_DELAYED_PROCESSING -- maximum number of records
 ## that can be sent for modification without having a superadmin role.
 ## If the number of records is between CFG_BIBEDITMULTI_LIMIT_INSTANT_PROCESSING
 ## and this number, the modifications will take place only in limited hours.
 CFG_BIBEDITMULTI_LIMIT_DELAYED_PROCESSING = 20000
 
 ## CFG_BIBEDITMULTI_LIMIT_DELAYED_PROCESSING_TIME -- Allowed time to
 ## execute modifications on records, when the number exceeds
 ## CFG_BIBEDITMULTI_LIMIT_INSTANT_PROCESSING.
 CFG_BIBEDITMULTI_LIMIT_DELAYED_PROCESSING_TIME = 22:00-05:00
 
 ###################################
 ## Part 17: BibUpload parameters ##
 ###################################
 
 ## CFG_BIBUPLOAD_REFERENCE_TAG -- where do we store references?
 CFG_BIBUPLOAD_REFERENCE_TAG = 999
 
 ## CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG -- where do we store external
 ## system numbers?  Useful for matching when our records come from an
 ## external digital library system.
 CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG = 970__a
 
 ## CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG -- where do we store OAI ID tags
 ## of harvested records?  Useful for matching when we harvest stuff
 ## via OAI that we do not want to reexport via Invenio OAI; so records
 ## may have only the source OAI ID stored in this tag (kind of like
 ## external system number too).
 CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG = 035__a
 
 ## CFG_BIBUPLOAD_EXTERNAL_OAIID_PROVENANCE_TAG -- where do we store OAI SRC
 ## tags of harvested records?  Useful for matching when we harvest stuff
 ## via OAI that we do not want to reexport via Invenio OAI; so records
 ## may have only the source OAI SRC stored in this tag (kind of like
 ## external system number too). Note that the field should be the same of
 ## CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG.
 CFG_BIBUPLOAD_EXTERNAL_OAIID_PROVENANCE_TAG = 035__9
 
 ## CFG_BIBUPLOAD_STRONG_TAGS -- a comma-separated list of tags that
 ## are strong enough to resist the replace mode.  Useful for tags that
 ## might be created from an external non-metadata-like source,
 ## e.g. the information about the number of copies left.
 CFG_BIBUPLOAD_STRONG_TAGS = 964
 
 ## CFG_BIBUPLOAD_CONTROLLED_PROVENANCE_TAGS -- a comma-separated list
 ## of tags that contain provenance information that should be checked
 ## in the bibupload correct mode via matching provenance codes.  (Only
 ## field instances of the same provenance information would be acted
 ## upon.)  Please specify the whole tag info up to subfield codes.
 CFG_BIBUPLOAD_CONTROLLED_PROVENANCE_TAGS = 6531_9
 
 ## CFG_BIBUPLOAD_FFT_ALLOWED_LOCAL_PATHS -- a comma-separated list of system
 ## paths from which it is allowed to take fulltexts that will be uploaded via
 ## FFT (CFG_TMPDIR is included by default).
 CFG_BIBUPLOAD_FFT_ALLOWED_LOCAL_PATHS = /tmp,/home
 
 ## CFG_BIBUPLOAD_FFT_ALLOWED_EXTERNAL_URLS -- a dictionary containing external
 ## URLs that can be accessed by Invenio and specific HTTP headers that will be
 ## used for each URL.
 ## The keys of the dictionary are regular expressions matching a set of URLs,
 ## the values are dictionaries of headers as consumed by urllib2.Request. If a
 ## regular expression matching all URLs is created at the end of the list, it
 ## means that Invenio will download all URLs. Otherwise Invenio will just
 ## download authorized URLs.
 ## CFG_BIBUPLOAD_FFT_ALLOWED_EXTERNAL_URLS = [
 ##     ('http://myurl.com/.*', {'User-Agent': 'Me'}),
 ##     ('http://yoururl.com/.*', {'User-Agent': 'You', 'Accept': 'text/plain'}),
 ##     ('http://.*', {'User-Agent': 'Invenio'}),
 ##     ]
 CFG_BIBUPLOAD_FFT_ALLOWED_EXTERNAL_URLS = [
     ('http://.*', {'User-Agent': 'Invenio'}),
     ]
 
 ## CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE -- do we want to serialize
 ## internal representation of records (Pythonic record structure) into
 ## the database?  This can improve internal processing speed of some
 ## operations at the price of somewhat bigger disk space usage.
 ## If you change this value after some records have already been added
 ## to your installation, you may want to run:
 ##     $ /opt/invenio/bin/inveniocfg --reset-recstruct-cache
 ## in order to either erase the cache thus freeing database space,
 ## or to fill the cache for all records that have not been cached yet.
 CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE = 1
 
 ## CFG_BATCHUPLOADER_FILENAME_MATCHING_POLICY -- a comma-separated list
 ## indicating which fields match the file names of the documents to be
 ## uploaded.
 ## The matching will be done in the same order as the list provided.
 CFG_BATCHUPLOADER_FILENAME_MATCHING_POLICY = reportnumber,recid
 
 ## CFG_BATCHUPLOADER_DAEMON_DIR -- Directory where the batchuploader daemon
 ## will look for the subfolders metadata and document by default.
 ## If path is relative, CFG_PREFIX will be joined as a prefix
 CFG_BATCHUPLOADER_DAEMON_DIR = var/batchupload
 
 ## CFG_BATCHUPLOADER_WEB_ROBOT_AGENT -- Comma-separated list to specify the
 ## agents permitted when calling batch uploader web interface
 ## cdsweb.cern.ch/batchuploader/robotupload
 ## if using a curl, eg: curl xxx -A invenio_webupload
 CFG_BATCHUPLOADER_WEB_ROBOT_AGENT = invenio_webupload
 
 ## CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS -- Access list specifying for each
 ## IP address, which collections are allowed using batch uploader robot
 ## interface.
 CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS = {
     '10.0.0.1': ['BOOK', 'REPORT'], # Example 1
     '10.0.0.2': ['POETRY', 'PREPRINT'], # Example 2
     }
 
 ####################################
 ## Part 18: BibCatalog parameters ##
 ####################################
 
 ## CFG_BIBCATALOG_SYSTEM -- set desired catalog system. For example, RT.
 CFG_BIBCATALOG_SYSTEM =
 
 ## RT CONFIGURATION
 ## CFG_BIBCATALOG_SYSTEM_RT_CLI -- path to the RT CLI client
 CFG_BIBCATALOG_SYSTEM_RT_CLI = /usr/bin/rt
 
 ## CFG_BIBCATALOG_SYSTEM_RT_URL -- Base URL of the remote RT system
 CFG_BIBCATALOG_SYSTEM_RT_URL = http://localhost/rt3
 
 ## CFG_BIBCATALOG_SYSTEM_RT_DEFAULT_USER -- Set the username for a default RT account
 ## on remote system, with limited privileges, in order to only create and modify own tickets.
 CFG_BIBCATALOG_SYSTEM_RT_DEFAULT_USER =
 
 ## CFG_BIBCATALOG_SYSTEM_RT_DEFAULT_PWD -- Set the password for the default RT account
 ## on remote system.
 CFG_BIBCATALOG_SYSTEM_RT_DEFAULT_PWD =
 
 ####################################
 ## Part 19: BibFormat parameters  ##
 ####################################
 
 ## CFG_BIBFORMAT_HIDDEN_TAGS -- comma-separated list of MARC tags that
 ## are not shown to users not having cataloging authorizations.
 CFG_BIBFORMAT_HIDDEN_TAGS = 595
 
 ## CFG_BIBFORMAT_ADDTHIS_ID -- if you want to use the AddThis service from
 ## <http://www.addthis.com/>, set this value to the pubid parameter as
 ## provided by the service (e.g. ra-4ff80aae118f4dad), and add a call to
 ## <BFE_ADDTHIS /> formatting element in your formats, for example
 ## Default_HTML_detailed.bft.
 CFG_BIBFORMAT_ADDTHIS_ID =
 
 ####################################
 ## Part 20: BibMatch parameters  ##
 ####################################
 
 ## CFG_BIBMATCH_LOCAL_SLEEPTIME -- Determines the amount of seconds to sleep
 ## between search queries on LOCAL system.
 CFG_BIBMATCH_LOCAL_SLEEPTIME = 0.0
 
 ## CFG_BIBMATCH_REMOTE_SLEEPTIME -- Determines the amount of seconds to sleep
 ## between search queries on REMOTE systems.
 CFG_BIBMATCH_REMOTE_SLEEPTIME = 2.0
 
 ## CFG_BIBMATCH_FUZZY_WORDLIMITS -- Determines the amount of words to extract
 ## from a certain fields value during fuzzy matching mode. Add/change field
 ## and appropriate number to the dictionary to configure this.
 CFG_BIBMATCH_FUZZY_WORDLIMITS = {
                                 '100__a': 2,
                                 '245__a': 4
                                 }
 
 ## CFG_BIBMATCH_FUZZY_EMPTY_RESULT_LIMIT -- Determines the amount of empty results
 ## to accept during fuzzy matching mode.
 CFG_BIBMATCH_FUZZY_EMPTY_RESULT_LIMIT = 1
 
 ## CFG_BIBMATCH_QUERY_TEMPLATES -- Here you can set the various predefined querystrings
 ## used to standardize common matching queries. By default the following templates
 ## are given:
 ## title             - standard title search. Taken from 245__a (default)
 ## title-author      - title and author search (i.e. this is a title AND author search)
 ##                     Taken from 245__a and 100__a
 ## reportnumber      - reportnumber search (i.e. reportnumber:REP-NO-123).
 CFG_BIBMATCH_QUERY_TEMPLATES = {
                                 'title' : '[title]',
                                 'title-author' : '[title] [author]',
                                 'reportnumber' : 'reportnumber:[reportnumber]'
                                 }
 
 ######################################
 ## Part 21: BibAuthorID parameters  ##
 ######################################
 
 # CFG_BIBAUTHORID_MAX_PROCESSES is the max number of processes
 # that may be spawned by the disambiguation algorithm
 CFG_BIBAUTHORID_MAX_PROCESSES = 12
 
 # CFG_BIBAUTHORID_PERSONID_SQL_MAX_THREADS is the max number of threads
 # to parallelize sql queries during personID tables updates
 CFG_BIBAUTHORID_PERSONID_SQL_MAX_THREADS = 12
 
 # CFG_BIBAUTHORID_PERSONID_MIN_P_FROM_BCTKD_RA is the minimum confidence needed
 # when backtracking automatically disambiguated authors to persons.
 # Values in [0,1]
 CFG_BIBAUTHORID_PERSONID_MIN_P_FROM_BCTKD_RA = 0.5
 
 # CFG_BIBAUTHORID_PERSONID_MIN_P_FROM_NEW_RA is the threshold for
 # the confidence in a paper by the disambiguation algorithm to have it
 # automatically connected to a personID. Papers below the thresholds are
 # left disconnected from persons if not already connected in other ways.
 # values in [0,1]
 CFG_BIBAUTHORID_PERSONID_MIN_P_FROM_NEW_RA = 0.5
 
 # CFG_BIBAUTHORID_PERSONID_MAX_COMP_LIST_MIN_TRSH minimum threshold for
 # disambiguated authors and persons: if less compatible than this the update
 # process will create a new person to associate to the found disambiguated author.
 CFG_BIBAUTHORID_PERSONID_MAX_COMP_LIST_MIN_TRSH = 0.5
 
 # CFG_BIBAUTHORID_PERSONID_MAX_COMP_LIST_MIN_TRSH_P_N is a fallback mechanism
 # to force a merge if a certain percentage of papers is compatible no matter
 # what the confidences on the automatically disambiguated author look like
 CFG_BIBAUTHORID_PERSONID_MAX_COMP_LIST_MIN_TRSH_P_N = 0.5
 
 # CFG_BIBAUTHORID_EXTERNAL_CLAIMED_RECORDS_KEY defines the user info
 # keys for externally claimed records in a remote-login scenario--e.g. from arXiv.org
 # e.g. "external_arxivids" for arXiv SSO
 CFG_BIBAUTHORID_EXTERNAL_CLAIMED_RECORDS_KEY =
 
 # CFG_BIBAUTHORID_ATTACH_VA_TO_MULTIPLE_RAS determines if the authorid
 # algorithm is allowed to attach a virtual author to multiple
 # real authors in the last run of the orphan processing.
 # Comma separated list of values.
 CFG_BIBAUTHORID_ATTACH_VA_TO_MULTIPLE_RAS = False
 
 # CFG_BIBAUTHORID_ENABLED
 # Globally enable AuthorID Interfaces.
 #     If False: No guest, user or operator will have access to the system.
 CFG_BIBAUTHORID_ENABLED = True
 
 # CFG_BIBAUTHORID_ON_AUTHORPAGES
 # Enable AuthorID information on the author pages.
 CFG_BIBAUTHORID_ON_AUTHORPAGES = True
 
 # CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL defines the eMail address
 # all ticket requests concerning authors will be sent to.
 CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL = info@invenio-software.org
 
 # CFG_BIBAUTHORID_UI_SKIP_ARXIV_STUB_PAGE defines if the optional arXiv stub page is skipped
 CFG_BIBAUTHORID_UI_SKIP_ARXIV_STUB_PAGE = False
 
 ######################################
 ## Part 22: BibClassify parameters  ##
 ######################################
 
 # CFG_BIBCLASSIFY_WEB_MAXKW -- maximum number of keywords to display
 # in the Keywords tab web page.
 CFG_BIBCLASSIFY_WEB_MAXKW = 100
 
 ########################################
 ## Part 23: Plotextractor parameters  ##
 ########################################
 
 ## CFG_PLOTEXTRACTOR_SOURCE_BASE_URL -- for acquiring source tarballs for plot
 ## extraction, where should we look?  If nothing is set, we'll just go
 ## to arXiv, but this can be a filesystem location, too
 CFG_PLOTEXTRACTOR_SOURCE_BASE_URL = http://arxiv.org/
 
 ## CFG_PLOTEXTRACTOR_SOURCE_TARBALL_FOLDER -- for acquiring source tarballs for plot
 ## extraction, subfolder where the tarballs sit
 CFG_PLOTEXTRACTOR_SOURCE_TARBALL_FOLDER = e-print/
 
 ## CFG_PLOTEXTRACTOR_SOURCE_PDF_FOLDER -- for acquiring source tarballs for plot
 ## extraction, subfolder where the pdf sit
 CFG_PLOTEXTRACTOR_SOURCE_PDF_FOLDER = pdf/
 
 ## CFG_PLOTEXTRACTOR_DOWNLOAD_TIMEOUT -- a float representing the number of seconds
 ## to wait between each download of pdf and/or tarball from source URL.
 CFG_PLOTEXTRACTOR_DOWNLOAD_TIMEOUT = 2.0
 
 ## CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT -- when extracting context of plots from
 ## TeX sources, this is the limitation of characters in each direction to extract
 ## context from. Default 750.
 CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT = 750
 
 ## CFG_PLOTEXTRACTOR_DISALLOWED_TEX -- when extracting context of plots from TeX
 ## sources, this is the list of TeX tags that will trigger 'end of context'.
 CFG_PLOTEXTRACTOR_DISALLOWED_TEX = begin,end,section,includegraphics,caption,acknowledgements
 
 ## CFG_PLOTEXTRACTOR_CONTEXT_WORD_LIMIT -- when extracting context of plots from
 ## TeX sources, this is the limitation of words in each direction. Default 75.
 CFG_PLOTEXTRACTOR_CONTEXT_WORD_LIMIT = 75
 
 ## CFG_PLOTEXTRACTOR_CONTEXT_SENTENCE_LIMIT -- when extracting context of plots from
 ## TeX sources, this is the limitation of sentences in each direction. Default 2.
 CFG_PLOTEXTRACTOR_CONTEXT_SENTENCE_LIMIT = 2
 
+######################################
+## Part 24: WebStat parameters      ##
+######################################
+
+# CFG_WEBSTAT_BIBCIRCULATION_START_YEAR defines the start date of the BibCirculation
+# statistics. Value should have the format 'yyyy'. If empty, take all existing data.
+CFG_WEBSTAT_BIBCIRCULATION_START_YEAR =
+
+
 ##########################
 ##  THAT's ALL, FOLKS!  ##
 ##########################
diff --git a/modules/webstat/bin/webstat.in b/modules/webstat/bin/webstat.in
index 29a9f9ce5..2cb70ab36 100644
--- a/modules/webstat/bin/webstat.in
+++ b/modules/webstat/bin/webstat.in
@@ -1,223 +1,223 @@
 #!/bin/sh
 ## -*- mode: script; coding: utf-8; -*-
 ##
 ## This file is part of Invenio.
 ## Copyright (C) 2005, 2006, 2007, 2008, 2010, 2011 CERN.
 ##
 ## Invenio is free software; you can redistribute it and/or
 ## modify it under the terms of the GNU General Public License as
 ## published by the Free Software Foundation; either version 2 of the
 ## License, or (at your option) any later version.
 ##
 ## Invenio is distributed in the hope that it will be useful, but
 ## WITHOUT ANY WARRANTY; without even the implied warranty of
 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 ## General Public License for more details.
 ##
 ## You should have received a copy of the GNU General Public License
 ## along with Invenio; if not, write to the Free Software Foundation, Inc.,
 ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 
 ## fill config variables:
 VERSION='0.1'
 LISPIMAGEDIR=@prefix@/lib/lisp/invenio
 CONFIGDIR=@prefix@/etc/webstat
 CLISP=@CLISP@
 CMUCL=@CMUCL@
 SBCL=@SBCL@
 LOGDIR=@prefix@/var/log
 TMPDIR=@prefix@/var/tmp
 INVENIOLOGFILE=${LOGDIR}/invenio.err
 APACHELOGFILE=${LOGDIR}/apache.err
 #Search string with format: '^>> 2010-07-02'
-searchstringmonth="^\(>> \)\{0,1\}"`date +%Y-%m-`
+searchstringmonth="^\(>> \)*"`date +%Y-%m-`
 searchstringday="$searchstringmonth"`date +%d`
-numberoferrors=500
+numberoferrors=100
 
 ## usage helper function:
 usage () {
     echo "Usage:" $0 "[options] <httpd-log-file>"
     echo "General options:"
     echo "  -e, --error-log        Error log analyzer mode"
     echo "  -h, --help             Print this help."
     echo "  -V, --version          Print version information."
     echo "Error log analyzer mode options:"
     echo "  Invenio error log options:"
     echo "  -is,   --isplit            Create split files for each error (of today)"
     echo "  -ir,   --iranking          Print error ranking of the last 100 errors in this month."
     echo "  -il N, --ilast-errors=N    Print last N errors."
     echo "  -id N, --ierror-details=N  Print details of a specific error. N is the position "
     echo "                           of the error, starting from the end (1 is the last error)"
     echo "  Apache error log options:"
     echo "  -ar,   --aranking          Print error ranking."
     echo "Description: print interesting usage stats from the Apache log file."
     echo "Note: Please analyze only moderately-sized logs, e.g. for a day or a week."
 }
 
 errorLogMode(){
     ## INVENIO error log options
     ## looking for split files?
     if [ "$2" = "-is" ] || [ "$2" = "--isplit" ]; then
 	invenioSplit
     fi
 
     ## looking for ranking info?
     if [ "$2" = "-ir" ] || [ "$2" = "--iranking" ]; then
 	invenioErrorRanking
     fi
 
     ## looking for last errors?
     if [ "$2" = "-il" ]; then
 	invenioLastErrors $3
     fi
 
     if [ "${2:0:15}" = "--ilast-errors=" ]; then
 	invenioLastErrors "${2:15}"
     fi
 
     ## looking for error details?
     if [ "$2" = "-id" ]; then
 	invenioErrorDetails $3
     fi
 
     if [ "${2:0:17}" = "--ierror-details=" ]; then
 	invenioErrorDetails "${2:17}"
     fi
 
     ## APACHE error log options
     ## looking for ranking info?
     if [ "$2" = "-ar" ] || [ "$2" = "--aranking" ]; then
 	apacheErrorRanking
     fi
 
     ## do we have enough arguments?
     if [ ! -n "$2" ]; then
 	echo "Error: Not enough arguments."
 	usage
 	exit 1
     fi
 }
 
 invenioSplit() {
     rm ${TMPDIR}/inverr*
     errors=`grep -c "$searchstringmonth" $INVENIOLOGFILE`
     count=$(($errors-$numberoferrors))
     if [[ $count -le 0 ]];then
         chars=$((`echo $errors | wc -c`-1))
         csplit -f ${TMPDIR}/inverr -n $chars $INVENIOLOGFILE /"$searchstringmonth"/ {$(($errors-1))}
     else
         chars=$((`echo $numberoferrors | wc -c`-1))
         csplit -f ${TMPDIR}/inverr -n $chars $INVENIOLOGFILE %"$searchstringmonth"% {"$count"} /"$searchstringmonth"/ {$(($numberoferrors-2))}
     fi
 }
 
 invenioErrorRanking () {
     head -1 -q ${TMPDIR}/inverr* | cut -d ' ' -f 5- | cut -d ':' -f 1 | sort | uniq -c -w 14 | sort -nr
 }
 
 invenioLastErrors() {
     head -1 -q ${TMPDIR}/inverr* | tail -n $1
 }
 
 invenioErrorDetails() {
     filename=`ls ${TMPDIR} | grep 'inverr' | tail -n $1 | head -1`
     cat ${TMPDIR}/$filename
 }
 
 apacheErrorRanking(){
     rm ${TMPDIR}/apacheerrors.err
     tail -n 700 $APACHELOGFILE | uniq -w 56 | cut -d ] -f 3- >> ${TMPDIR}/apacheerrors.err
     pythonerrors=`cut -d ] -f 3- ${TMPDIR}/apacheerrors.err | grep /usr/lib/python2.6/dist-packages/ | cut -d ' ' -f 3 | sed s/Warning:// | sort | uniq -c | sed s/'$'/'Warning'/`
     exceptionkeyerrors=`cut -d ] -f 3- ${TMPDIR}/apacheerrors.err | grep -v /usr/lib/python2.6/dist-packages/ | egrep -o 'Exception KeyError' | sort | uniq -c`
     errorsbyclient=`cut -d ] -f 1  ${TMPDIR}/apacheerrors.err | cut -d ' ' -f 3 | sort | egrep '([0-9]{1,3}\.){3}[0-9]{1,3}' | uniq -c | sort -n -r`
     apperrors=`grep 'Application error' ${TMPDIR}/apacheerrors.err | wc -l`
     filenotexisterrors=`grep 'File does not exist' ${TMPDIR}/apacheerrors.err | wc -l`
     #from here on it formats and displays all the errors
     echo "Python errors:"
     ws=$((7-${#apperrors}))
     while [ $ws -gt 0 ]
     do
         fapperrors=$fapperrors" "
 	ws=`expr $ws - 1`
     done
     fapperrors="$fapperrors$apperrors Application error"
     echo -e "$pythonerrors\n$fapperrors" | sort -r
     echo "Document errors:"
     ws=$((7-${#filenotexisterrors}))
     while [ $ws -gt 0 ]
     do
         ffilenotexisterrors=$ffilenotexisterrors" "
     ws=`expr $ws - 1`
     done
     ffilenotexisterrors="$ffilenotexisterrors$filenotexisterrors File does not exist"
     echo -e "$exceptionkeyerrors\n$ffilenotexisterrors" | sort -r
     echo "Errors by client:"
     echo "$errorsbyclient"
 }
 
 ## looking for version number?
 if [ "$1" = "-V" ] || [ "$1" = "--version" ]; then
     echo $VERSION
     exit 0
 fi
 
 ## looking for help?
 if [ "$1" = "-h" ] || [ "$1" = "--help" ]; then
     usage
     exit 0
 fi
 
 ## looking for error log analyzer mode?
 if [ "$1" = "-e" ] || [ "$1" = "--error-log" ]; then
     errorLogMode $*
     exit 0
 fi
 
 ## do we have enough arguments?
 if [ ! -n "$1" ]; then
     echo "Error: Not enough arguments."
     usage
     exit 1
 fi
 
 ## are input files okay?
 FILECFG=${CONFIGDIR}/webstat.cfg
 FILELOG=$1
 if [ ! -f $FILECFG ]; then
     echo "Error: config file ${FILECFG} not found."
     exit 1
 fi
 if [ ! -f $FILELOG ]; then
     echo "Error: httpd log file ${FILELOG} not found."
     exit 1
 fi
 
 ## check which Common Lisp implementation to use?
 if [ "$LISP" == "" ]; then
     LISP=cmucl
     if [ ! -s ${LISPIMAGEDIR}/webstat.$LISP.core ]; then
         LISP=sbcl
         if [ ! -s ${LISPIMAGEDIR}/webstat.$LISP.core ]; then
             LISP=clisp
             if [ ! -s ${LISPIMAGEDIR}/webstat.$LISP.mem ]; then
                 echo "Error: no suitable Lisp images found in ${LISPIMAGEDIR}."
                 exit 1
             fi
         fi
     fi
 fi
 
 ## okay, try to run the process:
 if [ "$LISP" == "cmucl" ]; then
     $CMUCL -core ${LISPIMAGEDIR}/webstat.$LISP.core -quiet -batch \
            -eval "(progn (analyze-httpd-log-file \"$FILECFG\" \"$FILELOG\")(quit))"
 elif [ "$LISP" == "sbcl" ]; then
     $SBCL --noinform --core ${LISPIMAGEDIR}/webstat.$LISP.core \
           --eval "(progn (analyze-httpd-log-file \"$FILECFG\" \"$FILELOG\")(quit))"
 elif [ "$LISP" == "clisp" ]; then
     $CLISP -q -M ${LISPIMAGEDIR}/webstat.$LISP.mem \
            -x "(progn (analyze-httpd-log-file \"$FILECFG\" \"$FILELOG\")(quit))"
 else
     echo "Error: $LISP not supported.  Please read README."
     exit 1
 fi
diff --git a/modules/webstat/lib/webstat.py b/modules/webstat/lib/webstat.py
index 0bd16c686..f05699b44 100644
--- a/modules/webstat/lib/webstat.py
+++ b/modules/webstat/lib/webstat.py
@@ -1,1695 +1,1814 @@
 ## This file is part of Invenio.
 ## Copyright (C) 2007, 2008, 2009, 2010, 2011 CERN.
 ##
 ## Invenio is free software; you can redistribute it and/or
 ## modify it under the terms of the GNU General Public License as
 ## published by the Free Software Foundation; either version 2 of the
 ## License, or (at your option) any later version.
 ##
 ## Invenio is distributed in the hope that it will be useful, but
 ## WITHOUT ANY WARRANTY; without even the implied warranty of
 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 ## General Public License for more details.
 ##
 ## You should have received a copy of the GNU General Public License
 ## along with Invenio; if not, write to the Free Software Foundation, Inc.,
 ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 
 __revision__ = "$Id$"
 __lastupdated__ = "$Date$"
 
 import os
 import time
 import re
 import datetime
 import cPickle
 import calendar
 from datetime import timedelta
 from urllib import quote
 
 from invenio import template
 from invenio.config import \
-     CFG_SITE_NAME, \
      CFG_WEBDIR, \
      CFG_TMPDIR, \
      CFG_SITE_URL, \
-     CFG_SITE_LANG
+     CFG_SITE_LANG, \
+     CFG_WEBSTAT_BIBCIRCULATION_START_YEAR
 from invenio.webstat_config import CFG_WEBSTAT_CONFIG_PATH
-from invenio.search_engine import get_alphabetically_ordered_collection_list, \
-    is_hosted_collection
+from invenio.search_engine import get_coll_i18nname, \
+    wash_index_term
 from invenio.dbquery import run_sql, wash_table_column_name
 from invenio.bibsched import is_task_scheduled, \
     get_task_ids_by_descending_date, \
     get_task_options
-from invenio.bibcirculation_utils import book_title_from_MARC
 
 # Imports handling key events and error log
 from invenio.webstat_engine import get_keyevent_trend_collection_population, \
+    get_keyevent_trend_new_records, \
     get_keyevent_trend_search_frequency, \
     get_keyevent_trend_search_type_distribution, \
     get_keyevent_trend_download_frequency, \
     get_keyevent_trend_comments_frequency, \
     get_keyevent_trend_number_of_loans, \
     get_keyevent_trend_web_submissions, \
     get_keyevent_snapshot_apache_processes, \
     get_keyevent_snapshot_bibsched_status, \
     get_keyevent_snapshot_uptime_cmd, \
     get_keyevent_snapshot_sessions, \
     get_keyevent_bibcirculation_report, \
     get_keyevent_loan_statistics, \
     get_keyevent_loan_lists, \
     get_keyevent_renewals_lists, \
     get_keyevent_returns_table, \
     get_keyevent_trend_returns_percentage, \
     get_keyevent_ill_requests_statistics, \
     get_keyevent_ill_requests_lists, \
     get_keyevent_trend_satisfied_ill_requests_percentage, \
     get_keyevent_items_statistics, \
     get_keyevent_items_lists, \
     get_keyevent_loan_request_statistics, \
     get_keyevent_loan_request_lists, \
     get_keyevent_user_statistics, \
     get_keyevent_user_lists, \
     _get_doctypes, \
     _get_item_statuses, \
     _get_item_doctype, \
     _get_request_statuses, \
     _get_libraries, \
     _get_loan_periods, \
     get_invenio_error_log_ranking, \
     get_invenio_last_n_errors, \
     update_error_log_analyzer, \
     get_apache_error_log_ranking
 
 # Imports handling custom events
 from invenio.webstat_engine import get_customevent_table, \
     get_customevent_trend, \
     get_customevent_dump
 
 # Imports handling custom report
 from invenio.webstat_engine import get_custom_summary_data, \
     _get_tag_name, \
     create_custom_summary_graph
 
 # Imports for handling outputting
 from invenio.webstat_engine import create_graph_trend, \
     create_graph_dump, \
     create_graph_table
 
 # Imports for handling exports
 from invenio.webstat_engine import export_to_python, \
     export_to_csv, \
-    export_to_excel
+    export_to_file
 
 TEMPLATES = template.load('webstat')
 
 # Constants
 WEBSTAT_CACHE_INTERVAL = 600 # Seconds, cache_* functions not affected by this.
                              # Also not taking into account if BibSched has
                              # webstatadmin process.
 WEBSTAT_RAWDATA_DIRECTORY = CFG_TMPDIR + "/"
 WEBSTAT_GRAPH_DIRECTORY = CFG_WEBDIR + "/img/"
 
 TYPE_REPOSITORY = [('gnuplot', 'Image - Gnuplot'),
                    ('asciiart', 'Image - ASCII art'),
                    ('flot', 'Image - Flot'),
                    ('asciidump', 'Image - ASCII dump'),
                    ('python', 'Data - Python code', export_to_python),
                    ('csv', 'Data - CSV', export_to_csv)]
 
 
 def get_collection_list_plus_all():
     """ Return all the collection names plus the name All"""
-    coll = get_alphabetically_ordered_collection_list()
-    hosted_colls = []
-    for collection in coll:
-        if is_hosted_collection(collection[0]):
-            hosted_colls.append(collection)
-    for hosted_coll in hosted_colls:
-        coll.remove(hosted_coll)
-    coll.append(['All', 'All'])
+    coll = [('All', 'All')]
+    res = run_sql("SELECT name FROM collection WHERE (dbquery IS NULL OR dbquery \
+NOT LIKE 'hostedcollection:%') ORDER BY name ASC")
+    for c_name in res:
+        # make a nice printable name (e.g. truncate c_printable for
+        # long collection names in given language):
+        c_printable_fullname = get_coll_i18nname(c_name[0], CFG_SITE_LANG, False)
+        c_printable = wash_index_term(c_printable_fullname, 30, False)
+        if c_printable != c_printable_fullname:
+            c_printable = c_printable + "..."
+        coll.append([c_name[0], c_printable])
     return coll
 
 # Key event repository, add an entry here to support new key measures.
 KEYEVENT_REPOSITORY = {'collection population':
                           {'fullname': 'Collection population',
                             'specificname':
                                    'Population in collection "%(collection)s"',
+                            'description':
+                                   ('The collection population is the number of \
+documents existing in the selected collection.', ),
                             'gatherer':
                                    get_keyevent_trend_collection_population,
                             'extraparams': {'collection': ('combobox', 'Collection',
                                    get_collection_list_plus_all)},
                             'cachefilename':
                                    'webstat_%(event_id)s_%(collection)s_%(timespan)s',
                             'ylabel': 'Number of records',
                             'multiple': None,
                             'output': 'Graph'},
+                        'new records':
+                          {'fullname': 'New records',
+                            'specificname':
+                                   'New records in collection "%(collection)s"',
+                            'description':
+                                   ('The graph shows the new documents created in \
+the selected collection and time span.', ),
+                            'gatherer':
+                                   get_keyevent_trend_new_records,
+                            'extraparams': {'collection': ('combobox', 'Collection',
+                                   get_collection_list_plus_all)},
+                            'cachefilename':
+                                   'webstat_%(event_id)s_%(collection)s_%(timespan)s',
+                            'ylabel': 'Number of records',
+                            'multiple': None,
+                            'output': 'Graph'},
                         'search frequency':
                           {'fullname': 'Search frequency',
                             'specificname': 'Search frequency',
+                            'description':
+                                   ('The search frequency is the number of searches \
+performed in a specific time span.', ),
                             'gatherer': get_keyevent_trend_search_frequency,
                             'extraparams': {},
                             'cachefilename':
                                    'webstat_%(event_id)s_%(timespan)s',
                             'ylabel': 'Number of searches',
                             'multiple': None,
                             'output': 'Graph'},
                         'search type distribution':
                           {'fullname': 'Search type distribution',
                             'specificname': 'Search type distribution',
+                            'description':
+                                   ('The search type distribution shows both the \
+number of simple searches and the number of advanced searches in the same graph.', ),
                             'gatherer':
                                    get_keyevent_trend_search_type_distribution,
                             'extraparams': {},
                             'cachefilename':
                                    'webstat_%(event_id)s_%(timespan)s',
                             'ylabel': 'Number of searches',
                             'multiple': ['Simple searches',
                                          'Advanced searches'],
                             'output': 'Graph'},
                         'download frequency':
                           {'fullname': 'Download frequency',
                             'specificname': 'Download frequency in collection "%(collection)s"',
+                            'description':
+                                   ('The download frequency is the number of fulltext \
+downloads of the documents.', ),
                             'gatherer': get_keyevent_trend_download_frequency,
                             'extraparams': {'collection': ('combobox', 'Collection',
                                                     get_collection_list_plus_all)},
                             'cachefilename': 'webstat_%(event_id)s_%(collection)s_%(timespan)s',
                             'ylabel': 'Number of downloads',
                             'multiple': None,
                             'output': 'Graph'},
                          'comments frequency':
                           {'fullname': 'Comments frequency',
                             'specificname': 'Comments frequency in collection "%(collection)s"',
+                            'description':
+                                   ('The comments frequency is the amount of comments written \
+for all the documents.', ),
                             'gatherer': get_keyevent_trend_comments_frequency,
                             'extraparams': {'collection': ('combobox', 'Collection',
                                                     get_collection_list_plus_all)},
                             'cachefilename': 'webstat_%(event_id)s_%(collection)s_%(timespan)s',
                             'ylabel': 'Number of comments',
                             'multiple': None,
                             'output': 'Graph'},
                         'number of loans':
                           {'fullname': 'Number of loans',
                             'specificname': 'Number of loans',
+                            'description':
+                                   ('The number of loans shows the total number of records loaned \
+ over a time span', ),
                             'gatherer': get_keyevent_trend_number_of_loans,
                             'extraparams': {},
                             'cachefilename':
                                    'webstat_%(event_id)s_%(timespan)s',
                             'ylabel': 'Number of loans',
                             'multiple': None,
-                            'output': 'Graph'},
+                            'output': 'Graph',
+                            'type': 'bibcirculation'},
                         'web submissions':
                           {'fullname': 'Number of web submissions',
                             'specificname':
                                    'Number of web submissions of "%(doctype)s"',
+                            'description':
+                                   ("The web submissions are the number of submitted \
+documents using the web form.", ),
                             'gatherer': get_keyevent_trend_web_submissions,
                             'extraparams': {
                                 'doctype': ('combobox', 'Type of document', _get_doctypes)},
                             'cachefilename':
                                 'webstat_%(event_id)s_%(doctype)s_%(timespan)s',
                             'ylabel': 'Web submissions',
                             'multiple': None,
                             'output': 'Graph'},
                         'loans statistics':
                           {'fullname': 'Loans statistics',
                             'specificname': 'Loans statistics',
+                            'description':
+                                   ('The loan statistics consist on different numbers \
+related to the records loaned. It is important to see the difference between document \
+and item. The item is the physical representation of a document (like every copy of a \
+book). There may be more items than documents, but never the opposite.', ),
                             'gatherer':
                                    get_keyevent_loan_statistics,
                             'extraparams': {
-                                'user_address': ('textbox', 'User address'),
                                 'udc': ('textbox', 'UDC'),
                                 'item_status': ('combobox', 'Item status', _get_item_statuses),
                                 'publication_date': ('textbox', 'Publication date'),
                                 'creation_date': ('textbox', 'Creation date')},
                             'cachefilename':
-                                   'webstat_%(event_id)s_%(user_address)s_' + \
-                                '%(udc)s_%(item_status)s_%(publication_date)s' + \
+                                   'webstat_%(event_id)s_%(udc)s_%(item_status)s_%(publication_date)s' + \
                                 '_%(creation_date)s_%(timespan)s',
                             'rows': ['Number of documents loaned',
-                                         'Number of items loaned on the total number of items',
+                                         'Number of items loaned on the total number of items (%)',
                                          'Number of items never loaned on the \
-                                         total number of items',
-                                         'Average time between the date of ' + \
-                                         'the record creation and the date of the first loan'],
-                            'output': 'Table'},
+                                         total number of items (%)',
+                                         'Average time between the date of \
+the record creation and the date of the first loan (in days)'],
+                            'output': 'Table',
+                            'type': 'bibcirculation'},
                          'loans lists':
                            {'fullname': 'Loans lists',
                             'specificname': 'Loans lists',
+                            'description':
+                                   ('The loan lists show the most loaned and the never loaned \
+records in a time span. The most loaned record are calculated as the number of loans by copy.', ),
                             'gatherer':
                                    get_keyevent_loan_lists,
                             'extraparams': {
                                 'udc': ('textbox', 'UDC'),
                                 'loan_period': ('combobox', 'Loan period', _get_loan_periods),
                                 'max_loans': ('textbox', 'Maximum number of loans'),
                                 'min_loans': ('textbox', 'Minimum number of loans'),
                                 'publication_date': ('textbox', 'Publication date'),
-                                'creation_date': ('textbox', 'Creation date'),
-                                'user_address': ('textbox', 'User address')},
+                                'creation_date': ('textbox', 'Creation date')},
                             'cachefilename':
                                    'webstat_%(event_id)s_%(udc)s_%(loan_period)s' + \
                                  '_%(min_loans)s_%(max_loans)s_%(publication_date)s_' + \
-                                 '%(creation_date)s_%(user_address)s_%(timespan)s',
+                                 '%(creation_date)s_%(timespan)s',
                             'rows': [],
-                            'output': 'List'},
+                            'output': 'List',
+                            'type': 'bibcirculation'},
                           'renewals':
                            {'fullname': 'Renewals',
                             'specificname': 'Renewals',
+                            'description':
+                                   ('Here the list of most renewed items stored is shown \
+in decreasing order.', ),
                             'gatherer':
                                    get_keyevent_renewals_lists,
                             'extraparams': {
-                                'udc': ('textbox', 'UDC'),
-                                'user_address': ('textbox', 'User address')},
+                                'udc': ('textbox', 'UDC')},
                             'cachefilename':
-                                   'webstat_%(event_id)s_%(udc)s_%(user_address)s_%(timespan)s',
+                                   'webstat_%(event_id)s_%(udc)s_%(timespan)s',
                             'rows': [],
-                            'output': 'List'},
+                            'output': 'List',
+                            'type': 'bibcirculation'},
                           'number returns':
                            {'fullname': 'Number of overdue returns',
                             'specificname': 'Number of overdue returns',
+                            'description':
+                                   ('The number of overdue returns is the number of loans \
+that have not been returned by the due date (they may have been returned after or never).', ),
                             'gatherer':
                                    get_keyevent_returns_table,
                             'extraparams': {},
                             'cachefilename':
                                    'webstat_%(event_id)s_%(timespan)s',
                             'rows': ['Number of overdue returns'],
-                            'output': 'Table'},
+                            'output': 'Table',
+                            'type': 'bibcirculation'},
                           'percentage returns':
                            {'fullname': 'Percentage of overdue returns',
                             'specificname': 'Percentage of overdue returns',
+                            'description':
+                                   ('This graph shows both the overdue returns and the total \
+of returns.', ),
                             'gatherer':
                                    get_keyevent_trend_returns_percentage,
                             'extraparams': {},
                             'cachefilename':
                                    'webstat_%(event_id)s_%(timespan)s',
                             'ylabel': 'Percentage of overdue returns',
                             'multiple': ['Overdue returns',
                                          'Total returns'],
-                            'output': 'Graph'},
+                            'output': 'Graph',
+                            'type': 'bibcirculation'},
                         'ill requests statistics':
                           {'fullname': 'ILL Requests statistics',
                             'specificname': 'ILL Requests statistics',
+                            'description':
+                                   ('The ILL requests statistics are different numbers \
+related to the requests to other libraries.', ),
                             'gatherer':
                                    get_keyevent_ill_requests_statistics,
                             'extraparams': {
-                                'user_address': ('textbox', 'User address'),
                                 'doctype': ('combobox', 'Type of document', _get_item_doctype),
                                 'status': ('combobox', 'Status of request', _get_request_statuses),
                                 'supplier': ('combobox', 'Supplier', _get_libraries)},
                             'cachefilename':
-                                   'webstat_%(event_id)s_%(user_address)s_\
-                                   %(doctype)s_%(status)s_%(supplier)s_%(timespan)s',
+                                   'webstat_%(event_id)s_%(doctype)s_%(status)s_%(supplier)s_%(timespan)s',
                             'rows': ['Number of ILL requests',
-                                     'Number of satisfied ILL requests 3 months \
+                                     'Number of satisfied ILL requests 2 weeks \
                                      after the date of request creation',
-                                     'Average time between the date and the hour \
-                                     of the ill request date and the date and the \
-                                     hour of the delivery item to the user',
-                                     'Average time between the date and the hour \
+                                     'Average time between the day \
+                                     of the ILL request date and day \
+                                     of the delivery item to the user (in days)',
+                                     'Average time between the day \
                                      the ILL request was sent to the supplier and \
-                                     the date and hour of the delivery item'],
-                            'output': 'Table'},
+                                     the day of the delivery item (in days)'],
+                            'output': 'Table',
+                            'type': 'bibcirculation'},
                           'ill requests list':
                            {'fullname': 'ILL Requests list',
                             'specificname': 'ILL Requests list',
+                            'description':
+                                   ('The ILL requests list shows 50 requests to other \
+libraries in the selected time span.', ),
                             'gatherer':
                                    get_keyevent_ill_requests_lists,
                             'extraparams': {
                                 'doctype': ('combobox', 'Type of document', _get_item_doctype),
                                 'supplier': ('combobox', 'Supplier', _get_libraries)},
                             'cachefilename':
                                    'webstat_%(event_id)s_%(doctype)s_%(supplier)s_%(timespan)s',
                             'rows': [],
-                            'output': 'List'},
+                            'output': 'List',
+                            'type': 'bibcirculation'},
                           'percentage satisfied ill requests':
                            {'fullname': 'Percentage of satisfied ILL requests',
                             'specificname': 'Percentage of satisfied ILL requests',
+                            'description':
+                                   ('This graph shows both the satisfied ILL requests and \
+the total number of requests in the selected time span.', ),
                             'gatherer':
                                    get_keyevent_trend_satisfied_ill_requests_percentage,
                             'extraparams': {
-                                'user_address': ('textbox', 'User address'),
                                 'doctype': ('combobox', 'Type of document', _get_item_doctype),
                                 'status': ('combobox', 'Status of request', _get_request_statuses),
                                 'supplier': ('combobox', 'Supplier', _get_libraries)},
                             'cachefilename':
-                                   'webstat_%(event_id)s_%(user_address)s_\
-                                   %(doctype)s_%(status)s_%(supplier)s_%(timespan)s',
+                                   'webstat_%(event_id)s_%(doctype)s_%(status)s_%(supplier)s_%(timespan)s',
                             'ylabel': 'Percentage of satisfied ILL requests',
                             'multiple': ['Satisfied ILL requests',
                                          'Total requests'],
-                            'output': 'Graph'},
+                            'output': 'Graph',
+                            'type': 'bibcirculation'},
                           'items stats':
                            {'fullname': 'Items statistics',
                             'specificname': 'Items statistics',
+                            'description':
+                                   ('The items statistics show the total number of items at \
+the moment and the number of new items in the selected time span.', ),
                             'gatherer':
                                    get_keyevent_items_statistics,
                             'extraparams': {
                                 'udc': ('textbox', 'UDC'),
                                 },
                             'cachefilename':
                                    'webstat_%(event_id)s_%(udc)s_%(timespan)s',
                             'rows': ['The total number of items', 'Total number of new items'],
-                            'output': 'Table'},
+                            'output': 'Table',
+                            'type': 'bibcirculation'},
                           'items list':
                            {'fullname': 'Items list',
                             'specificname': 'Items list',
+                            'description':
+                                   ('The item list shows data about the existing items.', ),
                             'gatherer':
                                    get_keyevent_items_lists,
                             'extraparams': {
                                 'library': ('combobox', 'Library', _get_libraries),
                                 'status': ('combobox', 'Status', _get_item_statuses)},
                             'cachefilename':
                                    'webstat_%(event_id)s_%(library)s_%(status)s',
                             'rows': [],
-                            'output': 'List'},
+                            'output': 'List',
+                            'type': 'bibcirculation'},
                         'loan request statistics':
                           {'fullname': 'Hold requests statistics',
                             'specificname': 'Hold requests statistics',
+                            'description':
+                                   ('The hold requests statistics show numbers about the \
+requests for documents. For the numbers to be correct, there must be data in the loanrequest \
+custom event.', ),
                             'gatherer':
                                    get_keyevent_loan_request_statistics,
                             'extraparams': {
                                 'item_status': ('combobox', 'Item status', _get_item_statuses)},
                             'cachefilename':
                                    'webstat_%(event_id)s_%(item_status)s_%(timespan)s',
                             'rows': ['Number of hold requests, one week after the date of \
                                         request creation',
                                          'Number of successful hold requests transactions',
                                          'Average time between the hold request date and \
                                          the date of delivery document  in a year'],
-                            'output': 'Table'},
-                         'loans request lists':
+                            'output': 'Table',
+                            'type': 'bibcirculation'},
+                         'loan request lists':
                            {'fullname': 'Hold requests lists',
                             'specificname': 'Hold requests lists',
+                            'description':
+                                   ('The hold requests list shows the most requested items.', ),
                             'gatherer':
                                    get_keyevent_loan_request_lists,
                             'extraparams': {
-                                'udc': ('textbox', 'UDC'),
-                                'user_address': ('textbox', 'User address')},
+                                'udc': ('textbox', 'UDC')},
                             'cachefilename':
-                                   'webstat_%(event_id)s_%(udc)s_%(user_address)s_%(timespan)s',
+                                   'webstat_%(event_id)s_%(udc)s_%(timespan)s',
                             'rows': [],
-                            'output': 'List'},
+                            'output': 'List',
+                            'type': 'bibcirculation'},
                          'user statistics':
                            {'fullname': 'Users statistics',
                             'specificname': 'Users statistics',
+                            'description':
+                                   ('The user statistics show the number of active users \
+(at least one transaction) in the selected timespan.', ),
                             'gatherer':
                                    get_keyevent_user_statistics,
-                            'extraparams': {'user_address': ('textbox', 'User address')},
+                            'extraparams': {},
                             'cachefilename':
-                                   'webstat_%(event_id)s_%(user_address)s_%(timespan)s',
+                                   'webstat_%(event_id)s_%(timespan)s',
                             'rows': ['Number of active users'],
-                            'output': 'Table'},
+                            'output': 'Table',
+                            'type': 'bibcirculation'},
                          'user lists':
                            {'fullname': 'Users lists',
                             'specificname': 'Users lists',
+                            'description':
+                                   ('The user list shows the most intensive users \
+(ILL requests + Loans).', ),
                             'gatherer':
                                    get_keyevent_user_lists,
-                            'extraparams': {'user_address': ('textbox', 'User address')},
+                            'extraparams': {},
                             'cachefilename':
-                                   'webstat_%(event_id)s_%(user_address)s_%(timespan)s',
+                                   'webstat_%(event_id)s_%(timespan)s',
                             'rows': [],
-                            'output': 'List'}
+                            'output': 'List',
+                            'type': 'bibcirculation'}
 
                        }
 
 # CLI
 
 def create_customevent(event_id=None, name=None, cols=[]):
     """
     Creates a new custom event by setting up the necessary MySQL tables.
 
     @param event_id: Proposed human-readable id of the new event.
     @type event_id: str
 
     @param name: Optionally, a descriptive name.
     @type name: str
 
     @param cols: Optionally, the name of the additional columns.
     @type cols: [str]
 
     @return: A status message
     @type: str
     """
     if event_id is None:
         return "Please specify a human-readable ID for the event."
 
     # Only accept id and name with standard characters
     if not re.search("[^\w]", str(event_id) + str(name)) is None:
         return "Please note that both event id and event name needs to be " + \
                   "written without any non-standard characters."
 
     # Make sure the chosen id is not already taken
     if len(run_sql("SELECT NULL FROM staEVENT WHERE id = %s",
                    (event_id, ))) != 0:
         return "Event id [%s] already exists! Aborted." % event_id
 
     # Check if the cols are valid titles
     for argument in cols:
         if (argument == "creation_time") or (argument == "id"):
             return "Invalid column title: %s! Aborted." % argument
 
     # Insert a new row into the events table describing the new event
     sql_param = [event_id]
     if name is not None:
         sql_name = "%s"
         sql_param.append(name)
     else:
         sql_name = "NULL"
     if len(cols) != 0:
         sql_cols = "%s"
         sql_param.append(cPickle.dumps(cols))
     else:
         sql_cols = "NULL"
     run_sql("INSERT INTO staEVENT (id, name, cols) VALUES (%s, " + \
                 sql_name + ", " + sql_cols + ")", tuple(sql_param))
 
     tbl_name = get_customevent_table(event_id)
 
     # Create a table for the new event
     sql_query = ["CREATE TABLE %s (" % tbl_name]
     sql_query.append("id MEDIUMINT unsigned NOT NULL auto_increment,")
     sql_query.append("creation_time TIMESTAMP DEFAULT NOW(),")
     for argument in cols:
         arg = wash_table_column_name(argument)
         sql_query.append("`%s` MEDIUMTEXT NULL," % arg)
         sql_query.append("INDEX `%s` (`%s` (50))," % (arg, arg))
     sql_query.append("PRIMARY KEY (id))")
     sql_str = ' '.join(sql_query)
     run_sql(sql_str)
 
     # We're done! Print notice containing the name of the event.
     return ("Event table [%s] successfully created.\n" +
             "Please use event id [%s] when registering an event.") \
             % (tbl_name, event_id)
 
 
 def modify_customevent(event_id=None, name=None, cols=[]):
     """
     Modify a custom event. It can modify the columns definition
     or/and the descriptive name
 
     @param event_id: Human-readable id of the event.
     @type event_id: str
 
     @param name: Optionally, a descriptive name.
     @type name: str
 
     @param cols: Optionally, the name of the additional columns.
     @type cols: [str]
 
     @return: A status message
     @type: str
     """
     if event_id is None:
         return "Please specify a human-readable ID for the event."
 
     # Only accept name with standard characters
     if not re.search("[^\w]", str(name)) is None:
         return "Please note that event name needs to be written " + \
             "without any non-standard characters."
 
     # Check if the cols are valid titles
     for argument in cols:
         if (argument == "creation_time") or (argument == "id"):
             return "Invalid column title: %s! Aborted." % argument
 
     res = run_sql("SELECT CONCAT('staEVENT', number), cols " + \
                       "FROM staEVENT WHERE id = %s", (event_id, ))
     cols_orig = cPickle.loads(res[0][1])
 
     # add new cols
     cols_add = []
     for col in cols:
         if not col in cols_orig:
             cols_add.append(col)
 
     # del old cols
     cols_del = []
     for col in cols_orig:
         if not col in cols:
             cols_del.append(col)
 
     #modify event table
     if cols_del or cols_add:
         sql_query = ["ALTER TABLE %s " % res[0][0]]
         for col in cols_del:
             sql_query.append("DROP COLUMN `%s`" % col)
             sql_query.append(", ")
         for col in cols_add:
             sql_query.append("ADD COLUMN `%s` MEDIUMTEXT NULL, " % col)
             sql_query.append("ADD INDEX `%s` (`%s`(50))" % (col, col))
             sql_query.append(", ")
         sql_query[-1] = ";"
         run_sql("".join(sql_query))
 
     #modify event definition
     sql_query = ["UPDATE staEVENT SET"]
     sql_param = []
     if cols_del or cols_add:
         sql_query.append("cols = %s")
         sql_query.append(",")
         sql_param.append(cPickle.dumps(cols))
     if name:
         sql_query.append("name = %s")
         sql_query.append(",")
         sql_param.append(name)
     if sql_param:
         sql_query[-1] = "WHERE id = %s"
         sql_param.append(event_id)
         sql_str = ' '.join(sql_query)
         run_sql(sql_str, sql_param)
 
     # We're done! Print notice containing the name of the event.
     return ("Event table [%s] successfully modified." % (event_id, ))
 
 
 def destroy_customevent(event_id=None):
     """
     Removes an existing custom event by destroying the MySQL tables and
     the event data that might be around. Use with caution!
 
     @param event_id: Human-readable id of the event to be removed.
     @type event_id: str
 
     @return: A status message
     @type: str
     """
     if event_id is None:
         return "Please specify an existing event id."
 
     # Check if the specified id exists
     if len(run_sql("SELECT NULL FROM staEVENT WHERE id = %s",
                    (event_id, ))) == 0:
         return "Event id [%s] doesn't exist! Aborted." % event_id
     else:
         tbl_name = get_customevent_table(event_id)
         run_sql("DROP TABLE %s" % wash_table_column_name(tbl_name)) # kwalitee: disable=sql
         run_sql("DELETE FROM staEVENT WHERE id = %s", (event_id, ))
         return ("Event with id [%s] was successfully destroyed.\n" +
                 "Table [%s], with content, was destroyed.") \
                 % (event_id, tbl_name)
 
 
 def register_customevent(event_id, *arguments):
     """
     Registers a custom event. Will add to the database's event tables
     as created by create_customevent().
 
     This function constitutes the "function hook" that should be
     called throughout Invenio where one wants to register a
     custom event! Refer to the help section on the admin web page.
 
     @param event_id: Human-readable id of the event to be registered
     @type event_id: str
 
     @param *arguments: The rest of the parameters of the function call
     @type *arguments: [params]
     """
     res = run_sql("SELECT CONCAT('staEVENT', number),cols " + \
                       "FROM staEVENT WHERE id = %s", (event_id, ))
     if not res:
         return # the id does not exist
     tbl_name = res[0][0]
     if res[0][1]:
         col_titles = cPickle.loads(res[0][1])
     else:
         col_titles = []
     if len(col_titles) != len(arguments[0]):
         return # there is different number of arguments than cols
 
     # Make sql query
     if len(arguments[0]) != 0:
         sql_param = []
         sql_query = ["INSERT INTO %s (" % tbl_name]
         for title in col_titles:
             sql_query.append("`%s`" % title)
             sql_query.append(",")
         sql_query.pop() # del the last ','
         sql_query.append(") VALUES (")
         for argument in arguments[0]:
             sql_query.append("%s")
             sql_query.append(",")
             sql_param.append(argument)
         sql_query.pop() # del the last ','
         sql_query.append(")")
         sql_str = ''.join(sql_query)
         run_sql(sql_str, tuple(sql_param))
     else:
         run_sql("INSERT INTO %s () VALUES ()" % wash_table_column_name(tbl_name)) # kwalitee: disable=sql
 
 
 def cache_keyevent_trend(ids=[]):
     """
     Runs the rawdata gatherer for the specific key events.
     Intended to be run mainly but the BibSched daemon interface.
 
     For a specific id, all possible timespans' rawdata is gathered.
 
     @param ids: The key event ids that are subject to caching.
     @type ids: []
     """
     args = {}
-    timespans = _get_timespans()
 
     for event_id in ids:
         args['event_id'] = event_id
+        if 'type' in KEYEVENT_REPOSITORY[event_id] and \
+             KEYEVENT_REPOSITORY[event_id]['type'] == 'bibcirculation':
+            timespans = _get_timespans(bibcirculation_stat=True)[:-1]
+        else:
+            timespans = _get_timespans()[:-1]
         extraparams = KEYEVENT_REPOSITORY[event_id]['extraparams']
 
         # Construct all combinations of extraparams and store as
         # [{param name: arg value}] so as we can loop over them and just
         # pattern-replace the each dictionary against
         # the KEYEVENT_REPOSITORY['event_id']['cachefilename'].
         combos = [[]]
         for extra in [[(param, extra[0]) for extra in extraparams[param][1]()]
                   for param in extraparams]:
             combos = [i + [y] for y in extra for i in combos]
         combos = [dict(extra) for extra in combos]
 
         for i in range(len(timespans)):
             # Get timespans parameters
             args['timespan'] = timespans[i][0]
 
             args.update({'t_start': timespans[i][2], 't_end': timespans[i][3],
                           'granularity': timespans[i][4],
                           't_format': timespans[i][5],
                           'xtic_format': timespans[i][6]})
 
             for combo in combos:
                 args.update(combo)
 
                 # Create unique filename for this combination of parameters
                 filename = KEYEVENT_REPOSITORY[event_id]['cachefilename'] \
                             % dict([(param, re.subn("[^\w]", "_",
                                            args[param])[0]) for param in args])
 
                 # Create closure of gatherer function in case cache
                 # needs to be refreshed
                 gatherer = lambda: KEYEVENT_REPOSITORY[event_id] \
                     ['gatherer'](args)
 
                 # Get data file from cache, ALWAYS REFRESH DATA!
                 _get_file_using_cache(filename, gatherer, True).read()
 
     return True
 
 
 def cache_customevent_trend(ids=[]):
     """
     Runs the rawdata gatherer for the specific custom events.
     Intended to be run mainly but the BibSched daemon interface.
 
     For a specific id, all possible timespans' rawdata is gathered.
 
     @param ids: The custom event ids that are subject to caching.
     @type ids: []
     """
     args = {}
     timespans = _get_timespans()
 
     for event_id in ids:
         args['event_id'] = event_id
         args['cols'] = []
 
         for i in range(len(timespans)):
             # Get timespans parameters
             args['timespan'] = timespans[i][0]
             args.update({'t_start': timespans[i][2], 't_end': timespans[i][3],
                           'granularity': timespans[i][4],
                           't_format': timespans[i][5],
                           'xtic_format': timespans[i][6]})
 
             # Create unique filename for this combination of parameters
             filename = "webstat_customevent_%(event_id)s_%(timespan)s" \
                         % {'event_id': re.subn("[^\w]", "_", event_id)[0],
                         'timespan': re.subn("[^\w]", "_", args['timespan'])[0]}
 
             # Create closure of gatherer function in case cache
             # needs to be refreshed
             gatherer = lambda: get_customevent_trend(args)
 
             # Get data file from cache, ALWAYS REFRESH DATA!
             _get_file_using_cache(filename, gatherer, True).read()
 
     return True
 
 
 def basket_display():
     """
     Display basket statistics.
     """
     tbl_name = get_customevent_table("baskets")
     if not tbl_name:
         # custom event baskets not defined, so return empty output:
         return []
     try:
         res = run_sql("SELECT creation_time FROM %s ORDER BY creation_time" % wash_table_column_name(tbl_name)) # kwalitee: disable=sql
         days = (res[-1][0] - res[0][0]).days + 1
         public = run_sql("SELECT COUNT(*) FROM %s " % wash_table_column_name(tbl_name) + " WHERE action = 'display_public'")[0][0] # kwalitee: disable=sql
         users = run_sql("SELECT COUNT(DISTINCT user) FROM %s" % wash_table_column_name(tbl_name))[0][0] # kwalitee: disable=sql
         adds = run_sql("SELECT COUNT(*) FROM %s WHERE action = 'add'" % wash_table_column_name(tbl_name))[0][0] # kwalitee: disable=sql
         displays = run_sql("SELECT COUNT(*) FROM %s " % wash_table_column_name(tbl_name) + " WHERE action = 'display' OR action = 'display_public'")[0][0] # kwalitee: disable=sql
         hits = adds + displays
         average = hits / days
 
         res = [("Basket page hits", hits)]
         res.append(("   Average per day", average))
         res.append(("   Unique users", users))
         res.append(("   Additions", adds))
         res.append(("   Public", public))
     except IndexError:
         res = []
 
     return res
 
 
 def alert_display():
     """
     Display alert statistics.
     """
     tbl_name = get_customevent_table("alerts")
     if not tbl_name:
         # custom event alerts not defined, so return empty output:
         return []
     try:
         res = run_sql("SELECT creation_time FROM %s ORDER BY creation_time"
                       % tbl_name)
         days = (res[-1][0] - res[0][0]).days + 1
         res = run_sql("SELECT COUNT(DISTINCT user),COUNT(*) FROM %s" % wash_table_column_name(tbl_name)) # kwalitee: disable=sql
         users = res[0][0]
         hits = res[0][1]
         displays = run_sql("SELECT COUNT(*) FROM %s WHERE action = 'list'"
                            % tbl_name)[0][0]
         search = run_sql("SELECT COUNT(*) FROM %s WHERE action = 'display'"
                          % tbl_name)[0][0]
         average = hits / days
 
         res = [("Alerts page hits", hits)]
         res.append(("   Average per day", average))
         res.append(("   Unique users", users))
         res.append(("   Displays", displays))
         res.append(("   Searches history display", search))
     except IndexError:
         res = []
 
     return res
 
 
 def loan_display():
     """
     Display loan statistics.
     """
     try:
-        total = run_sql("SELECT COUNT(*) FROM crcLOAN")[0][0]
-        toppop = run_sql("SELECT id_bibrec, COUNT(DISTINCT id_crcBORROWER) \
-                      FROM crcLOAN GROUP BY id_bibrec \
-                      HAVING COUNT(DISTINCT id_crcBORROWER) = (SELECT MAX(n) from (\
-                      SELECT COUNT(DISTINCT id_crcBORROWER) n \
-                      FROM crcLOAN GROUP BY id_bibrec) aux)")
-        topreq = run_sql("SELECT id_bibrec, COUNT(DISTINCT id_crcBORROWER) " +
-                         "FROM crcLOANREQUEST GROUP BY id_bibrec " +
-                         "HAVING COUNT(DISTINCT id_crcBORROWER) = (SELECT MAX(n) from (" +
-                         "SELECT COUNT(DISTINCT id_crcBORROWER) n " +
-                         "FROM crcLOANREQUEST GROUP BY id_bibrec) aux)")
-        res = [("Loans", total)]
-        for record in toppop:
-            res.append(("   Most popular records",
-                        book_title_from_MARC(record[0]) + '(%d)' % record[1]))
-        for record in topreq:
-            res.append(("   Most requested records",
-                        book_title_from_MARC(record[0]) + '(%d)' % record[1]))
         loans, renewals, returns, illrequests, holdrequests = \
                 get_keyevent_bibcirculation_report()
-        res.append(("Yearly report", ''))
+        res = [("Yearly report", '')]
         res.append(("   Loans", loans))
         res.append(("   Renewals", renewals))
         res.append(("   Returns", returns))
         res.append(("   ILL requests", illrequests))
         res.append(("   Hold requests", holdrequests))
         return res
     except IndexError:
         return []
 
 
 def get_url_customevent(url_dest, event_id, *arguments):
     """
     Get an url for registers a custom event. Every time is load the
     url will register a customevent as register_customevent().
 
     @param event_id: Human-readable id of the event to be registered
     @type event_id: str
 
     @param *arguments: The rest of the parameters of the function call
                        the param "WEBSTAT_IP" will tell webstat that here
                        should be the IP who request the url
     @type *arguments: [params]
 
     @param url_dest: url to redirect after register the event
     @type url_dest: str
 
     @return: url for register event
     @type: str
     """
     return "%s/stats/customevent_register?event_id=%s&arg=%s&url=%s" % \
             (CFG_SITE_URL, event_id, ','.join(arguments[0]), quote(url_dest))
 
 # WEB
 
 def perform_request_index(ln=CFG_SITE_LANG):
     """
     Displays some informative text, the health box, and a the list of
     key/custom events.
     """
+    out = TEMPLATES.tmpl_welcome(ln=ln)
+
+    # Display the health box
+    out += TEMPLATES.tmpl_system_health_list(ln=ln)
+
+    # Produce a list of the key statistics
+    out += TEMPLATES.tmpl_keyevent_list(ln=ln)
+
+    # Display the custom statistics
+    out += TEMPLATES.tmpl_customevent_list(_get_customevents(), ln=ln)
+
+    # Display error log analyzer
+    out += TEMPLATES.tmpl_error_log_statistics_list(ln=ln)
+
+    # Display annual report
+    out += TEMPLATES.tmpl_custom_summary(ln=ln)
+
+    # Display test for collections
+    out += TEMPLATES.tmpl_collection_stats_main_list(ln=ln)
+
+    return out
+
+
+def perform_display_current_system_health(ln=CFG_SITE_LANG):
     from ConfigParser import ConfigParser
     conf = ConfigParser()
     conf.read(CFG_WEBSTAT_CONFIG_PATH)
-    out = TEMPLATES.tmpl_welcome(ln=ln)
 
     # Prepare the health base data
     health_indicators = []
     now = datetime.datetime.now()
     yesterday = (now - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
     today = now.strftime("%Y-%m-%d")
     tomorrow = (now + datetime.timedelta(days=1)).strftime("%Y-%m-%d")
 
     # Append session information to the health box
     if conf.get("general", "visitors_box") == "True":
         sess = get_keyevent_snapshot_sessions()
         health_indicators.append(("Total active visitors", sum(sess)))
         health_indicators.append(("    Logged in", sess[1]))
         health_indicators.append(None)
 
     # Append searches information to the health box
     if conf.get("general", "search_box") == "True":
         args = {'t_start': today, 't_end': tomorrow,
                  'granularity': "day", 't_format': "%Y-%m-%d"}
         searches = get_keyevent_trend_search_type_distribution(args)
         health_indicators.append(("Searches since midnight",
                                   sum(searches[0][1])))
         health_indicators.append(("    Simple", searches[0][1][0]))
         health_indicators.append(("    Advanced", searches[0][1][1]))
         health_indicators.append(None)
 
     # Append new records information to the health box
     if conf.get("general", "record_box") == "True":
         args = {'collection': CFG_SITE_NAME, 't_start': today,
                  't_end': tomorrow, 'granularity': "day",
                  't_format': "%Y-%m-%d"}
         try:
             tot_records = get_keyevent_trend_collection_population(args)[0][1]
         except IndexError:
             tot_records = 0
         args = {'collection': CFG_SITE_NAME, 't_start': yesterday,
                  't_end': today, 'granularity': "day", 't_format': "%Y-%m-%d"}
         try:
             new_records = tot_records - \
                 get_keyevent_trend_collection_population(args)[0][1]
         except IndexError:
             new_records = 0
         health_indicators.append(("Total records", tot_records))
         health_indicators.append(("    New records since midnight",
                                   new_records))
         health_indicators.append(None)
 
     # Append status of BibSched queue to the health box
     if conf.get("general", "bibsched_box") == "True":
         bibsched = get_keyevent_snapshot_bibsched_status()
         health_indicators.append(("BibSched queue",
                                   sum([x[1] for x in bibsched])))
         for item in bibsched:
             health_indicators.append(("    " + item[0], str(item[1])))
         health_indicators.append(None)
 
     # Append basket stats to the health box
     if conf.get("general", "basket_box") == "True":
         health_indicators += basket_display()
         health_indicators.append(None)
 
     # Append alerts stats to the health box
     if conf.get("general", "alert_box") == "True":
         health_indicators += alert_display()
         health_indicators.append(None)
 
     # Append loans stats to the health box
     if conf.get("general", "loan_box") == "True":
         health_indicators += loan_display()
         health_indicators.append(None)
 
     # Append number of Apache processes to the health box
     if conf.get("general", "apache_box") == "True":
         health_indicators.append(("Apache processes",
                                   get_keyevent_snapshot_apache_processes()))
 
     # Append uptime and load average to the health box
     if conf.get("general", "uptime_box") == "True":
         health_indicators.append(("Uptime cmd",
                                   get_keyevent_snapshot_uptime_cmd()))
 
     # Display the health box
-    out += TEMPLATES.tmpl_system_health(health_indicators, ln=ln)
-
-    # Produce a list of the key statistics
-    out += TEMPLATES.tmpl_keyevent_list(ln=ln)
-
-    # Display the custom statistics
-    out += TEMPLATES.tmpl_customevent_list(_get_customevents(), ln=ln)
-
-    # Display error log analyzer
-    out += TEMPLATES.tmpl_error_log_statistics_list(ln=ln)
-
-    # Display annual report
-    out += TEMPLATES.tmpl_custom_summary(ln=ln)
-
-    # Display test for collections
-    out += TEMPLATES.tmpl_collection_stats_list(get_collection_list_plus_all(), ln=ln)
-
-    return out
+    return TEMPLATES.tmpl_system_health(health_indicators, ln=ln)
 
 
 def perform_display_keyevent(event_id=None, args={},
                              req=None, ln=CFG_SITE_LANG):
     """
     Display key events using a certain output type over the given time span.
 
     @param ids: The ids for the custom events that are to be displayed.
     @type ids: [str]
 
     @param args: { param name: argument value }
     @type args: { str: str }
 
     @param req: The Apache request object, necessary for export redirect.
     @type req:
     """
     # Get all the option lists:
     # { parameter name: [(argument internal name, argument full name)]}
     options = dict()
     order = []
     for param in KEYEVENT_REPOSITORY[event_id]['extraparams']:
         # Order of options
         order.append(param)
 
         if KEYEVENT_REPOSITORY[event_id]['extraparams'][param][0] == 'combobox':
             options[param] = ('combobox',
                      KEYEVENT_REPOSITORY[event_id]['extraparams'][param][1],
                       KEYEVENT_REPOSITORY[event_id]['extraparams'][param][2]())
         else:
             options[param] = (KEYEVENT_REPOSITORY[event_id]['extraparams'][param][0],
                      (KEYEVENT_REPOSITORY[event_id]['extraparams'][param][1]))
 
     # Build a dictionary for the selected parameters:
     # { parameter name: argument internal name }
     choosed = dict([(param, args[param]) for param in KEYEVENT_REPOSITORY
                     [event_id]['extraparams']])
     if KEYEVENT_REPOSITORY[event_id]['output'] == 'Graph':
         options['format'] = ('combobox', 'Output format', _get_formats())
         choosed['format'] = args['format']
         order += ['format']
     if event_id != 'items list':
-        options['timespan'] = ('combobox', 'Time span', _get_timespans())
+        if 'type' in KEYEVENT_REPOSITORY[event_id] and \
+            KEYEVENT_REPOSITORY[event_id]['type'] == 'bibcirculation':
+            options['timespan'] = ('combobox', 'Time span', _get_timespans(bibcirculation_stat=True))
+        else:
+            options['timespan'] = ('combobox', 'Time span', _get_timespans())
         choosed['timespan'] = args['timespan']
         order += ['timespan']
         choosed['s_date'] = args['s_date']
         choosed['f_date'] = args['f_date']
 
     # Send to template to prepare event customization FORM box
-    excel = KEYEVENT_REPOSITORY[event_id]['output'] == 'List'
-    out = TEMPLATES.tmpl_keyevent_box(options, order, choosed, ln=ln, excel=excel)
+    list = KEYEVENT_REPOSITORY[event_id]['output'] == 'List'
+    out = "\n".join(["<p>%s</p>" % parr for parr in KEYEVENT_REPOSITORY[event_id]['description']]) \
+            + TEMPLATES.tmpl_keyevent_box(options, order, choosed, ln=ln, list=list)
 
     # Arguments OK?
 
     # Check for existance. If nothing, only show FORM box from above.
     if len(choosed) == 0:
         return out
 
     # Make sure extraparams are valid, if any
     if KEYEVENT_REPOSITORY[event_id]['output'] == 'Graph' and \
             event_id != 'percentage satisfied ill requests':
         for param in choosed:
             if param in options and options[param] == 'combobox' and \
                     not choosed[param] in [x[0] for x in options[param][2]]:
                 return out + TEMPLATES.tmpl_error(
                 'Please specify a valid value for parameter "%s".'
                                                % options[param][0], ln=ln)
 
     # Arguments OK beyond this point!
 
     # Get unique name for caching purposes (make sure that the params used
     # in the filename are safe!)
     filename = KEYEVENT_REPOSITORY[event_id]['cachefilename'] \
                % dict([(param, re.subn("[^\w]", "_", choosed[param])[0])
                        for param in choosed] +
                       [('event_id', re.subn("[^\w]", "_", event_id)[0])])
 
     # Get time parameters from repository
     if 'timespan' in choosed:
         if choosed['timespan'] == "select date":
             t_args = _get_time_parameters_select_date(args["s_date"], args["f_date"])
         else:
             t_args = _get_time_parameters(options, choosed['timespan'])
     else:
         t_args = args
     for param in KEYEVENT_REPOSITORY[event_id]['extraparams']:
         t_args[param] = choosed[param]
 
+    if 'format' in args and args['format'] == 'Full list':
+        gatherer = lambda: KEYEVENT_REPOSITORY[event_id]['gatherer'](t_args, limit=-1)
+        export_to_file(gatherer(), req)
+        return out
+
     # Create closure of frequency function in case cache needs to be refreshed
-    gatherer = lambda: KEYEVENT_REPOSITORY[event_id]['gatherer'](t_args)
+    gatherer = lambda return_sql: KEYEVENT_REPOSITORY[event_id]['gatherer'](t_args, return_sql=return_sql)
 
     # Determine if this particular file is due for scheduling cacheing,
     # in that case we must not allow refreshing of the rawdata.
     allow_refresh = not _is_scheduled_for_cacheing(event_id)
 
     # Get data file from cache (refresh if necessary)
     force = 'timespan' in choosed and choosed['timespan'] == "select date"
     data = eval(_get_file_using_cache(filename, gatherer, force,
                                       allow_refresh=allow_refresh).read())
 
     if KEYEVENT_REPOSITORY[event_id]['output'] == 'Graph':
         # If type indicates an export, run the export function and we're done
         if _is_type_export(choosed['format']):
             _get_export_closure(choosed['format'])(data, req)
             return out
         # Prepare the graph settings that are being passed on to grapher
         settings = {"title": KEYEVENT_REPOSITORY[event_id]['specificname']\
                      % choosed,
                   "xlabel": t_args['t_fullname'] + ' (' + \
                      t_args['granularity'] + ')',
                   "ylabel": KEYEVENT_REPOSITORY[event_id]['ylabel'],
                   "xtic_format": t_args['xtic_format'],
                   "format": choosed['format'],
                   "multiple": KEYEVENT_REPOSITORY[event_id]['multiple']}
     else:
-        if 'format' in args and args['format'] == 'Excel':
-            export_to_excel(data, req)
-            return out
         settings = {"title": KEYEVENT_REPOSITORY[event_id]['specificname']\
                      % choosed, "format": 'Table',
                      "rows": KEYEVENT_REPOSITORY[event_id]['rows']}
+    if args['sql']:
+        sql = gatherer(True)
+    else:
+        sql = ''
     return out + _perform_display_event(data,
-                                 os.path.basename(filename), settings, ln=ln)
+                        os.path.basename(filename), settings, ln=ln) + sql
 
 
 def perform_display_customevent(ids=[], args={}, req=None, ln=CFG_SITE_LANG):
     """
     Display custom events using a certain output type over the given time span.
 
     @param ids: The ids for the custom events that are to be displayed.
     @type ids: [str]
 
     @param args: { param name: argument value }
     @type args: { str: str }
 
     @param req: The Apache request object, necessary for export redirect.
     @type req:
     """
     # Get all the option lists:
     # { parameter name: [(argument internal name, argument full name)]}
     cols_dict = _get_customevent_cols()
     cols_dict['__header'] = 'Argument'
     cols_dict['__none'] = []
     options = {'ids': ('Custom event', _get_customevents()),
                 'timespan': ('Time span', _get_timespans()),
                 'format': ('Output format', _get_formats(True)),
                 'cols': cols_dict}
 
     # Build a dictionary for the selected parameters:
     # { parameter name: argument internal name }
     choosed = {'ids': args['ids'], 'timespan': args['timespan'],
                 'format': args['format'], 's_date': args['s_date'],
                 'f_date': args['f_date']}
     # Calculate cols
     index = []
     for key in args.keys():
         if key[:4] == 'cols':
             index.append(key[4:])
     index.sort()
     choosed['cols'] = [zip([""] + args['bool' + i], args['cols' + i],
                             args['col_value' + i]) for i in index]
     # Send to template to prepare event customization FORM box
     out = TEMPLATES.tmpl_customevent_box(options, choosed, ln=ln)
 
     # Arguments OK?
 
     # Make sure extraparams are valid, if any
     for param in ['ids', 'timespan', 'format']:
         legalvalues = [x[0] for x in options[param][1]]
 
         if type(args[param]) is list:
             # If the argument is a list, like the content of 'ids'
             # every value has to be checked
             if len(args[param]) == 0:
                 return out + TEMPLATES.tmpl_error(
                     'Please specify a valid value for parameter "%s".'
                     % options[param][0], ln=ln)
             for arg in args[param]:
                 if not arg in legalvalues:
                     return out + TEMPLATES.tmpl_error(
                         'Please specify a valid value for parameter "%s".'
                         % options[param][0], ln=ln)
         else:
             if not args[param] in legalvalues:
                 return out + TEMPLATES.tmpl_error(
                     'Please specify a valid value for parameter "%s".'
                         % options[param][0], ln=ln)
 
     # Fetch time parameters from repository
     if choosed['timespan'] == "select date":
         args_req = _get_time_parameters_select_date(args["s_date"],
                                                     args["f_date"])
     else:
         args_req = _get_time_parameters(options, choosed['timespan'])
 
     # ASCII dump data is different from the standard formats
     if choosed['format'] == 'asciidump':
         data = perform_display_customevent_data_ascii_dump(ids, args,
                                                            args_req, choosed)
     else:
         data = perform_display_customevent_data(ids, args_req, choosed)
 
     # If type indicates an export, run the export function and we're done
     if _is_type_export(args['format']):
         _get_export_closure(args['format'])(data, req)
         return out
 
     # Get full names, for those that have them
     names = []
     events = _get_customevents()
     for event_id in ids:
         temp = events[[x[0] for x in events].index(event_id)]
         if temp[1] != None:
             names.append(temp[1])
         else:
             names.append(temp[0])
 
     # Generate a filename for the graph
     filename = "tmp_webstat_customevent_" + ''.join([re.subn("[^\w]", "",
                                        event_id)[0] for event_id in ids]) + "_"
     if choosed['timespan'] == "select date":
         filename += args_req['t_start'] + "_" + args_req['t_end']
     else:
         filename += choosed['timespan']
     settings = {"title": 'Custom event',
                  "xlabel": args_req['t_fullname'] + ' (' + \
                      args_req['granularity'] + ')',
                  "ylabel": "Action quantity",
                  "xtic_format": args_req['xtic_format'],
                  "format": choosed['format'],
                  "multiple": (type(ids) is list) and names or []}
 
     return out + _perform_display_event(data, os.path.basename(filename),
                                         settings, ln=ln)
 
 
 def perform_display_customevent_data(ids, args_req, choosed):
     """Returns the trend data"""
     data_unmerged = []
     for event_id, i in [(ids[i], str(i)) for i in range(len(ids))]:
         # Calculate cols
         args_req['cols'] = choosed['cols'][int(i)]
 
         # Get unique name for the rawdata file (wash arguments!)
         filename = "webstat_customevent_" + re.subn("[^\w]", "", event_id + \
                    "_" + choosed['timespan'] + "_" + '-'.join([':'.join(col)
                                             for col in args_req['cols']]))[0]
 
         # Add the current id to the gatherer's arguments
         args_req['event_id'] = event_id
 
         # Prepare raw data gatherer, if cache needs refreshing.
-        gatherer = lambda: get_customevent_trend(args_req)
+        gatherer = lambda x: get_customevent_trend(args_req)
 
         # Determine if this particular file is due for scheduling cacheing,
         # in that case we must not allow refreshing of the rawdata.
         allow_refresh = not _is_scheduled_for_cacheing(event_id)
 
         # Get file from cache, and evaluate it to trend data
         force = choosed['timespan'] == "select date"
         data_unmerged.append(eval(_get_file_using_cache(filename, gatherer,
                              force, allow_refresh=allow_refresh).read()))
 
     # Merge data from the unmerged trends into the final destination
     return [(x[0][0], tuple([y[1] for y in x])) for x in zip(*data_unmerged)]
 
 
 def perform_display_customevent_data_ascii_dump(ids, args, args_req, choosed):
     """Returns the trend data"""
     for i in [str(j) for j in range(len(ids))]:
         args['bool' + i].insert(0, "")
         args_req['cols' + i] = zip(args['bool' + i], args['cols' + i],
                                  args['col_value' + i])
     filename = "webstat_customevent_" + re.subn("[^\w]", "", ''.join(ids) +
                 "_" + choosed['timespan'] + "_" + '-'.join([':'.join(col) for
                 col in [args['cols' + str(i)] for i in range(len(ids))]]) +
                                                 "_asciidump")[0]
     args_req['ids'] = ids
     gatherer = lambda: get_customevent_dump(args_req)
     force = choosed['timespan'] == "select date"
     return eval(_get_file_using_cache(filename, gatherer, force).read())
 
 
-def perform_display_stats_per_coll(collection='All', req=None, ln=CFG_SITE_LANG):
+def perform_display_coll_list(req=None, ln=CFG_SITE_LANG):
+    """
+    Display list of collections
+
+    @param req: The Apache request object, necessary for export redirect.
+    @type req:
+    """
+    return TEMPLATES.tmpl_collection_stats_complete_list(get_collection_list_plus_all())
+
+
+def perform_display_stats_per_coll(args={}, req=None, ln=CFG_SITE_LANG):
     """
     Display general statistics for a given collection
 
+    @param args: { param name: argument value }
+    @type args: { str: str }
+
     @param req: The Apache request object, necessary for export redirect.
     @type req:
     """
-    timespan = 'this month'
-    events_id = ('download frequency', 'collection population', 'comments frequency')
-    gformat = 'gnuplot'
+    events_id = ('collection population', 'download frequency', 'comments frequency')
+    # Get all the option lists:
     # Make sure extraparams are valid, if any
-    if not collection in [x[0] for x in get_collection_list_plus_all()]:
+    if not args['collection'] in [x[0] for x in get_collection_list_plus_all()]:
         return TEMPLATES.tmpl_error('Please specify a valid value for parameter "Collection".')
 
+    # { parameter name: [(argument internal name, argument full name)]}
+    options = {'collection': ('combobox', 'Collection', get_collection_list_plus_all()),
+               'timespan': ('combobox', 'Time span', _get_timespans()),
+               'format': ('combobox', 'Output format', _get_formats())}
+    order = options.keys()
+
     # Arguments OK beyond this point!
 
     # Get unique name for caching purposes (make sure that the params
     # used in the filename are safe!)
-    choosed = {'timespan': timespan, 'collection': collection}
-    out = "<table>"
+    out = TEMPLATES.tmpl_keyevent_box(options, order, args, ln=ln)
+    out += "<table>"
     pair = False
     for event_id in events_id:
+        # Get unique name for caching purposes (make sure that the params used
+        # in the filename are safe!)
         filename = KEYEVENT_REPOSITORY[event_id]['cachefilename'] \
-               % dict([(param, re.subn("[^\w]", "_", choosed[param])[0])
-                       for param in choosed] +
+               % dict([(param, re.subn("[^\w]", "_", args[param])[0])
+                       for param in args] +
                       [('event_id', re.subn("[^\w]", "_", event_id)[0])])
 
         # Get time parameters from repository
-        _, t_fullname, t_start, t_end, granularity, t_format, xtic_format = \
-            _get_timespans()[3]
-        args = {'t_start': t_start, 't_end': t_end, 'granularity': granularity,
-                't_format': t_format, 'xtic_format': xtic_format, 'collection': collection}
+        if args['timespan'] == "select date":
+            t_args = _get_time_parameters_select_date(args["s_date"], args["f_date"])
+        else:
+            t_args = _get_time_parameters(options, args['timespan'])
+        for param in KEYEVENT_REPOSITORY[event_id]['extraparams']:
+            t_args[param] = args[param]
         # Create closure of frequency function in case cache needs to be refreshed
-        gatherer = lambda: KEYEVENT_REPOSITORY[event_id]['gatherer'](args)
+        gatherer = lambda return_sql: KEYEVENT_REPOSITORY[event_id]['gatherer'](t_args, return_sql=return_sql)
 
         # Determine if this particular file is due for scheduling cacheing,
         # in that case we must not allow refreshing of the rawdata.
         allow_refresh = not _is_scheduled_for_cacheing(event_id)
 
         # Get data file from cache (refresh if necessary)
         data = eval(_get_file_using_cache(filename, gatherer, allow_refresh=allow_refresh).read())
 
         # Prepare the graph settings that are being passed on to grapher
-        settings = {"title": KEYEVENT_REPOSITORY[event_id]['specificname'] % args,
-                  "xlabel": t_fullname + ' (' + granularity + ')',
+        settings = {"title": KEYEVENT_REPOSITORY[event_id]['specificname'] % t_args,
+                  "xlabel":  t_args['t_fullname'] + ' (' + \
+                     t_args['granularity'] + ')',
                   "ylabel": KEYEVENT_REPOSITORY[event_id]['ylabel'],
-                  "xtic_format": xtic_format,
-                  "format": gformat,
+                  "xtic_format": t_args['xtic_format'],
+                  "format": args['format'],
                   "multiple": KEYEVENT_REPOSITORY[event_id]['multiple'],
                   "size": '360,270'}
         if not pair:
             out += '<tr>'
         out += '<td>%s</td>' % _perform_display_event(data,
                                     os.path.basename(filename), settings, ln=ln)
         if pair:
             out += '</tr>'
         pair = not pair
     return out + "</table>"
 
 
 def perform_display_customevent_help(ln=CFG_SITE_LANG):
     """Display the custom event help"""
     return TEMPLATES.tmpl_customevent_help(ln=ln)
 
 
 def perform_display_error_log_analyzer(ln=CFG_SITE_LANG):
     """Display the error log analyzer"""
     update_error_log_analyzer()
     return TEMPLATES.tmpl_error_log_analyzer(get_invenio_error_log_ranking(),
                                              get_invenio_last_n_errors(5),
                                              get_apache_error_log_ranking())
 
 
 def perform_display_custom_summary(args, ln=CFG_SITE_LANG):
     """Display the custom summary (annual report)
 
     @param args: { param name: argument value } (chart title, search query and output tag)
     @type args: { str: str }
     """
     if args['tag'] == '':
-        args['tag'] = "909C4p"
+        args['tag'] = "773__p"
     data = get_custom_summary_data(args['query'], args['tag'])
     tag_name = _get_tag_name(args['tag'])
     if tag_name == '':
         tag_name = args['tag']
     path = WEBSTAT_GRAPH_DIRECTORY + os.path.basename("tmp_webstat_custom_summary_"
                                                 + args['query'] + args['tag'])
     create_custom_summary_graph(data[:-1], path, args['title'])
     return TEMPLATES.tmpl_display_custom_summary(tag_name, data, args['title'],
                                     args['query'], args['tag'], path, ln=ln)
 
 # INTERNALS
 
 def _perform_display_event(data, name, settings, ln=CFG_SITE_LANG):
     """
     Retrieves a graph or a table.
 
     @param data: The trend/dump data
     @type data: [(str, str|int|(str|int,...))] | [(str|int,...)]
 
     @param name: The name of the trend (to be used as basename of graph file)
     @type name: str
 
     @param settings: Dictionary of graph parameters
     @type settings: dict
 
     @return: The URL of the graph (ASCII or image)
     @type: str
     """
     path = WEBSTAT_GRAPH_DIRECTORY + "tmp_" + name
 
     # Generate, and insert using the appropriate template
     if settings["format"] == "asciidump":
         path += "_asciidump"
         create_graph_dump(data, path)
         return TEMPLATES.tmpl_display_event_trend_ascii(settings["title"],
                                                         path, ln=ln)
 
     if settings["format"] == "Table":
         create_graph_table(data, path, settings)
         return TEMPLATES.tmpl_display_event_trend_text(settings["title"], path, ln=ln)
 
     create_graph_trend(data, path, settings)
     if settings["format"] == "asciiart":
         return TEMPLATES.tmpl_display_event_trend_ascii(
             settings["title"], path, ln=ln)
     else:
         if settings["format"] == "gnuplot":
             try:
                 import Gnuplot
             except ImportError:
                 return 'Gnuplot is not installed. Returning ASCII art.' + \
                        TEMPLATES.tmpl_display_event_trend_ascii(
                     settings["title"], path, ln=ln)
 
             return TEMPLATES.tmpl_display_event_trend_image(
                 settings["title"], path, ln=ln)
         elif settings["format"] == "flot":
             return TEMPLATES.tmpl_display_event_trend_text(
                 settings["title"], path, ln=ln)
         return TEMPLATES.tmpl_display_event_trend_ascii(
             settings["title"], path, ln=ln)
 
 
 def _get_customevents():
     """
     Retrieves registered custom events from the database.
 
     @return: [(internal name, readable name)]
     @type: [(str, str)]
     """
     return [(x[0], x[1]) for x in run_sql("SELECT id, name FROM staEVENT")]
 
 
-def _get_timespans(dttime=None):
+def _get_timespans(dttime=None, bibcirculation_stat=False):
     """
     Helper function that generates possible time spans to be put in the
     drop-down in the generation box. Computes possible years, and also some
     pre-defined simpler values. Some items in the list returned also tweaks the
     output graph, if any, since such values are closely related to the nature
     of the time span.
 
     @param dttime: A datetime object indicating the current date and time
     @type dttime: datetime.datetime
 
     @return: [(Internal name, Readable name, t_start, t_end, granularity, format, xtic_format)]
     @type [(str, str, str, str, str, str, str)]
     """
     if dttime is None:
         dttime = datetime.datetime.now()
 
     dtformat = "%Y-%m-%d"
     # Helper function to return a timediff object reflecting a diff of x days
     d_diff = lambda x: datetime.timedelta(days=x)
     # Helper function to return the number of days in the month x months ago
     d_in_m = lambda x: calendar.monthrange(
         ((dttime.month - x < 1) and dttime.year - 1 or dttime.year),
                                            (((dttime.month - 1) - x) % 12 + 1))[1]
     to_str = lambda x: x.strftime(dtformat)
     dt_str = to_str(dttime)
 
     spans = [("today", "Today",
               dt_str,
               to_str(dttime + d_diff(1)),
               "hour", dtformat, "%H"),
              ("this week", "This week",
               to_str(dttime - d_diff(dttime.weekday())),
               to_str(dttime + d_diff(1)),
               "day", dtformat, "%a"),
              ("last week", "Last week",
               to_str(dttime - d_diff(dttime.weekday() + 7)),
               to_str(dttime - d_diff(dttime.weekday())),
               "day", dtformat, "%a"),
              ("this month", "This month",
               to_str(dttime - d_diff(dttime.day) + d_diff(1)),
               to_str(dttime + d_diff(1)),
               "day", dtformat, "%d"),
              ("last month", "Last month",
               to_str(dttime - d_diff(d_in_m(1)) - d_diff(dttime.day) + d_diff(1)),
               to_str(dttime - d_diff(dttime.day) + d_diff(1)),
               "day", dtformat, "%d"),
              ("last three months", "Last three months",
               to_str(dttime - d_diff(d_in_m(1)) - d_diff(d_in_m(2)) -
                      d_diff(dttime.day) + d_diff(1)),
               dt_str,
               "month", dtformat, "%b"),
              ("last year", "Last year",
               to_str((dttime - datetime.timedelta(days=365)).replace(day=1)),
               to_str((dttime + datetime.timedelta(days=31)).replace(day=1)),
               "month", dtformat, "%b")]
 
-    # Get first year as indicated by the content's in bibrec
+    # Get first year as indicated by the contents of bibrec or
+    # CFG_WEBSTAT_BIBCIRCULATION_START_YEAR
     try:
-        year1 = run_sql("SELECT creation_date FROM bibrec ORDER BY \
-                creation_date LIMIT 1")[0][0].year
-    except IndexError:
+        if bibcirculation_stat and CFG_WEBSTAT_BIBCIRCULATION_START_YEAR:
+            year1 = int(CFG_WEBSTAT_BIBCIRCULATION_START_YEAR)
+        else:
+            year1 = run_sql("SELECT creation_date FROM bibrec ORDER BY \
+                    creation_date LIMIT 1")[0][0].year
+    except:
         year1 = dttime.year
 
     year2 = time.localtime()[0]
     diff_year = year2 - year1
     if diff_year >= 2:
         spans.append(("last 2 years", "Last 2 years",
                       to_str((dttime - datetime.timedelta(days=365 * 2)).replace(day=1)),
                       to_str((dttime + datetime.timedelta(days=31)).replace(day=1)),
                       "month", dtformat, "%b"))
     if diff_year >= 5:
         spans.append(("last 5 years", "Last 5 years",
                       to_str((dttime - datetime.timedelta(days=365 * 5)).replace(day=1)),
                       to_str((dttime + datetime.timedelta(days=31)).replace(day=1)),
                       "year", dtformat, "%Y"))
     if diff_year >= 10:
         spans.append(("last 10 years", "Last 10 years",
                       to_str((dttime - datetime.timedelta(days=365 * 10)).replace(day=1)),
                       to_str((dttime + datetime.timedelta(days=31)).replace(day=1)),
                       "year", dtformat, "%Y"))
     spans.append(("full history", "Full history", str(year1), str(year2 + 1),
                   "year", "%Y", "%Y"))
     spans.extend([(str(x), str(x), str(x), str(x + 1), "month", "%Y", "%b")
                   for x in range(year2, year1 - 1, -1)])
 
     spans.append(("select date", "Select date...", "", "",
                   "hour", dtformat, "%H"))
 
     return spans
 
 
 def _get_time_parameters(options, timespan):
     """
     Returns the time parameters from the repository when it is a default timespan
     @param options: A dictionary with the option lists
     @type options: { parameter name: [(argument internal name, argument full name)]}
 
     @param timespan: name of the chosen timespan
     @type timespan: str
 
     @return: [(Full name, t_start, t_end, granularity, format, xtic_format)]
     @type [(str, str, str, str, str, str, str)]
     """
     if len(options['timespan']) == 2:
         i = 1
     else:
         i = 2
     _, t_fullname, t_start, t_end, granularity, t_format, xtic_format = \
             options['timespan'][i][[x[0]
                           for x in options['timespan'][i]].index(timespan)]
     return {'t_fullname': t_fullname, 't_start': t_start, 't_end': t_end,
             'granularity': granularity, 't_format': t_format,
             'xtic_format': xtic_format}
 
 
 def _get_time_parameters_select_date(s_date, f_date):
     """
     Returns the time parameters from the repository when it is a custom timespan
     @param s_date: start date for the graph
     @type s_date: str %m/%d/%Y %H:%M
 
     @param f_date: finish date for the graph
     @type f_date: str %m/%d/%Y %H:%M
 
     @return: [(Full name, t_start, t_end, granularity, format, xtic_format)]
     @type [(str, str, str, str, str, str, str)]
     """
 
     t_fullname = "%s-%s" % (s_date, f_date)
-    dt_start = datetime.datetime.strptime(s_date, "%m/%d/%Y %H:%M")
-    dt_end = datetime.datetime.strptime(f_date, "%m/%d/%Y %H:%M")
+    dt_start = datetime.datetime(*(time.strptime(s_date, "%m/%d/%Y %H:%M")[0:6]))
+    dt_end = datetime.datetime(*(time.strptime(f_date, "%m/%d/%Y %H:%M")[0:6]))
     if dt_end - dt_start <= timedelta(hours=1):
         xtic_format = "%m:%s"
         granularity = 'second'
     elif dt_end - dt_start <= timedelta(days=3):
         xtic_format = "%H:%m"
         granularity = 'minute'
     elif dt_end - dt_start <= timedelta(days=7):
         xtic_format = "%H"
         granularity = 'hour'
     elif dt_end - dt_start <= timedelta(days=60):
         xtic_format = "%a"
         granularity = 'day'
     elif dt_end - dt_start <= timedelta(days=730):
         xtic_format = "%d"
         granularity = 'month'
     else:
         xtic_format = "%H"
         granularity = 'hour'
     t_format = "%Y-%m-%d %H:%M:%S"
     t_start = dt_start.strftime("%Y-%m-%d %H:%M:%S")
     t_end = dt_end.strftime("%Y-%m-%d %H:%M:%S")
     return {'t_fullname': t_fullname, 't_start': t_start, 't_end': t_end,
             'granularity': granularity, 't_format': t_format,
             'xtic_format': xtic_format}
 
 
 def _get_formats(with_dump=False):
     """
     Helper function to retrieve a Invenio friendly list of all possible
     output types (displaying and exporting) from the central repository as
     stored in the variable self.types at the top of this module.
 
     @param with_dump: Optionally displays the custom-event only type 'asciidump'
     @type with_dump: bool
 
     @return: [(Internal name, Readable name)]
     @type [(str, str)]
     """
     # The third tuple value is internal
     if with_dump:
         return [(x[0], x[1]) for x in TYPE_REPOSITORY]
     else:
         return [(x[0], x[1]) for x in TYPE_REPOSITORY if x[0] != 'asciidump']
 
 
 def _get_customevent_cols(event_id=""):
     """
     List of all the diferent name of columns in customevents.
 
     @return: {id: [(internal name, readable name)]}
     @type: {str: [(str, str)]}
     """
     sql_str = "SELECT id,cols FROM staEVENT"
     sql_param = []
     if event_id:
         sql_str += "WHERE id = %s"
         sql_param.append(event_id)
     cols = {}
     for event in run_sql(sql_str, sql_param):
         if event[0]:
             if event[1]:
                 cols[event[0]] = [(name, name) for name
                                    in cPickle.loads(event[1])]
             else:
                 cols[event[0]] = []
     return cols
 
 
 def _is_type_export(typename):
     """
     Helper function that consults the central repository of types to determine
     whether the input parameter represents an export type.
 
     @param typename: Internal type name
     @type typename: str
 
     @return: Information whether a certain type exports data
     @type: bool
     """
     return len(TYPE_REPOSITORY[[x[0] for x in
                                 TYPE_REPOSITORY].index(typename)]) == 3
 
 
 def _get_export_closure(typename):
     """
     Helper function that for a certain type, gives back the corresponding export
     closure.
 
     @param typename: Internal type name
     @type type: str
 
     @return: Closure that exports data to the type's format
     @type: function
     """
     return TYPE_REPOSITORY[[x[0] for x in TYPE_REPOSITORY].index(typename)][2]
 
 
 def _get_file_using_cache(filename, closure, force=False, allow_refresh=True):
     """
     Uses the Invenio cache, i.e. the tempdir, to see if there's a recent
     cached version of the sought-after file in there. If not, use the closure to
     compute a new, and return that instead. Relies on Invenio configuration
     parameter WEBSTAT_CACHE_INTERVAL.
 
     @param filename: The name of the file that might be cached
     @type filename: str
 
     @param closure: A function, that executed will return data to be cached. The
                     function should return either a string, or something that
                     makes sense after being interpreted with str().
     @type closure: function
 
     @param force: Override cache default value.
     @type force: bool
 
 
     """
     # Absolute path to cached files, might not exist.
     filename = os.path.normpath(WEBSTAT_RAWDATA_DIRECTORY + filename)
 
     # Get the modification time of the cached file (if any).
     try:
         mtime = os.path.getmtime(filename)
     except OSError:
         # No cached version of this particular file exists, thus the
         # modification time is set to 0 for easy logic below.
         mtime = 0
 
     # Consider refreshing cache if FORCE or NO CACHE AT ALL,
     # or CACHE EXIST AND REFRESH IS ALLOWED.
     if force or mtime == 0 or (mtime > 0 and allow_refresh):
 
         # Is the file modification time recent enough?
         if force or (time.time() - mtime > WEBSTAT_CACHE_INTERVAL):
 
             # No! Use closure to compute new content
-            content = closure()
+            content = closure(False)
 
             # Cache the data
             open(filename, 'w').write(str(content))
 
     # Return the (perhaps just) cached file
     return open(filename, 'r')
 
 
 def _is_scheduled_for_cacheing(event_id):
     """
     @param event_id: The event id
     @type event_id: str
 
     @return: Indication of if the event id is scheduling for BibSched execution.
     @type: bool
     """
     if not is_task_scheduled('webstatadmin'):
         return False
 
     # Get the task id
     try:
         task_id = get_task_ids_by_descending_date('webstatadmin',
                                                   ['RUNNING', 'WAITING'])[0]
     except IndexError:
         return False
     else:
         args = get_task_options(task_id)
         return event_id in (args['keyevents'] + args['customevents'])
diff --git a/modules/webstat/lib/webstat_engine.py b/modules/webstat/lib/webstat_engine.py
index 875de662c..83b016f61 100644
--- a/modules/webstat/lib/webstat_engine.py
+++ b/modules/webstat/lib/webstat_engine.py
@@ -1,2420 +1,2568 @@
 ## This file is part of Invenio.
 ## Copyright (C) 2007, 2008, 2010, 2011 CERN.
 ##
 ## Invenio is free software; you can redistribute it and/or
 ## modify it under the terms of the GNU General Public License as
 ## published by the Free Software Foundation; either version 2 of the
 ## License, or (at your option) any later version.
 ##
 ## Invenio is distributed in the hope that it will be useful, but
 ## WITHOUT ANY WARRANTY; without even the implied warranty of
 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 ## General Public License for more details.
 ##
 ## You should have received a copy of the GNU General Public License
 ## along with Invenio; if not, write to the Free Software Foundation, Inc.,
 ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 
 __revision__ = "$Id$"
 __lastupdated__ = "$Date$"
 
-import calendar, commands, datetime, time, os, cPickle, random
-try:
-    import xlwt
-    xlwt_imported = True
-except ImportError:
-    xlwt_imported = False
-from invenio.config import CFG_TMPDIR, CFG_SITE_URL, CFG_SITE_NAME, CFG_BINDIR
+import calendar, commands, datetime, time, os, cPickle, random, cgi
+from operator import itemgetter
+from invenio.config import CFG_TMPDIR, \
+    CFG_SITE_URL, \
+    CFG_SITE_NAME, \
+    CFG_BINDIR, \
+    CFG_CERN_SITE
+from invenio.bibindex_engine import CFG_JOURNAL_TAG
 from invenio.urlutils import redirect_to_url
 from invenio.search_engine import perform_request_search, \
     get_collection_reclist, \
     get_most_popular_field_values
 from invenio.search_engine_utils import get_fieldvalues
 from invenio.dbquery import run_sql, \
     wash_table_column_name
 from invenio.websubmitadmin_dblayer import get_docid_docname_alldoctypes
 from invenio.bibcirculation_utils import book_title_from_MARC, \
     book_information_from_MARC
 from invenio.bibcirculation_dblayer import get_id_bibrec, \
     get_borrower_data
 
 WEBSTAT_SESSION_LENGTH = 48 * 60 * 60 # seconds
 WEBSTAT_GRAPH_TOKENS = '-=#+@$%&XOSKEHBC'
 
 # KEY EVENT TREND SECTION
 
-def get_keyevent_trend_collection_population(args):
+def get_keyevent_trend_collection_population(args, return_sql=False):
     """
     Returns the quantity of documents in Invenio for
     the given timestamp range.
 
     @param args['collection']: A collection name
     @type args['collection']: str
 
     @param args['t_start']: Date and time of start point
     @type args['t_start']: str
 
     @param args['t_end']: Date and time of end point
     @type args['t_end']: str
 
     @param args['granularity']: Granularity of date and time
     @type args['granularity']: str
 
     @param args['t_format']: Date and time formatting string
     @type args['t_format']: str
     """
     # collect action dates
     lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
-    upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
-    if args.get('collection','All') == 'All':
-        sql_query_g = ("SELECT creation_date FROM bibrec WHERE " + \
-                     "creation_date > '%s' AND creation_date < '%s' " + \
-                     "ORDER BY creation_date DESC") % \
-                     (lower, upper)
-        sql_query_i = "SELECT COUNT(id) FROM bibrec " + \
-                "WHERE creation_date < '%s'" % (lower)
+    if args.get('collection', 'All') == 'All':
+        sql_query_g = _get_sql_query("creation_date", args['granularity'],
+                        "bibrec")
+        sql_query_i = "SELECT COUNT(id) FROM bibrec WHERE creation_date < %s"
+        initial_quantity = run_sql(sql_query_i, (lower, ))[0][0]
+        return _get_keyevent_trend(args, sql_query_g, initial_quantity=initial_quantity,
+                            return_sql=return_sql, sql_text=
+                            "Previous count: %s<br />Current count: %%s" % (sql_query_i),
+                            acumulative=True)
     else:
-        ids = perform_request_search(cc=args['collection'])
+        ids = get_collection_reclist(args['collection'])
         if len(ids) == 0:
             return []
-        ids_str = str(ids).replace('[', '(').replace(']', ')')
-        sql_query_g = ("SELECT creation_date FROM bibrec WHERE id IN %s AND " + \
-                     "creation_date > '%s' AND creation_date < '%s' " + \
-                     "ORDER BY creation_date DESC") % \
-                     (ids_str, lower, upper)
-        sql_query_i = "SELECT COUNT(id) FROM bibrec " + \
-                "WHERE id IN %s AND creation_date < '%s'" % (ids_str, lower)
-
-    action_dates = [x[0] for x in run_sql(sql_query_g)]
-    initial_quantity = run_sql(sql_query_i)[0][0]
-
-    return _get_trend_from_actions(action_dates, initial_quantity,
-                                   args['t_start'], args['t_end'],
-                                   args['granularity'], args['t_format'])
+        g = get_keyevent_trend_new_records(args, return_sql, True)
+        sql_query_i = "SELECT id FROM bibrec WHERE creation_date < %s"
+        if return_sql:
+            return "Previous count: %s<br />Current count: %s" % (sql_query_i % lower, g)
+        initial_quantity = len(filter(lambda x: x[0] in ids, run_sql(sql_query_i, (lower, ))))
+        return _get_trend_from_actions(g, initial_quantity, args['t_start'],
+                          args['t_end'], args['granularity'], args['t_format'], acumulative=True)
+
+
+def get_keyevent_trend_new_records(args, return_sql=False, only_action=False):
+    """
+    Returns the number of new records uploaded during the given timestamp range.
+
+    @param args['collection']: A collection name
+    @type args['collection']: str
+
+    @param args['t_start']: Date and time of start point
+    @type args['t_start']: str
+
+    @param args['t_end']: Date and time of end point
+    @type args['t_end']: str
+
+    @param args['granularity']: Granularity of date and time
+    @type args['granularity']: str
+
+    @param args['t_format']: Date and time formatting string
+    @type args['t_format']: str
+    """
+
+    if args.get('collection', 'All') == 'All':
+        return _get_keyevent_trend(args, _get_sql_query("creation_date", args['granularity'],
+                            "bibrec"),
+                            return_sql=return_sql)
+    else:
+        lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
+        upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
+        ids = get_collection_reclist(args['collection'])
+        if len(ids) == 0:
+            return []
+        sql = _get_sql_query("creation_date", args["granularity"], "bibrec",
+                             extra_select=", id", group_by=False, count=False)
+        if return_sql:
+            return sql % (lower, upper)
+
+        recs = run_sql(sql, (lower, upper))
+        if recs:
+            def add_count(i_list, element):
+                """ Reduce function that accumulates [date, count] pairs,
+                counting the ids for each date """
+                if i_list and element == i_list[-1][0]:
+                    i_list[-1][1] += 1
+                else:
+                    i_list.append([element, 1])
+                return i_list
+            action_dates = reduce(add_count,
+                            map(lambda x: x[0], filter(lambda x: x[1] in ids, recs)),
+                            [])
+        else:
+            action_dates = []
+        if only_action:
+            return action_dates
+        return _get_trend_from_actions(action_dates, 0, args['t_start'],
+                          args['t_end'], args['granularity'], args['t_format'])
 
 
-def get_keyevent_trend_search_frequency(args):
+def get_keyevent_trend_search_frequency(args, return_sql=False):
     """
     Returns the number of searches (of any kind) carried out
     during the given timestamp range.
 
     @param args['t_start']: Date and time of start point
     @type args['t_start']: str
 
     @param args['t_end']: Date and time of end point
     @type args['t_end']: str
 
     @param args['granularity']: Granularity of date and time
     @type args['granularity']: str
 
     @param args['t_format']: Date and time formatting string
     @type args['t_format']: str
     """
-    # collect action dates
-    lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
-    upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
-    sql = "SELECT date FROM query INNER JOIN user_query ON id=id_query " + \
-          "WHERE date > '%s' AND date < '%s' ORDER BY date DESC" % \
-          (lower, upper)
-    action_dates = [x[0] for x in run_sql(sql)]
 
-    return _get_trend_from_actions(action_dates, 0, args['t_start'],
-                          args['t_end'], args['granularity'], args['t_format'])
+    return _get_keyevent_trend(args, _get_sql_query("date", args["granularity"],
+                "query INNER JOIN user_query ON id=id_query"),
+                            return_sql=return_sql)
 
 
-def get_keyevent_trend_comments_frequency(args):
+def get_keyevent_trend_comments_frequency(args, return_sql=False):
     """
     Returns the number of comments (of any kind) carried out
     during the given timestamp range.
 
     @param args['collection']: A collection name
     @type args['collection']: str
 
     @param args['t_start']: Date and time of start point
     @type args['t_start']: str
 
     @param args['t_end']: Date and time of end point
     @type args['t_end']: str
 
     @param args['granularity']: Granularity of date and time
     @type args['granularity']: str
 
     @param args['t_format']: Date and time formatting string
     @type args['t_format']: str
     """
-    # collect action dates
-    lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
-    upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
-    if args.get('collection','All') == 'All':
-        sql = "SELECT date_creation FROM cmtRECORDCOMMENT " + \
-          "WHERE date_creation > '%s' AND date_creation < '%s'" \
-          % (lower, upper) + " ORDER BY date_creation DESC"
+    if args.get('collection', 'All') == 'All':
+        sql = _get_sql_query("date_creation", args["granularity"],
+            "cmtRECORDCOMMENT")
     else:
-        ids = get_collection_reclist(args['collection']).tolist()
-        if len(ids) == 0:
-            return []
-        ids_str = str(ids).replace('[', '(').replace(']', ')')
-        sql = "SELECT date_creation FROM cmtRECORDCOMMENT \
-            WHERE date_creation > '%s' AND date_creation < '%s'  \
-            AND id_bibrec IN %s ORDER BY date_creation DESC" \
-            % (lower, upper, ids_str)
-    action_dates = [x[0] for x in run_sql(sql)]
-
-    return _get_trend_from_actions(action_dates, 0, args['t_start'],
-                          args['t_end'], args['granularity'], args['t_format'])
+        sql = _get_sql_query("date_creation", args["granularity"],
+            "cmtRECORDCOMMENT", conditions=
+            _get_collection_recids_for_sql_query(args['collection']))
+    return _get_keyevent_trend(args, sql, return_sql=return_sql)
 
 
-def get_keyevent_trend_search_type_distribution(args):
+def get_keyevent_trend_search_type_distribution(args, return_sql=False):
     """
     Returns the number of searches carried out during the given
     timestamp range, but also partion them by type Simple and
     Advanced.
 
     @param args['t_start']: Date and time of start point
     @type args['t_start']: str
 
     @param args['t_end']: Date and time of end point
     @type args['t_end']: str
 
     @param args['granularity']: Granularity of date and time
     @type args['granularity']: str
 
     @param args['t_format']: Date and time formatting string
     @type args['t_format']: str
     """
-    lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
-    upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
-
     # SQL to determine all simple searches:
-    sql = "SELECT date FROM query INNER JOIN user_query ON id=id_query " + \
-          "WHERE urlargs LIKE '%p=%' " + \
-          "AND date > '%s' AND date < '%s' ORDER BY date DESC" % (lower, upper)
-    simple = [x[0] for x in run_sql(sql)]
+    simple = _get_sql_query("date", args["granularity"],
+                    "query INNER JOIN user_query ON id=id_query",
+                    conditions="urlargs LIKE '%%p=%%'")
 
     # SQL to determine all advanced searches:
-    sql = "SELECT date FROM query INNER JOIN user_query ON id=id_query " + \
-          "WHERE urlargs LIKE '%as=1%' " + \
-          "AND date > '%s' AND date < '%s' ORDER BY date DESC" % (lower, upper)
-    advanced = [x[0] for x in run_sql(sql)]
+    advanced = _get_sql_query("date", args["granularity"],
+                    "query INNER JOIN user_query ON id=id_query",
+                    conditions="urlargs LIKE '%%as=1%%'")
 
     # Compute the trend for both types
-    s_trend = _get_trend_from_actions(simple, 0, args['t_start'],
-                         args['t_end'], args['granularity'], args['t_format'])
-    a_trend = _get_trend_from_actions(advanced, 0, args['t_start'],
-                         args['t_end'], args['granularity'], args['t_format'])
+    s_trend = _get_keyevent_trend(args, simple,
+                        return_sql=return_sql, sql_text="Simple: %s")
+    a_trend = _get_keyevent_trend(args, advanced,
+                        return_sql=return_sql, sql_text="Advanced: %s")
 
     # Assemble, according to return type
+    if return_sql:
+        return "%s <br /> %s" % (s_trend, a_trend)
     return [(s_trend[i][0], (s_trend[i][1], a_trend[i][1]))
             for i in range(len(s_trend))]
 
 
-def get_keyevent_trend_download_frequency(args):
+def get_keyevent_trend_download_frequency(args, return_sql=False):
     """
     Returns the number of full text downloads carried out
     during the given timestamp range.
 
     @param args['collection']: A collection name
     @type args['collection']: str
 
     @param args['t_start']: Date and time of start point
     @type args['t_start']: str
 
     @param args['t_end']: Date and time of end point
     @type args['t_end']: str
 
     @param args['granularity']: Granularity of date and time
     @type args['granularity']: str
 
     @param args['t_format']: Date and time formatting string
     @type args['t_format']: str
     """
-
-    lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
-    upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
     # Collect list of timestamps of insertion in the specific collection
-    if args.get('collection','All') == 'All':
-        sql = "SELECT download_time FROM rnkDOWNLOADS WHERE download_time > '%s' \
-            AND download_time < '%s'  ORDER BY download_time DESC" % (lower, upper)
+    if args.get('collection', 'All') == 'All':
+        return _get_keyevent_trend(args, _get_sql_query("download_time",
+                args["granularity"], "rnkDOWNLOADS"), return_sql=return_sql)
     else:
-        ids = get_collection_reclist(args['collection']).tolist()
+        lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
+        upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
+        ids = get_collection_reclist(args['collection'])
         if len(ids) == 0:
             return []
-        ids_str = str(ids).replace('[', '(').replace(']', ')')
-        sql = "SELECT download_time FROM rnkDOWNLOADS WHERE download_time > '%s' \
-            AND download_time < '%s' AND id_bibrec IN %s \
-            ORDER BY download_time DESC" % (lower, upper, ids_str)
-    actions = [x[0] for x in run_sql(sql)]
-
-    return _get_trend_from_actions(actions, 0, args['t_start'],
+        sql = _get_sql_query("download_time", args["granularity"], "rnkDOWNLOADS",
+                             extra_select=", GROUP_CONCAT(id_bibrec)")
+        if return_sql:
+            return sql % (lower, upper)
+
+        action_dates = []
+        for result in run_sql(sql, (lower, upper)):
+            count = result[1]
+            for id in result[2].split(","):
+                if id == '' or not int(id) in ids:
+                    count -= 1
+            action_dates.append((result[0], count))
+        return _get_trend_from_actions(action_dates, 0, args['t_start'],
                           args['t_end'], args['granularity'], args['t_format'])
 
 
-def get_keyevent_trend_number_of_loans(args):
+def get_keyevent_trend_number_of_loans(args, return_sql=False):
     """
     Returns the number of loans carried out
     during the given timestamp range.
 
     @param args['t_start']: Date and time of start point
     @type args['t_start']: str
 
     @param args['t_end']: Date and time of end point
     @type args['t_end']: str
 
     @param args['granularity']: Granularity of date and time
     @type args['granularity']: str
 
     @param args['t_format']: Date and time formatting string
     @type args['t_format']: str
     """
-    # collect action dates
-    lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
-    upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
-    sql = "SELECT loaned_on FROM crcLOAN " + \
-          "WHERE loaned_on > '%s' AND loaned_on < '%s' ORDER BY loaned_on DESC"\
-          % (lower, upper)
-    action_dates = [x[0] for x in run_sql(sql)]
-
-    return _get_trend_from_actions(action_dates, 0, args['t_start'],
-                          args['t_end'], args['granularity'], args['t_format'])
+    return _get_keyevent_trend(args, _get_sql_query("loaned_on",
+            args["granularity"], "crcLOAN"), return_sql=return_sql)
 
 
-def get_keyevent_trend_web_submissions(args):
+def get_keyevent_trend_web_submissions(args, return_sql=False):
     """
     Returns the quantity of websubmissions in Invenio for
     the given timestamp range.
 
     @param args['doctype']: A doctype name
     @type args['doctype']: str
 
     @param args['t_start']: Date and time of start point
     @type args['t_start']: str
 
     @param args['t_end']: Date and time of end point
     @type args['t_end']: str
 
     @param args['granularity']: Granularity of date and time
     @type args['granularity']: str
 
     @param args['t_format']: Date and time formatting string
     @type args['t_format']: str
     """
-    # collect action dates
-    lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
-    upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
     if args['doctype'] == 'all':
-        sql_query = "SELECT cd FROM sbmSUBMISSIONS " + \
-            "WHERE action='SBI' AND cd > '%s' AND cd < '%s'" % (lower, upper) + \
-            " AND status='finished' ORDER BY cd DESC"
+        sql = _get_sql_query("cd", args["granularity"], "sbmSUBMISSIONS",
+                conditions="action='SBI' AND status='finished'")
+        res = _get_keyevent_trend(args, sql, return_sql=return_sql)
     else:
-        sql_query = "SELECT cd FROM sbmSUBMISSIONS " + \
-            "WHERE doctype='%s' AND action='SBI' " % args['doctype'] + \
-            "AND cd > '%s' AND cd < '%s' " % (lower, upper) + \
-            "AND status='finished' ORDER BY cd DESC"
-    action_dates = [x[0] for x in run_sql(sql_query)]
-    return _get_trend_from_actions(action_dates, 0,
-                                   args['t_start'], args['t_end'],
-                                   args['granularity'], args['t_format'])
+        sql = _get_sql_query("cd", args["granularity"], "sbmSUBMISSIONS",
+                conditions="doctype=%s AND action='SBI' AND status='finished'")
+        res = _get_keyevent_trend(args, sql, extra_param=[args['doctype']],
+                                  return_sql=return_sql)
 
+    return res
 
-def get_keyevent_loan_statistics(args):
+
+def get_keyevent_loan_statistics(args, return_sql=False):
     """
     Data:
     - Number of documents (=records) loaned
     - Number of items loaned on the total number of items
     - Number of items never loaned on the total number of items
     - Average time between the date of the record creation and  the date of the first loan
     Filter by
     - in a specified time span
-    - by user address (=Department)
     - by UDC (see MARC field 080__a - list to be submitted)
     - by item status (available, missing)
     - by date of publication (MARC field 260__c)
     - by date of the record creation in the database
 
 
     @param args['t_start']: Date and time of start point
     @type args['t_start']: str
 
     @param args['t_end']: Date and time of end point
     @type args['t_end']: str
 
-    @param args['user_address']: borrower address
-    @type args['user_address']: str
-
     @param args['udc']: MARC field 080__a
     @type args['udc']: str
 
     @param args['item_status']: available, missing...
     @type args['item_status']: str
 
     @param args['publication_date']: MARC field 260__c
     @type args['publication_date']: str
 
     @param args['creation_date']: date of the record creation in the database
     @type args['creation_date']: str
 
     @param args['t_format']: Date and time formatting string
     @type args['t_format']: str
     """
+    # collect action dates
     lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
     upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
 
     sql_from = "FROM crcLOAN l "
-    sql_where = "WHERE loaned_on > '%s' AND loaned_on < '%s' " % (lower, upper)
+    sql_where = "WHERE loaned_on > %s AND loaned_on < %s "
 
-    if 'user_address' in args and args['user_address'] != '':
-        sql_from += ", crcBORROWER bor "
-        sql_where += """AND l.id_crcBORROWER = bor.id AND
-             bor.address LIKE '%%%s%%' """ % args['user_address']
+    param = [lower, upper]
     if 'udc' in args and args['udc'] != '':
-        sql_where += "AND l.id_bibrec IN ( SELECT brb.id_bibrec \
-                  FROM bibrec_bib08x brb, bib08x b \
-                  WHERE brb.id_bibxxx = b.id AND tag='080__a' \
-                  AND value LIKE '%%%s%%')" % args['udc']
+        sql_where += "AND l." + _check_udc_value_where()
+        param.append(_get_udc_truncated(args['udc']))
     if 'item_status' in args and args['item_status'] != '':
         sql_from += ", crcITEM i "
-        sql_where += "AND l.barcode = i.barcode AND i.status = '%s' " % args['item_status']
+        sql_where += "AND l.barcode = i.barcode AND i.status = %s "
+        param.append(args['item_status'])
     if 'publication_date' in args and args['publication_date'] != '':
         sql_where += "AND l.id_bibrec IN ( SELECT brb.id_bibrec \
-                                   FROM bibrec_bib26x brb, bib26x b \
-                                   WHERE brb.id_bibxxx = b.id AND tag='260__c' \
-                               AND value LIKE '%%%s%%') " % args['publication_date']
+FROM bibrec_bib26x brb, bib26x b WHERE brb.id_bibxxx = b.id AND tag='260__c' \
+AND value LIKE %s)"
+        param.append('%%%s%%' % args['publication_date'])
     if 'creation_date' in args and args['creation_date'] != '':
         sql_from += ", bibrec br "
-        sql_where += """AND br.id=l.id_bibrec AND br.creation_date
-            LIKE '%%%s%%' """ % args['creation_date']
-    # Number of loans:
-    loans = run_sql("SELECT COUNT(DISTINCT l.id_bibrec) " + sql_from + sql_where)[0][0]
-
-    # Number of items loaned on the total number of items:
-    items_loaned = run_sql("SELECT COUNT(DISTINCT l.barcode) " + sql_from + sql_where)[0][0]
-    total_items = run_sql("SELECT COUNT(*) FROM crcITEM")[0][0]
-    loaned_on_total = float(items_loaned) / float(total_items)
+        sql_where += "AND br.id=l.id_bibrec AND br.creation_date LIKE %s "
+        param.append('%%%s%%' % args['creation_date'])
+    param = tuple(param)
 
-    # Number of items never loaned on the total number of items
-    never_loaned_on_total = float(total_items - items_loaned) / float(total_items)
+    # Number of loans:
+    loans_sql = "SELECT COUNT(DISTINCT l.id_bibrec) " + sql_from + sql_where
+    items_loaned_sql = "SELECT COUNT(DISTINCT l.barcode) " + sql_from + sql_where
+    # Only the CERN site wants the items of the collection "Books & Proceedings"
+    if CFG_CERN_SITE:
+        items_in_book_coll = _get_collection_recids_for_sql_query("Books & Proceedings")
+        if items_in_book_coll == "":
+            total_items_sql = 0
+        else:
+            total_items_sql = "SELECT COUNT(*) FROM crcITEM WHERE %s" % \
+                                    items_in_book_coll
+    else: # The rest take all the items
+        total_items_sql = "SELECT COUNT(*) FROM crcITEM"
 
     # Average time between the date of the record creation and  the date of the first loan
-    avg_sql = "SELECT DATEDIFF(MIN(loaned_on), MIN(br.creation_date)) " + sql_from
+    avg_sql = "SELECT AVG(DATEDIFF(loaned_on, br.creation_date)) " + sql_from
     if not ('creation_date' in args and args['creation_date'] != ''):
         avg_sql += ", bibrec br "
     avg_sql += sql_where
     if not ('creation_date' in args and args['creation_date'] != ''):
         avg_sql += "AND br.id=l.id_bibrec "
-    avg_sql += "GROUP BY l.id_bibrec, br.id"
-    res_avg = run_sql(avg_sql)
-    if len(res_avg) > 0:
-        avg = res_avg[0][0]
+    if return_sql:
+        return "<ol><li>%s</li><li>Items loaned * 100 / Number of items <ul><li>\
+Items loaned: %s </li><li>Number of items: %s</li></ul></li><li>100 - Items \
+loaned on total number of items</li><li>%s</li></ol>" % \
+            (loans_sql % param, items_loaned_sql % param, total_items_sql, avg_sql % param)
+    loans = run_sql(loans_sql, param)[0][0]
+    items_loaned = run_sql(items_loaned_sql, param)[0][0]
+    if total_items_sql:
+        total_items = run_sql(total_items_sql)[0][0]
     else:
-        avg = 0
-
+        total_items = 0
+    if total_items == 0:
+        loaned_on_total = 0
+        never_loaned_on_total = 0
+    else:
+        # Number of items loaned on the total number of items:
+        loaned_on_total = float(items_loaned) * 100 / float(total_items)
+        # Number of items never loaned on the total number of items:
+        never_loaned_on_total = 100L - loaned_on_total
+    avg = run_sql(avg_sql, param)[0][0]
+    if avg:
+        avg = float(avg)
+    else:
+        avg = 0L
     return ((loans, ), (loaned_on_total, ), (never_loaned_on_total, ), (avg, ))
 
 
-def get_keyevent_loan_lists(args):
+def get_keyevent_loan_lists(args, return_sql=False, limit=50):
     """
     Lists:
     - List of documents (= records) never loaned
     - List of most loaned documents  (columns: number of loans,
         number of copies and the creation date of the record, in
         order to calculate the number of loans by copy), sorted
         by decreasing order (50 items)
     Filter by
     - in a specified time span
     - by UDC (see MARC field 080__a - list to be submitted)
     - by loan period (4 week loan, one week loan...)
     - by a certain number of loans
     - by date of publication (MARC field 260__c)
     - by date of the record creation in the database
-    - by user address (=Department)
 
     @param args['t_start']: Date and time of start point
     @type args['t_start']: str
 
     @param args['t_end']: Date and time of end point
     @type args['t_end']: str
 
     @param args['udc']: MARC field 080__a
     @type args['udc']: str
 
     @param args['loan_period']: 4 week loan, one week loan...
     @type args['loan_period']: str
 
     @param args['min_loan']: minimum number of loans
     @type args['min_loan']: int
 
     @param args['max_loan']: maximum number of loans
     @type args['max_loan']: int
 
     @param args['publication_date']: MARC field 260__c
     @type args['publication_date']: str
 
     @param args['creation_date']: date of the record creation in the database
     @type args['creation_date']: str
 
-    @param args['user_address']: borrower address
-    @type args['user_address']: str
-
     @param args['t_format']: Date and time formatting string
     @type args['t_format']: str
     """
     lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
     upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
 
-    sql_from = "FROM crcLOAN l "
-    sql_where = "WHERE type = 'normal' AND loaned_on > %s AND loaned_on < %s "
-    param = [lower, upper]
+    sql_where = []
+    param = []
+    sql_from = ""
 
-    if 'user_address' in args and args['user_address'] != '':
-        sql_from += ", crcBORROWER bor "
-        sql_where += "AND l.id_crcBORROWER = bor.id AND bor.address LIKE %s "
-        param.append('%%%s%%' % args['user_address'])
     if 'udc' in args and args['udc'] != '':
-        sql_where += "AND l.id_bibrec IN ( SELECT brb.id_bibrec \
-                  FROM bibrec_bib08x brb, bib08x b \
-                  WHERE brb.id_bibxxx = b.id AND tag='080__a' \
-                  AND value LIKE %s)"
-        param.append('%%%s%%' % args['udc'])
+        sql_where.append("i." + _check_udc_value_where())
+        param.append(_get_udc_truncated(args['udc']))
     if 'loan_period' in args and args['loan_period'] != '':
-        sql_from += ", crcITEM i "
-        sql_where += "AND l.barcode = i.barcode AND i.loan_period = %s "
+        sql_where.append("loan_period = %s")
         param.append(args['loan_period'])
     if 'publication_date' in args and args['publication_date'] != '':
-        sql_where += "AND l.id_bibrec IN ( SELECT brb.id_bibrec \
-                                   FROM bibrec_bib26x brb, bib26x b \
-                                   WHERE brb.id_bibxxx = b.id AND tag='260__c' \
-                               AND value LIKE %s) "
+        sql_where.append("i.id_bibrec IN ( SELECT brb.id_bibrec \
+FROM bibrec_bib26x brb, bib26x b WHERE brb.id_bibxxx = b.id AND tag='260__c' \
+AND value LIKE %s)")
         param.append('%%%s%%' % args['publication_date'])
     if 'creation_date' in args and args['creation_date'] != '':
-        sql_from += ", bibrec br "
-        sql_where += "AND br.id=l.id_bibrec AND br.creation_date LIKE %s "
+        sql_from += ", bibrec br"
+        sql_where.append("br.id=i.id_bibrec AND br.creation_date LIKE %s")
         param.append('%%%s%%' % args['creation_date'])
-    param = tuple(param)
-    res = [("", "Title", "Author", "Edition", "Number of loans",
-            "Number of copies", "Date of creation of the record")]
-    # Documents (= records) never loaned:
-    for rec, copies in run_sql("""SELECT id_bibrec, COUNT(*) FROM crcITEM WHERE
-            id_bibrec NOT IN (SELECT l.id_bibrec """ + sql_from + sql_where +
-            ") GROUP BY id_bibrec", param):
-        loans = run_sql("SELECT COUNT(*) %s %s AND l.id_bibrec=%s" %
-                        (sql_from, sql_where, rec), param)[0][0]
-        try:
-            creation = run_sql("SELECT creation_date FROM bibrec WHERE id=%s", (rec, ))[0][0]
-        except:
-            creation = datetime.datetime(1970, 01, 01)
-        author = get_fieldvalues(rec, "100__a")
-        if len(author) > 0:
-            author = author[0]
-        else:
-            author = ""
-        edition = get_fieldvalues(rec, "250__a")
-        if len(edition) > 0:
-            edition = edition[0]
-        else:
-            edition = ""
-        res.append(('Documents never loaned', book_title_from_MARC(rec), author,
-                    edition, loans, copies, creation))
+    if sql_where:
+        sql_where = "WHERE %s AND" % " AND ".join(sql_where)
+    else:
+        sql_where = "WHERE"
+    param = tuple(param + [lower, upper])
 
-    # Most loaned documents
-    most_loaned = []
-    check_num_loans = ""
+    # SQL for both queries
+    check_num_loans = "HAVING "
     if 'min_loans' in args and args['min_loans'] != '':
         check_num_loans += "COUNT(*) >= %s" % args['min_loans']
     if 'max_loans' in args and args['max_loans'] != '' and args['max_loans'] != 0:
-        if check_num_loans != "":
+        if check_num_loans != "HAVING ":
             check_num_loans += " AND "
         check_num_loans += "COUNT(*) <= %s" % args['max_loans']
-    if check_num_loans != "":
-        check_num_loans = " HAVING " + check_num_loans
-    mldocs = run_sql("SELECT l.id_bibrec, COUNT(*) " + sql_from + sql_where +
-                " GROUP BY l.id_bibrec " + check_num_loans, param)
-
-    for rec, loans in mldocs:
-        copies = run_sql("SELECT COUNT(*) FROM crcITEM WHERE id_bibrec=%s", (rec, ))[0][0]
-        most_loaned.append((rec, loans, copies, loans / copies))
-    if most_loaned == []:
-        return (res)
-    most_loaned.sort(cmp=lambda x, y: cmp(x[3], y[3]))
-    if len(most_loaned) > 50:
-        most_loaned = most_loaned[:49]
-    most_loaned.reverse()
-    for rec, loans, copies, _ in most_loaned:
-        author = get_fieldvalues(rec, "100__a")
-        if len(author) > 0:
-            author = author[0]
-        else:
-            author = ""
-        edition = get_fieldvalues(rec, "250__a")
-        if len(edition) > 0:
-            edition = edition[0]
-        else:
-            edition = ""
-        try:
-            creation = run_sql("SELECT creation_date FROM bibrec WHERE id=%s", (rec, ))[0][0]
-        except:
-            creation = datetime.datetime(1970, 01, 01)
-        res.append(('Most loaned documents', book_title_from_MARC(rec), author,
-                    edition, loans, copies, creation))
+    # Optimized to get all the data in only one query (not call get_fieldvalues several times)
+    mldocs_sql = "SELECT i.id_bibrec, COUNT(*) \
+FROM crcLOAN l, crcITEM i%s %s l.barcode=i.barcode AND type = 'normal' AND \
+loaned_on > %%s AND loaned_on < %%s GROUP BY i.id_bibrec %s" % \
+        (sql_from, sql_where, check_num_loans)
+    limit_n = ""
+    if limit > 0:
+        limit_n = "LIMIT %d" % limit
+    nldocs_sql = "SELECT id_bibrec, COUNT(*) FROM crcITEM i%s %s \
+barcode NOT IN (SELECT barcode FROM crcLOAN WHERE loaned_on > %%s AND \
+loaned_on < %%s AND type = 'normal') GROUP BY id_bibrec ORDER BY COUNT(*) DESC %s" % \
+        (sql_from, sql_where, limit_n)
+
+    items_sql = "SELECT id_bibrec, COUNT(*) items FROM crcITEM GROUP BY id_bibrec"
+    creation_date_sql = "SELECT creation_date FROM bibrec WHERE id=%s"
+    authors_sql = "SELECT bx.value FROM bib10x bx, bibrec_bib10x bibx \
+WHERE bx.id = bibx.id_bibxxx AND bx.tag LIKE '100__a' AND bibx.id_bibrec=%s"
+    title_sql = "SELECT GROUP_CONCAT(bx.value SEPARATOR ' ') value FROM bib24x bx, bibrec_bib24x bibx \
+WHERE bx.id = bibx.id_bibxxx AND bx.tag LIKE %s AND bibx.id_bibrec=%s GROUP BY bibx.id_bibrec"
+    edition_sql = "SELECT bx.value FROM bib25x bx, bibrec_bib25x AS bibx \
+WHERE bx.id = bibx.id_bibxxx AND bx.tag LIKE '250__a' AND bibx.id_bibrec=%s"
+
+    if return_sql:
+        return "Most loaned: %s<br />Never loaned: %s" % \
+            (mldocs_sql % param, nldocs_sql % param)
+
+    mldocs = run_sql(mldocs_sql, param)
+    items = dict(run_sql(items_sql))
+    order_m = []
+    for mldoc in mldocs:
+        order_m.append([mldoc[0], mldoc[1], items[mldoc[0]], \
+                      float(mldoc[1]) / float(items[mldoc[0]])])
+    order_m = sorted(order_m, key=itemgetter(3))
+    order_m.reverse()
+    # Check limit values
+
+    if limit > 0:
+        order_m = order_m[:limit]
+
+    res = [("", "Title", "Author", "Edition", "Number of loans",
+            "Number of copies", "Date of creation of the record")]
+    for mldoc in order_m:
+        res.append(("Most loaned documents",
+            _check_empty_value(run_sql(title_sql, ('245__%%', mldoc[0], ))),
+            _check_empty_value(run_sql(authors_sql, (mldoc[0], ))),
+            _check_empty_value(run_sql(edition_sql, (mldoc[0], ))),
+            mldoc[1], mldoc[2],
+            _check_empty_value(run_sql(creation_date_sql, (mldoc[0], )))))
+
+    nldocs = run_sql(nldocs_sql, param)
+    for nldoc in nldocs:
+        res.append(("Not loaned documents",
+            _check_empty_value(run_sql(title_sql, ('245__%%', nldoc[0], ))),
+            _check_empty_value(run_sql(authors_sql, (nldoc[0], ))),
+            _check_empty_value(run_sql(edition_sql, (nldoc[0], ))),
+            0, items[nldoc[0]],
+            _check_empty_value(run_sql(creation_date_sql, (nldoc[0], )))))
+#    nldocs = run_sql(nldocs_sql, param_n)
     return (res)
 
 
-def get_keyevent_renewals_lists(args):
+def get_keyevent_renewals_lists(args, return_sql=False, limit=50):
     """
     Lists:
     - List of most renewed items stored by decreasing order (50 items)
     Filter by
     - in a specified time span
     - by UDC (see MARC field 080__a - list to be submitted)
     - by collection
-    - by user address (=Department)
 
     @param args['t_start']: Date and time of start point
     @type args['t_start']: str
 
     @param args['t_end']: Date and time of end point
     @type args['t_end']: str
 
     @param args['udc']: MARC field 080__a
     @type args['udc']: str
 
     @param args['collection']: collection of the record
     @type args['collection']: str
 
-    @param args['user_address']: borrower address
-    @type args['user_address']: str
-
     @param args['t_format']: Date and time formatting string
     @type args['t_format']: str
     """
     lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
     upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
 
     sql_from = "FROM crcLOAN l, crcITEM i "
     sql_where = "WHERE loaned_on > %s AND loaned_on < %s AND i.barcode = l.barcode "
     param = [lower, upper]
-    if 'user_address' in args and args['user_address'] != '':
-        sql_from += ", crcBORROWER bor "
-        sql_where += "AND l.id_crcBORROWER = bor.id AND bor.address LIKE %s "
-        param.append('%%%s%%' % args['user_address'])
     if 'udc' in args and args['udc'] != '':
-        sql_where += "AND l.id_bibrec IN ( SELECT brb.id_bibrec \
-                  FROM bibrec_bib08x brb, bib08x b \
-                  WHERE brb.id_bibxxx = b.id AND tag='080__a' \
-                  AND value LIKE %s)"
-        param.append('%%%s%%' % args['udc'])
+        sql_where += "AND l." + _check_udc_value_where()
+        param.append(_get_udc_truncated(args['udc']))
     filter_coll = False
     if 'collection' in args and args['collection'] != '':
         filter_coll = True
         recid_list = get_collection_reclist(args['collection'])
 
     param = tuple(param)
+
+    if limit > 0:
+        limit = "LIMIT %d" % limit
+    else:
+        limit = ""
+    sql = "SELECT i.id_bibrec, SUM(number_of_renewals) %s %s \
+GROUP BY i.id_bibrec ORDER BY SUM(number_of_renewals) DESC %s" \
+            % (sql_from, sql_where, limit)
+    if return_sql:
+        return sql % param
     # Results:
     res = [("Title", "Author", "Edition", "Number of renewals")]
-    for rec, renewals in run_sql("SELECT i.id_bibrec, SUM(number_of_renewals) "
-            + sql_from + sql_where +
-            " GROUP BY i.id_bibrec ORDER BY SUM(number_of_renewals) DESC LIMIT 50", param):
+    for rec, renewals in run_sql(sql, param):
         if filter_coll and rec not in recid_list:
             continue
         author = get_fieldvalues(rec, "100__a")
         if len(author) > 0:
             author = author[0]
         else:
             author = ""
         edition = get_fieldvalues(rec, "250__a")
         if len(edition) > 0:
             edition = edition[0]
         else:
             edition = ""
         res.append((book_title_from_MARC(rec), author, edition, int(renewals)))
     return (res)
 
 
-def get_keyevent_returns_table(args):
+def get_keyevent_returns_table(args, return_sql=False):
     """
     Data:
-    - Number of overdue returns in a year
+    - Number of overdue returns in a timespan
 
     @param args['t_start']: Date and time of start point
     @type args['t_start']: str
 
     @param args['t_end']: Date and time of end point
     @type args['t_end']: str
 
     @param args['t_format']: Date and time formatting string
     @type args['t_format']: str
     """
     lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
     upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
 
     # Overdue returns:
-    returns = run_sql("SELECT COUNT(*) FROM crcLOAN l \
-                     WHERE loaned_on > %s AND loaned_on < %s AND \
-                         due_date < NOW() AND (returned_on = '0000-00-00 00:00:00' \
-                         OR returned_on > due_date)", (lower, upper))[0][0]
+    sql = "SELECT COUNT(*) FROM crcLOAN l WHERE loaned_on > %s AND loaned_on < %s AND \
+due_date < NOW() AND (returned_on IS NULL OR returned_on > due_date)"
 
-    return ((returns, ), )
+    if return_sql:
+        return sql % (lower, upper)
+    return ((run_sql(sql, (lower, upper))[0][0], ), )
 
 
-def get_keyevent_trend_returns_percentage(args):
+def get_keyevent_trend_returns_percentage(args, return_sql=False):
     """
     Returns the number of overdue returns and the total number of returns
 
     @param args['t_start']: Date and time of start point
     @type args['t_start']: str
 
     @param args['t_end']: Date and time of end point
     @type args['t_end']: str
 
     @param args['granularity']: Granularity of date and time
     @type args['granularity']: str
 
     @param args['t_format']: Date and time formatting string
     @type args['t_format']: str
     """
-    lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
-    upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
-
     # SQL to determine overdue returns:
-    sql = "SELECT due_date FROM crcLOAN " + \
-          "WHERE  loaned_on > %s AND loaned_on < %s AND " + \
-            "due_date < NOW() AND (returned_on = '0000-00-00 00:00:00' " + \
-            "OR returned_on > due_date) ORDER BY due_date DESC"
-    overdue = [x[0] for x in run_sql(sql, (lower, upper))]
+    overdue = _get_sql_query("due_date", args["granularity"], "crcLOAN",
+                conditions="due_date < NOW() AND due_date IS NOT NULL \
+AND (returned_on IS NULL OR returned_on > due_date)",
+                dates_range_param="loaned_on")
 
     # SQL to determine all returns:
-    sql = "SELECT due_date FROM crcLOAN " + \
-          "WHERE loaned_on > %s AND loaned_on < %s AND " + \
-           "due_date < NOW() ORDER BY due_date DESC"
-    total = [x[0] for x in run_sql(sql, (lower, upper))]
+    total = _get_sql_query("due_date", args["granularity"], "crcLOAN",
+                conditions="due_date < NOW() AND due_date IS NOT NULL",
+                dates_range_param="loaned_on")
 
     # Compute the trend for both types
-    s_trend = _get_trend_from_actions(overdue, 0, args['t_start'],
-                         args['t_end'], args['granularity'], args['t_format'])
-    a_trend = _get_trend_from_actions(total, 0, args['t_start'],
-                         args['t_end'], args['granularity'], args['t_format'])
+    o_trend = _get_keyevent_trend(args, overdue,
+                        return_sql=return_sql, sql_text="Overdue: %s")
+    t_trend = _get_keyevent_trend(args, total,
+                        return_sql=return_sql, sql_text="Total: %s")
 
     # Assemble, according to return type
-    return [(s_trend[i][0], (s_trend[i][1], a_trend[i][1]))
-            for i in range(len(s_trend))]
+    if return_sql:
+        return "%s <br /> %s" % (o_trend, t_trend)
+    return [(o_trend[i][0], (o_trend[i][1], t_trend[i][1]))
+            for i in range(len(o_trend))]
 
 
-def get_keyevent_ill_requests_statistics(args):
+def get_keyevent_ill_requests_statistics(args, return_sql=False):
     """
     Data:
     - Number of ILL requests
-    - Number of satisfied ILL requests 3 months after the date of request
-        creation on a period of one year
-    - Percentage of satisfied ILL requests 3 months after the date of
-        request creation on a period of one year
+    - Number of satisfied ILL requests 2 weeks after the date of request
+        creation on a timespan
 
     - Average time between the date and  the hour of the ill request
         date and the date and the hour of the delivery item to the user
-        on a period of one year (with flexibility in the choice of the dates)
+        on a timespan
     - Average time between the date and  the hour the ILL request
         was sent to the supplier and the date and hour of the
-        delivery item on a period of one year (with flexibility in
-        the choice of the dates)
+        delivery item on a timespan
 
     Filter by
     - in a specified time span
     - by type of document (book or article)
-    - by user address
     - by status of the request (= new, sent, etc.)
     - by supplier
 
     @param args['t_start']: Date and time of start point
     @type args['t_start']: str
 
     @param args['t_end']: Date and time of end point
     @type args['t_end']: str
 
     @param args['doctype']: type of document (book or article)
     @type args['doctype']: str
 
     @param args['status']: status of the request (= new, sent, etc.)
     @type args['status']: str
 
     @param args['supplier']: supplier
     @type args['supplier']: str
 
-    @param args['user_address']: borrower address
-    @type args['user_address']: str
-
     @param args['t_format']: Date and time formatting string
     @type args['t_format']: str
     """
     lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
     upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
 
     sql_from = "FROM crcILLREQUEST ill "
     sql_where = "WHERE period_of_interest_from > %s AND period_of_interest_from < %s "
 
     param = [lower, upper]
 
-    if 'user_address' in args and args['user_address'] != '':
-        sql_from += ", crcBORROWER bor "
-        sql_where += "AND ill.id_crcBORROWER = bor.id AND bor.address LIKE %s "
-        param.append('%%%s%%' % args['user_address'])
     if 'doctype' in args and args['doctype'] != '':
         sql_where += "AND  ill.request_type=%s"
         param.append(args['doctype'])
     if 'status' in args and args['status'] != '':
         sql_where += "AND ill.status = %s "
         param.append(args['status'])
+    else:
+        sql_where += "AND ill.status != %s "
+        param.append("cancelled") #FIXME: change to CFG variable
     if 'supplier' in args and args['supplier'] != '':
         sql_from += ", crcLIBRARY lib "
         sql_where += "AND lib.id=ill.id_crcLIBRARY AND lib.name=%s "
         param.append(args['supplier'])
 
     param = tuple(param)
-
+    requests_sql = "SELECT COUNT(*) %s %s" % (sql_from, sql_where)
+    satrequests_sql = "SELECT COUNT(*) %s %s \
+AND arrival_date IS NOT NULL AND \
+DATEDIFF(arrival_date, period_of_interest_from) < 14 " % (sql_from, sql_where)
+    avgdel_sql = "SELECT AVG(TIMESTAMPDIFF(DAY, period_of_interest_from, arrival_date)) %s %s \
+AND arrival_date IS NOT NULL" % (sql_from, sql_where)
+    avgsup_sql = "SELECT AVG(TIMESTAMPDIFF(DAY, request_date, arrival_date)) %s %s \
+AND arrival_date IS NOT NULL \
+AND request_date IS NOT NULL" % (sql_from, sql_where)
+    if return_sql:
+        return "<ol><li>%s</li><li>%s</li><li>%s</li><li>%s</li></ol>" % \
+            (requests_sql % param, satrequests_sql % param,
+             avgdel_sql % param, avgsup_sql % param)
     # Number of requests:
-    requests = run_sql("SELECT COUNT(*) " + sql_from + sql_where, param)[0][0]
+    requests = run_sql(requests_sql, param)[0][0]
 
-    # Number of satisfied ILL requests 3 months after the date of request creation:
-    satrequests = run_sql("SELECT COUNT(*) " + sql_from + sql_where +
-                          "AND arrival_date != '0000-00-00 00:00:00' AND \
-                          DATEDIFF(arrival_date, period_of_interest_from) < 90 ", param)[0][0]
+    # Number of satisfied ILL requests 2 weeks after the date of request creation:
+    satrequests = run_sql(satrequests_sql, param)[0][0]
 
     # Average time between the date and the hour of the ill request date and
     # the date and the hour of the delivery item to the user
-    avgdel = run_sql("SELECT AVG(TIMESTAMPDIFF(HOUR, period_of_interest_from, request_date)) "
-                     + sql_from + sql_where, param)[0][0]
-    if avgdel is int:
-        avgdel = int(avgdel)
+    avgdel = run_sql(avgdel_sql, param)[0][0]
+    if avgdel:
+        avgdel = float(avgdel)
     else:
         avgdel = 0
     # Average time between the date and  the hour the ILL request was sent to
     # the supplier and the date and hour of the delivery item
-    avgsup = run_sql("SELECT AVG(TIMESTAMPDIFF(HOUR, arrival_date, request_date)) "
-                     + sql_from + sql_where, param)[0][0]
-    if avgsup is int:
-        avgsup = int(avgsup)
+    avgsup = run_sql(avgsup_sql, param)[0][0]
+    if avgsup:
+        avgsup = float(avgsup)
     else:
         avgsup = 0
 
     return ((requests, ), (satrequests, ), (avgdel, ), (avgsup, ))
 
 
-def get_keyevent_ill_requests_lists(args):
+def get_keyevent_ill_requests_lists(args, return_sql=False, limit=50):
     """
     Lists:
     - List of ILL requests
     Filter by
     - in a specified time span
     - by type of request (article or book)
     - by supplier
 
     @param args['t_start']: Date and time of start point
     @type args['t_start']: str
 
     @param args['t_end']: Date and time of end point
     @type args['t_end']: str
 
     @param args['doctype']: type of request (article or book)
     @type args['doctype']: str
 
     @param args['supplier']: supplier
     @type args['supplier']: str
 
     @param args['t_format']: Date and time formatting string
     @type args['t_format']: str
     """
     lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
     upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
 
     sql_from = "FROM crcILLREQUEST ill "
-    sql_where = "WHERE request_date > %s AND request_date < %s "
+    sql_where = "WHERE status != 'cancelled' AND request_date > %s AND request_date < %s " #FIXME: change 'cancelled' to CFG variable
 
     param = [lower, upper]
 
     if 'doctype' in args and args['doctype'] != '':
-        sql_where += "AND  ill.request_type=%s"
+        sql_where += "AND ill.request_type=%s "
         param.append(args['doctype'])
     if 'supplier' in args and args['supplier'] != '':
         sql_from += ", crcLIBRARY lib "
         sql_where += "AND lib.id=ill.id_crcLIBRARY AND lib.name=%s "
         param.append(args['supplier'])
+    param = tuple(param)
 
+    if limit > 0:
+        limit = "LIMIT %d" % limit
+    else:
+        limit = ""
+    sql = "SELECT ill.id, item_info %s %s %s" % (sql_from, sql_where, limit)
+    if return_sql:
+        return sql % param
     # Results:
-    res = [("Title", "Author", "Edition")]
-    for item_info in run_sql("SELECT item_info " + sql_from + sql_where + " LIMIT 100", param):
-        item_info = eval(item_info[0])
+    res = [("Id", "Title", "Author", "Edition")]
+    for req_id, item_info in run_sql(sql, param):
+        item_info = eval(item_info)
         try:
-            res.append((item_info['title'], item_info['authors'], item_info['edition']))
+            res.append((req_id, item_info['title'], item_info['authors'], item_info['edition']))
         except KeyError:
-            None
+            pass
     return (res)
 
 
-def get_keyevent_trend_satisfied_ill_requests_percentage(args):
+def get_keyevent_trend_satisfied_ill_requests_percentage(args, return_sql=False):
     """
-    Returns the number of satisfied ILL requests 3 months after the date of request
+    Returns the number of satisfied ILL requests 2 weeks after the date of request
     creation and the total number of ILL requests
 
     @param args['t_start']: Date and time of start point
     @type args['t_start']: str
 
     @param args['t_end']: Date and time of end point
     @type args['t_end']: str
 
     @param args['doctype']: type of document (book or article)
     @type args['doctype']: str
 
     @param args['status']: status of the request (= new, sent, etc.)
     @type args['status']: str
 
     @param args['supplier']: supplier
     @type args['supplier']: str
 
-    @param args['user_address']: borrower address
-    @type args['user_address']: str
-
     @param args['granularity']: Granularity of date and time
     @type args['granularity']: str
 
     @param args['t_format']: Date and time formatting string
     @type args['t_format']: str
     """
-    lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
-    upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
-
-    sql_from = "FROM crcILLREQUEST ill "
-    sql_where = "WHERE request_date > %s AND request_date < %s "
-    param = [lower, upper]
+    sql_from = "crcILLREQUEST ill "
+    sql_where = ""
+    param = []
 
-    if 'user_address' in args and args['user_address'] != '':
-        sql_from += ", crcBORROWER bor "
-        sql_where += "AND ill.id_crcBORROWER = bor.id AND bor.address LIKE %s "
-        param.append('%%%s%%' % args['user_address'])
     if 'doctype' in args and args['doctype'] != '':
-        sql_where += "AND  ill.request_type=%s"
+        sql_where += "AND ill.request_type=%s "
         param.append(args['doctype'])
     if 'status' in args and args['status'] != '':
         sql_where += "AND ill.status = %s "
         param.append(args['status'])
+    else:
+        sql_where += "AND ill.status != %s "
+        param.append("cancelled") #FIXME: change to CFG variable
     if 'supplier' in args and args['supplier'] != '':
         sql_from += ", crcLIBRARY lib "
         sql_where += "AND lib.id=ill.id_crcLIBRARY AND lib.name=%s "
         param.append(args['supplier'])
 
     # SQL to determine satisfied ILL requests:
-    sql = "SELECT request_date " + sql_from + sql_where + \
-            "AND ADDDATE(request_date, 90) < NOW() AND (arrival_date != '0000-00-00 00:00:00' " + \
-            "OR arrival_date < ADDDATE(request_date, 90)) ORDER BY request_date DESC"
-    satisfied = [x[0] for x in run_sql(sql, param)]
+    satisfied = _get_sql_query("request_date", args["granularity"], sql_from,
+                  conditions="ADDDATE(request_date, 14) < NOW() AND \
+(arrival_date IS NOT NULL AND arrival_date < ADDDATE(request_date, 14)) " + sql_where)
 
     # SQL to determine all ILL requests:
-    sql = "SELECT request_date " + sql_from + sql_where + \
-           " AND ADDDATE(request_date, 90) < NOW() ORDER BY request_date DESC"
-    total = [x[0] for x in run_sql(sql, param)]
+    total = _get_sql_query("request_date", args["granularity"], sql_from,
+                  conditions="ADDDATE(request_date, 14) < NOW() "+ sql_where)
 
     # Compute the trend for both types
-    s_trend = _get_trend_from_actions(satisfied, 0, args['t_start'],
-                         args['t_end'], args['granularity'], args['t_format'])
-    a_trend = _get_trend_from_actions(total, 0, args['t_start'],
-                         args['t_end'], args['granularity'], args['t_format'])
+    s_trend = _get_keyevent_trend(args, satisfied, extra_param=param,
+                        return_sql=return_sql, sql_text="Satisfied: %s")
+    t_trend = _get_keyevent_trend(args, total, extra_param=param,
+                        return_sql=return_sql, sql_text="Total: %s")
 
     # Assemble, according to return type
-    return [(s_trend[i][0], (s_trend[i][1], a_trend[i][1]))
+    if return_sql:
+        return "%s <br /> %s" % (s_trend, t_trend)
+    return [(s_trend[i][0], (s_trend[i][1], t_trend[i][1]))
             for i in range(len(s_trend))]
 
 
-def get_keyevent_items_statistics(args):
+def get_keyevent_items_statistics(args, return_sql=False):
     """
     Data:
     - The total number of items
     - Total number of new items added in last year
     Filter by
     - in a specified time span
     - by collection
     - by UDC (see MARC field 080__a - list to be submitted)
 
     @param args['t_start']: Date and time of start point
     @type args['t_start']: str
 
     @param args['t_end']: Date and time of end point
     @type args['t_end']: str
 
     @param args['udc']: MARC field 080__a
     @type args['udc']: str
 
     @param args['t_format']: Date and time formatting string
     @type args['t_format']: str
     """
     lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
     upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
 
     sql_from = "FROM crcITEM i "
     sql_where = "WHERE "
 
     param = []
 
     if 'udc' in args and args['udc'] != '':
-        sql_where += "i.id_bibrec IN ( SELECT brb.id_bibrec \
-                  FROM bibrec_bib08x brb, bib08x b \
-                  WHERE brb.id_bibxxx = b.id AND tag='080__a' \
-                  AND value LIKE %s)"
-        param.append('%%%s%%' % args['udc'])
+        sql_where += "i." + _check_udc_value_where()
+        param.append(_get_udc_truncated(args['udc']))
 
     # Number of items:
     if sql_where == "WHERE ":
         sql_where = ""
-    items = run_sql("SELECT COUNT(i.id_bibrec) " + sql_from + sql_where, param)[0][0]
+    items_sql = "SELECT COUNT(i.id_bibrec) %s %s" % (sql_from, sql_where)
 
     # Number of new items:
-    param += [lower, upper]
     if sql_where == "":
         sql_where = "WHERE creation_date > %s AND creation_date < %s "
     else:
         sql_where += " AND creation_date > %s AND creation_date < %s "
-    new_items = run_sql("SELECT COUNT(i.id_bibrec) " + sql_from + sql_where, param)[0][0]
+    new_items_sql = "SELECT COUNT(i.id_bibrec) %s %s" % (sql_from, sql_where)
 
-    return ((items, ), (new_items, ))
+    if return_sql:
+        return "Total: %s <br />New: %s" % (items_sql % tuple(param), new_items_sql % tuple(param + [lower, upper]))
+    return ((run_sql(items_sql, tuple(param))[0][0], ), (run_sql(new_items_sql, tuple(param + [lower, upper]))[0][0], ))
 
 
-def get_keyevent_items_lists(args):
+def get_keyevent_items_lists(args, return_sql=False, limit=50):
     """
     Lists:
     - The list of items
     Filter by
     - by library (=physical location of the item)
     - by status (=on loan, available, requested, missing...)
 
     @param args['library']: physical location of the item
     @type args[library'']: str
 
     @param args['status']: on loan, available, requested, missing...
     @type args['status']: str
     """
 
     sql_from = "FROM crcITEM i "
     sql_where = "WHERE "
 
     param = []
 
     if 'library' in args and args['library'] != '':
         sql_from += ", crcLIBRARY li "
         sql_where += "li.id=i.id_crcLIBRARY AND li.name=%s "
         param.append(args['library'])
 
     if 'status' in args and args['status'] != '':
         if sql_where != "WHERE ":
             sql_where += "AND "
         sql_where += "i.status = %s "
         param.append(args['status'])
-
+    param = tuple(param)
     # Results:
     res = [("Title", "Author", "Edition", "Barcode", "Publication date")]
     if sql_where == "WHERE ":
         sql_where = ""
+    if limit > 0:
+        limit = "LIMIT %d" % limit
+    else:
+        limit = ""
+    sql = "SELECT i.barcode, i.id_bibrec %s %s %s" % (sql_from, sql_where, limit)
     if len(param) == 0:
-        sqlres = run_sql("SELECT i.barcode, i.id_bibrec " +
-                       sql_from + sql_where + " LIMIT 100")
+        sqlres = run_sql(sql)
     else:
-        sqlres = run_sql("SELECT i.barcode, i.id_bibrec " +
-                       sql_from + sql_where + " LIMIT 100", tuple(param))
+        sqlres = run_sql(sql, tuple(param))
+        sql = sql % param
+    if return_sql:
+        return sql
+
     for barcode, rec in sqlres:
         author = get_fieldvalues(rec, "100__a")
         if len(author) > 0:
             author = author[0]
         else:
             author = ""
         edition = get_fieldvalues(rec, "250__a")
         if len(edition) > 0:
             edition = edition[0]
         else:
             edition = ""
         res.append((book_title_from_MARC(rec),
                     author, edition, barcode,
                     book_information_from_MARC(int(rec))[1]))
     return (res)
 
 
-def get_keyevent_loan_request_statistics(args):
+def get_keyevent_loan_request_statistics(args, return_sql=False):
     """
     Data:
     - Number of hold requests, one week after the date of request creation
     - Number of successful hold requests transactions
     - Average time between the hold request date and the date of delivery document  in a year
     Filter by
     - in a specified time span
     - by item status (available, missing)
 
     @param args['t_start']: Date and time of start point
     @type args['t_start']: str
 
     @param args['t_end']: Date and time of end point
     @type args['t_end']: str
 
     @param args['item_status']: available, missing...
     @type args['item_status']: str
 
     @param args['t_format']: Date and time formatting string
     @type args['t_format']: str
     """
     lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
     upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
 
     sql_from = "FROM crcLOANREQUEST lr "
     sql_where = "WHERE request_date > %s AND request_date < %s "
 
     param = [lower, upper]
 
     if 'item_status' in args and args['item_status'] != '':
         sql_from += ", crcITEM i "
         sql_where += "AND lr.barcode = i.barcode AND i.status = %s "
         param.append(args['item_status'])
+    param = tuple(param)
 
     custom_table = get_customevent_table("loanrequest")
     # Number of hold requests, one week after the date of request creation:
-    holds = run_sql("""SELECT COUNT(*) %s, %s ws %s AND ws.request_id=lr.id AND
-        DATEDIFF(ws.creation_time, lr.request_date) >= 7""" %
-        (sql_from, custom_table, sql_where), param)[0][0]
+    holds = "SELECT COUNT(*) %s, %s ws %s AND ws.request_id=lr.id AND \
+DATEDIFF(ws.creation_time, lr.request_date) >= 7" % (sql_from, custom_table, sql_where)
 
     # Number of successful hold requests transactions
-    succesful_holds = run_sql("SELECT COUNT(*) %s %s AND lr.status='done'" %
-                              (sql_from, sql_where), param)[0][0]
+    successful_holds = "SELECT COUNT(*) %s %s AND lr.status='done'" % (sql_from, sql_where)
 
     # Average time between the hold request date and the date of delivery document in a year
-    avg = run_sql("""SELECT AVG(DATEDIFF(ws.creation_time, lr.request_date))
-        %s, %s ws %s AND ws.request_id=lr.id""" %
-        (sql_from, custom_table, sql_where), param)[0][0]
+    avg_sql = "SELECT AVG(DATEDIFF(ws.creation_time, lr.request_date)) \
+%s, %s ws %s AND ws.request_id=lr.id" % (sql_from, custom_table, sql_where)
 
+    if return_sql:
+        return "<ol><li>%s</li><li>%s</li><li>%s</li></ol>" % \
+            (holds % param, successful_holds % param, avg_sql % param)
+    avg = run_sql(avg_sql, param)[0][0]
     if avg is int:
         avg = int(avg)
     else:
         avg = 0
-    return ((holds, ), (succesful_holds, ), (avg, ))
+    return ((run_sql(holds, param)[0][0], ),
+        (run_sql(successful_holds, param)[0][0], ), (avg, ))
 
 
-def get_keyevent_loan_request_lists(args):
+def get_keyevent_loan_request_lists(args, return_sql=False, limit=50):
     """
     Lists:
     - List of the most requested items
     Filter by
     - in a specified time span
     - by UDC (see MARC field 080__a - list to be submitted)
-    - by user address (=Department)
 
     @param args['t_start']: Date and time of start point
     @type args['t_start']: str
 
     @param args['t_end']: Date and time of end point
     @type args['t_end']: str
 
     @param args['udc']: MARC field 080__a
     @type args['udc']: str
 
-    @param args['user_address']: borrower address
-    @type args['user_address']: str
-
     @param args['t_format']: Date and time formatting string
     @type args['t_format']: str
     """
     lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
     upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
 
     sql_from = "FROM crcLOANREQUEST lr "
     sql_where = "WHERE request_date > %s AND request_date < %s "
 
     param = [lower, upper]
 
-    if 'user_address' in args and args['user_address'] != '':
-        sql_from += ", crcBORROWER bor "
-        sql_where += "AND lr.id_crcBORROWER = bor.id AND bor.address LIKE %s "
-        param.append('%%%s%%' % args['user_address'])
     if 'udc' in args and args['udc'] != '':
-        sql_where += "AND lr.id_bibrec IN ( SELECT brb.id_bibrec \
-                  FROM bibrec_bib08x brb, bib08x b \
-                  WHERE brb.id_bibxxx = b.id AND tag='080__a' \
-                  AND value LIKE %s)"
-        param.append('%%%s%%' % args['udc'])
-
+        sql_where += "AND lr." + _check_udc_value_where()
+        param.append(_get_udc_truncated(args['udc']))
+    if limit > 0:
+        limit = "LIMIT %d" % limit
+    else:
+        limit = ""
+    sql = "SELECT lr.barcode %s %s GROUP BY barcode \
+ORDER BY COUNT(*) DESC %s" % (sql_from, sql_where, limit)
+    if return_sql:
+        return sql
     res = [("Title", "Author", "Edition", "Barcode")]
 
     # Most requested items:
-    for barcode in run_sql("SELECT lr.barcode " + sql_from + sql_where +
-                           " GROUP BY barcode ORDER BY COUNT(*) DESC", param):
+    for barcode in run_sql(sql, param):
         rec = get_id_bibrec(barcode[0])
         author = get_fieldvalues(rec, "100__a")
         if len(author) > 0:
             author = author[0]
         else:
             author = ""
         edition = get_fieldvalues(rec, "250__a")
         if len(edition) > 0:
             edition = edition[0]
         else:
             edition = ""
         res.append((book_title_from_MARC(rec), author, edition, barcode[0]))
 
     return (res)
 
 
-def get_keyevent_user_statistics(args):
+def get_keyevent_user_statistics(args, return_sql=False):
     """
     Data:
     - Total number of  active users (to be defined = at least one transaction in the past year)
     Filter by
     - in a specified time span
-    - by user address
     - by registration date
 
     @param args['t_start']: Date and time of start point
     @type args['t_start']: str
 
     @param args['t_end']: Date and time of end point
     @type args['t_end']: str
 
-    @param args['user_address']: borrower address
-    @type args['user_address']: str
-
     @param args['t_format']: Date and time formatting string
     @type args['t_format']: str
     """
     lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
     upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
 
     sql_from_ill = "FROM crcILLREQUEST ill "
     sql_from_loan = "FROM crcLOAN l "
     sql_where_ill = "WHERE request_date > %s AND request_date < %s "
     sql_where_loan = "WHERE loaned_on > %s AND loaned_on < %s "
-    sql_address = ""
-    param = [lower, upper, lower, upper]
-    if 'user_address' in args and args['user_address'] != '':
-        sql_address += ", crcBORROWER bor WHERE id = user AND \
-                       address LIKE %s "
-        param.append('%%%s%%' % args['user_address'])
+    param = (lower, upper, lower, upper)
 
     # Total number of  active users:
-    users = run_sql("""SELECT COUNT(DISTINCT user)
-        FROM ((SELECT id_crcBORROWER user %s %s) UNION
-        (SELECT id_crcBORROWER user %s %s)) res %s""" %
-        (sql_from_ill, sql_where_ill, sql_from_loan,
-         sql_where_loan, sql_address), param)[0][0]
+    users = "SELECT COUNT(DISTINCT user) FROM ((SELECT id_crcBORROWER user %s %s) \
+UNION (SELECT id_crcBORROWER user %s %s)) res" % \
+        (sql_from_ill, sql_where_ill, sql_from_loan, sql_where_loan)
 
-    return ((users, ), )
+    if return_sql:
+        return users % param
+    return ((run_sql(users, param)[0][0], ), )
 
 
-def get_keyevent_user_lists(args):
+def get_keyevent_user_lists(args, return_sql=False, limit=50):
     """
     Lists:
     - List of most intensive users (ILL requests + Loan)
     Filter by
     - in a specified time span
-    - by user address
     - by registration date
 
     @param args['t_start']: Date and time of start point
     @type args['t_start']: str
 
     @param args['t_end']: Date and time of end point
     @type args['t_end']: str
 
-    @param args['user_address']: borrower address
-    @type args['user_address']: str
-
     @param args['t_format']: Date and time formatting string
     @type args['t_format']: str
     """
     lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
     upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
 
-    sql_from_ill = "FROM crcILLREQUEST ill "
-    sql_from_loan = "FROM crcLOAN l "
-    sql_where_ill = "WHERE request_date > %s AND request_date < %s "
-    sql_where_loan = "WHERE loaned_on > %s AND loaned_on < %s "
-    sql_address = ""
-    param = [lower, upper, lower, upper]
-    if 'user_address' in args and args['user_address'] != '':
-        sql_address += ", crcBORROWER bor WHERE id = user AND \
-                       address LIKE %s "
-        param.append('%%%s%%' % args['user_address'])
+    param = (lower, upper, lower, upper)
 
+    if limit > 0:
+        limit = "LIMIT %d" % limit
+    else:
+        limit = ""
+    sql = "SELECT user, SUM(trans) FROM \
+((SELECT id_crcBORROWER user, COUNT(*) trans FROM crcILLREQUEST ill \
+WHERE request_date > %%s AND request_date < %%s GROUP BY id_crcBORROWER) UNION \
+(SELECT id_crcBORROWER user, COUNT(*) trans FROM crcLOAN l WHERE loaned_on > %%s AND \
+loaned_on < %%s GROUP BY id_crcBORROWER)) res GROUP BY user ORDER BY SUM(trans) DESC \
+%s" % (limit)
+
+    if return_sql:
+        return sql % param
     res = [("Name", "Address", "Mailbox", "E-mail", "Number of transactions")]
 
     # List of most intensive users (ILL requests + Loan):
-    for borrower_id, trans in run_sql("SELECT user, SUM(trans) FROM \
-             ((SELECT id_crcBORROWER user, COUNT(*) trans %s %s GROUP BY id_crcBORROWER) UNION \
-             (SELECT id_crcBORROWER user, COUNT(*) trans %s %s GROUP BY id_crcBORROWER)) res %s \
-             GROUP BY user ORDER BY SUM(trans) DESC"
-    % (sql_from_ill, sql_where_ill, sql_from_loan, sql_where_loan, sql_address), param):
+    for borrower_id, trans in run_sql(sql, param):
         name, address, mailbox, email = get_borrower_data(borrower_id)
         res.append((name, address, mailbox, email, int(trans)))
 
     return (res)
 
 # KEY EVENT SNAPSHOT SECTION
 
 def get_keyevent_snapshot_uptime_cmd():
     """
     A specific implementation of get_current_event().
 
     @return: The std-out from the UNIX command 'uptime'.
     @type: str
     """
     return _run_cmd('uptime').strip().replace('  ', ' ')
 
 
 def get_keyevent_snapshot_apache_processes():
     """
     A specific implementation of get_current_event().
 
     @return: The std-out from the UNIX command 'uptime'.
     @type: str
     """
     # The number of Apache processes (root+children)
     return _run_cmd('ps -e | grep apache2 | grep -v grep | wc -l')
 
 
 def get_keyevent_snapshot_bibsched_status():
     """
     A specific implementation of get_current_event().
 
     @return: Information about the number of tasks in the different status modes.
     @type: [(str, int)]
     """
     sql = "SELECT status, COUNT(status) FROM schTASK GROUP BY status"
     return [(x[0], int(x[1])) for x in run_sql(sql)]
 
 
 def get_keyevent_snapshot_sessions():
     """
     A specific implementation of get_current_event().
 
     @return: The current number of website visitors (guests, logged in)
     @type: (int, int)
     """
     # SQL to retrieve sessions in the Guests
     sql = "SELECT COUNT(session_expiry) " + \
           "FROM session INNER JOIN user ON uid=id " + \
           "WHERE email = '' AND " + \
           "session_expiry-%d < unix_timestamp() AND " \
           % WEBSTAT_SESSION_LENGTH + \
           "unix_timestamp() < session_expiry"
     guests = run_sql(sql)[0][0]
 
     # SQL to retrieve sessions in the Logged in users
     sql = "SELECT COUNT(session_expiry) " + \
           "FROM session INNER JOIN user ON uid=id " + \
           "WHERE email <> '' AND " + \
           "session_expiry-%d < unix_timestamp() AND " \
           % WEBSTAT_SESSION_LENGTH + \
           "unix_timestamp() < session_expiry"
     logged_ins = run_sql(sql)[0][0]
 
     # Assemble, according to return type
     return (guests, logged_ins)
 
 
 def get_keyevent_bibcirculation_report(freq='yearly'):
     """
     Monthly and yearly report with the total number of circulation
     transactions (loans, renewals, returns, ILL requests, hold request).
     @param freq: yearly or monthly
     @type freq: str
 
     @return: loans, renewals, returns, ILL requests, hold request
     @type: (int, int, int, int, int)
     """
     if freq == 'monthly':
         datefrom = datetime.date.today().strftime("%Y-%m-01 00:00:00")
     else: #yearly
         datefrom = datetime.date.today().strftime("%Y-01-01 00:00:00")
-    loans, renewals, returns = run_sql("""SELECT COUNT(*),
-        SUM(number_of_renewals), COUNT(returned_on<>'0000-00-00')
-        FROM crcLOAN WHERE loaned_on > %s""", (datefrom, ))[0]
+    loans, renewals = run_sql("SELECT COUNT(*), \
+SUM(number_of_renewals) \
+FROM crcLOAN WHERE loaned_on > %s", (datefrom, ))[0]
+    returns = run_sql("SELECT COUNT(*) FROM crcLOAN \
+WHERE returned_on!='0000-00-00 00:00:00' and loaned_on > %s", (datefrom, ))[0][0]
     illrequests = run_sql("SELECT COUNT(*) FROM crcILLREQUEST WHERE request_date > %s",
                           (datefrom, ))[0][0]
     holdrequest = run_sql("SELECT COUNT(*) FROM crcLOANREQUEST WHERE request_date > %s",
                           (datefrom, ))[0][0]
     return (loans, renewals, returns, illrequests, holdrequest)
 
 # ERROR LOG STATS
 
 def update_error_log_analyzer():
     """Creates splitted files for today's errors"""
     _run_cmd('bash %s/webstat -e -is' % CFG_BINDIR)
 
 
 def get_invenio_error_log_ranking():
     """ Returns the ranking of the errors in the invenio log"""
     return _run_cmd('bash %s/webstat -e -ir' % CFG_BINDIR)
 
 
 def get_invenio_last_n_errors(nerr):
     """Returns the last nerr errors in the invenio log (without details)"""
     return _run_cmd('bash %s/webstat -e -il %d' % (CFG_BINDIR, nerr))
 
 
 def get_invenio_error_details(error):
     """Returns the complete text of the invenio error."""
     out = _run_cmd('bash %s/webstat -e -id %s' % (CFG_BINDIR, error))
     return out
 
 
 def get_apache_error_log_ranking():
     """ Returns the ranking of the errors in the apache log"""
     return _run_cmd('bash %s/webstat -e -ar' % CFG_BINDIR)
 
 # CUSTOM EVENT SECTION
 
 def get_customevent_trend(args):
     """
     Returns trend data for a custom event over a given
     timestamp range.
 
     @param args['event_id']: The event id
     @type args['event_id']: str
 
     @param args['t_start']: Date and time of start point
     @type args['t_start']: str
 
     @param args['t_end']: Date and time of end point
     @type args['t_end']: str
 
     @param args['granularity']: Granularity of date and time
     @type args['granularity']: str
 
     @param args['t_format']: Date and time formatting string
     @type args['t_format']: str
 
     @param args['cols']: Columns and it's content that will be include
                          if don't exist or it's empty it will include all cols
     @type args['cols']: [ [ str, str ], ]
     """
     # Get a MySQL friendly date
     lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
     upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
     tbl_name = get_customevent_table(args['event_id'])
     col_names = get_customevent_args(args['event_id'])
 
-    sql_query = ["SELECT creation_time FROM %s WHERE creation_time > '%s'"
-                 % (tbl_name, lower)]
-    sql_query.append("AND creation_time < '%s'" % upper)
-    sql_param = []
+    where = []
+    sql_param = [lower, upper]
     for col_bool, col_title, col_content in args['cols']:
         if not col_title in col_names:
             continue
         if col_content:
-            if col_bool == "and" or col_bool == "":
-                sql_query.append("AND %s"
+            if col_bool == "" or not where:
+                where.append(wash_table_column_name(col_title))
+            elif col_bool == "and":
+                where.append("AND %s"
                                  % wash_table_column_name(col_title))
             elif col_bool == "or":
-                sql_query.append("OR %s"
+                where.append("OR %s"
                                  % wash_table_column_name(col_title))
             elif col_bool == "and_not":
-                sql_query.append("AND NOT %s"
+                where.append("AND NOT %s"
                                  % wash_table_column_name(col_title))
             else:
                 continue
-            sql_query.append(" LIKE %s")
+            where.append(" LIKE %s")
             sql_param.append("%" + col_content + "%")
-    sql_query.append("ORDER BY creation_time DESC")
-    sql = ' '.join(sql_query)
 
-    dates = [x[0] for x in run_sql(sql, tuple(sql_param))]
-    return _get_trend_from_actions(dates, 0, args['t_start'], args['t_end'],
+    sql = _get_sql_query("creation_time", args['granularity'], tbl_name, " ".join(where))
+
+    return _get_trend_from_actions(run_sql(sql, tuple(sql_param)), 0,
+                                   args['t_start'], args['t_end'],
                                    args['granularity'], args['t_format'])
 
 
 def get_customevent_dump(args):
     """
     Similar to a get_event_trend implemention, but NO refining aka frequency
     handling is carried out what so ever. This is just a dump. A dump!
 
     @param args['event_id']: The event id
     @type args['event_id']: str
 
     @param args['t_start']: Date and time of start point
     @type args['t_start']: str
 
     @param args['t_end']: Date and time of end point
     @type args['t_end']: str
 
     @param args['granularity']: Granularity of date and time
     @type args['granularity']: str
 
     @param args['t_format']: Date and time formatting string
     @type args['t_format']: str
 
     @param args['cols']: Columns and it's content that will be include
                          if don't exist or it's empty it will include all cols
     @type args['cols']: [ [ str, str ], ]
     """
     # Get a MySQL friendly date
     lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
     upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
 
     # Get customevents
     # events_list = [(creation_time, event, [arg1, arg2, ...]), ...]
     event_list = []
     event_cols = {}
     for event_id, i in [(args['ids'][i], str(i))
                          for i in range(len(args['ids']))]:
         # Get all the event arguments and creation times
         tbl_name = get_customevent_table(event_id)
         col_names = get_customevent_args(event_id)
 
         sql_query = ["SELECT * FROM %s WHERE creation_time > '%%s'" % wash_table_column_name(tbl_name), (lower,)] # kwalitee: disable=sql
         sql_query.append("AND creation_time < '%s'" % upper)
         sql_param = []
         for col_bool, col_title, col_content in args['cols' + i]:
             if not col_title in col_names:
                 continue
             if col_content:
                 if col_bool == "and" or col_bool == "":
                     sql_query.append("AND %s" % \
                                          wash_table_column_name(col_title))
                 elif col_bool == "or":
                     sql_query.append("OR %s" % \
                                          wash_table_column_name(col_title))
                 elif col_bool == "and_not":
                     sql_query.append("AND NOT %s" % \
                                          wash_table_column_name(col_title))
                 else:
                     continue
                 sql_query.append(" LIKE %s")
                 sql_param.append("%" + col_content + "%")
         sql_query.append("ORDER BY creation_time DESC")
         sql = ' '.join(sql_query)
         res = run_sql(sql, tuple(sql_param))
 
         for row in res:
             event_list.append((row[1], event_id, row[2:]))
         # Get the event col names
         try:
             event_cols[event_id] = cPickle.loads(run_sql(
                     "SELECT cols FROM staEVENT WHERE id = %s",
                     (event_id, ))[0][0])
         except TypeError:
             event_cols[event_id] = ["Unnamed"]
     event_list.sort()
 
     output = []
     for row in event_list:
         temp = [row[1], row[0].strftime('%Y-%m-%d %H:%M:%S')]
 
         arguments = ["%s: %s" % (event_cols[row[1]][i],
                                  row[2][i]) for i in range(len(row[2]))]
 
         temp.extend(arguments)
         output.append(tuple(temp))
 
     return output
 
 
 def get_customevent_table(event_id):
     """
     Helper function that for a certain event id retrives the corresponding
     event table name.
     """
     res = run_sql(
         "SELECT CONCAT('staEVENT', number) FROM staEVENT WHERE id = %s", (event_id, ))
     try:
         return res[0][0]
     except IndexError:
         # No such event table
         return None
 
 
 def get_customevent_args(event_id):
     """
     Helper function that for a certain event id retrives the corresponding
     event argument (column) names.
     """
     res = run_sql("SELECT cols FROM staEVENT WHERE id = %s", (event_id, ))
     try:
         if res[0][0]:
             return cPickle.loads(res[0][0])
         else:
             return []
     except IndexError:
         # No such event table
         return None
 
 # CUSTOM SUMMARY SECTION
 
 def get_custom_summary_data(query, tag):
     """Returns the annual report data for the specified year
     @param year: Year of publication on the journal
     @type year: int
 
     @param query: Search query to make customized report
     @type query: str
 
     @param tag: MARC tag for the output
     @type tag: str
     """
 
     # Check arguments
     if tag == '':
-        tag = "909C4p"
+        tag = CFG_JOURNAL_TAG.replace("%", "p")
 
     # First get records of the year
     recids = perform_request_search(p=query, of="id")
 
     # Then return list by tag
     pub = list(get_most_popular_field_values(recids, tag))
 
-    sel = 0
-    for elem in pub:
-        sel += elem[1]
     if len(pub) == 0:
         return []
-    if len(recids) - sel != 0:
-        pub.append(('Others', len(recids) - sel))
-    pub.append(('TOTAL', len(recids)))
+    others = 0
+    total = 0
+    first_other = -1
+    for elem in pub:
+        total += elem[1]
+        if elem[1] < 2:
+            if first_other == -1:
+                first_other = pub.index(elem)
+            others += elem[1]
+    del pub[first_other:]
+
+    if others != 0:
+        pub.append(('Others', others))
+    pub.append(('TOTAL', total))
 
     return pub
 
 
 def create_custom_summary_graph(data, path, title):
     """
     Creates a pie chart with the information from the custom summary and
     saves it in the file specified by the path argument
     """
     # If no input, we don't bother about anything
     if len(data) == 0:
         return
     os.environ['HOME'] = CFG_TMPDIR
 
     try:
         import matplotlib
         matplotlib.use('Agg')
         import matplotlib.pyplot as plt
     except ImportError:
         return
     # make a square figure and axes
     matplotlib.rcParams['font.size'] = 8
     labels = [x[0] for x in data]
-    numb_elem = float(len(labels))
-    width = 6 + numb_elem / 7
+    numb_elem = len(labels)
+    width = 6 + float(numb_elem) / 7
     gfile = plt.figure(1, figsize=(width, 6))
 
     plt.axes([0.1, 0.1, 4.2 / width, 0.7])
 
     numb = [x[1] for x in data]
     total = sum(numb)
     fracs = [x * 100 / total for x in numb]
     colors = []
 
     random.seed()
     for i in range(numb_elem):
-        col = 0.5 + float(i) / (numb_elem * 2.0)
+        col = 0.5 + float(i) / (float(numb_elem) * 2.0)
         rand = random.random() / 2.0
         if i % 3 == 0:
             red = col
             green = col + rand
             blue = col - rand
             if green > 1.0:
                 green = 1
         elif i % 3 == 1:
             red = col - rand
             green = col
             blue = col + rand
             if blue > 1.0:
                 blue = 1
         elif i % 3 == 2:
             red = col + rand
             green = col - rand
             blue = col
             if red > 1.0:
                 red = 1
         colors.append((red, green, blue))
     patches = plt.pie(fracs, colors=tuple(colors), labels=labels,
                       autopct='%1i%%', pctdistance=0.8, shadow=True)[0]
     ttext = plt.title(title)
     plt.setp(ttext, size='xx-large', color='b', family='monospace', weight='extra bold')
     legend_keywords = {"prop": {"size": "small"}}
     plt.figlegend(patches, labels, 'lower right', **legend_keywords)
     plt.savefig(path)
     plt.close(gfile)
 
 # GRAPHER
 
 def create_graph_trend(trend, path, settings):
     """
     Creates a graph representation out of data produced from get_event_trend.
 
     @param trend: The trend data
     @type trend: [(str, str|int|(str|int,...))]
 
     @param path: Where to store the graph
     @type path: str
 
     @param settings: Dictionary of graph parameters
     @type settings: dict
     """
     # If no input, we don't bother about anything
-    if len(trend) == 0:
+    if not trend or len(trend) == 0:
         return
 
     # If no filename is given, we'll assume STD-out format and ASCII.
     if path == '':
         settings["format"] = 'asciiart'
     if settings["format"] == 'asciiart':
         create_graph_trend_ascii_art(trend, path, settings)
     elif settings["format"] == 'gnuplot':
         create_graph_trend_gnu_plot(trend, path, settings)
     elif settings["format"] == "flot":
         create_graph_trend_flot(trend, path, settings)
 
 
 def create_graph_trend_ascii_art(trend, path, settings):
     """Creates the graph trend using ASCII art"""
     out = ""
 
     if settings["multiple"] is not None:
         # Tokens that will represent the different data sets (maximum 16 sets)
         # Set index (=100) to the biggest of the histogram sums
         index = max([sum(x[1]) for x in trend])
 
         # Print legend box
         out += "Legend: %s\n\n" % ", ".join(["%s (%s)" % x
                     for x in zip(settings["multiple"], WEBSTAT_GRAPH_TOKENS)])
     else:
         index = max([x[1] for x in trend])
 
     width = 82
 
-     # Figure out the max length of the xtics, in order to left align
+    # Figure out the max length of the xtics, in order to left align
     xtic_max_len = max([len(_to_datetime(x[0]).strftime(
                     settings["xtic_format"])) for x in trend])
 
     for row in trend:
         # Print the xtic
         xtic = _to_datetime(row[0]).strftime(settings["xtic_format"])
         out_row = xtic + ': ' + ' ' * (xtic_max_len - len(xtic)) + '|'
 
         try:
             col_width = (1.0 * width / index)
         except ZeroDivisionError:
             col_width = 0
 
         if settings["multiple"] is not None:
             # The second value of the row-tuple, represents the n values from
             # the n data sets. Each set, will be represented by a different
             # ASCII character, chosen from the randomized string
             # 'WEBSTAT_GRAPH_TOKENS'.
             # NOTE: Only up to 16 (len(WEBSTAT_GRAPH_TOKENS)) data
             # sets are supported.
             total = sum(row[1])
 
             for i in range(len(row[1])):
                 col = row[1][i]
                 try:
                     out_row += WEBSTAT_GRAPH_TOKENS[i] * int(1.0 * col * col_width)
                 except ZeroDivisionError:
                     break
             if len([i for i in row[1] if type(i) is int and i > 0]) - 1 > 0:
                 out_row += out_row[-1]
 
         else:
             total = row[1]
             try:
                 out_row += '-' * int(1.0 * total * col_width)
             except ZeroDivisionError:
                 break
 
-         # Print sentinel, and the total
+        # Print sentinel, and the total
         out += out_row + '>' + ' ' * (xtic_max_len + 4 +
                                     width - len(out_row)) + str(total) + '\n'
 
-     # Write to destination file
+    # Write to destination file
     if path == '':
         print out
     else:
         open(path, 'w').write(out)
 
 
 def create_graph_trend_gnu_plot(trend, path, settings):
     """Creates the graph trend using the GNU plot library"""
     try:
         import Gnuplot
     except ImportError:
         return
 
     gnup = Gnuplot.Gnuplot()
 
     gnup('set style data linespoints')
     if 'size' in settings:
         gnup('set terminal png tiny size %s' % settings['size'])
     else:
         gnup('set terminal png tiny')
     gnup('set output "%s"' % path)
 
     if settings["title"] != '':
         gnup.title(settings["title"].replace("\"", ""))
     if settings["xlabel"] != '':
         gnup.xlabel(settings["xlabel"])
     if settings["ylabel"] != '':
         gnup.ylabel(settings["ylabel"])
 
     if settings["xtic_format"] != '':
         xtics = 'set xtics ('
         xtics += ', '.join(['"%s" %d' %
                             (_to_datetime(trend[i][0], '%Y-%m-%d \
                  %H:%M:%S').strftime(settings["xtic_format"]), i)
                             for i in range(len(trend))]) + ')'
         gnup(xtics)
     gnup('set format y "%.0f"')
 
     # If we have multiple data sets, we need to do
     # some magic to make Gnuplot eat it,
     # This is basically a matrix transposition,
     # and the addition of index numbers.
     if settings["multiple"] is not None:
         cols = len(trend[0][1])
         rows = len(trend)
         plot_items = []
         y_max = 0
         y_min = 0
         for col in range(cols):
             data = []
             for row in range(rows):
                 data.append([row, trend[row][1][col]])
             plot_items.append(Gnuplot.PlotItems
                                   .Data(data, title=settings["multiple"][col]))
             tmp_max = max([x[col] for x in data])
             tmp_min = min([x[col] for x in data])
             if tmp_max > y_max:
                 y_max = tmp_max
             if tmp_min < y_min:
                 y_min = tmp_min
         if y_max - y_min < 5 and y_min != 0:
             gnup('set ytic %d, 1, %d' % (y_min - 1, y_max + 2))
         elif y_max < 5:
             gnup('set ytic 1')
         gnup.plot(*plot_items)
     else:
         data = [x[1] for x in trend]
         y_max = max(data)
         y_min = min(data)
         if y_max - y_min < 5 and y_min != 0:
             gnup('set ytic %d, 1, %d' % (y_min - 1, y_max + 2))
         elif y_max < 5:
             gnup('set ytic 1')
         gnup.plot(data)
 
 
 def create_graph_trend_flot(trend, path, settings):
     """Creates the graph trend using the flot library"""
+    size = settings.get("size", "500,400").split(",")
+    title = cgi.escape(settings["title"].replace(" ", "")[:10])
     out = """<!--[if IE]><script language="javascript" type="text/javascript"
                     src="%(site)s/js/excanvas.min.js"></script><![endif]-->
               <script language="javascript" type="text/javascript" src="%(site)s/js/jquery.flot.min.js"></script>
               <script language="javascript" type="text/javascript" src="%(site)s/js/jquery.flot.selection.min.js"></script>
               <script id="source" language="javascript" type="text/javascript">
-                     document.write('<div style="float:left"><div id="placeholder" style="width:500px;height:400px"></div></div>'+
-              '<div id="miniature" style="float:left;margin-left:20px;margin-top:50px">' +
-              '<div id="overview" style="width:250px;height:200px"></div>' +
-              '<p id="overviewLegend" style="margin-left:10px"></p>' +
+                     document.write('<div style="float:left"><div id="placeholder%(title)s" style="width:%(width)spx;height:%(height)spx"></div></div>'+
+              '<div id="miniature%(title)s" style="float:left;margin-left:20px;margin-top:50px">' +
+              '<div id="overview%(title)s" style="width:%(hwidth)dpx;height:%(hheight)dpx"></div>' +
+              '<p id="overviewLegend%(title)s" style="margin-left:10px"></p>' +
               '</div>');
                      $(function () {
-                             function parseDate(sdate){
+                             function parseDate%(title)s(sdate){
                                  var div1 = sdate.split(' ');
                                  var day = div1[0].split('-');
                                  var hour = div1[1].split(':');
                                  return new Date(day[0], day[1]-1, day[2], hour[0], hour[1], hour[2]).getTime()
                                  - (new Date().getTimezoneOffset() * 60 * 1000) ;
                              }
-                             function getData() {""" % \
-        {'site': CFG_SITE_URL}
+                             function getData%(title)s() {""" % \
+        {'site': CFG_SITE_URL, 'width': size[0], 'height': size[1], 'hwidth': int(size[0]) / 2,
+         'hheight': int(size[1]) / 2, 'title': title}
+    if len(trend) > 1:
+        granularity_td = (_to_datetime(trend[1][0], '%Y-%m-%d %H:%M:%S') -
+                        _to_datetime(trend[0][0], '%Y-%m-%d %H:%M:%S'))
+    else:
+        granularity_td = datetime.timedelta()
     # Create variables with the format dn = [[x1,y1], [x2,y2]]
     minx = trend[0][0]
     maxx = trend[0][0]
     if settings["multiple"] is not None:
         cols = len(trend[0][1])
         rows = len(trend)
         first = 0
         for col in range(cols):
             out += """var d%d = [""" % (col)
             for row in range(rows):
                 if(first == 0):
                     first = 1
                 else:
                     out += ", "
                 if trend[row][0] < minx:
                     minx = trend[row][0]
                 if trend[row][0] > maxx:
                     maxx = trend[row][0]
-                out += '[parseDate("%s"),%d]' % \
-                    (_to_datetime(trend[row][0], '%Y-%m-%d \
-                     %H:%M:%S'), trend[row][1][col])
-            out += "];\n"
+                out += '[parseDate%s("%s"),%d]' % \
+                    (title, _to_datetime(trend[row][0], '%Y-%m-%d \
+%H:%M:%S'), trend[row][1][col])
+            out += ", [parseDate%s('%s'), %d]];\n" % (title,
+                    _to_datetime(maxx, '%Y-%m-%d %H:%M:%S') + granularity_td, trend[-1][1][col])
         out += "return [\n"
         first = 0
         for col in range(cols):
             if first == 0:
                 first = 1
             else:
                 out += ", "
             out += '{data : d%d, label : "%s"}' % \
                 (col, settings["multiple"][col])
         out += "];\n}\n"
     else:
         out += """var d1 = ["""
         rows = len(trend)
         first = 0
         for row in range(rows):
             if trend[row][0] < minx:
                 minx = trend[row][0]
             if trend[row][0] > maxx:
                 maxx = trend[row][0]
             if first == 0:
                 first = 1
             else:
                 out += ', '
-            out += '[parseDate("%s"),%d]' % \
-                (_to_datetime(trend[row][0], '%Y-%m-%d %H:%M:%S'),
+            out += '[parseDate%s("%s"),%d]' % \
+                (title, _to_datetime(trend[row][0], '%Y-%m-%d %H:%M:%S'),
                  trend[row][1])
-        out += """];
+        out += """, [parseDate%s("%s"), %d]];
                      return [d1];
                       }
-            """
-
+            """ % (title, _to_datetime(maxx, '%Y-%m-%d %H:%M:%S') +
+                   granularity_td, trend[-1][1])
 
     # Set options
-    tics = ""
-    if settings["xtic_format"] != '':
-        tics = 'xaxis: { mode:"time",min:parseDate("%s"),max:parseDate("%s")},'\
-            % (_to_datetime(minx, '%Y-%m-%d %H:%M:%S'),
-               _to_datetime(maxx, '%Y-%m-%d %H:%M:%S'))
-    tics += """
-        yaxis: {
+    tics = """yaxis: {
                 tickDecimals : 0
-        },
-        """
-    out += """var options ={
+        },"""
+    if settings["xtic_format"] != '':
+        tics = 'xaxis: { mode:"time",min:parseDate%s("%s"),max:parseDate%s("%s")},'\
+            % (title, _to_datetime(minx, '%Y-%m-%d %H:%M:%S'), title,
+               _to_datetime(maxx, '%Y-%m-%d %H:%M:%S') + granularity_td)
+
+    out += """var options%s ={
                 series: {
                    lines: { show: true },
                    points: { show: false }
                 },
-                legend: { show : false},
+                legend: {show: false},
                 %s
                 grid: { hoverable: true, clickable: true },
                 selection: { mode: "xy" }
                 };
-                """ % tics
+                """ % (title, tics, )
         # Write the plot method in javascript
 
-    out += """var startData = getData();
-        var plot = $.plot($("#placeholder"), startData, options);
-        var overview = $.plot($("#overview"), startData, {
+    out += """var startData%(title)s = getData%(title)s();
+        var plot%(title)s = $.plot($("#placeholder%(title)s"), startData%(title)s, options%(title)s);
+        var overview%(title)s = $.plot($("#overview%(title)s"), startData%(title)s, {
                  legend: { show: true, container: $("#overviewLegend") },
                  series: {
                     lines: { show: true, lineWidth: 1 },
                     shadowSize: 0
                  },
-                 %s
+                 %(tics)s
                  grid: { color: "#999" },
                  selection: { mode: "xy" }
                });
-               """ % tics
+               """ % {"title": title, "tics": tics}
 
         # Tooltip and zoom
     out += """    function showTooltip(x, y, contents) {
         $('<div id="tooltip">' + contents + '</div>').css( {
             position: 'absolute',
             display: 'none',
             top: y - 5,
             left: x + 10,
             border: '1px solid #fdd',
             padding: '2px',
             'background-color': '#fee',
             opacity: 0.80
         }).appendTo("body").fadeIn(200);
     }
 
     var previousPoint = null;
-    $("#placeholder").bind("plothover", function (event, pos, item) {
+    $("#placeholder%(title)s").bind("plothover", function (event, pos, item) {
 
         if (item) {
             if (previousPoint != item.datapoint) {
                 previousPoint = item.datapoint;
 
                 $("#tooltip").remove();
                 var y = item.datapoint[1];
 
                 showTooltip(item.pageX, item.pageY, y);
             }
         }
         else {
             $("#tooltip").remove();
             previousPoint = null;
         }
     });
 
-    $("#placeholder").bind("plotclick", function (event, pos, item) {
+    $("#placeholder%(title)s").bind("plotclick", function (event, pos, item) {
         if (item) {
             plot.highlight(item.series, item.datapoint);
         }
     });
-        $("#placeholder").bind("plotselected", function (event, ranges) {
+        $("#placeholder%(title)s").bind("plotselected", function (event, ranges) {
         // clamp the zooming to prevent eternal zoom
 
         if (ranges.xaxis.to - ranges.xaxis.from < 0.00001){
             ranges.xaxis.to = ranges.xaxis.from + 0.00001;}
         if (ranges.yaxis.to - ranges.yaxis.from < 0.00001){
             ranges.yaxis.to = ranges.yaxis.from + 0.00001;}
 
         // do the zooming
-        plot = $.plot($("#placeholder"), startData,
-                      $.extend(true, {}, options, {
+        plot = $.plot($("#placeholder%(title)s"), startData%(title)s,
+                      $.extend(true, {}, options%(title)s, {
                           xaxis: { min: ranges.xaxis.from, max: ranges.xaxis.to },
                           yaxis: { min: ranges.yaxis.from, max: ranges.yaxis.to }
                       }));
 
         // don't fire event on the overview to prevent eternal loop
-        overview.setSelection(ranges, true);
+        overview%(title)s.setSelection(ranges, true);
     });
-    $("#overview").bind("plotselected", function (event, ranges) {
+    $("#overview%(title)s").bind("plotselected", function (event, ranges) {
         plot.setSelection(ranges);
     });
 });
                 </script>
 <noscript>Your browser does not support JavaScript!
-Please, select another output format</noscript>"""
+Please, select another output format</noscript>""" % {'title' : title}
     open(path, 'w').write(out)
 
 
+def get_numeric_stats(data, multiple):
+    """ Returns average, max and min values for data """
+    data = [x[1] for x in data]
+    if not data:
+        return (0, 0, 0)
+    if multiple:
+        lists = []
+        for i in range(len(data[0])):
+            lists.append([x[i] for x in data])
+        return ([float(sum(x)) / len(x) for x in lists], [max(x) for x in lists],
+                [min(x) for x in lists])
+    else:
+        return (float(sum(data)) / len(data), max(data), min(data))
+
+
 def create_graph_table(data, path, settings):
     """
     Creates a html table representation out of data.
 
     @param data: The data
     @type data: (str,...)
 
     @param path: Where to store the graph
     @type path: str
 
     @param settings: Dictionary of table parameters
     @type settings: dict
     """
     out = """<table border="1">
 """
     if settings['rows'] == []:
         for row in data:
             out += """<tr>
 """
             for value in row:
                 out += """<td>%s</td>
 """ % value
             out += "</tr>"
     else:
         for dta, value in zip(settings['rows'], data):
             out += """<tr>
                  <td>%s</td>
                  <td>
 """ % dta
             for vrow in value:
                 out += """%s<br />
                         """ % vrow
                 out = out[:-6] + "</td></tr>"
     out += "</table>"
     open(path, 'w').write(out)
 
 
 def create_graph_dump(dump, path):
     """
     Creates a graph representation out of data produced from get_event_trend.
 
     @param dump: The dump data
     @type dump: [(str|int,...)]
 
     @param path: Where to store the graph
     @type path: str
     """
     out = ""
 
     if len(dump) == 0:
         out += "No actions for this custom event " + \
             "are registered in the given time range."
     else:
         # Make every row in dump equally long, insert None if appropriate.
         max_len = max([len(x) for x in dump])
         events = [tuple(list(x) + [None] * (max_len - len(x))) for x in dump]
 
         cols = ["Event", "Date and time"] + ["Argument %d" % i
                                              for i in range(max_len - 2)]
 
         column_widths = [max([len(str(x[i])) \
                     for x in events + [cols]]) + 3 for i in range(len(events[0]))]
 
         for i in range(len(cols)):
             out += cols[i] + ' ' * (column_widths[i] - len(cols[i]))
         out += "\n"
         for i in range(len(cols)):
             out += '=' * (len(cols[i])) + ' ' * (column_widths[i] - len(cols[i]))
         out += "\n\n"
 
         for action in dump:
             for i in range(len(action)):
                 if action[i] is None:
                     temp = ''
                 else:
                     temp = action[i]
                 out += str(temp) + ' ' * (column_widths[i] - len(str(temp)))
             out += "\n"
 
     # Write to destination file
     if path == '':
         print out
     else:
         open(path, 'w').write(out)
 
 # EXPORT DATA TO SLS
 
 def get_search_frequency(day=datetime.datetime.now().date()):
     """Returns the number of searches performed in the chosen day"""
     searches = get_keyevent_trend_search_type_distribution(get_args(day))
     return sum(searches[0][1])
 
 
 def get_total_records(day=datetime.datetime.now().date()):
     """Returns the total number of records which existed in the chosen day"""
     tomorrow = (datetime.datetime.now() +
                 datetime.timedelta(days=1)).strftime("%Y-%m-%d")
     args = {'collection': CFG_SITE_NAME, 't_start': day.strftime("%Y-%m-%d"),
             't_end': tomorrow, 'granularity': "day", 't_format': "%Y-%m-%d"}
     try:
         return get_keyevent_trend_collection_population(args)[0][1]
     except IndexError:
         return 0
 
 
 def get_new_records(day=datetime.datetime.now().date()):
     """Returns the number of new records submitted in the chosen day"""
     args = {'collection': CFG_SITE_NAME,
             't_start': (day - datetime.timedelta(days=1)).strftime("%Y-%m-%d"),
             't_end': day.strftime("%Y-%m-%d"), 'granularity': "day",
             't_format': "%Y-%m-%d"}
     try:
         return (get_total_records(day) -
              get_keyevent_trend_collection_population(args)[0][1])
     except IndexError:
         return 0
 
 
 def get_download_frequency(day=datetime.datetime.now().date()):
     """Returns the number of downloads during the chosen day"""
     return get_keyevent_trend_download_frequency(get_args(day))[0][1]
 
 
 def get_comments_frequency(day=datetime.datetime.now().date()):
     """Returns the number of comments during the chosen day"""
     return get_keyevent_trend_comments_frequency(get_args(day))[0][1]
 
 
 def get_loans_frequency(day=datetime.datetime.now().date()):
     """Returns the number of comments during the chosen day"""
     return get_keyevent_trend_number_of_loans(get_args(day))[0][1]
 
 
 def get_web_submissions(day=datetime.datetime.now().date()):
     """Returns the number of web submissions during the chosen day"""
     args = get_args(day)
     args['doctype'] = 'all'
     return get_keyevent_trend_web_submissions(args)[0][1]
 
 
 def get_alerts(day=datetime.datetime.now().date()):
     """Returns the number of alerts during the chosen day"""
     args = get_args(day)
     args['cols'] = [('', '', '')]
     args['event_id'] = 'alerts'
     return get_customevent_trend(args)[0][1]
 
 
 def get_journal_views(day=datetime.datetime.now().date()):
     """Returns the number of journal displays during the chosen day"""
     args = get_args(day)
     args['cols'] = [('', '', '')]
     args['event_id'] = 'journals'
     return get_customevent_trend(args)[0][1]
 
 
 def get_basket_views(day=datetime.datetime.now().date()):
     """Returns the number of basket displays during the chosen day"""
     args = get_args(day)
     args['cols'] = [('', '', '')]
     args['event_id'] = 'baskets'
     return get_customevent_trend(args)[0][1]
 
 
 def get_args(day):
     """Returns the most common arguments for the exporting to SLS methods"""
     return {'t_start': day.strftime("%Y-%m-%d"),
             't_end': (day + datetime.timedelta(days=1)).strftime("%Y-%m-%d"),
          'granularity': "day", 't_format': "%Y-%m-%d"}
 
 # EXPORTER
 
 def export_to_python(data, req):
     """
     Exports the data to Python code.
 
     @param data: The Python data that should be exported
     @type data: []
 
     @param req: The Apache request object
     @type req:
     """
     _export("text/x-python", str(data), req)
 
 
 def export_to_csv(data, req):
     """
     Exports the data to CSV.
 
     @param data: The Python data that should be exported
     @type data: []
 
     @param req: The Apache request object
     @type req:
     """
     csv_list = [""""%s",%s""" % (x[0], ",".join([str(y) for y in \
                  ((type(x[1]) is tuple) and x[1] or (x[1], ))])) for x in data]
     _export('text/csv', '\n'.join(csv_list), req)
 
 
-def export_to_excel(data, req):
+def export_to_file(data, req):
     """
-    Exports the data to excel.
+    Exports the data to a file.
 
     @param data: The Python data that should be exported
     @type data: []
 
     @param req: The Apache request object
     @type req:
     """
-    if not xlwt_imported:
-        raise Exception("Module xlwt not installed")
-    book = xlwt.Workbook(encoding="utf-8")
-    sheet1 = book.add_sheet('Sheet 1')
-    for row in range(0, len(data)):
-        for col in range(0, len(data[row])):
-            sheet1.write(row, col, "%s" % data[row][col])
-    filename = CFG_TMPDIR + "/webstat_export_" + \
-        str(time.time()).replace('.', '') + '.xls'
-    book.save(filename)
-    redirect_to_url(req, '%s/stats/export?filename=%s&mime=%s' \
+    try:
+        import xlwt
+        book = xlwt.Workbook(encoding="utf-8")
+        sheet1 = book.add_sheet('Sheet 1')
+        for row in range(0, len(data)):
+            for col in range(0, len(data[row])):
+                sheet1.write(row, col, "%s" % data[row][col])
+        filename = CFG_TMPDIR + "/webstat_export_" + \
+            str(time.time()).replace('.', '') + '.xls'
+        book.save(filename)
+        redirect_to_url(req, '%s/stats/export?filename=%s&mime=%s' \
                         % (CFG_SITE_URL, os.path.basename(filename), 'application/vnd.ms-excel'))
+    except ImportError:
+        csv_list = []
+        for row in data:
+            row = ['"%s"' % str(col) for col in row]
+            csv_list.append(",".join(row))
+        _export('text/csv', '\n'.join(csv_list), req)
 # INTERNAL
 
 def _export(mime, content, req):
     """
     Helper function to pass on the export call. Create a
     temporary file in which the content is stored, then let
     redirect to the export web interface.
     """
     filename = CFG_TMPDIR + "/webstat_export_" + \
         str(time.time()).replace('.', '')
     open(filename, 'w').write(content)
     redirect_to_url(req, '%s/stats/export?filename=%s&mime=%s' \
                         % (CFG_SITE_URL, os.path.basename(filename), mime))
 
 
 def _get_trend_from_actions(action_dates, initial_value,
-                            t_start, t_end, granularity, dt_format):
+                            t_start, t_end, granularity, dt_format, acumulative=False):
     """
     Given a list of dates reflecting some sort of action/event, and some additional parameters,
     an internal data format is returned. 'initial_value' set to zero, means that the frequency
     will not be accumulative, but rather non-causal.
 
     @param action_dates: A list of dates, indicating some sort of action/event.
     @type action_dates: [datetime.datetime]
 
     @param initial_value: The numerical offset the first action's value should make use of.
     @type initial_value: int
 
-    @param t_start: Start time for the time domain in format %Y-%m-%d %H:%M:%S
+    @param t_start: Start time for the time domain in dt_format
     @type t_start: str
 
-    @param t_stop: End time for the time domain in format %Y-%m-%d %H:%M:%S
-    @type t_stop: str
+    @param t_end: End time for the time domain in dt_format
+    @type t_end: str
 
     @param granularity: The granularity of the time domain, span between values.
                         Possible values are [year,month,day,hour,minute,second].
     @type granularity: str
 
     @param dt_format: Format of the 't_start' and 't_stop' parameters
     @type dt_format: str
 
     @return: A list of tuples zipping a time-domain and a value-domain
     @type: [(str, int)]
     """
     # Append the maximum date as a sentinel indicating we're done
-    action_dates.insert(0, datetime.datetime.max)
-
-    # Create an iterator running from the first day of activity
-    dt_iter = _get_datetime_iter(t_start, granularity, dt_format)
+    action_dates = list(action_dates)
 
     # Construct the datetime tuple for the stop time
     stop_at = _to_datetime(t_end, dt_format) - datetime.timedelta(seconds=1)
 
-    # If our t_start is more recent than the initial action_dates, we need to
-    # drop those.
-    t_start_dt = _to_datetime(t_start, dt_format)
-    while action_dates[-1] < t_start_dt:
-        action_dates = action_dates[:-1]
-
     vector = [(None, initial_value)]
-    # pylint: disable=E1101
-    old = dt_iter.next()
-    # pylint: enable=E1101
-    upcoming_action = action_dates.pop()
 
-    for current in dt_iter:
+    try:
+        upcoming_action = action_dates.pop()
+        # Do not count null values (when year, month or day is 0)
+        if granularity in ("year", "month", "day") and upcoming_action[0] == 0:
+            upcoming_action = action_dates.pop()
+    except IndexError:
+        upcoming_action = (datetime.datetime.max, 0)
+
+    # Create an iterator running from the first day of activity
+    for current in _get_datetime_iter(t_start, granularity, dt_format):
         # Counter of action_dates in the current span, set the initial value to
         # zero to avoid accumlation.
-        if initial_value != 0:
+        if acumulative:
             actions_here = vector[-1][1]
         else:
             actions_here = 0
-
         # Check to see if there's an action date in the current span
-        while old <= upcoming_action < current:
-            actions_here += 1
+        if upcoming_action[0] == {"year": current.year,
+            "month": current.month,
+            "day": current.day,
+            "hour": current.hour,
+            "minute": current.minute,
+            "second": current.second
+            }[granularity]:
+            actions_here += upcoming_action[1]
             try:
                 upcoming_action = action_dates.pop()
             except IndexError:
-                upcoming_action = datetime.datetime.max
+                upcoming_action = (datetime.datetime.max, 0)
 
-        vector.append((old.strftime('%Y-%m-%d %H:%M:%S'), actions_here))
-        old = current
+        vector.append((current.strftime('%Y-%m-%d %H:%M:%S'), actions_here))
 
         # Make sure to stop the iteration at the end time
-        if current > stop_at:
+        if {"year": current.year >= stop_at.year,
+            "month": current.month >= stop_at.month and current.year == stop_at.year,
+            "day": current.day >= stop_at.day and current.month == stop_at.month,
+            "hour": current.hour >= stop_at.hour and current.day == stop_at.day,
+            "minute": current.minute >= stop_at.minute and current.hour == stop_at.hour,
+            "second": current.second >= stop_at.second and current.minute == stop_at.minute
+            }[granularity]:
             break
-
     # Remove the first bogus tuple, and return
     return vector[1:]
 
 
+def _get_keyevent_trend(args, sql, initial_quantity=0, extra_param=[],
+                        return_sql=False, sql_text='%s', acumulative=False):
+    """
+    Returns the trend for the sql passed in the given timestamp range.
+
+    @param args['t_start']: Date and time of start point
+    @type args['t_start']: str
+
+    @param args['t_end']: Date and time of end point
+    @type args['t_end']: str
+
+    @param args['granularity']: Granularity of date and time
+    @type args['granularity']: str
+
+    @param args['t_format']: Date and time formatting string
+    @type args['t_format']: str
+    """
+    # collect action dates
+    lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
+    upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
+    param = tuple([lower, upper] + extra_param)
+    if return_sql:
+        sql = sql % param
+        return sql_text % sql
+
+    return _get_trend_from_actions(run_sql(sql, param), initial_quantity, args['t_start'],
+                          args['t_end'], args['granularity'], args['t_format'], acumulative)
+
+
 def _get_datetime_iter(t_start, granularity='day',
                        dt_format='%Y-%m-%d %H:%M:%S'):
     """
     Returns an iterator over datetime elements starting at an arbitrary time,
     with granularity of a [year,month,day,hour,minute,second].
 
     @param t_start: An arbitrary starting time in format %Y-%m-%d %H:%M:%S
     @type t_start: str
 
     @param granularity: The span between iterable elements, default is 'days'.
                         Possible values are [year,month,day,hour,minute,second].
     @type granularity: str
 
     @param format: Format of the 't_start' parameter
     @type format: str
 
     @return: An iterator of points in time
     @type: iterator over datetime elements
     """
     tim = _to_datetime(t_start, dt_format)
 
     # Make a time increment depending on the granularity and the current time
     # (the length of years and months vary over time)
     span = ""
     while True:
         yield tim
 
         if granularity == "year":
             span = (calendar.isleap(tim.year) and ["days=366"] or ["days=365"])[0]
         elif granularity == "month":
             span = "days=" + str(calendar.monthrange(tim.year, tim.month)[1])
         elif granularity == "day":
             span = "days=1"
         elif granularity == "hour":
             span = "hours=1"
         elif granularity == "minute":
             span = "minutes=1"
         elif granularity == "second":
             span = "seconds=1"
         else:
             # Default just in case
             span = "days=1"
 
         tim += eval("datetime.timedelta(" + span + ")")
 
 
 def _to_datetime(dttime, dt_format='%Y-%m-%d %H:%M:%S'):
     """
     Transforms a string into a datetime
     """
     return datetime.datetime(*time.strptime(dttime, dt_format)[:6])
 
 
 def _run_cmd(command):
     """
     Runs a certain command and returns the string output. If the command is
     not found a string saying so will be returned. Use with caution!
 
     @param command: The UNIX command to execute.
     @type command: str
 
     @return: The std-out from the command.
     @type: str
     """
     return commands.getoutput(command)
 
 
 def _get_doctypes():
     """Returns all the possible doctypes of a new submission"""
     doctypes = [("all", "All")]
     for doctype in get_docid_docname_alldoctypes():
         doctypes.append(doctype)
     return doctypes
 
 
 def _get_item_statuses():
     """Returns all the possible status of an item"""
     return [("available", "Available"), ("requested", "Requested"),
             ("on loan", "On loan"), ("missing", "Missing")]
 
 
 def _get_item_doctype():
     """Returns all the possible types of document for an item"""
     dts = []
     for dat in run_sql("""SELECT DISTINCT(request_type)
         FROM crcILLREQUEST ORDER BY request_type ASC"""):
         dts.append((dat[0], dat[0]))
     return dts
 
 
 def _get_request_statuses():
     """Returns all the possible statuses for an ILL request"""
     dts = []
     for dat in run_sql("SELECT DISTINCT(status) FROM crcILLREQUEST ORDER BY status ASC"):
         dts.append((dat[0], dat[0]))
     return dts
 
 
 def _get_libraries():
     """Returns all the possible libraries"""
     dts = []
     for dat in run_sql("SELECT name FROM crcLIBRARY ORDER BY name ASC"):
-        dts.append((dat[0], dat[0]))
+        if not CFG_CERN_SITE or "CERN" not in dat[0]:  # do not add internal libraries for CERN site
+            dts.append((dat[0], dat[0]))
     return dts
 
 
 def _get_loan_periods():
     """Returns all the possible loan periods for an item"""
     dts = []
     for dat in run_sql("SELECT DISTINCT(loan_period) FROM crcITEM ORDER BY loan_period ASC"):
         dts.append((dat[0], dat[0]))
     return dts
 
 
 def _get_tag_name(tag):
     """
     For a specific MARC tag, it returns the human-readable name
     """
     res = run_sql("SELECT name FROM tag WHERE value LIKE %s", ('%' + tag + '%',))
     if res:
         return res[0][0]
     res = run_sql("SELECT name FROM tag WHERE value LIKE %s", ('%' + tag[:-1] + '%',))
     if res:
         return res[0][0]
     return ''
+
+def _get_collection_recids_for_sql_query(coll):
+    ids = get_collection_reclist(coll).tolist()
+    if len(ids) == 0:
+        return ""
+    return "id_bibrec IN %s" % str(ids).replace('[', '(').replace(']', ')')
+
+def _check_udc_value_where():
+    return "id_bibrec IN (SELECT brb.id_bibrec \
+FROM bibrec_bib08x brb, bib08x b WHERE brb.id_bibxxx = b.id AND tag='080__a' \
+AND value LIKE %s) "
+
+def _get_udc_truncated(udc):
+    if udc[-1] == '*':
+        return "%s%%" % udc[:-1]
+    if udc[0] == '*':
+        return "%%%s" % udc[1:]
+    return "%s" % udc
+
+def _check_empty_value(value):
+    if len(value) == 0:
+        return ""
+    else:
+        return value[0][0]
+
+def _get_granularity_sql_functions(granularity):
+    try:
+        return {
+            "year": ("YEAR",),
+            "month": ("YEAR", "MONTH",),
+            "day": ("MONTH", "DAY",),
+            "hour": ("DAY", "HOUR",),
+            "minute": ("HOUR", "MINUTE",),
+            "second": ("MINUTE", "SECOND")
+            }[granularity]
+    except KeyError:
+        return ("MONTH", "DAY",)
+
+def _get_sql_query(creation_time_name, granularity, tables_from, conditions="",
+                   extra_select="", dates_range_param="", group_by=True, count=True):
+    if len(dates_range_param) == 0:
+        dates_range_param = creation_time_name
+    conditions = "%s > %%s AND %s < %%s %s" % (dates_range_param, dates_range_param,
+                                    len(conditions) > 0 and "AND %s" % conditions or "")
+    values = {'creation_time_name': creation_time_name,
+         'granularity_sql_function': _get_granularity_sql_functions(granularity)[-1],
+         'count': count and ", COUNT(*)" or "",
+         'tables_from': tables_from,
+         'conditions': conditions,
+         'extra_select': extra_select,
+         'group_by': ""}
+    if group_by:
+        values['group_by'] = "GROUP BY "
+        for fun in _get_granularity_sql_functions(granularity):
+            values['group_by'] += "%s(%s), " % (fun, creation_time_name)
+        values['group_by'] = values['group_by'][:-2]
+    return "SELECT %(granularity_sql_function)s(%(creation_time_name)s) %(count)s %(extra_select)s \
+FROM %(tables_from)s WHERE %(conditions)s \
+%(group_by)s \
+ORDER BY %(creation_time_name)s DESC" % values
diff --git a/modules/webstat/lib/webstat_templates.py b/modules/webstat/lib/webstat_templates.py
index cb9129536..2e758cc91 100644
--- a/modules/webstat/lib/webstat_templates.py
+++ b/modules/webstat/lib/webstat_templates.py
@@ -1,837 +1,856 @@
 ## This file is part of Invenio.
 ## Copyright (C) 2007, 2008, 2010, 2011 CERN.
 ##
 ## Invenio is free software; you can redistribute it and/or
 ## modify it under the terms of the GNU General Public License as
 ## published by the Free Software Foundation; either version 2 of the
 ## License, or (at your option) any later version.
 ##
 ## Invenio is distributed in the hope that it will be useful, but
 ## WITHOUT ANY WARRANTY; without even the implied warranty of
 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 ## General Public License for more details.
 ##
 ## You should have received a copy of the GNU General Public License
 ## along with Invenio; if not, write to the Free Software Foundation, Inc.,
 ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 
 __revision__ = "$Id$"
 __lastupdated__ = "$Date$"
 
 import datetime, cgi, urllib, os
 from invenio.config import \
      CFG_WEBDIR, \
      CFG_SITE_URL, \
-     CFG_SITE_LANG
-
-from invenio.webstat_engine import xlwt_imported
+     CFG_SITE_LANG, \
+     CFG_SITE_NAME
+from invenio.search_engine import get_coll_sons
 from invenio.webstat_engine import get_invenio_error_details
 
 
 class Template:
 
     def tmpl_welcome(self, ln=CFG_SITE_LANG):
         """
         Generates a welcome page for the Webstat module.
         """
         return """<p>On these pages, you can review measurements of Invenio usage
                      and performance. Output is available in several formats, and its
                      raw data can be exported for offline processing. Further on, a general
                      overview is presented below under the label Current System Health.</p>"""
 
-    def tmpl_system_health(self, health_statistics, ln=CFG_SITE_LANG):
+    def tmpl_system_health_list(self, ln=CFG_SITE_LANG):
         """
         Generates a box with current information from the system providing the administrator
         an easy way of overlooking the 'health', i.e. the current performance/efficency, of
         the system.
         """
-        out = """<h3>Current system health</h3>"""
+        return """<h3>Current system health</h3>
+            See <a href="%s/stats/system_health%s">current system health</a>""" % \
+            (CFG_SITE_URL, (CFG_SITE_LANG != ln and '?ln=' + ln) or '')
 
+    def tmpl_system_health(self, health_statistics, ln=CFG_SITE_LANG):
         temp_out = ""
         for statistic in health_statistics:
             if statistic is None:
                 temp_out += '\n'
             elif statistic[1] is None:
                 temp_out += statistic[0] + '\n'
             else:
                 temp_out += statistic[0] + \
                             '.' * (85 - len(str(statistic[0])) - len(str(statistic[1]))) + \
                             str(statistic[1]) + '\n'
 
-        out += "<pre>" + temp_out + "</pre>"
-
-        return out
+        return "<pre>" + temp_out + "</pre>"
 
     def tmpl_keyevent_list(self, ln=CFG_SITE_LANG):
         """
         Generates a list of available key statistics.
         """
         return """<h3>Key statistics</h3>
                   <p>Please choose a statistic from below to review it in detail.</p>
                   <ul>
                     <li><a href="%(CFG_SITE_URL)s/stats/collection_population%(ln_link)s">Collection population</a></li>
+                    <li><a href="%(CFG_SITE_URL)s/stats/new_records%(ln_link)s">New records</a></li>
                     <li><a href="%(CFG_SITE_URL)s/stats/search_frequency%(ln_link)s">Search frequency</a></li>
                     <li><a href="%(CFG_SITE_URL)s/stats/search_type_distribution%(ln_link)s">Search type distribution</a></li>
                     <li><a href="%(CFG_SITE_URL)s/stats/download_frequency%(ln_link)s">Download frequency</a></li>
                     <li><a href="%(CFG_SITE_URL)s/stats/comments_frequency%(ln_link)s">Comments frequency</a></li>
                     <li><a href="%(CFG_SITE_URL)s/stats/number_of_loans%(ln_link)s">Number of loans</a></li>
                     <li><a href="%(CFG_SITE_URL)s/stats/web_submissions%(ln_link)s">Web submissions</a></li>
                     <li><a href="%(CFG_SITE_URL)s/stats/loans_stats%(ln_link)s">Loan statistics</a></li>
                     <li><a href="%(CFG_SITE_URL)s/stats/loans_lists%(ln_link)s">Loan lists</a></li>
                     <li><a href="%(CFG_SITE_URL)s/stats/renewals_lists%(ln_link)s">Renewals lists</a></li>
                     <li><a href="%(CFG_SITE_URL)s/stats/returns_table%(ln_link)s">Number of overdue returns</a></li>
                     <li><a href="%(CFG_SITE_URL)s/stats/returns_graph%(ln_link)s">Percentage of overdue returns</a></li>
                     <li><a href="%(CFG_SITE_URL)s/stats/ill_requests_stats%(ln_link)s">ILL Requests statistics</a></li>
                     <li><a href="%(CFG_SITE_URL)s/stats/ill_requests_lists%(ln_link)s">ILL Requests list</a></li>
                     <li><a href="%(CFG_SITE_URL)s/stats/ill_requests_graph%(ln_link)s">Percentage of satisfied ILL requests</a></li>
                     <li><a href="%(CFG_SITE_URL)s/stats/items_stats%(ln_link)s">Items statistics</a></li>
                     <li><a href="%(CFG_SITE_URL)s/stats/items_list%(ln_link)s">Items list</a></li>
                     <li><a href="%(CFG_SITE_URL)s/stats/loans_requests%(ln_link)s">Hold requests statistics</a></li>
                     <li><a href="%(CFG_SITE_URL)s/stats/loans_request_lists%(ln_link)s">Hold requests lists</a></li>
                     <li><a href="%(CFG_SITE_URL)s/stats/user_stats%(ln_link)s">User statistics</a></li>
                     <li><a href="%(CFG_SITE_URL)s/stats/user_lists%(ln_link)s">User lists</a></li>
                   </ul>""" % {'CFG_SITE_URL': CFG_SITE_URL,
                               'ln_link': (CFG_SITE_LANG != ln and '?ln=' + ln) or ''}
 
     def tmpl_customevent_list(self, customevents, ln=CFG_SITE_LANG):
         """
         Generates a list of available custom statistics.
         """
         out = """<h3>Custom events</h3>
                  <p>The Webstat module supplies a mean for the administrators of Invenio
                  to define their own custom events, more abstract than the Key Statistics above.
                  A technical walk-through how to create these, is available <a href="%s/stats/customevent_help">here</a>.
                  When a custom event has been made available, it is displayed below.</p>
                  """ % CFG_SITE_URL
 
 
         temp_out = ""
         for event in customevents:
             temp_out += """<li><a href="%s/stats/customevent?ids=%s">%s</a></li>""" \
                         % (CFG_SITE_URL, event[0], (event[1] is None) and event[0] or event[1])
         if len(customevents) == 0:
             out += self.tmpl_error("There are currently no custom events available.", ln=ln)
         else:
             out += "<ul>" + temp_out + "</ul>"
 
         return out
 
     def tmpl_loans_statistics(self, ln=CFG_SITE_LANG):
         """
         Generates the tables with the bibcirculation statistics
         """
         out = """<h3>Bibcirculation stats</h3>"""
 
         return out
 
     def tmpl_error_log_statistics_list(self, ln=CFG_SITE_LANG):
         """
         Link to error log analyzer
         """
         return """<h3>Error log statistics</h3>
                  <p>Displays statistics about the last errors in the Invenio and Apache logs</p>
                  <ul><li><a href="%s/stats/error_log%s">Error log analyzer</a></li>
                  </ul>""" % (CFG_SITE_URL, (CFG_SITE_LANG != ln and '?ln=' + ln) or '')
 
     def tmpl_error_log_analyzer(self, invenio_ranking, invenio_last_errors, apache_ranking):
         """
         Generates the statistics of the last errors
         """
         out = """<h4>Invenio error log</h4>
                  <h5>Ranking</h5>
                  <pre>%s</pre>
                  <h5>Last errors</h5>
 """ % (cgi.escape(invenio_ranking))
         lines = invenio_last_errors.splitlines()
         error_number = len(lines)
         for line in lines:
             out += """<div>
                           %(line)s<button id="bt_toggle%(error_number)s">Toggle</button>
                           <pre id="txt_error%(error_number)s">%(error_details)s</pre>
                       </div>
                       <script>
                           $("#txt_error%(error_number)s").slideToggle("fast");
                           $("#bt_toggle%(error_number)s").click(function () {
                               $("#txt_error%(error_number)s").slideToggle("fast");
                           });
                       </script>
 
 """ % {'line': cgi.escape(line),
        'error_number': error_number,
        'error_details': cgi.escape(get_invenio_error_details(error_number))}
             error_number -= 1
         out += """<h4>Apache error log</h4>
                   <pre>%s</pre>""" % apache_ranking
         return out
 
     def tmpl_custom_summary(self, ln=CFG_SITE_LANG):
         """
         Link to custom annual report
         """
         return """<h3>Library report</h3>
                  <ul><li><a href="%s/stats/custom_summary">Custom query summary</a></li></ul>
                  """ % CFG_SITE_URL
 
-    def tmpl_collection_stats_list(self, collections, ln=CFG_SITE_LANG):
+    def tmpl_collection_stats_main_list(self, ln=CFG_SITE_LANG):
         """
         Generates a list of available collections statistics.
         """
-        out = """<h3>Collections stats</h3>"""
-
-        temp_out = ""
+        out = """<h3>Collections stats</h3>
+                 <ul>"""
+        for coll in get_coll_sons(CFG_SITE_NAME):
+            out += """<li><a href="%s/stats/collections?%s">%s</a></li>""" \
+                        % (CFG_SITE_URL, urllib.urlencode({'collection': coll}) +
+                           ((CFG_SITE_LANG != ln and '&ln=' + ln) or ''), coll)
+        out += """<li><a href="%s/stats/collection_stats%s">Other collections</a></li>""" \
+                % (CFG_SITE_URL, (CFG_SITE_LANG != ln and '?ln=' + ln) or '')
+        return out + "</ul>"
+
+    def tmpl_collection_stats_complete_list(self, collections, ln=CFG_SITE_LANG):
+        if len(collections) == 0:
+            return self.tmpl_error("There are currently no collections available.", ln=ln)
+        temp_out = """<h4>Collections stats</h4>
+                 <ul>"""
         for coll in collections:
             temp_out += """<li><a href="%s/stats/collections?%s">%s</a></li>""" \
-                        % (CFG_SITE_URL, urllib.urlencode({'coll': coll[0]}), coll[1])
-        if len(collections) == 0:
-            out += self.tmpl_error("There are currently no collections available.", ln=ln)
-        else:
-            out += "<ul>" + temp_out + "</ul>"
-        return out
+                        % (CFG_SITE_URL, urllib.urlencode({'collection': coll[0]}), coll[1])
+        return temp_out + "</ul>"
 
     def tmpl_customevent_help(self, ln=CFG_SITE_LANG):
         """
         Display help for custom events.
         """
         return """<h3>General overview</h3>
 
                   <p>A custom event is a measure indicating the frequency of some kind of
                   "action", such as e.g. the number of advanced searches carried out using
                   the Swedish language interface. The custom event functionality is intended
                   to give administrators a mean to log abstract activity, as opposed to
                   trivial measurements like "collection population" and "search frequency".
                   Thus, a custom event is fully customizable and defined by an administrator
                   but it is important to understand that the Webstat module merely supplies
                   the mean to register an action and associate it with a predefined custom event,
                   while the actual use case leading up to the very registration of the action
                   is left to the user.</p>
 
                   <p>After a custom event has been created and the process of collecting data
                   has started, the event is accessible for review through the Webstat webpage.</p>
 
                   <h3>How to create a new custom event</h3>
 
                   <ol>
                     <li>Edit <strong>/opt/invenio/etc/webstat/webstat.cfg</strong> adding
                     the definition of the customevent:
                     <pre>
                     [webstat_custom_event_1]
                     name = baskets
                     param1 = action
                     param2 = basket
                     param3 = user</pre>
                     </li>
                     <li>The title must be <em>webstat_custom_event_(num)</em> where <em>(num)</em>
                     is a number. The number can not be repeated in two different customevents.
                     </li>
                     <li>The option <em>name</em> is the name of the customevent.</li>
                     <li>Each param in the customevent must be given as <em>param(num)</em> where
                     <em>(num)</em> is an unique number.</li>
                   </ol>"""
 
     def tmpl_error(self, msg, ln=CFG_SITE_LANG):
         """
         Provides a common way of outputting error messages.
         """
         return """<div class="important">%s</div>""" % msg
 
-    def tmpl_keyevent_box(self, options, order, choosed, ln=CFG_SITE_LANG, excel=False):
+    def tmpl_keyevent_box(self, options, order, choosed, ln=CFG_SITE_LANG, list=False):
         """
         Generates a FORM box with dropdowns for keyevents.
 
         @param options: { parameter name: [(argument internal, argument full)]}
         @type options: { str: [(str, str)]}
 
         @param order: A permutation of the keys in options, for design purpose.
         @type order: [str]
 
         @param options: The selected parameters, and its values.
         @type options: { str: str }
         """
         # Create the FORM's header
         formheader = """<form method="get">
-        <input type="hidden" name="ln"value="%s" />""" % ln
+        <input type="hidden" name="ln" value="%s" />""" % ln
 
         # Create the headers using the options permutation
         headers = [[options[param][1] for param in order]]
         headers[0].append("")
 
         # Create all SELECT boxes
         sels = [[]]
         for param in order:
             if choosed[param] == 'select date':
                 sels[0].append(self._tmpl_select_box(options[param][2], # SELECT box data
                     " - select " + options[param][1], # first item info
                     param, # name
                     [choosed['s_date'], choosed['f_date']], # selected value (perhaps several)
                     True, # multiple box?
                     ln=ln))
             elif options[param][0] == 'combobox':
                 sels[0].append(self._tmpl_select_box(options[param][2], # SELECT box data
                     " - select " + options[param][1], # first item info
                     param, # name
                     choosed[param], # selected value (perhaps several)
                     type(choosed[param]) is list, # multiple box?
                     ln=ln))
             elif options[param][0] == 'textbox':
                 sels[0].append(self._tmpl_text_box(param, # name
                     choosed[param], # selected value
                     ln=ln))
 
         # Create button
         sels[0].append("""<input class="formbutton" type="submit"
         name="action_gen" value="Generate"/>""")
 
-        # Export to excel option
-        if excel and xlwt_imported:
+        # Export option
+        if list:
             sels[0].append("""<input class="formbutton" type="submit"
-            name="format" value="Excel"/>""")
+            name="format" value="Full list"/>""")
         # Create form footer
         formfooter = """</form>"""
 
         return self._tmpl_box(formheader, formfooter, ["keyevent_table"],
                               headers, sels, [""], ln=ln)
 
     def tmpl_customevent_box(self, options, choosed, ln=CFG_SITE_LANG):
         """
         Generates a FORM box with dropdowns for customevents.
 
         @param options: { parameter name: (header,  [(argument internal, argument full)]) or
                                           {param father: [(argument internal, argument full)]}}
                         The dictionary is for options that are dependient of other.
                         It's use for 'cols'
                         With "param father"="__header" the headers
                         With "param father"="__none" indicate the arguments by default
         @type options: { str: (str, [(str, str)])|{str: [(str, str)]}}
 
         @param choosed: The selected parameters, and its values.
         @type choosed: { str: str }
         """
         if choosed['ids'] == []:
             choosed['ids'] = [""]
             choosed['cols'] = [[("", "", "")]]
         num_ids = len(choosed['ids'])
 
         operators = [('and', 'AND'), ('or', 'OR'), ('and_not', 'AND NOT')]
 
         # Crate the ids of the tables
         table_id = ["time_format"]
         table_id.extend(['cols' + str(i) for i in range(num_ids)])
 
         # Create the headers using the options permutation
         headers = [(options['timespan'][0], options['format'][0])]
         headers.extend([(options['ids'][0], "", options['cols']['__header'], "value")
                         for event_id in choosed['ids']])
 
         # Create all SELECT boxes
         sels = [[]]
         for param in ['timespan', 'format']:
             if choosed[param] == 'select date':
                 sels[0].append(self._tmpl_select_box(options[param][1], # SELECT box data
                     " - select " + options[param][0], # first item info
                     param, # name
                     [choosed['s_date'], choosed['f_date']], # selected value (perhaps several)
                     True, # multiple box?
                     ln=ln))
             else:
                 sels[0].append(self._tmpl_select_box(options[param][1], # SELECT box data
                     " - select " + options[param][0], # first item info
                     param, # name
                     choosed[param], # selected value (perhaps several)
                     type(choosed[param]) is list, # multiple box?
                     ln=ln))
         for event_id, i in zip(choosed['ids'], range(num_ids)):
             select_table = []
             select_row = [self._tmpl_select_box(options['ids'][1],
                 " - select " + options['ids'][0],
                 'ids',
                 event_id,
                 attribute='onChange="javascript: \
                     changed_customevent(customevent[\'ids\'],%d);"' % i,
                 ln=ln)]
             is_first_loop = True
             row = 0
             if len(choosed['cols']) <= i:
                 choosed['cols'].append([("", "", "")])
             if choosed['cols'][i] == []:
                 choosed['cols'][i] = [("", "", "")]
             for _, col, value in choosed['cols'][i]:
                 select_row.append("")
                 if not is_first_loop:
                     select_row.append(self._tmpl_select_box(operators, "", "bool%d" % i, bool))
                 if event_id:
                     select_row.append(self._tmpl_select_box(options['cols'][event_id],
                                                     " - select " + options['cols']['__header'],
                                                     'cols' + str(i),
                                                     col,
                                                     ln=ln))
                 else:
                     select_row.append(self._tmpl_select_box(options['cols']['__none'],
                                                     "Choose CustomEvent",
                                                     'cols' + str(i),
                                                     "",
                                                     ln=ln))
                 if is_first_loop:
                     select_row.append("<input name=\"col_value%d\" value=\"%s\">" % (i, value))
                 else:
                     select_row.append("""<input name="col_value%d" value="%s">
                             <a href="javascript:;" onclick="delrow(%d,%d);">Remove row</a>""" \
                             % (i, value, i, row))
                 select_table.append(select_row)
                 select_row = []
                 if is_first_loop:
                     is_first_loop = False
                 row += 1
             sels.append(select_table)
 
         # javascript for add col selectors
         sels_col = []
         sels_col.append(self._tmpl_select_box(options['ids'][1], " - select "
             + options['ids'][0], 'ids', "",
             False,
             attribute='onChange="javascript: \
                 changed_customevent(customevent[\\\'ids\\\'],\' + col + \');"',
             ln=ln))
         sels_col.append("")
         sels_col.append(self._tmpl_select_box(options['cols']['__none'], "Choose CustomEvent",
                                             'cols\' + col + \'', "", False, ln=ln))
         sels_col.append("""<input name="col_value' + col + '">""")
         col_table = self._tmpl_box("", "", ["cols' + col + '"], headers[1:], [sels_col],
                     ["""<a id="add' + col + '" href="javascript:;"
                     onclick="addcol(\\'cols' + col + '\\', ' + col + ');">Add more arguments</a>
                     <a id="del' + col + '" href="javascript:;" onclick="delblock(' + col + ');">
                     Remove block</a>"""], ln=ln)
         col_table = col_table.replace('\n', '')
         formheader = """<script type="text/javascript">
                 var col = %d;
                 var col_select = new Array(%s,0);
                 var block_pos_max = %d;
                 var block_pos = new Array(%s,0);
                 var rows_pos_max = [%s];
                 var rows_pos = [%s];
 
                 function addcol(id, num){
                     col_select[num]++;
                     var table = document.getElementById(id);
                     var body = table.getElementsByTagName('tbody')[0];
                     var row = document.createElement('tr');
                     var cel0 = document.createElement('td');
                     row.appendChild(cel0);
                     var cel1 = document.createElement('td');
                     cel1.innerHTML = '<select name="bool' + num + '"> <option value="and">AND</option> <option value="or">OR</option> <option value="and_not">AND NOT</option> </select>';
                     row.appendChild(cel1);
                     var cel2 = document.createElement('td');
                     cel2.innerHTML = '%s';
                     row.appendChild(cel2);
                     var cel3 = document.createElement('td');
                     cel3.innerHTML = '%s';
                     row.appendChild(cel3);
                     body.appendChild(row);
 
                     // Change arguments
                     arguments = document['customevent']['cols' + num]
                     if (col_select[1] == 0) {
                         value = document['customevent']['ids'].value;
                     } else {
                         value = document['customevent']['ids'][block_pos[num]].value;
                     }
                     _change_select_options(arguments[arguments.length -1], get_argument_list(value), '');
                     rows_pos[num][col_select[num]-1] = rows_pos_max[num];
                     rows_pos_max[num]++;
                 } """ % (num_ids,
                         ','.join([str(len(choosed['cols'][i])) for i in range(num_ids)]),
                         num_ids,
                         ','.join([str(i) for i in range(num_ids)]),
                         ','.join([str(len(block)) for block in choosed['cols']]),
                         ','.join([str(range(len(block))) for block in choosed['cols']]),
                         sels_col[2].replace("' + col + '", "' + num + '"),
                         sels_col[3].replace("' + col + '", "' + num + '") + \
                                 """ <a href="javascript:;" onclick="delrow(' + num + ',' + (col_select[num]-1) + ');">Remove row</a>""")
         formheader += """
                 function addblock() {
                     col_select[col] = 1;
                     var ni = document.getElementById('block');
                     var newdiv = document.createElement('div'+col);
                     newdiv.innerHTML = '%s';
                     ni.appendChild(newdiv);
                     block_pos[col] = block_pos_max;
                     block_pos_max++;
                     rows_pos[col] = [0];
                     rows_pos_max[col] = 1;
                     col++;
                 }""" % col_table
         formheader += """
                 function delblock(id) {
                     var block = document.getElementById("cols" + id);
                     var add = document.getElementById("add" + id);
                     var del = document.getElementById("del" + id);
                     block.parentNode.removeChild(block);
                     add.parentNode.removeChild(add);
                     del.parentNode.removeChild(del);
                     for (var i = id+1; i < col_select.length; i++) {
                         block_pos[i]--;
                     }
                     block_pos_max--;
                 }
 
                 function delrow(table_id,row_num) {
                     var table = document.getElementById('cols' + table_id);
                     table.tBodies[0].deleteRow(rows_pos[table_id][row_num]);
                     col_select[table_id]--;
                     for (var i = row_num+1; i < rows_pos[table_id].length; i++) {
                         rows_pos[table_id][i]--;
                     }
                     rows_pos_max[table_id]--;
                 } """
         formheader += """
                 function change_select_options(selectList, isList, optionArray, chooseDefault) {
                     if (isList) {
                         for (var select = 0; select < selectList.length; select++) {
                             _change_select_options(selectList[select], optionArray, chooseDefault);
                         }
                     } else {
                         _change_select_options(selectList, optionArray, chooseDefault);
                     }
                 }
 
                 function _change_select_options(select, optionArray, chooseDefault) {
                     select.options.length = 0;
                     for (var option = 0; option*2 < optionArray.length - 1; option++) {
                         if (chooseDefault == optionArray[option*2+1]) {
                             select.options[option] = new Option(optionArray[option*2], optionArray[option*2+1], true, true);
                         } else {
                             select.options[option] = new Option(optionArray[option*2], optionArray[option*2+1]);
                         }
                     }
                 }
 
                 function changed_customevent(select, num){
                     if (select.length) {
                         value = select[block_pos[num]].value;
                     } else {
                         value = select.value;
                     }
                     list = get_argument_list(value);
                     select_list = (col_select[num] > 1);
                     change_select_options(document['customevent']['cols' + num], select_list, list, '');
                 }
 
                 function get_argument_list(value) {
                     if (value == "") {
                         return ['Choose CustomEvent',''];"""
         for event_id, cols in options['cols'].items():
             if event_id not in ['__header', '__none']:
                 str_cols = "[' - select %s', ''," % options['cols']['__header']
                 for internal, full in cols:
                     str_cols += "'%s','%s'," % (full, internal)
                 str_cols = str_cols[:-1] + ']'
                 formheader += """
                     } else if (value == "%s") {
                         return %s;""" % (event_id, str_cols)
         formheader += """
                     }
                 }
             </script>"""
 
         # Create the FORM's header
         formheader += """<form method="get" name="customevent">
         <input type="hidden" name="ln"value="%s" />""" % ln
 
         # Create all footers
         footers = []
         footers.append("")
         footers.append("""<a href="javascript:;" onclick="addcol('cols0', 0);">
                             Add more arguments</a>""")
         for i in range(1, num_ids):
             footers.append("""
                     <a id="add%(i)d" href="javascript:;" onclick="addcol('cols%(i)d', %(i)d);">Add more arguments</a>
                     <a id="del%(i)d" href="javascript:;" onclick="delblock(%(i)d);">Remove block</a>
                     """ % {'i': i})
         footers[-1] += """<div  id="block"> </div>"""
 
         # Create formfooter
         formfooter = """<p><a href="javascript:;" onclick="addblock();">Add more events</a>
                     <input class="formbutton" type="submit" name="action_gen" value="Generate"></p>
                     </form>"""
 
         return self._tmpl_box(formheader, formfooter, table_id, headers, sels, footers, ln=ln)
 
     def tmpl_display_event_trend_ascii(self, title, filename, ln=CFG_SITE_LANG):
         """Displays a ASCII graph representing a trend"""
         try:
             return self.tmpl_display_trend(title, "<div><pre>%s</pre></div>" %
                                            open(filename, 'r').read(), ln=ln)
         except IOError:
             return "No data found"
 
     def tmpl_display_event_trend_image(self, title, filename, ln=CFG_SITE_LANG):
         """Displays an image graph representing a trend"""
         if os.path.isfile(filename):
             return self.tmpl_display_trend(title, """<div><img src="%s" /></div>""" %
                                     filename.replace(CFG_WEBDIR, CFG_SITE_URL), ln=ln)
         else:
             return "No data found"
 
     def tmpl_display_event_trend_text(self, title, filename, ln=CFG_SITE_LANG):
         """Displays a text representing a trend"""
         try:
             return self.tmpl_display_trend(title, "<div>%s</div>" %
                                            open(filename, 'r').read(), ln=ln)
         except IOError:
             return "No data found"
 
     def tmpl_display_custom_summary(self, tag_name, data, title, query, tag,
                                     path, ln=CFG_SITE_LANG):
         """Display the custom summary (annual report)"""
         # Create the FORM's header
         formheader = """<form method="get">
         <input type="hidden" name="ln"value="%s" />""" % ln
 
         # Create the headers
         headers = [("Chart title", "Query", "Output tag", "")]
 
         # Create the body (text boxes and button)
         fields = (("""<input type="text" name="title" value="%s" size="20"/>""" % cgi.escape(title),
                    """<input type="text" name="query" value="%s" size="35"/>""" % cgi.escape(query),
                    """<input type="text" name="tag" value="%s" size="10"/>""" % cgi.escape(tag),
                    """<input class="formbutton" type="submit" name="action_gen" value="Generate"/>"""), )
 
         # Create form footer
         formfooter = """</form>"""
 
 
         out = self._tmpl_box(formheader, formfooter, [("custom_summary_table", )],
                              headers, fields, [""], ln=ln)
 
         out += """<div>
 <table border>
 
 <tr>
 
 <td colspan=2>
 <b><center>
 Distribution across %s
 </center>
 </td>
 </tr>
 
 <tr>
 <td align="right"><b>Nb.</b></td>
 <td><b>%s</b></td>
 </tr>
 
 """ % (cgi.escape(tag_name), cgi.escape(tag_name[0].capitalize() + tag_name[1:]))
+        if len(query) > 0:
+            query += " and "
         for title, number in data:
-            out += """<tr>
+            if title in ('Others', 'TOTAL'):
+                out += """<tr>
 <td align="right">%d</td>
 <td>%s</td>
 </tr>
 """ % (number, cgi.escape(title))
+            else:
+                out += """<tr>
+<td align="right"><a href="%s/search?p=%s&ln=%s">%d</a></td>
+<td>%s</td>
+</tr>
+""" % (CFG_SITE_URL, cgi.escape(urllib.quote(query + " " + tag + ':"' + title + '"')), ln, number, cgi.escape(title))
         out += """</table></div>
 <div><img src="%s" /></div>""" % cgi.escape(path.replace(CFG_WEBDIR, CFG_SITE_URL))
         return out
 
     # INTERNALS
     def tmpl_display_trend(self, title, html, ln=CFG_SITE_LANG):
         """
         Generates a generic display box for showing graphs (ASCII and IMGs)
         alongside to some metainformational boxes.
         """
         return """<table class="narrowsearchbox">
                    <thead><tr><th colspan="2" class="narrowsearchboxheader" align="left">%s</th></tr></thead>
                    <tbody><tr><td class="narrowsearchboxbody" valign="top">%s</td></tr></tbody>
                   </table> """ % (title, html)
 
     def _tmpl_box(self, formheader, formfooter, table_id, headers, selectboxes,
                   footers, ln=CFG_SITE_LANG):
         """
         Aggregates together the parameters in order to generate the
         corresponding box for customevent.
 
         @param formheader: Start tag for the FORM element.
         @type formheader: str
 
         @param formfooter: End tag for the FORM element.
         @type formfooter: str
 
         @param table_id: id for each table
         @type table_id: list<str>
 
         @param headers: Headers for the SELECT boxes
         @type headers: list<list<str>>
 
         @param selectboxes: The actual HTML drop-down boxes, with appropriate content.
         @type selectboxes: list<list<str>>|list<list<list<str>>>
 
         @param footers: footer for each table
         @type footers: list<str>
 
         @return: HTML describing a particular FORM box.
         @type: str
         """
         out = formheader
         for table in range(len(table_id)):
             out += """<table id="%s" class="searchbox">
                     <thead>
                         <tr>""" % table_id[table]
 
             #Append the headers
             for header in headers[table]:
                 out += """<th class="searchboxheader">%s</th>""" % header
 
             out += """</tr>
                 </thead>
                 <tbody>"""
 
             # Append the SELECT boxes
             is_first_loop = True
             out += """<tr valign="bottom">"""
             for selectbox in selectboxes[table]:
                 if type(selectbox) is list:
                     if is_first_loop:
                         is_first_loop = False
                     else:
                         out += """</tr>
                                 <tr valign="bottom">"""
                     for select in selectbox:
                         out += """<td class="searchboxbody" valign="top">%s</td>""" % select
                 else:
                     out += """<td class="searchboxbody" valign="top">%s</td>""" % selectbox
             out += """
                 </tr>"""
             out += """
                 </tbody>
             </table>"""
 
             # Append footer
             out += footers[table]
 
         out += formfooter
 
         return out
 
     def _tmpl_select_box(self, iterable, explaination, name, preselected,
                          multiple=False, attribute="", ln=CFG_SITE_LANG):
         """
         Generates a HTML SELECT drop-down menu.
 
         @param iterable: A list of values and tag content to be used in the SELECT list
         @type iterable: [(str, str)]
 
         @param explaination: An explainatory string put as the tag content for the first OPTION.
         @type explaination: str
 
         @param name: The name of the SELECT tag. Important for FORM-parsing.
         @type name: str
 
         @param preselected: The value, or list of values, of the OPTION that should be
                             preselected. Blank or empty list for none.
         @type preselected: str | []
 
         @param attribute: Optionally add attributes to the select tag
         @type attribute: str
 
         @param multiple: Optionally sets the SELECT box to accept multiple entries.
         @type multiple: bool
         """
 
 
         if attribute:
             sel = """<select name="%s" %s>""" % (name, attribute)
         else:
             if name == "timespan":
                 sel = """<script type="text/javascript">
                     function changeTimeSpanDates(val){
                         if(val == "select date"){
                             document.getElementById("selectDateTxt").style.display='block';}
                         else{
                             document.getElementById("selectDateTxt").style.display='none';}
                     }
 
                 </script>
                 <select name="timespan" id="timespan"
                     onchange="javascript: changeTimeSpanDates(this.value);">"""
             else:
                 sel = """<select name="%s">""" % name
 
         if multiple is True and name != "timespan":
             sel = sel.replace("<select ", """<select multiple="multiple" size="5" """)
         elif explaination:
             sel += """<option value="">%s</option>""" % explaination
 
         for realname, printname in [(x[0], x[1]) for x in iterable]:
             if printname is None:
                 printname = realname
             option = """<option value="%s">%s</option>""" % (realname, printname)
             if realname == preselected or (type(preselected) is list
                     and realname in preselected) or (name == "timespan" and
                                 realname == 'select date' and multiple):
                 option = option.replace('">', '" selected="selected">')
             sel += option
         sel += "</select>"
         if name == "timespan":
             if multiple:
                 s_date = preselected[0]
                 f_date = preselected[1]
             else:
                 s_date = datetime.datetime.today().date().strftime("%m/%d/%Y %H:%M")
                 f_date = datetime.datetime.now().strftime("%m/%d/%Y %H:%M")
             sel += """<link rel="stylesheet" href="%(CFG_SITE_URL)s/img/jquery-ui.css"
                         type="text/css" />
                       <script language="javascript" type="text/javascript" src="%(CFG_SITE_URL)s/js/jquery-ui-1.7.3.custom.min.js"></script>
                       <script type="text/javascript" src="%(CFG_SITE_URL)s/js/jquery-ui-timepicker-addon.js"></script>
 
-                      <div id="selectDateTxt" onload style="position:relative;display:none">
+                      <div id="selectDateTxt" style="position:relative;display:none">
                       <table align="center">
                           <tr align="center">
                               <td align="right" class="searchboxheader">From: </td>
-                              <td align="left"><input type=text name="s_date" id="s_date" value="%(s_date)s" size="14"></td>
+                              <td align="left"><input type="text" name="s_date" id="s_date" value="%(s_date)s" size="14" /></td>
                           </tr>
                           <tr align="center">
                               <td align="right" class="searchboxheader">To: </td>
-                              <td align="left"><input type=text name="f_date" id="f_date" value="%(f_date)s" size="14"></td>
+                              <td align="left"><input type="text" name="f_date" id="f_date" value="%(f_date)s" size="14" /></td>
                           </tr>
                       </table>
                       </div>
                       <script type="text/javascript">
                         $('#s_date').datetimepicker();
                         $('#f_date').datetimepicker({
                           hour: 23,
                           minute: 59
                         });
                         if(document.getElementById("timespan").value == "select date"){
                             document.getElementById("selectDateTxt").style.display='block';
                         } </script>""" % {'CFG_SITE_URL': CFG_SITE_URL,
                                           's_date': cgi.escape(s_date),
                                           'f_date': cgi.escape(f_date)}
         return sel
 
     def _tmpl_text_box(self, name, preselected, ln=CFG_SITE_LANG):
         """
         Generates a HTML text-box menu.
 
         @param name: The name of the textbox label.
         @type name: str
 
         @param preselected: The value that should be preselected. Blank or empty
         list for none.
         @type preselected: str | []
         """
         if name == 'min_loans' or name == 'max_loans':
             return """<script type="text/javascript">
  function checkNumber(input){
    var num = input.value.replace(/\,/g,'');
    var newtext = parseInt(num);
    if(isNaN(newtext)){
          alert('You may enter only numbers in this field!');
          input.value = 0;
    }
    else {
          input.value = newtext;
    }
  }
 </script>
 
 
-<input type=text name="%s" onchange="checkNumber(this)" value="%s">""" % (name, preselected)
+<input type="text" name="%s" onchange="checkNumber(this)" value="%s" />""" % (name, preselected)
         else:
-            return """<input type=text name="%s" value="%s">""" % (name, preselected)
+            return """<input type="text" name="%s" value="%s" />""" % (name, preselected)
diff --git a/modules/webstat/lib/webstat_webinterface.py b/modules/webstat/lib/webstat_webinterface.py
index e4b297b62..d635ea078 100644
--- a/modules/webstat/lib/webstat_webinterface.py
+++ b/modules/webstat/lib/webstat_webinterface.py
@@ -1,919 +1,1019 @@
 ## This file is part of Invenio.
 ## Copyright (C) 2007, 2008, 2009, 2010, 2011 CERN.
 ##
 ## Invenio is free software; you can redistribute it and/or
 ## modify it under the terms of the GNU General Public License as
 ## published by the Free Software Foundation; either version 2 of the
 ## License, or (at your option) any later version.
 ##
 ## Invenio is distributed in the hope that it will be useful, but
 ## WITHOUT ANY WARRANTY; without even the implied warranty of
 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 ## General Public License for more details.
 ##
 ## You should have received a copy of the GNU General Public License
 ## along with Invenio; if not, write to the Free Software Foundation, Inc.,
 ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 
 __revision__ = "$Id$"
 __lastupdated__ = "$Date$"
 
 import os, sys
 from urllib import unquote
 from invenio import webinterface_handler_config as apache
 
 from invenio.config import \
      CFG_TMPDIR, \
      CFG_SITE_URL, \
-     CFG_SITE_NAME, \
      CFG_SITE_LANG
+from invenio.bibindex_engine import CFG_JOURNAL_TAG
 from invenio.webinterface_handler import wash_urlargd, WebInterfaceDirectory
 from invenio.webpage import page
 from invenio.access_control_engine import acc_authorize_action
 from invenio.access_control_config import VIEWRESTRCOLL
 from invenio.search_engine import collection_restricted_p
 from invenio.webuser import collect_user_info, page_not_authorized
 from invenio.urlutils import redirect_to_url
 
-from invenio.webstat import perform_request_index
-from invenio.webstat import perform_display_keyevent
-from invenio.webstat import perform_display_customevent
-from invenio.webstat import perform_display_customevent_help
-from invenio.webstat import perform_display_error_log_analyzer, \
+from invenio.webstat import perform_request_index, \
+    perform_display_keyevent, \
+    perform_display_customevent, \
+    perform_display_customevent_help, \
+    perform_display_error_log_analyzer, \
     register_customevent, \
     perform_display_custom_summary, \
-    perform_display_stats_per_coll
+    perform_display_stats_per_coll, \
+    perform_display_current_system_health, \
+    perform_display_coll_list
 
 
 def detect_suitable_graph_format():
     """
     Return suitable graph format default argument. It is always flot (when there wasn't plot, gnuplot if it is
     present, otherwise asciiart).
     """
     return "flot"
 #    try:
 #        import Gnuplot
 #        suitable_graph_format = "gnuplot"
 #    except ImportError:
 #        suitable_graph_format = "asciiart"
 #    return suitable_graph_format
 
 SUITABLE_GRAPH_FORMAT = detect_suitable_graph_format()
 
 
 class WebInterfaceStatsPages(WebInterfaceDirectory):
     """Defines the set of stats pages."""
 
-    _exports = ['',
-                 'collection_population', 'search_frequency', 'search_type_distribution',
+    _exports = ['', 'system_health',
+                 'collection_population', 'new_records', 'search_frequency', 'search_type_distribution',
                  'download_frequency', 'comments_frequency', 'number_of_loans', 'web_submissions',
                  'loans_stats', 'loans_lists', 'renewals_lists', 'returns_table', 'returns_graph',
                  'ill_requests_stats', 'ill_requests_lists', 'ill_requests_graph', 'items_stats',
                  'items_list', 'loans_requests', 'loans_request_lists', 'user_stats',
                  'user_lists', 'error_log', 'customevent', 'customevent_help',
-                 'customevent_register', 'custom_summary', 'collections', 'export']
+                 'customevent_register', 'custom_summary', 'collections' , 'collection_stats',
+                 'export']
 
     navtrail = """<a class="navtrail" href="%s/stats/%%(ln_link)s">Statistics</a>""" % CFG_SITE_URL
 
     def __call__(self, req, form):
         """Index page."""
         argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
         ln = argd['ln']
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 text=auth_msg,
                 navmenuid='index',
                 ln=ln)
 
         return page(title="Statistics",
                     body=perform_request_index(ln=ln),
                     description="CDS, Statistics",
                     keywords="CDS, statistics",
                     req=req,
                     lastupdated=__lastupdated__,
                     navmenuid='stats',
                     language=ln)
 
+    # CURRENT SYSTEM HEALTH
+    def system_health(self, req, form):
+        argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
+        ln = argd['ln']
+        user_info = collect_user_info(req)
+        (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
+        if auth_code:
+            return page_not_authorized(req,
+                navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
+                text=auth_msg,
+                navmenuid='current system health',
+                ln=ln)
+
+        return page(title="Current system health",
+                    body=perform_display_current_system_health(ln=ln),
+                    navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
+                    (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
+                    description="CDS, Statistics, Current system health",
+                    keywords="CDS, statistics, current system health",
+                    req=req,
+                    lastupdated=__lastupdated__,
+                    navmenuid='current system health',
+                    language=ln)
+
     # KEY EVENT SECTION
     def collection_population(self, req, form):
         """Collection population statistics page."""
-        argd = wash_urlargd(form, {'collection': (str, CFG_SITE_NAME),
+        argd = wash_urlargd(form, {'collection': (str, "All"),
                                    'timespan': (str, "today"),
                                    's_date': (str, ""),
                                    'f_date': (str, ""),
                                    'format': (str, SUITABLE_GRAPH_FORMAT),
+                                   'sql': (int, 0),
                                    'ln': (str, CFG_SITE_LANG)})
         ln = argd['ln']
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 text=auth_msg,
                 navmenuid='collection population',
                 ln=ln)
 
         return page(title="Collection population",
                     body=perform_display_keyevent('collection population', argd, req, ln=ln),
                     navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
                     (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
                     description="CDS, Statistics, Collection population",
                     keywords="CDS, statistics, collection population",
                     req=req,
                     lastupdated=__lastupdated__,
                     navmenuid='collection population',
                     language=ln)
 
+    def new_records(self, req, form):
+        """Collection population statistics page."""
+        argd = wash_urlargd(form, {'collection': (str, "All"),
+                                   'timespan': (str, "today"),
+                                   's_date': (str, ""),
+                                   'f_date': (str, ""),
+                                   'format': (str, SUITABLE_GRAPH_FORMAT),
+                                   'sql': (int, 0),
+                                   'ln': (str, CFG_SITE_LANG)})
+        ln = argd['ln']
+        user_info = collect_user_info(req)
+        (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
+        if auth_code:
+            return page_not_authorized(req,
+                navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
+                text=auth_msg,
+                navmenuid='new records',
+                ln=ln)
+
+        return page(title="New records",
+                    body=perform_display_keyevent('new records', argd, req, ln=ln),
+                    navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
+                    (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
+                    description="CDS, Statistics, New records",
+                    keywords="CDS, statistics, new records",
+                    req=req,
+                    lastupdated=__lastupdated__,
+                    navmenuid='new records',
+                    language=ln)
+
+
     def search_frequency(self, req, form):
         """Search frequency statistics page."""
         argd = wash_urlargd(form, {'timespan': (str, "today"),
                                    's_date': (str, ""),
                                    'f_date': (str, ""),
                                    'format': (str, SUITABLE_GRAPH_FORMAT),
+                                   'sql': (int, 0),
                                    'ln': (str, CFG_SITE_LANG)})
         ln = argd['ln']
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 text=auth_msg,
                 navmenuid='search frequency',
                 ln=ln)
 
         return page(title="Search frequency",
                     body=perform_display_keyevent('search frequency', argd, req, ln=ln),
                     navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
                     (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
                     description="CDS, Statistics, Search frequency",
                     keywords="CDS, statistics, search frequency",
                     req=req,
                     lastupdated=__lastupdated__,
                     navmenuid='search frequency',
                     language=ln)
 
     def comments_frequency(self, req, form):
         """Comments frequency statistics page."""
         argd = wash_urlargd(form, {'collection': (str, "All"),
                                    'timespan': (str, "today"),
                                    's_date': (str, ""),
                                    'f_date': (str, ""),
                                    'format': (str, SUITABLE_GRAPH_FORMAT),
+                                   'sql': (int, 0),
                                    'ln': (str, CFG_SITE_LANG)})
         ln = argd['ln']
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 text=auth_msg,
                 navmenuid='comments frequency',
                 ln=ln)
 
         return page(title="Comments frequency",
                     body=perform_display_keyevent('comments frequency', argd, req, ln=ln),
                     navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
                     (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
                     description="CDS, Statistics, Comments frequency",
                     keywords="CDS, statistics, Comments frequency",
                     req=req,
                     lastupdated=__lastupdated__,
                     navmenuid='comments frequency',
                     language=ln)
 
     def search_type_distribution(self, req, form):
         """Search type distribution statistics page."""
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         argd = wash_urlargd(form, {'timespan': (str, "today"),
                                    's_date': (str, ""),
                                    'f_date': (str, ""),
                                    'format': (str, SUITABLE_GRAPH_FORMAT),
+                                   'sql': (int, 0),
                                    'ln': (str, CFG_SITE_LANG)})
         ln = argd['ln']
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 text=auth_msg,
                 navmenuid='search type distribution',
                 ln=ln)
 
         return page(title="Search type distribution",
                     body=perform_display_keyevent('search type distribution', argd, req, ln=ln),
                     navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
                     (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
                     description="CDS, Statistics, Search type distribution",
                     keywords="CDS, statistics, search type distribution",
                     req=req,
                     lastupdated=__lastupdated__,
                     navmenuid='search type distribution',
                     language=ln)
 
     def download_frequency(self, req, form):
         """Download frequency statistics page."""
         argd = wash_urlargd(form, {'collection': (str, "All"),
                                    'timespan': (str, "today"),
                                    's_date': (str, ""),
                                    'f_date': (str, ""),
                                    'format': (str, SUITABLE_GRAPH_FORMAT),
+                                   'sql': (int, 0),
                                    'ln': (str, CFG_SITE_LANG)})
         ln = argd['ln']
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 text=auth_msg,
                 navmenuid='download frequency',
                 ln=ln)
 
         return page(title="Download frequency",
                     body=perform_display_keyevent('download frequency', argd, req, ln=ln),
                     navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
                     (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
                     description="CDS, Statistics, Download frequency",
                     keywords="CDS, statistics, download frequency",
                     req=req,
                     lastupdated=__lastupdated__,
                     navmenuid='download frequency',
                     language=ln)
 
     def number_of_loans(self, req, form):
         """Number of loans statistics page."""
         argd = wash_urlargd(form, {'timespan': (str, "today"),
                                    's_date': (str, ""),
                                    'f_date': (str, ""),
                                    'format': (str, SUITABLE_GRAPH_FORMAT),
+                                   'sql': (int, 0),
                                    'ln': (str, CFG_SITE_LANG)})
         ln = argd['ln']
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 text=auth_msg,
                 navmenuid='number of loans',
                 ln=ln)
 
         return page(title="Number of loans",
                     body=perform_display_keyevent('number of loans', argd, req, ln=ln),
                     navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
                     (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
                     description="CDS, Statistics, Number of loans",
                     keywords="CDS, statistics, Number of loans",
                     req=req,
                     lastupdated=__lastupdated__,
                     navmenuid='number of loans',
                     language=ln)
 
     def web_submissions(self, req, form):
         """Web submissions statistics page."""
         argd = wash_urlargd(form, {'doctype': (str, "all"),
                                    'timespan': (str, "today"),
                                    's_date': (str, ""),
                                    'f_date': (str, ""),
                                    'format': (str, SUITABLE_GRAPH_FORMAT),
+                                   'sql': (int, 0),
                                    'ln': (str, CFG_SITE_LANG)})
         ln = argd['ln']
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 text=auth_msg,
                 navmenuid='web submissions',
                 ln=ln)
 
         return page(title="Web submissions",
                     body=perform_display_keyevent('web submissions', argd, req, ln=ln),
                     navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
                     (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
                     description="CDS, Statistics, Web submissions",
                     keywords="CDS, statistics, websubmissions",
                     req=req,
                     lastupdated=__lastupdated__,
                     navmenuid='web submissions',
                     language=ln)
 
     def loans_stats(self, req, form):
         """Number of loans statistics page."""
-        argd = wash_urlargd(form, {'user_address': (str, ""),
-                                   'udc': (str, ""),
+        argd = wash_urlargd(form, {'udc': (str, ""),
                                    'item_status': (str, ""),
                                    'publication_date': (str, ""),
                                    'creation_date': (str, ""),
                                    'timespan': (str, "today"),
                                    's_date': (str, ""),
                                    'f_date': (str, ""),
                                    'format': (str, SUITABLE_GRAPH_FORMAT),
+                                   'sql': (int, 0),
                                    'ln': (str, CFG_SITE_LANG)})
         ln = argd['ln']
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 text=auth_msg,
                 navmenuid='loans statistics',
                 ln=ln)
 
         return page(title="Loans statistics",
                     body=perform_display_keyevent('loans statistics', argd, req, ln=ln),
                     navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
                     (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
                     description="CDS, Statistics, Loans statistics",
                     keywords="CDS, statistics, Loans statistics",
                     req=req,
                     lastupdated=__lastupdated__,
                     navmenuid='loans statistics',
                     language=ln)
 
     def loans_lists(self, req, form):
         """Number of loans lists page."""
-        argd = wash_urlargd(form, {'user_address': (str, ""),
-                                   'udc': (str, ""),
+        argd = wash_urlargd(form, {'udc': (str, ""),
                                    'loan_period': (str, ""),
                                    'min_loans': (int, 0),
                                    'max_loans': (int, sys.maxint),
                                    'publication_date': (str, ""),
                                    'creation_date': (str, ""),
                                    'timespan': (str, "today"),
                                    's_date': (str, ""),
                                    'f_date': (str, ""),
                                    'format': (str, SUITABLE_GRAPH_FORMAT),
+                                   'sql': (int, 0),
                                    'ln': (str, CFG_SITE_LANG)})
         argd['min_loans'] = str(argd['min_loans'])
         argd['max_loans'] = str(argd['max_loans'])
         ln = argd['ln']
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 text=auth_msg,
                 navmenuid='loans lists',
                 ln=ln)
 
         return page(title="Loans lists",
                     body=perform_display_keyevent('loans lists', argd, req, ln=ln),
                     navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
                     (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
                     description="CDS, Statistics, Loans lists",
                     keywords="CDS, statistics, Loans lists",
                     req=req,
                     lastupdated=__lastupdated__,
                     navmenuid='loans lists',
                     language=ln)
 
     def renewals_lists(self, req, form):
         """Renewed items lists page."""
-        argd = wash_urlargd(form, {'user_address': (str, ""),
-                                   'udc': (str, ""),
+        argd = wash_urlargd(form, {'udc': (str, ""),
                                    'collection': (str, ""),
                                    'timespan': (str, "today"),
                                    's_date': (str, ""),
                                    'f_date': (str, ""),
                                    'format': (str, SUITABLE_GRAPH_FORMAT),
+                                   'sql': (int, 0),
                                    'ln': (str, CFG_SITE_LANG)})
         ln = argd['ln']
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 text=auth_msg,
                 navmenuid='renewals',
                 ln=ln)
 
         return page(title="Renewals lists",
                     body=perform_display_keyevent('renewals', argd, req, ln=ln),
                     navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
                     (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
                     description="CDS, Statistics, Renewals lists",
                     keywords="CDS, statistics, Renewals lists",
                     req=req,
                     lastupdated=__lastupdated__,
                     navmenuid='renewals lists',
                     language=ln)
 
     def returns_table(self, req, form):
         """Number of returns table page."""
         argd = wash_urlargd(form, {'timespan': (str, "today"),
                                    's_date': (str, ""),
                                    'f_date': (str, ""),
                                    'format': (str, SUITABLE_GRAPH_FORMAT),
+                                   'sql': (int, 0),
                                    'ln': (str, CFG_SITE_LANG)})
         ln = argd['ln']
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 text=auth_msg,
                 navmenuid='returns table',
                 ln=ln)
 
         return page(title="Returns table",
                     body=perform_display_keyevent('number returns', argd, req, ln=ln),
                     navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
                     (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
                     description="CDS, Statistics, Returns table",
                     keywords="CDS, statistics, Returns table",
                     req=req,
                     lastupdated=__lastupdated__,
                     navmenuid='returns table',
                     language=ln)
 
     def returns_graph(self, req, form):
         """Percentage of returns graph page."""
         argd = wash_urlargd(form, {'timespan': (str, "today"),
                                    's_date': (str, ""),
                                    'f_date': (str, ""),
                                    'format': (str, SUITABLE_GRAPH_FORMAT),
+                                   'sql': (int, 0),
                                    'ln': (str, CFG_SITE_LANG)})
         ln = argd['ln']
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 text=auth_msg,
                 navmenuid='returns graph',
                 ln=ln)
 
         return page(title="Returns graph",
                     body=perform_display_keyevent('percentage returns', argd, req, ln=ln),
                     navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
                     (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
                     description="CDS, Statistics, Returns graph",
                     keywords="CDS, statistics, Returns graph",
                     req=req,
                     lastupdated=__lastupdated__,
                     navmenuid='returns graph',
                     language=ln)
 
     def ill_requests_stats(self, req, form):
         """ILL Requests statistics page."""
-        argd = wash_urlargd(form, {'user_address': (str, ""),
-                                   'doctype': (str, ""),
+        argd = wash_urlargd(form, {'doctype': (str, ""),
                                    'status': (str, ""),
                                    'supplier': (str, ""),
                                    'timespan': (str, "today"),
                                    's_date': (str, ""),
                                    'f_date': (str, ""),
                                    'format': (str, SUITABLE_GRAPH_FORMAT),
+                                   'sql': (int, 0),
                                    'ln': (str, CFG_SITE_LANG)})
         ln = argd['ln']
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 text=auth_msg,
                 navmenuid='ill requests statistics',
                 ln=ln)
 
         return page(title="ILL Requests statistics",
                     body=perform_display_keyevent('ill requests statistics', argd, req, ln=ln),
                     navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
                     (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
                     description="CDS, Statistics, ILL Requests statistics",
                     keywords="CDS, statistics, ILL Requests statistics",
                     req=req,
                     lastupdated=__lastupdated__,
                     navmenuid='ill requests statistics',
                     language=ln)
 
     def ill_requests_lists(self, req, form):
         """Number of loans lists page."""
         argd = wash_urlargd(form, {'doctype': (str, ""),
                                    'supplier': (str, ""),
                                    'timespan': (str, "today"),
                                    's_date': (str, ""),
                                    'f_date': (str, ""),
                                    'format': (str, SUITABLE_GRAPH_FORMAT),
+                                   'sql': (int, 0),
                                    'ln': (str, CFG_SITE_LANG)})
         ln = argd['ln']
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 text=auth_msg,
                 navmenuid='ill requests list',
                 ln=ln)
 
         return page(title="ILL Requests list",
                     body=perform_display_keyevent('ill requests list', argd, req, ln=ln),
                     navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
                     (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
                     description="CDS, Statistics, ILL Requests list",
                     keywords="CDS, statistics, ILL Requests list",
                     req=req,
                     lastupdated=__lastupdated__,
                     navmenuid='ill requests list',
                     language=ln)
 
     def ill_requests_graph(self, req, form):
         """Percentage of satisfied ILL requests graph page."""
-        argd = wash_urlargd(form, {'user_address': (str, ""),
-                                   'doctype': (str, ""),
+        argd = wash_urlargd(form, {'doctype': (str, ""),
                                    'status': (str, ""),
                                    'supplier': (str, ""),
                                    'timespan': (str, "today"),
                                    's_date': (str, ""),
                                    'f_date': (str, ""),
                                    'format': (str, SUITABLE_GRAPH_FORMAT),
+                                   'sql': (int, 0),
                                    'ln': (str, CFG_SITE_LANG)})
         ln = argd['ln']
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 text=auth_msg,
                 navmenuid='percentage satisfied ill requests',
                 ln=ln)
 
         return page(title="Percentage of satisfied ILL requests",
                     body=perform_display_keyevent('percentage satisfied ill requests',
                                                   argd, req, ln=ln),
                     navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
                     (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
                     description="CDS, Statistics, Percentage of satisfied ILL requests",
                     keywords="CDS, statistics, Percentage of satisfied ILL requests",
                     req=req,
                     lastupdated=__lastupdated__,
                     navmenuid='percentage satisfied ill requests',
                     language=ln)
 
     def items_stats(self, req, form):
         """ILL Requests statistics page."""
         argd = wash_urlargd(form, {'udc': (str, ""),
                                    'collection': (str, ""),
                                    'timespan': (str, "today"),
                                    's_date': (str, ""),
                                    'f_date': (str, ""),
                                    'format': (str, SUITABLE_GRAPH_FORMAT),
+                                   'sql': (int, 0),
                                    'ln': (str, CFG_SITE_LANG)})
         ln = argd['ln']
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 text=auth_msg,
                 navmenuid='items stats',
                 ln=ln)
 
         return page(title="Items statistics",
                     body=perform_display_keyevent('items stats', argd, req, ln=ln),
                     navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
                     (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
                     description="CDS, Statistics, Items statistics",
                     keywords="CDS, statistics, Items statistics",
                     req=req,
                     lastupdated=__lastupdated__,
                     navmenuid='items stats',
                     language=ln)
 
     def items_list(self, req, form):
         """Number of loans lists page."""
         argd = wash_urlargd(form, {'library': (str, ""),
                                    'status': (str, ""),
                                    'format': (str, ""),
+                                   'sql': (int, 0),
                                    'ln': (str, CFG_SITE_LANG)})
         ln = argd['ln']
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 text=auth_msg,
                 navmenuid='items list',
                 ln=ln)
 
         return page(title="Items list",
                     body=perform_display_keyevent('items list', argd, req, ln=ln),
                     navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
                     (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
                     description="CDS, Statistics, Items list",
                     keywords="CDS, statistics, Items list",
                     req=req,
                     lastupdated=__lastupdated__,
                     navmenuid='items list',
                     language=ln)
 
     def loans_requests(self, req, form):
         """Number of loans statistics page."""
         argd = wash_urlargd(form, {'item_status': (str, ""),
                                    'timespan': (str, "today"),
                                    's_date': (str, ""),
                                    'f_date': (str, ""),
                                    'format': (str, SUITABLE_GRAPH_FORMAT),
+                                   'sql': (int, 0),
                                    'ln': (str, CFG_SITE_LANG)})
         ln = argd['ln']
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 text=auth_msg,
                 navmenuid='loan request statistics',
                 ln=ln)
 
         return page(title="Hold requests statistics",
                     body=perform_display_keyevent('loan request statistics', argd, req, ln=ln),
                     navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
                     (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
                     description="CDS, Statistics, Hold requests statistics",
                     keywords="CDS, statistics, Hold requests statistics",
                     req=req,
                     lastupdated=__lastupdated__,
                     navmenuid='loan request statistics',
                     language=ln)
 
+    def loans_request_lists(self, req, form):
+        """Hold request lists page."""
+        argd = wash_urlargd(form, {'udc': (str, ""),
+                                   'timespan': (str, "today"),
+                                   's_date': (str, ""),
+                                   'f_date': (str, ""),
+                                   'format': (str, SUITABLE_GRAPH_FORMAT),
+                                   'sql': (int, 0),
+                                   'ln': (str, CFG_SITE_LANG)})
+        ln = argd['ln']
+        user_info = collect_user_info(req)
+        (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
+        if auth_code:
+            return page_not_authorized(req,
+                navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
+                text=auth_msg,
+                navmenuid='hold request lists',
+                ln=ln)
+
+        return page(title="Hold request lists",
+                    body=perform_display_keyevent('loan request lists', argd, req, ln=ln),
+                    navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
+                    (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
+                    description="CDS, Statistics, Hold request lists",
+                    keywords="CDS, statistics, Hold request lists",
+                    req=req,
+                    lastupdated=__lastupdated__,
+                    navmenuid='hold request lists',
+                    language=ln)
+
     def user_stats(self, req, form):
         """Number of loans statistics page."""
-        argd = wash_urlargd(form, {'user_address': (str, ""),
-                                   'timespan': (str, "today"),
+        argd = wash_urlargd(form, {'timespan': (str, "today"),
                                    's_date': (str, ""),
                                    'f_date': (str, ""),
                                    'format': (str, SUITABLE_GRAPH_FORMAT),
+                                   'sql': (int, 0),
                                    'ln': (str, CFG_SITE_LANG)})
         ln = argd['ln']
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 text=auth_msg,
-                navmenuid='user statistics',
+                navmenuid='library statistics',
                 ln=ln)
 
         return page(title="Users statistics",
                     body=perform_display_keyevent('user statistics', argd, req, ln=ln),
                     navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
                     (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
                     description="CDS, Statistics, Users statistics",
                     keywords="CDS, statistics, Users statistics",
                     req=req,
                     lastupdated=__lastupdated__,
-                    navmenuid='user statistics',
+                    navmenuid='library statistics',
                     language=ln)
 
     def user_lists(self, req, form):
         """Number of loans lists page."""
-        argd = wash_urlargd(form, {'user_address': (str, ""),
-                                   'timespan': (str, "today"),
+        argd = wash_urlargd(form, {'timespan': (str, "today"),
                                    's_date': (str, ""),
                                    'f_date': (str, ""),
                                    'format': (str, SUITABLE_GRAPH_FORMAT),
+                                   'sql': (int, 0),
                                    'ln': (str, CFG_SITE_LANG)})
         ln = argd['ln']
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 text=auth_msg,
-                navmenuid='user lists',
+                navmenuid='library lists',
                 ln=ln)
 
         return page(title="Users lists",
                     body=perform_display_keyevent('user lists', argd, req, ln=ln),
                     navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
                     (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
                     description="CDS, Statistics, Users lists",
                     keywords="CDS, statistics, Users lists",
                     req=req,
                     lastupdated=__lastupdated__,
-                    navmenuid='user lists',
+                    navmenuid='library lists',
                     language=ln)
 
     # CUSTOM EVENT SECTION
     def customevent(self, req, form):
         """Custom event statistics page"""
         arg_format = {'ids': (list, []),
                      'timespan': (str, "today"),
                      's_date': (str, ""),
                      'f_date': (str, ""),
                      'format': (str, SUITABLE_GRAPH_FORMAT),
                      'ln': (str, CFG_SITE_LANG)}
         for key in form.keys():
             if key[:4] == 'cols':
                 i = key[4:]
                 arg_format['cols' + i] = (list, [])
                 arg_format['col_value' + i] = (list, [])
                 arg_format['bool' + i] = (list, [])
         argd = wash_urlargd(form, arg_format)
 
         ln = argd['ln']
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 text=auth_msg,
                 navmenuid='custom event',
                 ln=ln)
 
         body = perform_display_customevent(argd['ids'], argd, req=req, ln=ln)
         return page(title="Custom event",
                     body=body,
                     navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
                     (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
                     description="CDS Personalize, Statistics, Custom event",
                     keywords="CDS, statistics, custom event",
                     req=req,
                     lastupdated=__lastupdated__,
                     navmenuid='custom event',
                     language=ln)
 
-    def loans_request_lists(self, req, form):
-        """Number of loans request lists page."""
-        argd = wash_urlargd(form, {'user_address': (str, ""),
-                                   'udc': (str, ""),
-                                   'timespan': (str, "today"),
-                                   's_date': (str, ""),
-                                   'f_date': (str, ""),
-                                   'format': (str, SUITABLE_GRAPH_FORMAT),
-                                   'ln': (str, CFG_SITE_LANG)})
-        ln = argd['ln']
-        user_info = collect_user_info(req)
-        (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
-        if auth_code:
-            return page_not_authorized(req,
-                navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
-                text=auth_msg,
-                navmenuid='loans request lists',
-                ln=ln)
-
-        return page(title="Loans request lists",
-                    body=perform_display_keyevent('loans request lists', argd, req, ln=ln),
-                    navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
-                    (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
-                    description="CDS, Statistics, Loans  request lists",
-                    keywords="CDS, statistics, Loans request lists",
-                    req=req,
-                    lastupdated=__lastupdated__,
-                    navmenuid='loans request lists',
-                    language=ln)
-
     def error_log(self, req, form):
         """Number of loans request lists page."""
         argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
         ln = argd['ln']
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 text=auth_msg,
                 navmenuid='error log analyzer',
                 ln=ln)
 
         return page(title="Error log analyzer",
                     body=perform_display_error_log_analyzer(ln=ln),
                     navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
                     (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
                     description="CDS, Statistics, Error log analyzer",
                     keywords="CDS, statistics, Error log analyzer",
                     req=req,
                     lastupdated=__lastupdated__,
                     navmenuid='error log analyzer',
                     language=ln)
 
     def customevent_help(self, req, form):
         """Custom event help page"""
         argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
         ln = argd['ln']
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 text=auth_msg,
                 navmenuid='custom event help',
                 ln=ln)
 
         return page(title="Custom event help",
                     body=perform_display_customevent_help(ln=ln),
                     navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
                     (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
                     description="CDS Personalize, Statistics, Custom event help",
                     keywords="CDS, statistics, custom event help",
                     req=req,
                     lastupdated=__lastupdated__,
                     navmenuid='custom event help',
                     language=ln)
 
     def customevent_register(self, req, form):
         """Register a customevent and reload to it defined url"""
         argd = wash_urlargd(form, {'event_id': (str, ""),
                                    'arg': (str, ""),
                                    'url': (str, ""),
                                    'ln': (str, CFG_SITE_LANG)})
         params = argd['arg'].split(',')
         if "WEBSTAT_IP" in params:
             index = params.index("WEBSTAT_IP")
             params[index] = str(req.remote_ip)
         register_customevent(argd['event_id'], params)
         return redirect_to_url(req, unquote(argd['url']), apache.HTTP_MOVED_PERMANENTLY)
 
     # CUSTOM REPORT SECTION
     def custom_summary(self, req, form):
         """Custom report page"""
         argd = wash_urlargd(form, {'query': (str, ""),
-                                   'tag': (str, "909C4p"),
+                                   'tag': (str, CFG_JOURNAL_TAG.replace("%", "p")),
                                    'title': (str, "Publications"),
                                    'ln': (str, CFG_SITE_LANG)})
         ln = argd['ln']
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 text=auth_msg,
                 navmenuid='custom query summary',
                 ln=ln)
 
         return page(title="Custom query summary",
                     body=perform_display_custom_summary(argd, ln=ln),
                     navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
                     (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
                     description="CDS, Statistics, Custom Query Summary",
                     keywords="CDS, statistics, custom query summary",
                     req=req,
                     lastupdated=__lastupdated__,
                     navmenuid='custom query summary',
                     language=ln)
 
     # COLLECTIONS SECTION
+    def collection_stats(self, req, form):
+        """Collection statistics list page"""
+        argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
+        ln = argd['ln']
+        user_info = collect_user_info(req)
+        (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
+        if auth_code:
+            return page_not_authorized(req,
+                navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
+                navmenuid='collections list',
+                text=auth_msg,
+                ln=ln)
+
+        return page(title="Collection statistics",
+                    body=perform_display_coll_list(req, ln=ln),
+                    navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
+                    (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
+                    description="CDS, Statistics, Collection statistics",
+                    keywords="CDS, statistics",
+                    req=req,
+                    lastupdated=__lastupdated__,
+                    navmenuid='collections list',
+                    language=ln)
+
+
     def collections(self, req, form):
         """Collections statistics page"""
-        argd = wash_urlargd(form, {'coll': (str, "All"),
+        argd = wash_urlargd(form, {'collection': (str, "All"),
+                                   'timespan': (str, "this month"),
+                                   's_date': (str, ""),
+                                   'f_date': (str, ""),
+                                   'format': (str, "flot"),
                                    'ln': (str, CFG_SITE_LANG)})
         ln = argd['ln']
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 navmenuid='collections',
                 text=auth_msg,
                 ln=ln)
 
-        if collection_restricted_p(argd['coll']):
-            (auth_code_coll, auth_msg_coll) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=argd['coll'])
+        if collection_restricted_p(argd['collection']):
+            (auth_code_coll, auth_msg_coll) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=argd['collection'])
             if auth_code_coll:
                 return page_not_authorized(req,
                                            navmenuid='collections',
                                            text=auth_msg_coll,
                                            ln=ln)
-        return page(title="Statistics of %s" % argd['coll'],
-                    body=perform_display_stats_per_coll(argd['coll'], req, ln=ln),
+        return page(title="Statistics of %s" % argd['collection'],
+                    body=perform_display_stats_per_coll(argd, req, ln=ln),
                     navtrail="""<a class="navtrail" href="%s/stats/%s">Statistics</a>""" % \
                     (CFG_SITE_URL, (ln != CFG_SITE_LANG and '?ln=' + ln) or ''),
-                    description="CDS, Statistics, Collection %s" % argd['coll'],
-                    keywords="CDS, statistics, %s" % argd['coll'],
+                    description="CDS, Statistics, Collection %s" % argd['collection'],
+                    keywords="CDS, statistics, %s" % argd['collection'],
                     req=req,
                     lastupdated=__lastupdated__,
                     navmenuid='collections',
                     language=ln)
 
     # EXPORT SECTION
     def export(self, req, form):
         """Exports data"""
         argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
         ln = argd['ln']
         user_info = collect_user_info(req)
         (auth_code, auth_msg) = acc_authorize_action(user_info, 'runwebstatadmin')
         if auth_code:
             return page_not_authorized(req,
                 navtrail=self.navtrail % {'ln_link': (ln != CFG_SITE_LANG and '?ln=' + ln) or ''},
                 text=auth_msg,
                 navmenuid='export',
                 ln=ln)
 
         argd = wash_urlargd(form, {"filename": (str, ""),
                                    "mime": (str, "")})
 
         # Check that the particular file exists and that it's OK to export
         webstat_files = [x for x in os.listdir(CFG_TMPDIR) if x.startswith("webstat")]
         if argd["filename"] not in webstat_files:
             return "Bad file."
 
         # Set correct header type
         req.content_type = argd["mime"]
         req.send_http_header()
 
         # Rebuild path, send it to the user, and clean up.
         filename = CFG_TMPDIR + '/' + argd["filename"]
         req.sendfile(filename)
         os.remove(filename)
 
     index = __call__