diff --git a/config/config.wml b/config/config.wml
index 1170474fb..63961f7fd 100644
--- a/config/config.wml
+++ b/config/config.wml
@@ -1,861 +1,875 @@
## -*- mode: html; coding: utf-8; -*-
## $Id$

## This file enables you to configure the parameters of your local CDS
## installation. It should be self-explanatory. Just go ahead and
## change the values within "define-tag" elements according to your
## needs. When done, return to the main CDS Invenio source directory and
## type 'make'.

## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

#####################
## About "config.wml"
#####################

## This file ('config.wml') consists of several relatively independent
## configuration parts:
##
##  Part 1: Essential parameters
##  Part 2: CDS page elements
##  Part 3: CDS navigation bar
##  Part 4: WebSearch parameters
##  Part 5: BibHarvest OAI parameters
##  Part 6: WebSubmit parameters
##  Part 7: Fulltext Archive parameters
##  Part 8: BibFormat parameters
##  Part 9: BibIndex parameters
##  Part 10: Access Control parameters
##
## The configuration is done by editing the content of the "define-tag"
## WML elements below. Feel free to edit as many as you need. When done,
## return to the main CDS Invenio source directory and type 'make'.
## Good luck! :-)

## Before starting, let's include helper functions:
#include "configbis.wml"

###################################
## Part 1: Essential parameters ##
###################################

## This part defines essential CDS Invenio internal parameters that
## everybody should modify, like the name of the server or the email
## address of the local CDS Invenio administrator.

## CDSNAME -- the visible name of your CDS Invenio installation:
## (example: "My Document Server")
Atlantis Institute of Fictive Science
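A note on where these values land: CDSNAME and the CDSNAMEINTL translations defined next are compiled into cdsname and the cdsnameintl dictionary of modules/miscutil/lib/config.py.wml, further down in this diff. A minimal sketch of the lookup pattern this enables -- the helper get_site_name is hypothetical; only cdsname, cdsnameintl and cdslang come from the generated config:

from invenio.config import cdsname, cdsnameintl, cdslang

def get_site_name(ln=cdslang):
    # Return the site name translated into language 'ln', falling
    # back to the default CDSNAME when no translation is defined.
    return cdsnameintl.get(ln, cdsname)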
## (example: "Mon Serveur des Documents") Atlantis Institute of Fictive Science Atlantis Institut des Sciences Fictives Atlantis Institut der fiktiven Wissenschaft Atlantis Instituto de la Ciencia Fictive Institut Atlantis de Ciència Fictícia Instituto Atlantis de Ciência Fictícia Atlantis Istituto di Scienza Fittizia Атлантис Институт фиктивных Наук Atlantis Inštitút Fiktívnych Vied Atlantis Institut Fiktivních Věd Atlantis Institutt for Fiktiv Vitenskap Atlantis Institut för Fiktiv Vetenskap Ινστιτούτο Φανταστικών Επιστημών Ατλαντίδος Інститут вигаданих наук в Атлантісі Fictive 科学のAtlantis の協会 Instytut Fikcyjnej Nauki Atlantis Институт за фиктивни науки Атлантис Institut Fiktivnih Znanosti Atlantis 阿特兰提斯虚拟科学学院 阿特蘭提斯虛擬科學學院 ## CDSLANG -- the default language of the interface: ## (example: "en") en ## CDSLANGS -- list of all languages the user interface should be ## available in, separated by commas. The order specified below will ## be respected on the interface pages. A good default would be to ## use the alphabetical order. Currently supported languages include ## Bulgarian, Catalan, Czech, German, Greek, English, Spanish, French, ## Italian, Japanese, Norwegian, Polish, Portuguese, Russian, Slovak, Swedish, ## and Ukrainian, Chinese (China), Chinese (Taiwan), so that the current ## eventual maximum you can currently select is ## "bg,ca,cs,de,el,en,es,fr,hr,it,ja,no,pl,pt,ru,sk,sv,uk,zh_CN,zh_TW". ## (example: "de,en,fr,it") bg,ca,cs,de,el,en,es,fr,hr,it,ja,no,pl,pt,ru,sk,sv,uk,zh_CN,zh_TW ## ALERTENGINEEMAIL -- the email address from which the alert emails ## will appear to be send: ## (example: "cds.alert@cdsware.cern.ch") cds.alert@cdsdev.cern.ch ## SUPPORTEMAIL -- the email address of the support team for this ## installation: ## (example: "cds.support@cern.ch") cds.support@cern.ch ## ADMINEMAIL -- the email address of the 'superuser' for this ## installation. Enter your email address below and login with this ## address when using CDS Invenio administration modules. You will then ## be automatically recognized as superuser of the system. ## (example: "cds.support@cern.ch") cds.support@cern.ch ## CFG_MAX_CACHED_QUERIES -- maximum cached queries number possible ## after reaching this number of cached queries the cache is pruned ## deleting half of the older accessed cached queries. ## (example: "10000") 10000 ## CFG_MISCUTIL_USE_SQLALCHEMY -- whethever to use SQLAlchemy.pool ## in the db engine of CDS Invenio. ## It is ok to enable this flag even if you have not installed ## SQLAlchemy. ## Note that for the moment Invenio will loose some perfomance, ## whenever CFG_MISCUTIL_USE_SQLALCHEMY is enabled. False ## CFG_MISCUTIL_SMTP_HOST -- which server to use as outgoing mail server to ## send outgoing emails generated by the system, for example concerning ## submissions or email notification alerts. ## (example: "localhost") localhost ## CFG_MISCUTIL_SMTP_PORT -- which port to use on the outgoing mail server ## defined in the previous step. ## (example: "25") 25 ## CFG_APACHE_PASSWORD_FILE -- the file where Apache user credentials ## are stored. Must be an absolute pathname. If the value does not ## start by a slash, it is considered to be the filename of a file ## located under prefix/var/tmp directory. This is useful for the ## demo site testing purposes. For the production site, if you plan ## to restrict access to some collections based on the Apache user ## authentication mechanism, you should put here an absolute path to ## your Apache password file. 
## (example: "/usr/local/apache/passwd/passwords") demo-site-apache-user-passwords ## CFG_APACHE_GROUP_FILE -- the file where Apache user groups are ## defined. See the documentation of the preceding config variable. ## (example: "/usr/local/apache/passwd/groups") demo-site-apache-user-groups ## CFG_CERN_SITE -- do we want to enable CERN-specific code, like the ## one that proposes links to famous HEP sites such as Spires and KEK? ## Put "1" for "yes" and "0" for "no". (example: "0") 0 ################################ ## Part 2: CDS page elements ## ################################ ## This part defines CDS portal-like page style and its elements. ## Here is a schematic overview of all the WML-configurable parts: ## ## +-----------------------------------------------------------------------------------------+ ## | CDSPAGEHEADER | ## | (cdspageheaderadd) | ## +-------------------------+------------------------------------+--------------------------+ ## | CDSPAGEBOXLEFTTOP | | CDSPAGEBOXRIGHTTOP | ## | (cdspageboxlefttopadd) | | (cdspageboxrighttopadd) | ## | | | | ## | | | | ## | | | | ## | | | | ## | | main page body | | ## | | | | ## | | | | ## | | | | ## | | | | ## | | | | ## | | | | ## |(cdspageboxleftbottomadd)| |(cdspageboxrightbottomadd)| ## | CDSPAGEBOXLEFTBOTTOM | | CDSPAGEBOXRIGHTBOTTOM | ## +-------------------------+------------------------------------+--------------------------+ ## | (cdspagefooteradd) | ## | CDSPAGEFOOTER | ## +-----------------------------------------------------------------------------------------+ ## ## Here, (i) the upper case elements like CDSPAGEHEADER are globally ## defined in this 'config.wml' file, see below. (ii) the lower case ## elements in parentheses like "(cdspageheaderadd)" are optional ## local add-ons that each WML page can define locally and pass to the ## global WML template as parameters. (iii) Note also that the style ## and colours of all these elements is defined in the cascading style ## sheet in the file 'htdocs/img/cds.css' that you can change at your ## will too. ## CFG_TEMPLATE_SKIN -- what template skin do you want to use? ## (example: "default") default ## CDSPAGEHEADER -- eventual global HTML page header: ## (example: "")
           
## CDSPAGEBOXLEFTTOP -- eventual global HTML left top box: ## (example: "") ## CDSPAGEBOXLEFTBOTTOM -- eventual global HTML left bottom box: ## (example: "") ## CDSPAGEBOXRIGHTTOP -- eventual global HTML right top box: ## (example: "") ## CDSPAGEBOXRIGHTBOTTOM -- eventual global HTML right bottom box: ## (example: "") ## CDSPAGEFOOTER -- eventual global HTML page footer: ## (example: "")
 ::  ::  ::  :: 
CDS Invenio v

################################
## Part 3: CDS navigation bar ##
################################

## The navigation bar and sub-bars are defined in a separate
## "cdsnavbar.wml" file. You may want to modify it now, if you
## really know what you are doing. :-)

##################################
## Part 4: WebSearch parameters ##
##################################

## This section contains some WML-based configuration parameters for
## the WebSearch module. Please note that WebSearch is mostly configured
## at runtime via its WebSearch Admin web interface. The parameters
## below are the ones that you probably do not want to modify very
## often during the runtime. (Note that you may modify them later as
## well, though.)

## CFG_SEARCH_CACHE_SIZE -- how many queries do we want to cache in
## memory per Apache httpd process? This cache is used mainly for
## "next/previous page" functionality, but it also caches "popular"
## user queries if more than one user happens to search for the same
## thing. Note that large numbers may lead to high memory
## consumption. We recommend a value not greater than 100.
## (example: "100")
100

## CFG_FIELDS_CONVERT -- if you migrate from an older system, you may
## want to map field codes of your old system (such as 'ti') to
## CDS Invenio/MySQL ones ("title"). Use Python dictionary syntax for
## the translation table, see the example below. Usually you don't
## want to do that, and would use the empty dict {}.
## (example: "{'wau':'author', 'wti':'title'}")
{}
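This table becomes CFG_WEBSEARCH_FIELDS_CONVERT in the generated Python config, and the query parser in search_engine.py (further down in this diff) applies it to every fielded search unit. A minimal sketch of that translation step, using the example mapping above; convert_field_code is a hypothetical helper:

CFG_WEBSEARCH_FIELDS_CONVERT = {'wau': 'author', 'wti': 'title'}

def convert_field_code(fi):
    # Map an old system's field code (e.g. 'wti') to its CDS Invenio
    # equivalent (e.g. 'title'); unknown codes pass through unchanged.
    fi = fi.lower()
    return CFG_WEBSEARCH_FIELDS_CONVERT.get(fi, fi)

# e.g. convert_field_code('WTI') returns 'title'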
## (example: "0") 0 ## CFG_NB_LATEST_ADDITIONS -- the number of records to display under ## 'Latest Additions' in the web collection pages ## (example: "10") 10 +## CFG_NB_LATEST_ADDITIONS_RSS -- the number of records to display in +## the RSS feed +## (example: "25") + +25 + + +## CFG_RSS_TIME_TO_LIVE -- number of minutes that indicates how long a +## feed cache is valid +## (example: "360") + +360 + + ## CFG_AUTHOR_ET_AL_THRESHOLD -- up to how many author names to print ## explicitely; for more print "et al". Note that this is used in ## default formatting that is seldomly used, as usually BibFormat ## defines all the format. The value below is only used when ## BibFormat fails, for example. ## (example: "3") 3 ## CFG_NARROW_SEARCH_SHOW_GRANDSONS -- whether to show or not ## collection grandsons in Narrow Search boxes (sons are shown ## by default, grandsons are configurable here). Use 0 for no ## and 1 for yes. ## (example: "0") 1 ## CFG_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX -- shall we create help ## links for Ellis, Nick or Ellis, Nicholas and friends when Ellis, N ## was searched for? Useful if you have one author stored in the ## database under several name formats, namely surname comma firstname ## and surname comma initial cataloging policy. Use 0 for no and 1 ## for yes. ## (example: "1") 1 ## CFG_WEBSEARCH_USE_JSMATH_FOR_FORMATS -- jsMath is a Javascript library ## that renders math (La)TeX formulas in the client browser. ## This parameter must contain a list of output format for which to apply ## jsMath rendering. If the list is empty, jsMath is disabled. ## Note: the system expect to find jsMath library under /var/www/jsMath. ## (example: "['hd', 'hb']") [] ####################################### ## Part 5: BibHarvest OAI parameters ## ####################################### ## This part defines parameters for the CDS Invenio OAI gateway. ## Useful if you are running CDS Invenio as OAI data provider. ## CFG_OAI_ID_SCHEME -- OAI identifier scheme: ## (example: "oai") oai ## CFG_OAI_ID_TAG -- OAI identifier tag: ## (example: "0248_a") 909COo ## CFG_OAI_SET_TAG -- OAI set tag: ## (example: "0248_p") 909COp ## CFG_OAI_DELETED_TAG -- OAI tag for deleted records mark. OAI record ## is considered deleted if the value of this field is set to ## "DELETED" and the OAI deleted records policy is set either to ## "transient" or "persistent". ## (example: "980__c") 980__c ## CFG_OAI_DELETED_POLICY -- OAI deletedrecordspolicy ## (example: no/transient/persistent) no ## CFG_OAI_ID_PREFIX -- OAI identifier prefix: ## (example: "cds.cern.ch") atlantis.cern.ch ## CFG_OAI_SAMPLE_IDENTIFIER -- OAI sample identifier: ## (example: "oai:cds.cern.ch:CERN-TH-4036") ::CERN-TH-4036 ## CFG_OAI_IDENTIFY_DESCRIPTION -- description for the OAI Identify verb (optional): ## (example:"") : http:/// Free and unlimited use by anybody with obligation to refer to original record Full content, i.e. preprints may not be harvested by robots Submission restricted. Submitted documents are subject of approval by OAI repository admins. ## CFG_OAI_LOAD -- OAI number of records in a response: ## (example: "1000") 1000 ## CFG_OAI_EXPIRE -- OAI resumptionToken expiration time: ## (example: "1000") 90000 ## CFG_OAI_SLEEP -- service unavailable between two consecutive ## requests for CFG_OAI_SLEEP seconds: ## (example: "10") 10 ################################## ## Part 6: WebSubmit parameters ## ################################## ## This section contains some WML-based configuration parameters for ## WebSubmit module. 
##################################
## Part 6: WebSubmit parameters ##
##################################

## This section contains some WML-based configuration parameters for
## the WebSubmit module. Please note that WebSubmit is mostly configured
## at runtime via its WebSubmit Admin web interface. The parameters
## below are the ones that you probably do not want to modify during
## the runtime.

## CFG_SUBMIT_COUNTER -- indicates where the counters used by websubmit
## are stored
/submit/counters

## CFG_SUBMIT_DIR -- this indicates where the websubmit system will
## keep each submission's running data
/submit/storage

#########################################
## Part 7: Fulltext Archive parameters ##
#########################################

## This section contains some WML-based configuration parameters for
## the fulltext archive.

## CFG_FILE_DIR -- this indicates where the fulltext files will be
## stored
/files

## CFG_FILE_DIR_SIZE -- all attached fulltext files are stored
## under the CFG_FILE_DIR directory, inside subdirectories called gX;
## this variable indicates the maximum number of files stored in each
## subdirectory
5000

##################################
## Part 8: BibFormat parameters ##
##################################

## This section contains some WML-based configuration parameters for
## the BibFormat module. Please note that BibFormat is mostly configured
## at runtime via its BibFormat Admin web interface. The parameters
## below are the ones that you probably do not want to modify very
## often during the runtime.

## CFG_BIBFORMAT_TIME_LIMIT -- the time limit of a BibFormat process,
## after which the task will terminate. This is useful to avoid
## possible runaways.
## (example: "1000")
1000

#################################
## Part 9: BibIndex parameters ##
#################################

## This section contains some WML-based configuration parameters for
## the BibIndex module. Please note that BibIndex is mostly configured
## at runtime via its BibIndex Admin web interface. The parameters
## below are the ones that you probably do not want to modify very
## often during the runtime.

## CFG_BIBINDEX_FULLTEXT_INDEX_LOCAL_FILES_ONLY -- when fulltext indexing, do
## you want to index locally stored files only, or also external URLs?
## Use "0" to say "no" and "1" to say "yes".
## (example: "0")
0

## CFG_BIBINDEX_STEMMER_DEFAULT_LANGUAGE -- when indexing, do you want to use
## stemming? If so, according to which language do we stem? Use '' for
## no stemming, 'en' for English, or one of the languages supported by
## Snowball's Stemmer: 'da', 'nl', 'en', 'fi', 'de', 'it', 'no', 'pt',
## 'ru', 'es', 'sv'.
## (example: 'en')

## CFG_BIBINDEX_DISABLE_STEMMING_FOR_INDEXES -- contains a list of index
## names for which applying stemming would lead to wrong behaviour,
## e.g. collection definitions would be wrong with truncated names, and
## it is semantically wrong to stem proper names, such as in the author
## index.
## (example: ["collection", "author", "year", "reference", "reportnumber"])
["collection", "author", "year", "reference", "reportnumber"]

## CFG_BIBINDEX_REMOVE_STOPWORDS -- when indexing, do we want to remove
## stopwords? Use "0" to say "no" and "1" to say "yes".
## (example: "0")
0

## CFG_BIBINDEX_PATH_TO_STOPWORDS_FILE -- path to the stopwords file. You
## probably don't want to change this path, although you may want to
## change the content of that file. Note that the file is used by the
## rank engine internally, so it should be given even if stopword
## removal in the indexes is not used.
/bibrank/stopwords.kb
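A sketch of how these stemming parameters plug together at indexing time. The stem function is the real one imported from invenio.bibindex_engine_stemmer elsewhere in this diff, but its (word, language) signature is assumed here, and the wrapper stem_term_for_index below is a hypothetical illustration:

from invenio.bibindex_engine_stemmer import stem
from invenio.config import CFG_BIBINDEX_STEMMER_DEFAULT_LANGUAGE, \
     CFG_BIBINDEX_DISABLE_STEMMING_FOR_INDEXES

def stem_term_for_index(index_name, term):
    # Stem 'term' in the configured default language, except for
    # indexes where stemming would break semantics (collection
    # definitions, author names, years, ...).
    if not CFG_BIBINDEX_STEMMER_DEFAULT_LANGUAGE:
        return term # stemming disabled site-wide
    if index_name in CFG_BIBINDEX_DISABLE_STEMMING_FOR_INDEXES:
        return term
    return stem(term, CFG_BIBINDEX_STEMMER_DEFAULT_LANGUAGE)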
## CFG_BIBINDEX_CHARS_ALPHANUMERIC_SEPARATORS -- characters considered as
## alphanumeric separators of word-blocks inside words. You probably
## don't want to change this.
## (example: "\!\"\#\$\%\&\'\(\)\*\+\,\-\.\/\:\;\<\=\>\?\@\[\\\]\^\_\`\{\|\}\~")
\!\"\#\$\%\&\'\(\)\*\+\,\-\.\/\:\;\<\=\>\?\@\[\\\]\^\_\`\{\|\}\~

## CFG_BIBINDEX_CHARS_PUNCTUATION -- characters considered as punctuation
## between word-blocks inside words. You probably don't want to
## change this.
## (example: "\.\,\:\;\?\!\"")
\.\,\:\;\?\!\"\(\)\'\`\<\>

## CFG_BIBINDEX_REMOVE_HTML_MARKUP -- should we attempt to remove HTML markup
## before indexing? Use 1 if you have HTML markup inside metadata
## (e.g. in abstracts), use 0 otherwise.
## (example: "0")
0

## CFG_BIBINDEX_REMOVE_LATEX_MARKUP -- should we attempt to remove LaTeX markup
## before indexing? Use 1 if you have LaTeX markup inside metadata
## (e.g. in abstracts), use 0 otherwise.
## (example: "0")
1

## CFG_BIBINDEX_MIN_WORD_LENGTH -- the minimum word length allowed to be
## added to the index. Terms shorter than this will be discarded.
## Useful to keep the database clean; however, you can safely leave
## this value at 0 for up to 1,000,000 documents.
## (example: "0")
0

## CFG_BIBINDEX_URLOPENER_USERNAME and CFG_BIBINDEX_URLOPENER_PASSWORD --
## access credentials for restricted URLs, interesting only if
## you are fulltext-indexing files located on a remote server that is
## only available via username/password. But it's probably better to
## handle this case via IP or some convention; the current scheme is
## mostly there for demo purposes only.
## (example: "mysuperuser")
mysuperuser
mysuperpass

## CFG_INTBITSET_ENABLE_SANITY_CHECKS --
## Enable sanity checks for integers passed to the intbitset data
## structures. It is good to enable this during debugging
## and to disable it for speed improvements.
False

########################################
## Part 10: Access control parameters ##
########################################

## This section contains some WML-based configuration parameters for
## the access control system. Please note that WebAccess is mostly
## configured at runtime via its WebAccess Admin web interface. The
## parameters below are the ones that you probably do not want to
## modify very often during the runtime. (If you do want to modify
## them during runtime, for example to deny access temporarily because
## of backups, you can edit access_control_config.py directly, no need
## to get back here and no need to redo the make process.)

## CFG_ACCESS_CONTROL_LEVEL_SITE -- defines how open this site is.
## Use 0 for normal operation of the site, 1 for a read-only site (all
## write operations temporarily closed), 2 for a fully closed site.
## Useful for site maintenance.
## (example: "0")
0

## CFG_ACCESS_CONTROL_LEVEL_GUESTS -- guest users access policy. Use
## 0 to allow guest users, 1 not to allow them (all users must login).
## (example: "0")
0

## CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS -- account registration and
## activation policy. When 0, users can register and accounts are
## automatically activated. When 1, users can register but the admin
## must activate the accounts. When 2, users cannot register nor update
## their email address; only the admin can register accounts. When 3,
## users cannot register nor update their email address nor password;
## only the admin can register accounts. When 4, the same as 3 applies,
## and the user cannot change the login method either.
## (example: "0")
0
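A sketch of the policy ladder encoded by CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS; both helpers are hypothetical illustrations, only the CFG_ name comes from the generated config:

from invenio.config import CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS

def may_self_register():
    # Levels 0 and 1 allow self-registration (level 1 additionally
    # requires the admin to activate the account); levels 2, 3 and 4
    # reserve account creation to the admin.
    return CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS in (0, 1)

def needs_admin_activation():
    return CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS == 1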
## (example: "cern.ch"): ## CFG_ACCESS_CONTROL_NOTIFY_ADMIN_ABOUT_NEW_ACCOUNTS -- send a ## notification email to the administrator when a new account is ## created? Use 0 for no, 1 for yes. ## (example: "0") 0 ## CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_NEW_ACCOUNT -- send a ## notification email to the user when a new account is created in order to ## to verify the validity of the provided email address? Use ## 0 for no, 1 for yes. ## (example: "0") 1 ## CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_ACTIVATION -- send a ## notification email to the user when a new account is activated? ## Use 0 for no, 1 for yes. ## (example: "0") 0 ## CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_DELETION -- send a ## notification email to the user when a new account is deleted or ## account demand rejected? Use 0 for no, 1 for yes. ## (example: "0") 0 ########################## ## THAT's ALL, FOLKS! ## ########################## ## And this is the end of "config.wml" WML configuration phase. Now ## please return to the main CDS source directory and type 'make'. ## (Note: if you have bravely edited the "cdsnavbar.wml" file too, ## then please do "make clean" before doing "make".) diff --git a/modules/miscutil/lib/config.py.wml b/modules/miscutil/lib/config.py.wml index 61ec7c6b5..d8fcdd843 100644 --- a/modules/miscutil/lib/config.py.wml +++ b/modules/miscutil/lib/config.py.wml @@ -1,211 +1,213 @@ ## $Id$ ## CDS Invenio config file, to be read by all Python programs. ## This file is part of CDS Invenio. ## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 CERN. ## ## CDS Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## CDS Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with CDS Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. ## read config variables: #include "config.wml" #include "configbis.wml" ## -*- coding: utf-8 -*- ## $Id$ ## DO NOT EDIT THIS FILE! IT WAS AUTOMATICALLY GENERATED FROM CDS Invenio WML SOURCES. 
"""CDS Invenio config file, to be read by all Python programs.""" __revision__ = "$Id$" ## fill all the generally-interesting config variables from WML: cdsname = "" cdslang = "" supportemail = "" adminemail = "" alertengineemail = "" CFG_PREFIX = "" webdir = "" weburl = "" sweburl = "" bindir = "" pylibdir = "/python" cachedir = "" logdir = "" tmpdir = "" etcdir = "" version = "" localedir = "" ## Languages available on this server: cdslangs = ['bg', 'ca', 'cs', 'de', 'el', 'en', 'es', 'fr', 'hr', 'it', 'ja', 'no', 'pl', 'pt', 'ru', 'sk', 'sv', 'uk', 'zh_CN', 'zh_TW',] ## intl versions of CDSNAME of this installation: cdsnameintl = {} cdsnameintl['bg'] = "" cdsnameintl['ca'] = "" cdsnameintl['cs'] = "" cdsnameintl['de'] = "" cdsnameintl['el'] = "" cdsnameintl['en'] = "" cdsnameintl['es'] = "" cdsnameintl['fr'] = "" cdsnameintl['it'] = "" cdsnameintl['ja'] = "" cdsnameintl['hr'] = "" cdsnameintl['no'] = "" cdsnameintl['pl'] = "" cdsnameintl['pt'] = "" cdsnameintl['ru'] = "" cdsnameintl['sk'] = "" cdsnameintl['sv'] = "" cdsnameintl['uk'] = "" cdsnameintl['zh_CN'] = "" cdsnameintl['zh_TW'] = "" ## helper programs: CFG_PATH_PHP = "" CFG_PATH_PDFTOTEXT = "" CFG_PATH_PSTOTEXT = "" CFG_PATH_PSTOASCII = "" CFG_PATH_ANTIWORD = "" CFG_PATH_CATDOC = "" CFG_PATH_WVTEXT = "" CFG_PATH_PPTHTML = "" CFG_PATH_XLHTML = "" CFG_PATH_HTMLTOTEXT = "" CFG_PATH_GFILE = "" CFG_PATH_GZIP = "" CFG_PATH_TAR = "" CFG_PATH_GUNZIP = "" CFG_PATH_ACROREAD = "" CFG_PATH_DISTILLER = "" CFG_PATH_CONVERT = "" ## Apache password/group files: CFG_APACHE_PASSWORD_FILE = "" CFG_APACHE_GROUP_FILE = "" ## are we running CERN specifics? CFG_CERN_SITE = ## for websubmit: images = "/img" urlpath = "" accessurl = "/search" counters = "" storage = "" filedir = "" filedirsize = xmlmarc2textmarc = "/xmlmarc2textmarc" bibupload = "/bibupload" bibformat = "/bibformat" bibwords = "/bibwords" bibconvert = "/bibconvert" bibconvertconf = "/bibconvert/config" htdocsurl = "" ## for search engine: CFG_MAX_CACHED_QUERIES = CFG_WEBSEARCH_INSTANT_BROWSE = +CFG_WEBSEARCH_INSTANT_BROWSE_RSS = +CFG_WEBSEARCH_RSS_TTL = CFG_WEBSEARCH_AUTHOR_ET_AL_THRESHOLD = CFG_WEBSEARCH_SEARCH_CACHE_SIZE = CFG_WEBSEARCH_NB_RECORDS_TO_SORT = CFG_WEBSEARCH_CALL_BIBFORMAT = CFG_WEBSEARCH_USE_ALEPH_SYSNOS = CFG_WEBSEARCH_FIELDS_CONVERT = CFG_WEBSEARCH_SIMPLESEARCH_PATTERN_BOX_WIDTH = # FIXME: not used? 
CFG_WEBSEARCH_ADVANCEDSEARCH_PATTERN_BOX_WIDTH = CFG_WEBSEARCH_NARROW_SEARCH_SHOW_GRANDSONS = CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX = CFG_WEBSEARCH_USE_JSMATH_FOR_FORMATS = ## for access control: CFG_ACCESS_CONTROL_LEVEL_SITE = CFG_ACCESS_CONTROL_LEVEL_GUESTS = CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS = CFG_ACCESS_CONTROL_LIMIT_REGISTRATION_TO_DOMAIN = "" CFG_ACCESS_CONTROL_NOTIFY_ADMIN_ABOUT_NEW_ACCOUNTS = CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_NEW_ACCOUNT = CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_ACTIVATION = CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_DELETION = ## for OAI repository: CFG_OAI_ID_PREFIX = "" CFG_OAI_SAMPLE_IDENTIFIER = "" CFG_OAI_IDENTIFY_DESCRIPTION = """""" CFG_OAI_ID_FIELD = "" CFG_OAI_SET_FIELD = "" CFG_OAI_DELETED_POLICY = "" CFG_OAI_EXPIRE = CFG_OAI_SLEEP = CFG_OAI_LOAD = ## for the indexer: CFG_BIBINDEX_FULLTEXT_INDEX_LOCAL_FILES_ONLY = CFG_BIBINDEX_STEMMER_DEFAULT_LANGUAGE = "" CFG_BIBINDEX_REMOVE_STOPWORDS = CFG_BIBINDEX_PATH_TO_STOPWORDS_FILE = "" CFG_BIBINDEX_CHARS_ALPHANUMERIC_SEPARATORS = r"[]" CFG_BIBINDEX_CHARS_PUNCTUATION = r"[]" CFG_BIBINDEX_REMOVE_HTML_MARKUP = CFG_BIBINDEX_REMOVE_LATEX_MARKUP = CFG_BIBINDEX_MIN_WORD_LENGTH = CFG_BIBINDEX_URLOPENER_USERNAME = "" # FIXME: not found in modules subdir?! CFG_BIBINDEX_URLOPENER_PASSWORD = "" # FIXME: not found in modules subdir?! CFG_BIBINDEX_DISABLE_STEMMING_FOR_INDEXES = CFG_INTBITSET_ENABLE_SANITY_CHECKS = ## for ranking: CFG_BIBRANK_SHOW_READING_STATS = 1 CFG_BIBRANK_SHOW_DOWNLOAD_STATS = 1 CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS = 1 CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS_CLIENT_IP_DISTRIBUTION = 0 CFG_BIBRANK_SHOW_CITATION_LINKS = 1 CFG_BIBRANK_SHOW_CITATION_STATS = 1 CFG_BIBRANK_SHOW_CITATION_GRAPHS = 1 ## for commenting: CFG_WEBCOMMENT_ALLOW_COMMENTS = 1 CFG_WEBCOMMENT_ALLOW_REVIEWS = 1 CFG_WEBCOMMENT_ALLOW_SHORT_REVIEWS = 0 CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN = 5 CFG_WEBCOMMENT_NB_COMMENTS_IN_DETAILED_VIEW = 1 CFG_WEBCOMMENT_NB_REVIEWS_IN_DETAILED_VIEW = 1 CFG_WEBCOMMENT_ADMIN_NOTIFICATION_LEVEL = 1 CFG_WEBCOMMENT_TIMELIMIT_PROCESSING_COMMENTS_IN_SECONDS = 20 CFG_WEBCOMMENT_TIMELIMIT_PROCESSING_REVIEWS_IN_SECONDS = 20 CFG_WEBCOMMENT_TIMELIMIT_VOTE_VALIDITY_IN_DAYS = 365 # FIXME: not found in modules subdir?! CFG_WEBCOMMENT_TIMELIMIT_REPORT_VALIDITY_IN_DAYS = 100 # FIXME: not found in modules subdir?! ## page and style elements: CFG_WEBSTYLE_TEMPLATE_SKIN = "" CFG_WEBSTYLE_CDSPAGEHEADER = """""" CFG_WEBSTYLE_CDSPAGEBOXLEFTTOP = """""" CFG_WEBSTYLE_CDSPAGEBOXLEFTBOTTOM = """""" CFG_WEBSTYLE_CDSPAGEBOXRIGHTTOP = """""" CFG_WEBSTYLE_CDSPAGEBOXRIGHTBOTTOM = """""" CFG_WEBSTYLE_CDSPAGEFOOTER = """""" # for task scheduler: CFG_BIBSCHED_REFRESHTIME = 5 # CFG_BIBSCHED_LOG_PAGER = "/bin/more" CFG_BIBSCHED_LOG_PAGER = None # will use the environment defined one # for db engine: CFG_MISCUTIL_USE_SQLALCHEMY = # for mail sending: CFG_MISCUTIL_SMTP_HOST = "" CFG_MISCUTIL_SMTP_PORT = diff --git a/modules/websearch/lib/search_engine.py b/modules/websearch/lib/search_engine.py index a201956bc..609122c30 100644 --- a/modules/websearch/lib/search_engine.py +++ b/modules/websearch/lib/search_engine.py @@ -1,3929 +1,3997 @@ # -*- coding: utf-8 -*- ## $Id$ ## This file is part of CDS Invenio. ## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 CERN. ## ## CDS Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. 
## ## CDS Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with CDS Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. # pylint: disable-msg=C0301 """CDS Invenio Search Engine in mod_python.""" __lastupdated__ = """$Date$""" __revision__ = "$Id$" ## import general modules: import cgi import copy import string import os import re import time import urllib import zlib ## import CDS Invenio stuff: from invenio.config import \ CFG_CERN_SITE, \ CFG_OAI_ID_FIELD, \ CFG_WEBCOMMENT_ALLOW_REVIEWS, \ CFG_WEBSEARCH_CALL_BIBFORMAT, \ CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX, \ CFG_WEBSEARCH_FIELDS_CONVERT, \ CFG_WEBSEARCH_NB_RECORDS_TO_SORT, \ CFG_WEBSEARCH_SEARCH_CACHE_SIZE, \ CFG_WEBSEARCH_USE_JSMATH_FOR_FORMATS, \ CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS, \ CFG_BIBINDEX_DISABLE_STEMMING_FOR_INDEXES, \ cdslang, \ cdsname, \ logdir, \ weburl from invenio.search_engine_config import CFG_EXPERIMENTAL_FEATURES, InvenioWebSearchUnknownCollectionError from invenio.bibrank_record_sorter import get_bibrank_methods, rank_records from invenio.bibrank_downloads_similarity import register_page_view_event, calculate_reading_similarity_list from invenio.bibindex_engine_stemmer import stem from invenio.bibformat import format_record, format_records, get_output_format_content_type, create_excel from invenio.bibformat_config import CFG_BIBFORMAT_USE_OLD_BIBFORMAT from invenio.bibrank_downloads_grapher import create_download_history_graph_and_box from invenio.data_cacher import DataCacher from invenio.websearch_external_collections import print_external_results_overview, perform_external_collection_search from invenio.access_control_admin import acc_get_action_id from invenio.access_control_config import VIEWRESTRCOLL from invenio.websearchadminlib import get_detailed_page_tabs from invenio.intbitset import intbitset as HitSet +from invenio.webinterface_handler import wash_urlargd +from invenio.urlutils import make_canonical_urlargd import invenio.template webstyle_templates = invenio.template.load('webstyle') webcomment_templates = invenio.template.load('webcomment') from invenio.bibrank_citation_searcher import calculate_cited_by_list, calculate_co_cited_with_list, get_self_cited_in, get_self_cited_by from invenio.bibrank_citation_grapher import create_citation_history_graph_and_box from invenio.dbquery import run_sql, run_sql_cached, get_table_update_time, Error try: from mod_python import apache from invenio.webuser import getUid from invenio.webpage import page, pageheaderonly, pagefooteronly, create_error_box except ImportError, e: pass # ignore user personalisation, needed e.g. for command-line from invenio.messages import gettext_set_language try: import invenio.template websearch_templates = invenio.template.load('websearch') except: pass ## global vars: search_cache = {} # will cache results of previous searches cfg_nb_browse_seen_records = 100 # limit of the number of records to check when browsing certain collection cfg_nicely_ordered_collection_list = 0 # do we propose collection list nicely ordered or alphabetical? 
collection_reclist_cache_timestamp = 0 field_i18nname_cache_timestamp = 0 collection_i18nname_cache_timestamp = 0 ## precompile some often-used regexp for speed reasons: re_word = re.compile('[\s]') re_quotes = re.compile('[\'\"]') re_doublequote = re.compile('\"') re_equal = re.compile('\=') re_logical_and = re.compile('\sand\s', re.I) re_logical_or = re.compile('\sor\s', re.I) re_logical_not = re.compile('\snot\s', re.I) re_operators = re.compile(r'\s([\+\-\|])\s') re_pattern_wildcards_at_beginning = re.compile(r'(\s)[\*\%]+') re_pattern_single_quotes = re.compile("'(.*?)'") re_pattern_double_quotes = re.compile("\"(.*?)\"") re_pattern_regexp_quotes = re.compile("\/(.*?)\/") re_pattern_short_words = re.compile(r'([\s\"]\w{1,3})[\*\%]+') re_pattern_space = re.compile("__SPACE__") re_pattern_today = re.compile("\$TODAY\$") re_unicode_lowercase_a = re.compile(unicode(r"(?u)[áàäâãå]", "utf-8")) re_unicode_lowercase_ae = re.compile(unicode(r"(?u)[æ]", "utf-8")) re_unicode_lowercase_e = re.compile(unicode(r"(?u)[éèëê]", "utf-8")) re_unicode_lowercase_i = re.compile(unicode(r"(?u)[íìïî]", "utf-8")) re_unicode_lowercase_o = re.compile(unicode(r"(?u)[óòöôõø]", "utf-8")) re_unicode_lowercase_u = re.compile(unicode(r"(?u)[úùüû]", "utf-8")) re_unicode_lowercase_y = re.compile(unicode(r"(?u)[ýÿ]", "utf-8")) re_unicode_lowercase_c = re.compile(unicode(r"(?u)[çć]", "utf-8")) re_unicode_lowercase_n = re.compile(unicode(r"(?u)[ñ]", "utf-8")) re_unicode_uppercase_a = re.compile(unicode(r"(?u)[ÁÀÄÂÃÅ]", "utf-8")) re_unicode_uppercase_ae = re.compile(unicode(r"(?u)[Æ]", "utf-8")) re_unicode_uppercase_e = re.compile(unicode(r"(?u)[ÉÈËÊ]", "utf-8")) re_unicode_uppercase_i = re.compile(unicode(r"(?u)[ÍÌÏÎ]", "utf-8")) re_unicode_uppercase_o = re.compile(unicode(r"(?u)[ÓÒÖÔÕØ]", "utf-8")) re_unicode_uppercase_u = re.compile(unicode(r"(?u)[ÚÙÜÛ]", "utf-8")) re_unicode_uppercase_y = re.compile(unicode(r"(?u)[Ý]", "utf-8")) re_unicode_uppercase_c = re.compile(unicode(r"(?u)[ÇĆ]", "utf-8")) re_unicode_uppercase_n = re.compile(unicode(r"(?u)[Ñ]", "utf-8")) re_latex_lowercase_a = re.compile("\\\\[\"H'`~^vu=k]\{?a\}?") re_latex_lowercase_ae = re.compile("\\\\ae\\{\\}?") re_latex_lowercase_e = re.compile("\\\\[\"H'`~^vu=k]\\{?e\\}?") re_latex_lowercase_i = re.compile("\\\\[\"H'`~^vu=k]\\{?i\\}?") re_latex_lowercase_o = re.compile("\\\\[\"H'`~^vu=k]\\{?o\\}?") re_latex_lowercase_u = re.compile("\\\\[\"H'`~^vu=k]\\{?u\\}?") re_latex_lowercase_y = re.compile("\\\\[\"']\\{?y\\}?") re_latex_lowercase_c = re.compile("\\\\['uc]\\{?c\\}?") re_latex_lowercase_n = re.compile("\\\\[c'~^vu]\\{?n\\}?") re_latex_uppercase_a = re.compile("\\\\[\"H'`~^vu=k]\\{?A\\}?") re_latex_uppercase_ae = re.compile("\\\\AE\\{?\\}?") re_latex_uppercase_e = re.compile("\\\\[\"H'`~^vu=k]\\{?E\\}?") re_latex_uppercase_i = re.compile("\\\\[\"H'`~^vu=k]\\{?I\\}?") re_latex_uppercase_o = re.compile("\\\\[\"H'`~^vu=k]\\{?O\\}?") re_latex_uppercase_u = re.compile("\\\\[\"H'`~^vu=k]\\{?U\\}?") re_latex_uppercase_y = re.compile("\\\\[\"']\\{?Y\\}?") re_latex_uppercase_c = re.compile("\\\\['uc]\\{?C\\}?") re_latex_uppercase_n = re.compile("\\\\[c'~^vu]\\{?N\\}?") class RestrictedCollectionDataCacher(DataCacher): def __init__(self): def cache_filler(): ret = [] try: viewcollid = acc_get_action_id(VIEWRESTRCOLL) res = run_sql("""SELECT DISTINCT ar.value FROM accROLE_accACTION_accARGUMENT raa JOIN accARGUMENT ar ON raa.id_accARGUMENT = ar.id WHERE ar.keyword = 'collection' AND raa.id_accACTION = %s""", (viewcollid,)) except Exception: # database problems, return 
empty cache return [] for coll in res: ret.append(coll[0]) return ret def timestamp_getter(): return max(get_table_update_time('accROLE_accACTION_accARGUMENT'), get_table_update_time('accARGUMENT')) DataCacher.__init__(self, cache_filler, timestamp_getter) def collection_restricted_p(collection): cache = restricted_collection_cache.get_cache() return collection in cache try: restricted_collection_cache.is_ok_p except Exception: restricted_collection_cache = RestrictedCollectionDataCacher() class FieldI18nNameDataCacher(DataCacher): def __init__(self): def cache_filler(): ret = {} try: res = run_sql("SELECT f.name,fn.ln,fn.value FROM fieldname AS fn, field AS f WHERE fn.id_field=f.id AND fn.type='ln'") # ln=long name except Exception: # database problems, return empty cache return {} for f, ln, i18nname in res: if i18nname: if not ret.has_key(f): ret[f] = {} ret[f][ln] = i18nname return ret def timestamp_getter(): return get_table_update_time('fieldname') DataCacher.__init__(self, cache_filler, timestamp_getter) def get_field_i18nname(self, f, ln=cdslang): out = f try: out = self.get_cache()[f][ln] except KeyError: pass # translation in LN does not exist return out try: if not field_i18n_name_cache.is_ok_p: raise Exception except Exception: field_i18n_name_cache = FieldI18nNameDataCacher() class CollectionRecListDataCacher(DataCacher): def __init__(self): def cache_filler(): ret = {} try: res = run_sql("SELECT name,reclist FROM collection") except Exception: # database problems, return empty cache return {} for name, reclist in res: ret[name] = None # this will be filled later during runtime by calling get_collection_reclist(coll) return ret def timestamp_getter(): return get_table_update_time('collection') DataCacher.__init__(self, cache_filler, timestamp_getter) def get_collection_reclist(self, coll): cache = self.get_cache() if not cache[coll]: # not yet it the cache, so calculate it and fill the cache: set = HitSet() query = "SELECT nbrecs,reclist FROM collection WHERE name='%s'" % coll res = run_sql(query, None, 1) if res: try: set = HitSet(res[0][1]) except: pass self.cache[coll] = set cache[coll] = set # finally, return reclist: return cache[coll] try: if not collection_reclist_cache.is_ok_p: raise Exception except Exception: collection_reclist_cache = CollectionRecListDataCacher() class CollectionI18nDataCacher(DataCacher): def __init__(self): def cache_filler(): ret = {} try: res = run_sql("SELECT c.name,cn.ln,cn.value FROM collectionname AS cn, collection AS c WHERE cn.id_collection=c.id AND cn.type='ln'") # ln=long name except Exception: # database problems, return {} for c, ln, i18nname in res: if i18nname: if not ret.has_key(c): ret[c] = {} ret[c][ln] = i18nname return ret def timestamp_getter(): return get_table_update_time('collectionname') DataCacher.__init__(self, cache_filler, timestamp_getter) def get_coll_i18nname(self, c, ln=cdslang): """Return nicely formatted collection name (of name type 'ln', 'long name') for collection C in language LN.""" cache = self.get_cache() out = c try: out = cache[c][ln] except KeyError: pass # translation in LN does not exist return out try: if not collection_i18n_name_cache.is_ok_p: raise Exception except Exception: collection_i18n_name_cache = CollectionI18nDataCacher() def get_alphabetically_ordered_collection_list(level=0, ln=cdslang): """Returns nicely ordered (score respected) list of collections, more exactly list of tuples (collection name, printable collection name). 
Suitable for create_search_box().""" out = [] query = "SELECT id,name FROM collection ORDER BY name ASC" res = run_sql(query) for c_id, c_name in res: # make a nice printable name (e.g. truncate c_printable for # long collection names in given language): c_printable = get_coll_i18nname(c_name, ln) if len(c_printable)>30: c_printable = c_printable[:30] + "..." if level: c_printable = " " + level * '-' + " " + c_printable out.append([c_name, c_printable]) return out def get_nicely_ordered_collection_list(collid=1, level=0, ln=cdslang): """Returns nicely ordered (score respected) list of collections, more exactly list of tuples (collection name, printable collection name). Suitable for create_search_box().""" colls_nicely_ordered = [] query = "SELECT c.name,cc.id_son FROM collection_collection AS cc, collection AS c "\ " WHERE c.id=cc.id_son AND cc.id_dad='%s' ORDER BY score DESC" % collid res = run_sql(query) for c, cid in res: # make a nice printable name (e.g. truncate c_printable for # long collection names in given language): c_printable = get_coll_i18nname(c, ln) if len(c_printable)>30: c_printable = c_printable[:30] + "..." if level: c_printable = " " + level * '-' + " " + c_printable colls_nicely_ordered.append([c, c_printable]) colls_nicely_ordered = colls_nicely_ordered + get_nicely_ordered_collection_list(cid, level+1, ln=ln) return colls_nicely_ordered def get_index_id(field): """Returns first index id where the field code FIELD is indexed. Returns zero in case there is no table for this index. Example: field='author', output=4.""" out = 0 res = run_sql("""SELECT w.id FROM idxINDEX AS w, idxINDEX_field AS wf, field AS f WHERE f.code=%s AND wf.id_field=f.id AND w.id=wf.id_idxINDEX LIMIT 1""", (field,)) if res: out = res[0][0] return out def get_words_from_pattern(pattern): "Returns list of whitespace-separated words from pattern." words = {} for word in string.split(pattern): if not words.has_key(word): words[word] = 1; return words.keys() def create_basic_search_units(req, p, f, m=None, of='hb'): """Splits search pattern and search field into a list of independently searchable units. - A search unit consists of '(operator, pattern, field, type, hitset)' tuples where 'operator' is set union (|), set intersection (+) or set exclusion (-); 'pattern' is either a word (e.g. muon*) or a phrase (e.g. 'nuclear physics'); 'field' is either a code like 'title' or MARC tag like '100__a'; 'type' is the search type ('w' for word file search, 'a' for access file search). - Optionally, the function accepts the match type argument 'm'. If it is set (e.g. from advanced search interface), then it performs this kind of matching. If it is not set, then a guess is made. 'm' can have values: 'a'='all of the words', 'o'='any of the words', 'p'='phrase/substring', 'r'='regular expression', 'e'='exact value'. - Warnings are printed on req (when not None) in case of HTML output formats.""" opfts = [] # will hold (o,p,f,t,h) units ## check arguments: if matching type phrase/string/regexp, do we have field defined? if (m=='p' or m=='r' or m=='e') and not f: m = 'a' if of.startswith("h"): print_warning(req, "This matching type cannot be used within any field. I will perform a word search instead." ) print_warning(req, "If you want to phrase/substring/regexp search in a specific field, e.g. inside title, then please choose within title search option.") ## is desired matching type set? if m: ## A - matching type is known; good! 
if m == 'e': # A1 - exact value: opfts.append(['+', p, f, 'a']) # '+' since we have only one unit elif m == 'p': # A2 - phrase/substring: opfts.append(['+', "%" + p + "%", f, 'a']) # '+' since we have only one unit elif m == 'r': # A3 - regular expression: opfts.append(['+', p, f, 'r']) # '+' since we have only one unit elif m == 'a' or m == 'w': # A4 - all of the words: p = strip_accents(p) # strip accents for 'w' mode, FIXME: delete when not needed for word in get_words_from_pattern(p): opfts.append(['+', word, f, 'w']) # '+' in all units elif m == 'o': # A5 - any of the words: p = strip_accents(p) # strip accents for 'w' mode, FIXME: delete when not needed for word in get_words_from_pattern(p): if len(opfts)==0: opfts.append(['+', word, f, 'w']) # '+' in the first unit else: opfts.append(['|', word, f, 'w']) # '|' in further units else: if of.startswith("h"): print_warning(req, "Matching type '%s' is not implemented yet." % m, "Warning") opfts.append(['+', "%" + p + "%", f, 'a']) else: ## B - matching type is not known: let us try to determine it by some heuristics if f and p[0] == '"' and p[-1] == '"': ## B0 - does 'p' start and end by double quote, and is 'f' defined? => doing ACC search opfts.append(['+', p[1:-1], f, 'a']) elif f and p[0] == "'" and p[-1] == "'": ## B0bis - does 'p' start and end by single quote, and is 'f' defined? => doing ACC search opfts.append(['+', '%' + p[1:-1] + '%', f, 'a']) elif f and p[0] == "/" and p[-1] == "/": ## B0ter - does 'p' start and end by a slash, and is 'f' defined? => doing regexp search opfts.append(['+', p[1:-1], f, 'r']) elif f and string.find(p, ',') >= 0: ## B1 - does 'p' contain comma, and is 'f' defined? => doing ACC search opfts.append(['+', p, f, 'a']) elif f and str(f[0:2]).isdigit(): ## B2 - does 'f' exist and starts by two digits? => doing ACC search opfts.append(['+', p, f, 'a']) else: ## B3 - doing WRD search, but maybe ACC too # search units are separated by spaces unless the space is within single or double quotes # so, let us replace temporarily any space within quotes by '__SPACE__' p = re_pattern_single_quotes.sub(lambda x: "'"+string.replace(x.group(1), ' ', '__SPACE__')+"'", p) p = re_pattern_double_quotes.sub(lambda x: "\""+string.replace(x.group(1), ' ', '__SPACE__')+"\"", p) p = re_pattern_regexp_quotes.sub(lambda x: "/"+string.replace(x.group(1), ' ', '__SPACE__')+"/", p) # wash argument: p = re_equal.sub(":", p) p = re_logical_and.sub(" ", p) p = re_logical_or.sub(" |", p) p = re_logical_not.sub(" -", p) p = re_operators.sub(r' \1', p) for pi in string.split(p): # iterate through separated units (or items, as "pi" stands for "p item") pi = re_pattern_space.sub(" ", pi) # replace back '__SPACE__' by ' ' # firstly, determine set operator if pi[0] == '+' or pi[0] == '-' or pi[0] == '|': oi = pi[0] pi = pi[1:] else: # okay, there is no operator, so let us decide what to do by default oi = '+' # by default we are doing set intersection... 
                # secondly, determine search pattern and field:
                if string.find(pi, ":") > 0:
                    fi, pi = string.split(pi, ":", 1)
                else:
                    fi, pi = f, pi
                # look also for old ALEPH field names:
                if fi and CFG_WEBSEARCH_FIELDS_CONVERT.has_key(string.lower(fi)):
                    fi = CFG_WEBSEARCH_FIELDS_CONVERT[string.lower(fi)]
                # wash 'pi' argument:
                if re_quotes.match(pi):
                    # B3a - quotes are found => do ACC search (phrase search)
                    if fi:
                        if pi[0] == '"' and pi[-1] == '"':
                            pi = string.replace(pi, '"', '') # remove quote signs
                            opfts.append([oi, pi, fi, 'a'])
                        elif pi[0] == "'" and pi[-1] == "'":
                            pi = string.replace(pi, "'", "") # remove quote signs
                            opfts.append([oi, "%" + pi + "%", fi, 'a'])
                        else: # unbalanced quotes, so do WRD query:
                            opfts.append([oi, pi, fi, 'w'])
                    else:
                        # fi is not defined, check whether we are doing exact or subphrase search (single/double quotes):
                        if pi[0] == '"' and pi[-1] == '"':
                            opfts.append([oi, pi[1:-1], "anyfield", 'a'])
                            if of.startswith("h"):
                                print_warning(req, "Searching for an exact match inside any field may be slow. You may want to search for words instead, or choose to search within specific field.")
                        else:
                            # nope, subphrase in global index is not possible => change back to WRD search
                            pi = strip_accents(pi) # strip accents for 'w' mode, FIXME: delete when not needed
                            for pii in get_words_from_pattern(pi):
                                # since there may be '-' and other chars that we do not index in WRD
                                opfts.append([oi, pii, fi, 'w'])
                            if of.startswith("h"):
                                print_warning(req, "The partial phrase search does not work in any field. I'll do a boolean AND searching instead.")
                                print_warning(req, "If you want to do a partial phrase search in a specific field, e.g. inside title, then please choose 'within title' search option.", "Tip")
                                print_warning(req, "If you want to do exact phrase matching, then please use double quotes.", "Tip")
                elif fi and str(fi[0:2]).isdigit(): # B3b - fi exists and starts with two digits => do ACC search
                    opfts.append([oi, pi, fi, 'a'])
                elif fi and not get_index_id(fi): # B3c - fi exists but there is no words table for fi => try ACC search
                    opfts.append([oi, pi, fi, 'a'])
                elif fi and pi.startswith('/') and pi.endswith('/'): # B3d - fi exists and slashes found => try regexp search
                    opfts.append([oi, pi[1:-1], fi, 'r'])
                else: # B3e - general case => do WRD search
                    pi = strip_accents(pi) # strip accents for 'w' mode, FIXME: delete when not needed
                    for pii in get_words_from_pattern(pi):
                        opfts.append([oi, pii, fi, 'w'])

    ## sanity check:
    for i in range(0, len(opfts)):
        try:
            pi = opfts[i][1]
            if pi == '*':
                if of.startswith("h"):
                    print_warning(req, "Ignoring standalone wildcard word.", "Warning")
                del opfts[i]
            if pi == '' or pi == ' ':
                fi = opfts[i][2]
                if fi:
                    if of.startswith("h"):
                        print_warning(req, "Ignoring empty %s search term." % fi, "Warning")
                del opfts[i]
        except:
            pass

    ## return search units:
    return opfts

def page_start(req, of, cc, as, ln, uid, title_message=None, description='', keywords='', recID=-1, tab=''):
    "Start page according to given output format."
_ = gettext_set_language(ln) if not title_message: title_message = _("Search Results") if not req: return # we were called from CLI content_type = get_output_format_content_type(of) if of.startswith('x'): if of == 'xr': # we are doing RSS output req.content_type = "application/rss+xml" req.send_http_header() req.write("""\n""") else: # we are doing XML output: req.content_type = "text/xml" req.send_http_header() req.write("""\n""") elif of.startswith('t') or str(of[0:3]).isdigit(): # we are doing plain text output: req.content_type = "text/plain" req.send_http_header() elif of == "id": pass # nothing to do, we shall only return list of recIDs elif content_type == 'text/html': # we are doing HTML output: req.content_type = "text/html" req.send_http_header() if not description: description = "%s %s." % (cc, _("Search Results")) if not keywords: keywords = "%s, WebSearch, %s" % (get_coll_i18nname(cdsname, ln), get_coll_i18nname(cc, ln)) + rssurl = websearch_templates.build_rss_url(cgi.parse_qs(req.args)) + navtrail = create_navtrail_links(cc, as, ln) navtrail_append_title_p = 1 # FIXME: Find a good point to put this code. # This is a nice hack to trigger jsMath only when displaying single # records. if of.lower() in CFG_WEBSEARCH_USE_JSMATH_FOR_FORMATS: metaheaderadd = """ """ else: metaheaderadd = '' if tab != '' or ((of != '' or of.lower() != 'hd') and of != 'hb'): # If we are not in information tab in HD format, customize # the nav. trail to have a link back to main record. (Due # to the way perform_request_search() works, hb # (lowercase) is equal to hd) if (of != '' or of.lower() != 'hd') and of != 'hb': # Export format_name = of query = "SELECT name FROM format WHERE code=%s" res = run_sql(query, (of,)) if res: format_name = res[0][0] navtrail += ' > %s > %s' % \ (weburl, recID, title_message, format_name) else: # Discussion, statistics, etc. tabs tab_label = get_detailed_page_tabs(cc, ln=ln)[tab]['label'] navtrail += ' > %s > %s' % \ (weburl, recID, title_message, _(tab_label)) navtrail_append_title_p = 0 req.write(pageheaderonly(req=req, title=title_message, navtrail=navtrail, description=description, keywords=keywords, metaheaderadd=metaheaderadd, uid=uid, language=ln, navmenuid='search', navtrail_append_title_p=\ - navtrail_append_title_p)) + navtrail_append_title_p, + rssurl=rssurl)) req.write(websearch_templates.tmpl_search_pagestart(ln=ln)) #else: # req.send_http_header() def page_end(req, of="hb", ln=cdslang): "End page according to given output format: e.g. close XML tags, add HTML footer, etc." if of == "id": return [] # empty recID list if not req: return # we were called from CLI if of.startswith('h'): req.write(websearch_templates.tmpl_search_pageend(ln = ln)) # pagebody end req.write(pagefooteronly(lastupdated=__lastupdated__, language=ln, req=req)) return "\n" def create_inputdate_box(name="d1", selected_year=0, selected_month=0, selected_day=0, ln=cdslang): "Produces 'From Date', 'Until Date' kind of selection box. Suitable for search options." 
_ = gettext_set_language(ln) box = "" # day box += """""" # month box += """""" # year box += """""" return box def create_search_box(cc, colls, p, f, rg, sf, so, sp, rm, of, ot, as, ln, p1, f1, m1, op1, p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action=""): """Create search box for 'search again in the results page' functionality.""" # load the right message language _ = gettext_set_language(ln) # some computations cc_intl = get_coll_i18nname(cc, ln) cc_colID = get_colID(cc) colls_nicely_ordered = [] if cfg_nicely_ordered_collection_list: colls_nicely_ordered = get_nicely_ordered_collection_list(ln=ln) else: colls_nicely_ordered = get_alphabetically_ordered_collection_list(ln=ln) colls_nice = [] for (cx, cx_printable) in colls_nicely_ordered: if not cx.startswith("Unnamed collection"): colls_nice.append({ 'value' : cx, 'text' : cx_printable }) coll_selects = [] if colls and colls[0] != cdsname: # some collections are defined, so print these first, and only then print 'add another collection' heading: for c in colls: if c: temp = [] temp.append({ 'value' : '', 'text' : '*** %s ***' % _("any collection") }) for val in colls_nice: # print collection: if not cx.startswith("Unnamed collection"): temp.append({ 'value' : val['value'], 'text' : val['text'], 'selected' : (c == re.sub("^[\s\-]*","", val['value'])) }) coll_selects.append(temp) coll_selects.append([{ 'value' : '', 'text' : '*** %s ***' % _("add another collection") }] + colls_nice) else: # we searched in CDSNAME, so print 'any collection' heading coll_selects.append([{ 'value' : '', 'text' : '*** %s ***' % _("any collection") }] + colls_nice) sort_fields = [{ 'value' : '', 'text' : _("latest first") }] query = """SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff WHERE cff.type='soo' AND cff.id_field=f.id ORDER BY cff.score DESC, f.name ASC""" res = run_sql(query) for code, name in res: sort_fields.append({ 'value' : code, 'text' : name, }) ## ranking methods ranks = [{ 'value' : '', 'text' : "- %s %s -" % (_("OR").lower (), _("rank by")), }] for (code, name) in get_bibrank_methods(cc_colID, ln): # propose found rank methods: ranks.append({ 'value' : code, 'text' : name, }) formats = [] query = """SELECT code,name FROM format WHERE visibility='1' ORDER BY name ASC""" res = run_sql(query) if res: # propose found formats: for code, name in res: formats.append({ 'value' : code, 'text' : name }) else: formats.append({'value' : 'hb', 'text' : _("HTML brief") }) return websearch_templates.tmpl_search_box( ln = ln, as = as, cc_intl = cc_intl, cc = cc, ot = ot, sp = sp, action = action, fieldslist = get_searchwithin_fields(ln=ln, colID=cc_colID), f1 = f1, f2 = f2, f3 = f3, m1 = m1, m2 = m2, m3 = m3, p1 = p1, p2 = p2, p3 = p3, op1 = op1, op2 = op2, rm = rm, p = p, f = f, coll_selects = coll_selects, d1y = d1y, d2y = d2y, d1m = d1m, d2m = d2m, d1d = d1d, d2d = d2d, dt = dt, sort_fields = sort_fields, sf = sf, so = so, ranks = ranks, sc = sc, rg = rg, formats = formats, of = of, pl = pl, jrec = jrec, ec = ec, ) def create_navtrail_links(cc=cdsname, as=0, ln=cdslang, self_p=1, tab=''): """Creates navigation trail links, i.e. links to collection ancestors (except Home collection). If as==1, then links to Advanced Search interfaces; otherwise Simple Search. 
""" dads = [] for dad in get_coll_ancestors(cc): if dad != cdsname: # exclude Home collection dads.append ((dad, get_coll_i18nname (dad, ln))) if self_p and cc != cdsname: dads.append((cc, get_coll_i18nname(cc, ln))) return websearch_templates.tmpl_navtrail_links( as=as, ln=ln, dads=dads) def get_searchwithin_fields(ln='en', colID=None): """Retrieves the fields name used in the 'search within' selection box for the collection ID colID.""" res = None if colID: res = run_sql_cached("""SELECT f.code,f.name FROM field AS f, collection_field_fieldvalue AS cff WHERE cff.type='sew' AND cff.id_collection=%s AND cff.id_field=f.id ORDER BY cff.score DESC, f.name ASC""", (colID,)) if not res: res = run_sql_cached("SELECT code,name FROM field ORDER BY name ASC") fields = [{ 'value' : '', 'text' : get_field_i18nname("any field", ln) }] for field_code, field_name in res: if field_code and field_code != "anyfield": fields.append({ 'value' : field_code, 'text' : get_field_i18nname(field_name, ln) }) return fields def create_andornot_box(name='op', value='', ln='en'): "Returns HTML code for the AND/OR/NOT selection box." _ = gettext_set_language(ln) out = """ """ % (name, is_selected('a', value), _("AND"), is_selected('o', value), _("OR"), is_selected('n', value), _("AND NOT")) return out def create_matchtype_box(name='m', value='', ln='en'): "Returns HTML code for the 'match type' selection box." _ = gettext_set_language(ln) out = """ """ % (name, is_selected('a', value), _("All of the words:"), is_selected('o', value), _("Any of the words:"), is_selected('e', value), _("Exact phrase:"), is_selected('p', value), _("Partial phrase:"), is_selected('r', value), _("Regular expression:")) return out def is_selected(var, fld): "Checks if the two are equal, and if yes, returns ' selected'. Useful for select boxes." if type(var) is int and type(fld) is int: if var == fld: return " selected" elif str(var) == str(fld): return " selected" elif fld and len(fld)==3 and fld[0] == "w" and var == fld[1:]: return " selected" return "" def wash_colls(cc, c, split_colls=0): """Wash collection list by checking whether user has deselected anything under 'Narrow search'. Checks also if cc is a list or not. Return list of cc, colls_to_display, colls_to_search since the list of collections to display is different from that to search in. This is because users might have chosen 'split by collection' functionality. The behaviour of "collections to display" depends solely whether user has deselected a particular collection: e.g. if it started from 'Articles and Preprints' page, and deselected 'Preprints', then collection to display is 'Articles'. If he did not deselect anything, then collection to display is 'Articles & Preprints'. The behaviour of "collections to search in" depends on the 'split_colls' parameter: * if is equal to 1, then we can wash the colls list down and search solely in the collection the user started from; * if is equal to 0, then we are splitting to the first level of collections, i.e. collections as they appear on the page we started to search from; The function raises exception InvenioWebSearchUnknownCollectionError if cc or one of c collections is not known. 
""" colls_out = [] colls_out_for_display = [] # check what type is 'cc': if type(cc) is list: for ci in cc: if collection_reclist_cache.has_key(ci): # yes this collection is real, so use it: cc = ci break else: # check once if cc is real: if not collection_reclist_cache.has_key(cc): if cc: raise InvenioWebSearchUnknownCollectionError(cc) else: cc = cdsname # cc is not set, so replace it with Home collection # check type of 'c' argument: if type(c) is list: colls = c else: colls = [c] # remove all 'unreal' collections: colls_real = [] for coll in colls: if collection_reclist_cache.has_key(coll): colls_real.append(coll) else: if coll: raise InvenioWebSearchUnknownCollectionError(coll) colls = colls_real # check if some real collections remain: if len(colls)==0: colls = [cc] # then let us check the list of non-restricted "real" sons of 'cc' and compare it to 'coll': res = run_sql("""SELECT c.name FROM collection AS c, collection_collection AS cc, collection AS ccc WHERE c.id=cc.id_son AND cc.id_dad=ccc.id AND ccc.name=%s AND cc.type='r' AND c.restricted IS NULL""", (cc,)) l_cc_nonrestricted_sons = [] l_c = colls for row in res: l_cc_nonrestricted_sons.append(row[0]) l_c.sort() l_cc_nonrestricted_sons.sort() if l_cc_nonrestricted_sons == l_c: colls_out_for_display = [cc] # yep, washing permitted, it is sufficient to display 'cc' else: colls_out_for_display = colls # nope, we need to display all 'colls' successively # remove duplicates: colls_out_for_display_nondups=filter(lambda x, colls_out_for_display=colls_out_for_display: colls_out_for_display[x-1] not in colls_out_for_display[x:], range(1, len(colls_out_for_display)+1)) colls_out_for_display = map(lambda x, colls_out_for_display=colls_out_for_display:colls_out_for_display[x-1], colls_out_for_display_nondups) # second, let us decide on collection splitting: if split_colls == 0: # type A - no sons are wanted colls_out = colls_out_for_display # elif split_colls == 1: else: # type B - sons (first-level descendants) are wanted for coll in colls_out_for_display: coll_sons = get_coll_sons(coll) if coll_sons == []: colls_out.append(coll) else: colls_out = colls_out + coll_sons # remove duplicates: colls_out_nondups=filter(lambda x, colls_out=colls_out: colls_out[x-1] not in colls_out[x:], range(1, len(colls_out)+1)) colls_out = map(lambda x, colls_out=colls_out:colls_out[x-1], colls_out_nondups) return (cc, colls_out_for_display, colls_out) def strip_accents(x): """Strip accents in the input phrase X (assumed in UTF-8) by replacing accented characters with their unaccented cousins (e.g. é by e). 
    Return such a stripped X."""
    x = re_latex_lowercase_a.sub("a", x)
    x = re_latex_lowercase_ae.sub("ae", x)
    x = re_latex_lowercase_e.sub("e", x)
    x = re_latex_lowercase_i.sub("i", x)
    x = re_latex_lowercase_o.sub("o", x)
    x = re_latex_lowercase_u.sub("u", x)
    x = re_latex_lowercase_y.sub("y", x)
    x = re_latex_lowercase_c.sub("c", x)
    x = re_latex_lowercase_n.sub("n", x)
    x = re_latex_uppercase_a.sub("A", x)
    x = re_latex_uppercase_ae.sub("AE", x)
    x = re_latex_uppercase_e.sub("E", x)
    x = re_latex_uppercase_i.sub("I", x)
    x = re_latex_uppercase_o.sub("O", x)
    x = re_latex_uppercase_u.sub("U", x)
    x = re_latex_uppercase_y.sub("Y", x)
    x = re_latex_uppercase_c.sub("C", x)
    x = re_latex_uppercase_n.sub("N", x)
    # convert input into Unicode string:
    try:
        y = unicode(x, "utf-8")
    except:
        return x # something went wrong, probably the input wasn't UTF-8
    # asciify Latin-1 lowercase characters:
    y = re_unicode_lowercase_a.sub("a", y)
    y = re_unicode_lowercase_ae.sub("ae", y)
    y = re_unicode_lowercase_e.sub("e", y)
    y = re_unicode_lowercase_i.sub("i", y)
    y = re_unicode_lowercase_o.sub("o", y)
    y = re_unicode_lowercase_u.sub("u", y)
    y = re_unicode_lowercase_y.sub("y", y)
    y = re_unicode_lowercase_c.sub("c", y)
    y = re_unicode_lowercase_n.sub("n", y)
    # asciify Latin-1 uppercase characters:
    y = re_unicode_uppercase_a.sub("A", y)
    y = re_unicode_uppercase_ae.sub("AE", y)
    y = re_unicode_uppercase_e.sub("E", y)
    y = re_unicode_uppercase_i.sub("I", y)
    y = re_unicode_uppercase_o.sub("O", y)
    y = re_unicode_uppercase_u.sub("U", y)
    y = re_unicode_uppercase_y.sub("Y", y)
    y = re_unicode_uppercase_c.sub("C", y)
    y = re_unicode_uppercase_n.sub("N", y)
    # return UTF-8 representation of the Unicode string:
    return y.encode("utf-8")

def wash_index_term(term, max_char_length=50):
    """
    Return washed form of the index term TERM that would be suitable
    for storing into idxWORD* tables.  I.e., lower the TERM, and
    truncate it safely to MAX_CHAR_LENGTH UTF-8 characters (meaning,
    in principle, 4*MAX_CHAR_LENGTH bytes).

    The function works by an internal conversion of TERM, when
    needed, from its input Python UTF-8 binary string format into
    Python Unicode format, and then truncating it safely to the given
    number of UTF-8 characters, without possible mis-truncation in
    the middle of a multi-byte UTF-8 character that could otherwise
    happen if we were working with the UTF-8 binary representation
    directly.

    Note that MAX_CHAR_LENGTH corresponds to the length of the term
    column in idxINDEX* tables.
    """
    washed_term = term.lower()
    if len(washed_term) <= max_char_length:
        # no need to truncate the term, because it will fit
        # nicely even if it uses four-byte UTF-8 characters
        return washed_term
    else:
        # truncate the term in a safe position:
        return unicode(washed_term, 'utf-8')[:max_char_length].encode('utf-8')

def wash_pattern(p):
    """Wash pattern passed by URL. Check for sanity of the wildcard by
    removing wildcards if they are appended to extremely short words
    (1-3 letters).  TODO: instead of this approximative treatment, it
    would be much better to introduce a time limit, e.g.
    to kill a query if it does not finish in 10 seconds."""
    # strip accents:
    # p = strip_accents(p) # FIXME: when available, strip accents all the time
    # add leading/trailing whitespace for the two following wildcard-sanity checking regexps:
    p = " " + p + " "
    # get rid of wildcards at the beginning of words:
    p = re_pattern_wildcards_at_beginning.sub("\\1", p)
    # replace spaces within quotes by __SPACE__ temporarily:
    p = re_pattern_single_quotes.sub(lambda x: "'"+string.replace(x.group(1), ' ', '__SPACE__')+"'", p)
    p = re_pattern_double_quotes.sub(lambda x: "\""+string.replace(x.group(1), ' ', '__SPACE__')+"\"", p)
    p = re_pattern_regexp_quotes.sub(lambda x: "/"+string.replace(x.group(1), ' ', '__SPACE__')+"/", p)
    # get rid of extremely short words (1-3 letters with wildcards):
    p = re_pattern_short_words.sub("\\1", p)
    # replace back __SPACE__ by spaces:
    p = re_pattern_space.sub(" ", p)
    # replace special terms:
    p = re_pattern_today.sub(time.strftime("%Y-%m-%d", time.localtime()), p)
    # remove unnecessary whitespace:
    p = string.strip(p)
    return p

def wash_field(f):
    """Wash field passed by URL."""
    # get rid of unnecessary whitespace:
    f = string.strip(f)
    # wash old-style CDS Invenio/ALEPH 'f' field argument, e.g. replace 'wau' and 'au' by 'author':
    if CFG_WEBSEARCH_FIELDS_CONVERT.has_key(string.lower(f)):
        f = CFG_WEBSEARCH_FIELDS_CONVERT[string.lower(f)]
    return f

def wash_dates(d1="", d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0):
    """
    Take user-submitted date arguments D1 (full datetime string) or
    (D1Y, D1M, D1D) year, month, day tuple and D2 or (D2Y, D2M, D2D)
    and return a (DATETEXT1, DATETEXT2) pair of datetime strings in
    the YYYY-MM-DD HH:MM:SS format suitable for time-restricted
    searching.

    Note that when both D1 and (D1Y, D1M, D1D) parameters are
    present, the precedence goes to D1.  Ditto for D2*.

    Note that when (D1Y, D1M, D1D) are taken into account, some
    values may be missing and are completed e.g. to 01 or 12
    according to whether it is the starting or the ending date.
    """
    datetext1, datetext2 = "", ""
    # sanity checking:
    if d1 == "" and d1y == 0 and d1m == 0 and d1d == 0 and d2 == "" and d2y == 0 and d2m == 0 and d2d == 0:
        return ("", "") # nothing selected, so return empty values
    # wash first (starting) date:
    if d1:
        # full datetime string takes precedence:
        datetext1 = d1
    else:
        # okay, first date passed as (year,month,day):
        if d1y:
            datetext1 += "%04d" % d1y
        else:
            datetext1 += "0000"
        if d1m:
            datetext1 += "-%02d" % d1m
        else:
            datetext1 += "-01"
        if d1d:
            datetext1 += "-%02d" % d1d
        else:
            datetext1 += "-01"
        datetext1 += " 00:00:00"
    # wash second (ending) date:
    if d2:
        # full datetime string takes precedence:
        datetext2 = d2
    else:
        # okay, second date passed as (year,month,day):
        if d2y:
            datetext2 += "%04d" % d2y
        else:
            datetext2 += "9999"
        if d2m:
            datetext2 += "-%02d" % d2m
        else:
            datetext2 += "-12"
        if d2d:
            datetext2 += "-%02d" % d2d
        else:
            datetext2 += "-31" # NOTE: perhaps we should add max(day number)
                               # in the given month, but for our querying it's
                               # not needed, 31 will always do
        datetext2 += " 00:00:00"
    # okay, return constructed YYYY-MM-DD HH:MM:SS datetexts:
    return (datetext1, datetext2)

def get_colID(c):
    "Return collection ID for collection name C.  Return None if no match found."
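    # A doctest-style sketch for wash_dates() above (values follow from its
    # completion rules; shown for illustration only):
    #
    #     >>> wash_dates(d1y=2004, d1m=3)
    #     ('2004-03-01 00:00:00', '9999-12-31 00:00:00')
    #     >>> wash_dates(d1="2004-03-10 12:00:00")
    #     ('2004-03-10 12:00:00', '9999-12-31 00:00:00')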
colID = None res = run_sql("SELECT id FROM collection WHERE name=%s", (c,), 1) if res: colID = res[0][0] return colID def get_coll_i18nname(c, ln=cdslang): """Return nicely formatted collection name (of name type 'ln', 'long name') for collection C in language LN.""" global collection_i18nname_cache global collection_i18nname_cache_timestamp # firstly, check whether the collectionname table was modified: if get_table_update_time('collectionname') > collection_i18nname_cache_timestamp: # yes it was, cache clear-up needed: collection_i18nname_cache = create_collection_i18nname_cache() # secondly, read i18n name from either the cache or return common name: out = c try: out = collection_i18nname_cache[c][ln] except KeyError: pass # translation in LN does not exist return out def get_field_i18nname(f, ln=cdslang): """Return nicely formatted field name (of type 'ln', 'long name') for field F in language LN.""" global field_i18nname_cache global field_i18nname_cache_timestamp # firstly, check whether the fieldname table was modified: if get_table_update_time('fieldname') > field_i18nname_cache_timestamp: # yes it was, cache clear-up needed: field_i18nname_cache = create_field_i18nname_cache() # secondly, read i18n name from either the cache or return common name: out = f try: out = field_i18nname_cache[f][ln] except KeyError: pass # translation in LN does not exist return out def get_coll_ancestors(coll): "Returns a list of ancestors for collection 'coll'." coll_ancestors = [] coll_ancestor = coll while 1: res = run_sql("""SELECT c.name FROM collection AS c LEFT JOIN collection_collection AS cc ON c.id=cc.id_dad LEFT JOIN collection AS ccc ON ccc.id=cc.id_son WHERE ccc.name=%s ORDER BY cc.id_dad ASC LIMIT 1""", (coll_ancestor,)) if res: coll_name = res[0][0] coll_ancestors.append(coll_name) coll_ancestor = coll_name else: break # ancestors found, return reversed list: coll_ancestors.reverse() return coll_ancestors def get_coll_sons(coll, type='r', public_only=1): """Return a list of sons (first-level descendants) of type 'type' for collection 'coll'. If public_only, then return only non-restricted son collections. """ coll_sons = [] query = "SELECT c.name FROM collection AS c "\ "LEFT JOIN collection_collection AS cc ON c.id=cc.id_son "\ "LEFT JOIN collection AS ccc ON ccc.id=cc.id_dad "\ "WHERE cc.type=%s AND ccc.name=%s" if public_only: query += " AND c.restricted IS NULL " query += " ORDER BY cc.score DESC" res = run_sql(query, (type, coll)) for name in res: coll_sons.append(name[0]) return coll_sons def get_coll_real_descendants(coll): """Return a list of all descendants of collection 'coll' that are defined by a 'dbquery'. IOW, we need to decompose compound collections like "A & B" into "A" and "B" provided that "A & B" has no associated database query defined. """ coll_sons = [] res = run_sql("""SELECT c.name,c.dbquery FROM collection AS c LEFT JOIN collection_collection AS cc ON c.id=cc.id_son LEFT JOIN collection AS ccc ON ccc.id=cc.id_dad WHERE ccc.name=%s ORDER BY cc.score DESC""", (coll,)) for name, dbquery in res: if dbquery: # this is 'real' collection, so return it: coll_sons.append(name) else: # this is 'composed' collection, so recurse: coll_sons.extend(get_coll_real_descendants(name)) return coll_sons def get_collection_reclist(coll): """Return hitset of recIDs that belong to the collection 'coll'. But firstly check the last updated date of the collection table. 
If it's newer than the cache timestamp, then empty the cache, since new records could have been added.""" global collection_reclist_cache global collection_reclist_cache_timestamp # firstly, check whether the collection table was modified: if get_table_update_time('collection') > collection_reclist_cache_timestamp: # yes it was, cache clear-up needed: collection_reclist_cache = create_collection_reclist_cache() # secondly, read reclist from either the cache or the database: if not collection_reclist_cache[coll]: # not yet it the cache, so calculate it and fill the cache: query = "SELECT nbrecs,reclist FROM collection WHERE name='%s'" % coll res = run_sql(query, None, 1) if res: try: set = HitSet(res[0][1]) except: set = HitSet() collection_reclist_cache[coll] = set # finally, return reclist: return collection_reclist_cache[coll] def coll_restricted_p(coll): "Predicate to test if the collection coll is restricted or not." if not coll: return 0 res = run_sql("SELECT restricted FROM collection WHERE name=%s", (coll,)) if res and res[0][0] is not None: return 1 else: return 0 def coll_restricted_group(coll): "Return Apache group to which the collection is restricted. Return None if it's public." if not coll: return None res = run_sql("SELECT restricted FROM collection WHERE name=%s", (coll,)) if res: return res[0][0] else: return None def create_collection_reclist_cache(): """Creates list of records belonging to collections. Called on startup and used later for intersecting search results with collection universe.""" global collection_reclist_cache_timestamp # populate collection reclist cache: collrecs = {} try: res = run_sql("SELECT name,reclist FROM collection") except Error: # database problems, set timestamp to zero and return empty cache collection_reclist_cache_timestamp = 0 return collrecs for name, reclist in res: collrecs[name] = None # this will be filled later during runtime by calling get_collection_reclist(coll) # update timestamp: try: collection_reclist_cache_timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) except NameError: collection_reclist_cache_timestamp = 0 return collrecs try: collection_reclist_cache.has_key(cdsname) except: try: collection_reclist_cache = create_collection_reclist_cache() except: collection_reclist_cache = {} def create_collection_i18nname_cache(): """Create cache of I18N collection names of type 'ln' (=long name). Called on startup and used later during the search time.""" global collection_i18nname_cache_timestamp # populate collection I18N name cache: names = {} try: res = run_sql("SELECT c.name,cn.ln,cn.value FROM collectionname AS cn, collection AS c WHERE cn.id_collection=c.id AND cn.type='ln'") # ln=long name except Error: # database problems, set timestamp to zero and return empty cache collection_i18nname_cache_timestamp = 0 return names for c, ln, i18nname in res: if i18nname: if not names.has_key(c): names[c] = {} names[c][ln] = i18nname # update timestamp: try: collection_i18nname_cache_timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) except NameError: collection_i18nname_cache_timestamp = 0 return names try: collection_i18nname_cache.has_key(cdsname) except: try: collection_i18nname_cache = create_collection_i18nname_cache() except: collection_i18nname_cache = {} def create_field_i18nname_cache(): """Create cache of I18N field names of type 'ln' (=long name). 
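    The resulting dict is keyed first by field name and then by language
    code, e.g. (hypothetical translation) names['title']['fr'] == 'titre'.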
    Called on startup and used later during the search time."""
    global field_i18nname_cache_timestamp
    # populate field I18N name cache:
    names = {}
    try:
        res = run_sql("SELECT f.name,fn.ln,fn.value FROM fieldname AS fn, field AS f WHERE fn.id_field=f.id AND fn.type='ln'") # ln=long name
    except Error:
        # database problems, set timestamp to zero and return empty cache
        field_i18nname_cache_timestamp = 0
        return names
    for f, ln, i18nname in res:
        if i18nname:
            if not names.has_key(f):
                names[f] = {}
            names[f][ln] = i18nname
    # update timestamp:
    try:
        field_i18nname_cache_timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    except NameError:
        field_i18nname_cache_timestamp = 0
    return names

try:
    field_i18nname_cache.has_key(cdsname)
except:
    try:
        field_i18nname_cache = create_field_i18nname_cache()
    except:
        field_i18nname_cache = {}

def browse_pattern(req, colls, p, f, rg, ln=cdslang):
    """Browse either bibliographic phrase indexes or word indexes, and display the results."""
    # load the right message language
    _ = gettext_set_language(ln)
    ## do we search in words indexes?
    if not f:
        return browse_in_bibwords(req, p, f)
    p_orig = p
    ## okay, "real browse" follows:
    browsed_phrases = get_nearest_terms_in_bibxxx(p, f, rg, 1)
    while not browsed_phrases:
        # try again and again with shorter and shorter pattern:
        try:
            p = p[:-1]
            browsed_phrases = get_nearest_terms_in_bibxxx(p, f, rg, 1)
        except:
            # probably there are no hits at all:
            req.write(_("No values found."))
            return
    ## try to check hits in this particular collection selection:
    browsed_phrases_in_colls = []
    if 0:
        for phrase in browsed_phrases:
            phrase_hitset = HitSet()
            phrase_hitsets = search_pattern("", phrase, f, 'e')
            for coll in colls:
                phrase_hitset.union_update(phrase_hitsets[coll])
            if len(phrase_hitset) > 0:
                # okay, this phrase has some hits in colls, so add it:
                browsed_phrases_in_colls.append([phrase, len(phrase_hitset)])
    ## were there hits in collections?
    if browsed_phrases_in_colls == []:
        if browsed_phrases != []:
            #print_warning(req, """No match close to %s found in given collections.
            #Please try different term.
Displaying matches in any collection...""" % p_orig) ## try to get nbhits for these phrases in any collection: for phrase in browsed_phrases: browsed_phrases_in_colls.append([phrase, get_nbhits_in_bibxxx(phrase, f)]) ## display results now: out = websearch_templates.tmpl_browse_pattern( f=get_field_i18nname(f, ln), ln=ln, browsed_phrases_in_colls=browsed_phrases_in_colls, colls=colls, ) req.write(out) return def browse_in_bibwords(req, p, f, ln=cdslang): """Browse inside words indexes.""" if not p: return _ = gettext_set_language(ln) urlargd = {} urlargd.update(req.argd) urlargd['action'] = 'search' nearest_box = create_nearest_terms_box(urlargd, p, f, 'w', ln=ln, intro_text_p=0) req.write(websearch_templates.tmpl_search_in_bibwords( p = p, f = f, ln = ln, nearest_box = nearest_box )) return def search_pattern(req=None, p=None, f=None, m=None, ap=0, of="id", verbose=0, ln=cdslang): """Search for complex pattern 'p' within field 'f' according to matching type 'm'. Return hitset of recIDs. The function uses multi-stage searching algorithm in case of no exact match found. See the Search Internals document for detailed description. The 'ap' argument governs whether an alternative patterns are to be used in case there is no direct hit for (p,f,m). For example, whether to replace non-alphanumeric characters by spaces if it would give some hits. See the Search Internals document for detailed description. (ap=0 forbits the alternative pattern usage, ap=1 permits it.) The 'of' argument governs whether to print or not some information to the user in case of no match found. (Usually it prints the information in case of HTML formats, otherwise it's silent). The 'verbose' argument controls the level of debugging information to be printed (0=least, 9=most). All the parameters are assumed to have been previously washed. This function is suitable as a mid-level API. """ _ = gettext_set_language(ln) hitset_empty = HitSet() # sanity check: if not p: hitset_full = HitSet(trailing_bits=1) hitset_full.discard(0) # no pattern, so return all universe return hitset_full # search stage 1: break up arguments into basic search units: if verbose and of.startswith("h"): t1 = os.times()[4] basic_search_units = create_basic_search_units(req, p, f, m, of) if verbose and of.startswith("h"): t2 = os.times()[4] print_warning(req, "Search stage 1: basic search units are: %s" % basic_search_units) print_warning(req, "Search stage 1: execution took %.2f seconds." 
% (t2 - t1)) # search stage 2: do search for each search unit and verify hit presence: if verbose and of.startswith("h"): t1 = os.times()[4] basic_search_units_hitsets = [] for idx_unit in range(0, len(basic_search_units)): bsu_o, bsu_p, bsu_f, bsu_m = basic_search_units[idx_unit] basic_search_unit_hitset = search_unit(bsu_p, bsu_f, bsu_m) if verbose >= 9 and of.startswith("h"): print_warning(req, "Search stage 1: pattern %s gave hitlist %s" % (bsu_p, list(basic_search_unit_hitset))) if len(basic_search_unit_hitset) > 0 or \ ap==0 or \ bsu_o=="|" or \ ((idx_unit+1) 0: # we retain the new unit instead if of.startswith('h'): print_warning(req, _("No exact match found for %(x_query1)s, using %(x_query2)s instead...") % \ {'x_query1': "" + cgi.escape(bsu_p) + "", 'x_query2': "" + cgi.escape(bsu_pn) + ""}) basic_search_units[idx_unit][1] = bsu_pn basic_search_units_hitsets.append(basic_search_unit_hitset) else: # stage 2-3: no hits found either, propose nearest indexed terms: if of.startswith('h'): if req: if bsu_f == "recid": print_warning(req, "Requested record does not seem to exist.") else: print_warning(req, create_nearest_terms_box(req.argd, bsu_p, bsu_f, bsu_m, ln=ln)) return hitset_empty else: # stage 2-3: no hits found either, propose nearest indexed terms: if of.startswith('h'): if req: if bsu_f == "recid": print_warning(req, "Requested record does not seem to exist.") else: print_warning(req, create_nearest_terms_box(req.argd, bsu_p, bsu_f, bsu_m, ln=ln)) return hitset_empty if verbose and of.startswith("h"): t2 = os.times()[4] for idx_unit in range(0, len(basic_search_units)): print_warning(req, "Search stage 2: basic search unit %s gave %d hits." % (basic_search_units[idx_unit][1:], len(basic_search_units_hitsets[idx_unit]))) print_warning(req, "Search stage 2: execution took %.2f seconds." % (t2 - t1)) # search stage 3: apply boolean query for each search unit: if verbose and of.startswith("h"): t1 = os.times()[4] # let the initial set be the complete universe: hitset_in_any_collection = HitSet(trailing_bits=1) hitset_in_any_collection.discard(0) for idx_unit in range(0, len(basic_search_units)): this_unit_operation = basic_search_units[idx_unit][0] this_unit_hitset = basic_search_units_hitsets[idx_unit] if this_unit_operation == '+': hitset_in_any_collection.intersection_update(this_unit_hitset) elif this_unit_operation == '-': hitset_in_any_collection.difference_update(this_unit_hitset) elif this_unit_operation == '|': hitset_in_any_collection.union_update(this_unit_hitset) else: if of.startswith("h"): print_warning(req, "Invalid set operation %s." % this_unit_operation, "Error") if len(hitset_in_any_collection) == 0: # no hits found, propose alternative boolean query: if of.startswith('h'): nearestterms = [] for idx_unit in range(0, len(basic_search_units)): bsu_o, bsu_p, bsu_f, bsu_m = basic_search_units[idx_unit] if bsu_p.startswith("%") and bsu_p.endswith("%"): bsu_p = "'" + bsu_p[1:-1] + "'" bsu_nbhits = len(basic_search_units_hitsets[idx_unit]) # create a similar query, but with the basic search unit only argd = {} argd.update(req.argd) argd['p'] = bsu_p argd['f'] = bsu_f nearestterms.append((bsu_p, bsu_nbhits, argd)) text = websearch_templates.tmpl_search_no_boolean_hits( ln=ln, nearestterms=nearestterms) print_warning(req, text) if verbose and of.startswith("h"): t2 = os.times()[4] print_warning(req, "Search stage 3: boolean query gave %d hits." % len(hitset_in_any_collection)) print_warning(req, "Search stage 3: execution took %.2f seconds." 
% (t2 - t1)) return hitset_in_any_collection def search_unit(p, f=None, m=None): """Search for basic search unit defined by pattern 'p' and field 'f' and matching type 'm'. Return hitset of recIDs. All the parameters are assumed to have been previously washed. 'p' is assumed to be already a ``basic search unit'' so that it is searched as such and is not broken up in any way. Only wildcard and span queries are being detected inside 'p'. This function is suitable as a low-level API. """ ## create empty output results set: set = HitSet() if not p: # sanity checking return set if m == 'a' or m == 'r': # we are doing either direct bibxxx search or phrase search or regexp search set = search_unit_in_bibxxx(p, f, m) else: # we are doing bibwords search by default set = search_unit_in_bibwords(p, f) return set def search_unit_in_bibwords(word, f, decompress=zlib.decompress): """Searches for 'word' inside bibwordsX table for field 'f' and returns hitset of recIDs.""" set = HitSet() # will hold output result set set_used = 0 # not-yet-used flag, to be able to circumvent set operations # deduce into which bibwordsX table we will search: apply_stemming = not "anyfield" in CFG_BIBINDEX_DISABLE_STEMMING_FOR_INDEXES bibwordsX = "idxWORD%02dF" % get_index_id("anyfield") if f: index_id = get_index_id(f) if index_id: bibwordsX = "idxWORD%02dF" % index_id apply_stemming = not f in CFG_BIBINDEX_DISABLE_STEMMING_FOR_INDEXES else: return HitSet() # word index f does not exist # wash 'word' argument and run query: word = string.replace(word, '*', '%') # we now use '*' as the truncation character words = string.split(word, "->", 1) # check for span query if len(words) == 2: word0 = re_word.sub('', words[0]) word1 = re_word.sub('', words[1]) if apply_stemming: word0 = stem(word0) word1 = stem(word1) res = run_sql("SELECT term,hitlist FROM %s WHERE term BETWEEN %%s AND %%s" % bibwordsX, (wash_index_term(word0), wash_index_term(word1))) else: word = re_word.sub('', word) if apply_stemming: word = stem(word) if string.find(word, '%') >= 0: # do we have wildcard in the word? res = run_sql("SELECT term,hitlist FROM %s WHERE term LIKE %%s" % bibwordsX, (wash_index_term(word),)) else: res = run_sql("SELECT term,hitlist FROM %s WHERE term=%%s" % bibwordsX, (wash_index_term(word),)) # fill the result set: for word, hitlist in res: hitset_bibwrd = HitSet(hitlist) # add the results: if set_used: set.union_update(hitset_bibwrd) else: set = hitset_bibwrd set_used = 1 # okay, return result set: return set def search_unit_in_bibxxx(p, f, type): """Searches for pattern 'p' inside bibxxx tables for field 'f' and returns hitset of recIDs found. The search type is defined by 'type' (e.g. 
equals to 'r' for a regexp search).""" p_orig = p # saving for eventual future 'no match' reporting query_addons = "" # will hold additional SQL code for the query query_params = () # will hold parameters for the query (their number may vary depending on TYPE argument) # wash arguments: f = string.replace(f, '*', '%') # replace truncation char '*' in field definition if type == 'r': query_addons = "REGEXP %s" query_params = (p,) else: p = string.replace(p, '*', '%') # we now use '*' as the truncation character ps = string.split(p, "->", 1) # check for span query: if len(ps) == 2: query_addons = "BETWEEN %s AND %s" query_params = (ps[0], ps[1]) else: if string.find(p, '%') > -1: query_addons = "LIKE %s" query_params = (ps[0],) else: query_addons = "= %s" query_params = (ps[0],) # construct 'tl' which defines the tag list (MARC tags) to search in: tl = [] if str(f[0]).isdigit() and str(f[1]).isdigit(): tl.append(f) # 'f' seems to be okay as it starts by two digits else: # convert old ALEPH tag names, if appropriate: (TODO: get rid of this before entering this function) if CFG_WEBSEARCH_FIELDS_CONVERT.has_key(string.lower(f)): f = CFG_WEBSEARCH_FIELDS_CONVERT[string.lower(f)] # deduce desired MARC tags on the basis of chosen 'f' tl = get_field_tags(f) if not tl: # f index does not exist, nevermind pass # okay, start search: l = [] # will hold list of recID that matched for t in tl: # deduce into which bibxxx table we will search: digit1, digit2 = int(t[0]), int(t[1]) bx = "bib%d%dx" % (digit1, digit2) bibx = "bibrec_bib%d%dx" % (digit1, digit2) # construct and run query: if t == "001": res = run_sql("SELECT id FROM bibrec WHERE id %s" % query_addons, query_params) else: query = "SELECT bibx.id_bibrec FROM %s AS bx LEFT JOIN %s AS bibx ON bx.id=bibx.id_bibxxx WHERE bx.value %s" % \ (bx, bibx, query_addons) if len(t) != 6 or t[-1:]=='%': # wildcard query, or only the beginning of field 't' # is defined, so add wildcard character: query += " AND bx.tag LIKE %s" res = run_sql(query, query_params + (t + '%',)) else: # exact query for 't': query += " AND bx.tag=%s" res = run_sql(query, query_params + (t,)) # fill the result set: for id_bibrec in res: if id_bibrec[0]: l.append(id_bibrec[0]) # check no of hits found: nb_hits = len(l) # okay, return result set: set = HitSet(l) return set def search_unit_in_bibrec(datetext1, datetext2, type='c'): """ Return hitset of recIDs found that were either created or modified (according to 'type' arg being 'c' or 'm') from datetext1 until datetext2, inclusive. Does not pay attention to pattern, collection, anything. Useful to intersect later on with the 'real' query. 
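    For example (a sketch): search_unit_in_bibrec("2004-01-01 00:00:00",
    "2004-12-31 23:59:59", "c") returns the hitset of all records created
    during 2004, while type='m' would select on modification date instead.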
""" set = HitSet() if type.startswith("m"): type = "modification_date" else: type = "creation_date" # by default we are searching for creation dates res = run_sql("SELECT id FROM bibrec WHERE %s>=%%s AND %s<=%%s" % (type, type), (datetext1, datetext2)) for row in res: set += row[0] return set def intersect_results_with_collrecs(req, hitset_in_any_collection, colls, ap=0, of="hb", verbose=0, ln=cdslang): """Return dict of hitsets given by intersection of hitset with the collection universes.""" _ = gettext_set_language(ln) # search stage 4: intersect with the collection universe: if verbose and of.startswith("h"): t1 = os.times()[4] results = {} results_nbhits = 0 for coll in colls: results[coll] = hitset_in_any_collection & get_collection_reclist(coll) results_nbhits += len(results[coll]) if results_nbhits == 0: # no hits found, try to search in Home: results_in_Home = hitset_in_any_collection & get_collection_reclist(cdsname) if len(results_in_Home) > 0: # some hits found in Home, so propose this search: if of.startswith("h"): url = websearch_templates.build_search_url(req.argd, cc=cdsname, c=[]) print_warning(req, _("No match found in collection %(x_collection)s. Other public collections gave %(x_url_open)s%(x_nb_hits)d hits%(x_url_close)s.") %\ {'x_collection': '' + string.join([get_coll_i18nname(coll, ln) for coll in colls], ', ') + '', 'x_url_open': '' % (url), 'x_nb_hits': len(results_in_Home), 'x_url_close': ''}) results = {} else: # no hits found in Home, recommend different search terms: if of.startswith("h"): print_warning(req, _("No public collection matched your query. " "If you were looking for a non-public document, please choose " "the desired restricted collection first.")) results = {} if verbose and of.startswith("h"): t2 = os.times()[4] print_warning(req, "Search stage 4: intersecting with collection universe gave %d hits." % results_nbhits) print_warning(req, "Search stage 4: execution took %.2f seconds." % (t2 - t1)) return results def intersect_results_with_hitset(req, results, hitset, ap=0, aptext="", of="hb"): """Return intersection of search 'results' (a dict of hitsets with collection as key) with the 'hitset', i.e. apply 'hitset' intersection to each collection within search 'results'. If the final 'results' set is to be empty, and 'ap' (approximate pattern) is true, and then print the `warningtext' and return the original 'results' set unchanged. If 'ap' is false, then return empty results set. """ if ap: results_ap = copy.deepcopy(results) else: results_ap = {} # will return empty dict in case of no hits found nb_total = 0 for coll in results.keys(): results[coll].intersection_update(hitset) nb_total += len(results[coll]) if nb_total == 0: if of.startswith("h"): print_warning(req, aptext) results = results_ap return results def create_similarly_named_authors_link_box(author_name, ln=cdslang): """Return a box similar to ``Not satisfied...'' one by proposing author searches for similar names. Namely, take AUTHOR_NAME and the first initial of the firstame (after comma) and look into author index whether authors with e.g. middle names exist. Useful mainly for CERN Library that sometimes contains name forms like Ellis-N, Ellis-Nick, Ellis-Nicolas all denoting the same person. The box isn't proposed if no similarly named authors are found to exist. 
""" # return nothing if not configured: if CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX == 0: return "" # return empty box if there is no initial: if re.match(r'[^ ,]+, [^ ]', author_name) is None: return "" # firstly find name comma initial: author_name_to_search = re.sub(r'^([^ ,]+, +[^ ,]).*$', '\\1', author_name) # secondly search for similar name forms: similar_author_names = {} for name in author_name_to_search, strip_accents(author_name_to_search): for tag in get_field_tags("author"): # deduce into which bibxxx table we will search: digit1, digit2 = int(tag[0]), int(tag[1]) bx = "bib%d%dx" % (digit1, digit2) bibx = "bibrec_bib%d%dx" % (digit1, digit2) if len(tag) != 6 or tag[-1:]=='%': # only the beginning of field 't' is defined, so add wildcard character: res = run_sql("""SELECT bx.value FROM %s AS bx WHERE bx.value LIKE %%s AND bx.tag LIKE %%s""" % bx, (name + "%", tag + "%")) else: res = run_sql("""SELECT bx.value FROM %s AS bx WHERE bx.value LIKE %%s AND bx.tag=%%s""" % bx, (name + "%", tag)) for row in res: similar_author_names[row[0]] = 1 # remove the original name and sort the list: try: del similar_author_names[author_name] except KeyError: pass # thirdly print the box: out = "" if similar_author_names: out_authors = similar_author_names.keys() out_authors.sort() tmp_authors = [] for out_author in out_authors: nbhits = get_nbhits_in_bibxxx(out_author, "author") if nbhits: tmp_authors.append((out_author, nbhits)) out += websearch_templates.tmpl_similar_author_names( authors=tmp_authors, ln=ln) return out def create_nearest_terms_box(urlargd, p, f, t='w', n=5, ln=cdslang, intro_text_p=True): """Return text box containing list of 'n' nearest terms above/below 'p' for the field 'f' for matching type 't' (words/phrases) in language 'ln'. Propose new searches according to `urlargs' with the new words. If `intro_text_p' is true, then display the introductory message, otherwise print only the nearest terms in the box content. """ # load the right message language _ = gettext_set_language(ln) out = "" nearest_terms = [] if not p: # sanity check p = "." # look for nearest terms: if t == 'w': nearest_terms = get_nearest_terms_in_bibwords(p, f, n, n) if not nearest_terms: return "%s %s." % (_("No words index available for"), get_field_i18nname(f, ln)) else: nearest_terms = get_nearest_terms_in_bibxxx(p, f, n, n) if not nearest_terms: return "%s %s." % (_("No phrase index available for"), get_field_i18nname(f, ln)) terminfo = [] for term in nearest_terms: if t == 'w': hits = get_nbhits_in_bibwords(term, f) else: hits = get_nbhits_in_bibxxx(term, f) argd = {} argd.update(urlargd) # check which fields contained the requested parameter, and replace it. for (px, fx) in ('p', 'f'), ('p1', 'f1'), ('p2', 'f2'), ('p3', 'f3'): if px in argd: if f == argd[fx] or f == "anyfield" or f == "": if string.find(argd[px], p) > -1: argd[px] = string.replace(argd[px], p, term) break else: if string.find(argd[px], f+':'+p) > -1: argd[px] = string.replace(argd[px], f+':'+p, f+':'+term) break elif string.find(argd[px], f+':"'+p+'"') > -1: argd[px] = string.replace(argd[px], f+':"'+p+'"', f+':"'+term+'"') break terminfo.append((term, hits, argd)) intro = "" if intro_text_p: # add full leading introductory text if f: intro = _("Search term %(x_term)s inside index %(x_index)s did not match any record. 
Nearest terms in any collection are:") % \ {'x_term': "" + cgi.escape(p.startswith("%") and p.endswith("%") and p[1:-1] or p) + "", 'x_index': "" + cgi.escape(get_field_i18nname(f, ln)) + ""} else: intro = _("Search term %s did not match any record. Nearest terms in any collection are:") % \ ("" + cgi.escape(p.startswith("%") and p.endswith("%") and p[1:-1] or p) + "") return websearch_templates.tmpl_nearest_term_box(p=p, ln=ln, f=f, terminfo=terminfo, intro=intro) def get_nearest_terms_in_bibwords(p, f, n_below, n_above): """Return list of +n -n nearest terms to word `p' in index for field `f'.""" nearest_words = [] # will hold the (sorted) list of nearest words to return # deduce into which bibwordsX table we will search: bibwordsX = "idxWORD%02dF" % get_index_id("anyfield") if f: index_id = get_index_id(f) if index_id: bibwordsX = "idxWORD%02dF" % index_id else: return nearest_words # firstly try to get `n' closest words above `p': res = run_sql("SELECT term FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % bibwordsX, (p, n_above)) for row in res: nearest_words.append(row[0]) nearest_words.reverse() # secondly insert given word `p': nearest_words.append(p) # finally try to get `n' closest words below `p': res = run_sql("SELECT term FROM %s WHERE term>%%s ORDER BY term ASC LIMIT %%s" % bibwordsX, (p, n_below)) for row in res: nearest_words.append(row[0]) return nearest_words def get_nearest_terms_in_bibxxx(p, f, n_below, n_above): """Browse (-n_above, +n_below) closest bibliographic phrases for the given pattern p in the given field f, regardless of collection. Return list of [phrase1, phrase2, ... , phrase_n].""" ## determine browse field: if not f and string.find(p, ":") > 0: # does 'p' contain ':'? f, p = string.split(p, ":", 1) ## We are going to take max(n_below, n_above) as the number of ## values to ferch from bibXXx. This is needed to work around ## MySQL UTF-8 sorting troubles in 4.0.x. Proper solution is to ## use MySQL 4.1.x or our own idxPHRASE in the future. 
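    # Doctest-style sketch of the sibling helper get_nearest_terms_in_bibwords()
    # above (hypothetical index content, shown only to illustrate the ordering):
    #
    #     >>> get_nearest_terms_in_bibwords("muon", "title", 2, 2)
    #     ['mu', 'multipole', 'muon', 'muonic', 'music']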
n_fetch = 2*max(n_below, n_above) ## construct 'tl' which defines the tag list (MARC tags) to search in: tl = [] if str(f[0]).isdigit() and str(f[1]).isdigit(): tl.append(f) # 'f' seems to be okay as it starts by two digits else: # deduce desired MARC tags on the basis of chosen 'f' tl = get_field_tags(f) ## start browsing to fetch list of hits: browsed_phrases = {} # will hold {phrase1: 1, phrase2: 1, ..., phraseN: 1} dict of browsed phrases (to make them unique) # always add self to the results set: browsed_phrases[p.startswith("%") and p.endswith("%") and p[1:-1] or p] = 1 for t in tl: # deduce into which bibxxx table we will search: digit1, digit2 = int(t[0]), int(t[1]) bx = "bib%d%dx" % (digit1, digit2) bibx = "bibrec_bib%d%dx" % (digit1, digit2) # firstly try to get `n' closest phrases above `p': if len(t) != 6 or t[-1:]=='%': # only the beginning of field 't' is defined, so add wildcard character: res = run_sql("""SELECT bx.value FROM %s AS bx WHERE bx.value<%%s AND bx.tag LIKE %%s ORDER BY bx.value DESC LIMIT %%s""" % bx, (p, t + "%", n_fetch)) else: res = run_sql("""SELECT bx.value FROM %s AS bx WHERE bx.value<%%s AND bx.tag=%%s ORDER BY bx.value DESC LIMIT %%s""" % bx, (p, t, n_fetch)) for row in res: browsed_phrases[row[0]] = 1 # secondly try to get `n' closest phrases equal to or below `p': if len(t) != 6 or t[-1:]=='%': # only the beginning of field 't' is defined, so add wildcard character: res = run_sql("""SELECT bx.value FROM %s AS bx WHERE bx.value>=%%s AND bx.tag LIKE %%s ORDER BY bx.value ASC LIMIT %%s""" % bx, (p, t + "%", n_fetch)) else: res = run_sql("""SELECT bx.value FROM %s AS bx WHERE bx.value>=%%s AND bx.tag=%%s ORDER BY bx.value ASC LIMIT %%s""" % bx, (p, t, n_fetch)) for row in res: browsed_phrases[row[0]] = 1 # select first n words only: (this is needed as we were searching # in many different tables and so aren't sure we have more than n # words right; this of course won't be needed when we shall have # one ACC table only for given field): phrases_out = browsed_phrases.keys() phrases_out.sort(lambda x, y: cmp(string.lower(strip_accents(x)), string.lower(strip_accents(y)))) # find position of self: try: idx_p = phrases_out.index(p) except: idx_p = len(phrases_out)/2 # return n_above and n_below: return phrases_out[max(0, idx_p-n_above):idx_p+n_below] def get_nbhits_in_bibwords(word, f): """Return number of hits for word 'word' inside words index for field 'f'.""" out = 0 # deduce into which bibwordsX table we will search: bibwordsX = "idxWORD%02dF" % get_index_id("anyfield") if f: index_id = get_index_id(f) if index_id: bibwordsX = "idxWORD%02dF" % index_id else: return 0 if word: res = run_sql("SELECT hitlist FROM %s WHERE term=%%s" % bibwordsX, (word,)) for hitlist in res: out += len(HitSet(hitlist[0])) return out def get_nbhits_in_bibxxx(p, f): """Return number of hits for word 'word' inside words index for field 'f'.""" ## determine browse field: if not f and string.find(p, ":") > 0: # does 'p' contain ':'? 
f, p = string.split(p, ":", 1) ## construct 'tl' which defines the tag list (MARC tags) to search in: tl = [] if str(f[0]).isdigit() and str(f[1]).isdigit(): tl.append(f) # 'f' seems to be okay as it starts by two digits else: # deduce desired MARC tags on the basis of chosen 'f' tl = get_field_tags(f) # start searching: recIDs = {} # will hold dict of {recID1: 1, recID2: 1, ..., } (unique recIDs, therefore) for t in tl: # deduce into which bibxxx table we will search: digit1, digit2 = int(t[0]), int(t[1]) bx = "bib%d%dx" % (digit1, digit2) bibx = "bibrec_bib%d%dx" % (digit1, digit2) if len(t) != 6 or t[-1:]=='%': # only the beginning of field 't' is defined, so add wildcard character: res = run_sql("""SELECT bibx.id_bibrec FROM %s AS bibx, %s AS bx WHERE bx.value=%%s AND bx.tag LIKE %%s AND bibx.id_bibxxx=bx.id""" % (bibx, bx), (p, t + "%")) else: res = run_sql("""SELECT bibx.id_bibrec FROM %s AS bibx, %s AS bx WHERE bx.value=%%s AND bx.tag=%%s AND bibx.id_bibxxx=bx.id""" % (bibx, bx), (p, t)) for row in res: recIDs[row[0]] = 1 return len(recIDs) def get_mysql_recid_from_aleph_sysno(sysno): """Returns DB's recID for ALEPH sysno passed in the argument (e.g. "002379334CER"). Returns None in case of failure.""" out = None res = run_sql("""SELECT bb.id_bibrec FROM bibrec_bib97x AS bb, bib97x AS b WHERE b.value=%s AND b.tag='970__a' AND bb.id_bibxxx=b.id""", (sysno,)) if res: out = res[0][0] return out def guess_primary_collection_of_a_record(recID): """Return primary collection name a record recid belongs to, by testing 980 identifier. May lead to bad guesses when a collection is defined dynamically bia dbquery. In that case, return 'cdsname'.""" out = cdsname dbcollids = get_fieldvalues(recID, "980__a") if dbcollids: dbquery = "collection:" + dbcollids[0] res = run_sql("SELECT name FROM collection WHERE dbquery=%s", (dbquery,)) if res: out = res[0][0] return out def get_tag_name(tag_value, prolog="", epilog=""): """Return tag name from the known tag value, by looking up the 'tag' table. Return empty string in case of failure. Example: input='100__%', output=first author'.""" out = "" res = run_sql("SELECT name FROM tag WHERE value=%s", (tag_value,)) if res: out = prolog + res[0][0] + epilog return out def get_fieldcodes(): """Returns a list of field codes that may have been passed as 'search options' in URL. Example: output=['subject','division'].""" out = [] res = run_sql("SELECT DISTINCT(code) FROM field") for row in res: out.append(row[0]) return out def get_field_tags(field): """Returns a list of MARC tags for the field code 'field'. Returns empty list in case of error. 
Example: field='author', output=['100__%','700__%'].""" out = [] query = """SELECT t.value FROM tag AS t, field_tag AS ft, field AS f WHERE f.code=%s AND ft.id_field=f.id AND t.id=ft.id_tag ORDER BY ft.score DESC""" res = run_sql(query, (field, )) for val in res: out.append(val[0]) return out def get_fieldvalues(recID, tag): """Return list of field values for field TAG inside record RECID.""" out = [] if tag == "001___": # we have asked for recID that is not stored in bibXXx tables out.append(str(recID)) else: # we are going to look inside bibXXx tables digits = tag[0:2] try: intdigits = int(digits) if intdigits < 0 or intdigits > 99: raise ValueError except ValueError: # invalid tag value asked for return [] bx = "bib%sx" % digits bibx = "bibrec_bib%sx" % digits query = "SELECT bx.value FROM %s AS bx, %s AS bibx " \ " WHERE bibx.id_bibrec='%s' AND bx.id=bibx.id_bibxxx AND bx.tag LIKE '%s' " \ " ORDER BY bibx.field_number, bx.tag ASC" % (bx, bibx, recID, tag) res = run_sql(query) for row in res: out.append(row[0]) return out def get_fieldvalues_alephseq_like(recID, tags_in): """Return buffer of ALEPH sequential-like textual format with fields found in the list TAGS_IN for record RECID.""" out = "" if type(tags_in) is not list: tags_in = [tags_in,] if len(tags_in) == 1 and len(tags_in[0]) == 6: ## case A: one concrete subfield asked, so print its value if found ## (use with care: can false you if field has multiple occurrences) out += string.join(get_fieldvalues(recID, tags_in[0]),"\n") else: ## case B: print our "text MARC" format; works safely all the time # find out which tags to output: dict_of_tags_out = {} if not tags_in: for i in range(0, 10): for j in range(0, 10): dict_of_tags_out["%d%d%%" % (i, j)] = 1 else: for tag in tags_in: if len(tag) == 0: for i in range(0, 10): for j in range(0, 10): dict_of_tags_out["%d%d%%" % (i, j)] = 1 elif len(tag) == 1: for j in range(0, 10): dict_of_tags_out["%s%d%%" % (tag, j)] = 1 elif len(tag) < 5: dict_of_tags_out["%s%%" % tag] = 1 elif tag >= 6: dict_of_tags_out[tag[0:5]] = 1 tags_out = dict_of_tags_out.keys() tags_out.sort() # search all bibXXx tables as needed: for tag in tags_out: digits = tag[0:2] try: intdigits = int(digits) if intdigits < 0 or intdigits > 99: raise ValueError except ValueError: # invalid tag value asked for continue if tag.startswith("001") or tag.startswith("00%"): if out: out += "\n" out += "%09d %s %d" % (recID, "001__", recID) bx = "bib%sx" % digits bibx = "bibrec_bib%sx" % digits query = "SELECT b.tag,b.value,bb.field_number FROM %s AS b, %s AS bb "\ "WHERE bb.id_bibrec='%s' AND b.id=bb.id_bibxxx AND b.tag LIKE '%s%%' "\ "ORDER BY bb.field_number, b.tag ASC" % (bx, bibx, recID, tag) res = run_sql(query) # go through fields: field_number_old = -999 field_old = "" for row in res: field, value, field_number = row[0], row[1], row[2] ind1, ind2 = field[3], field[4] if ind1 == "_": ind1 = "" if ind2 == "_": ind2 = "" # print field tag if field_number != field_number_old or field[:-1] != field_old[:-1]: if out: out += "\n" out += "%09d %s " % (recID, field[:5]) field_number_old = field_number field_old = field # print subfield value if field[0:2] == "00" and field[-1:] == "_": out += value else: out += "$$%s%s" % (field[-1:], value) return out def record_exists(recID): """Return 1 if record RECID exists. Return 0 if it doesn't exist. 
Return -1 if it exists but is marked as deleted.""" out = 0 query = "SELECT id FROM bibrec WHERE id='%s'" % recID res = run_sql(query, None, 1) if res: # record exists; now check whether it isn't marked as deleted: dbcollids = get_fieldvalues(recID, "980__%") if ("DELETED" in dbcollids) or (CFG_CERN_SITE and "DUMMY" in dbcollids): out = -1 # exists, but marked as deleted else: out = 1 # exists fine return out def record_public_p(recID): """Return 1 if the record is public, i.e. if it can be found in the Home collection. Return 0 otherwise. """ return recID in get_collection_reclist(cdsname) def get_creation_date(recID, fmt="%Y-%m-%d"): "Returns the creation date of the record 'recID'." out = "" res = run_sql("SELECT DATE_FORMAT(creation_date,%s) FROM bibrec WHERE id=%s", (fmt, recID), 1) if res: out = res[0][0] return out def get_modification_date(recID, fmt="%Y-%m-%d"): "Returns the date of last modification for the record 'recID'." out = "" res = run_sql("SELECT DATE_FORMAT(modification_date,%s) FROM bibrec WHERE id=%s", (fmt, recID), 1) if res: out = res[0][0] return out def print_warning(req, msg, type='', prologue='
<br>', epilogue='<br>
'): "Prints warning message and flushes output." if req and msg: req.write(websearch_templates.tmpl_print_warning( msg = msg, type = type, prologue = prologue, epilogue = epilogue, )) return def print_search_info(p, f, sf, so, sp, rm, of, ot, collection=cdsname, nb_found=-1, jrec=1, rg=10, as=0, ln=cdslang, p1="", p2="", p3="", f1="", f2="", f3="", m1="", m2="", m3="", op1="", op2="", sc=1, pl_in_url="", d1y=0, d1m=0, d1d=0, d2y=0, d2m=0, d2d=0, dt="", cpu_time=-1, middle_only=0): """Prints stripe with the information on 'collection' and 'nb_found' results and CPU time. Also, prints navigation links (beg/next/prev/end) inside the results set. If middle_only is set to 1, it will only print the middle box information (beg/netx/prev/end/etc) links. This is suitable for displaying navigation links at the bottom of the search results page.""" out = "" # sanity check: if jrec < 1: jrec = 1 if jrec > nb_found: jrec = max(nb_found-rg+1, 1) return websearch_templates.tmpl_print_search_info( ln = ln, weburl = weburl, collection = collection, as = as, collection_name = get_coll_i18nname(collection, ln), collection_id = get_colID(collection), middle_only = middle_only, rg = rg, nb_found = nb_found, sf = sf, so = so, rm = rm, of = of, ot = ot, p = p, f = f, p1 = p1, p2 = p2, p3 = p3, f1 = f1, f2 = f2, f3 = f3, m1 = m1, m2 = m2, m3 = m3, op1 = op1, op2 = op2, pl_in_url = pl_in_url, d1y = d1y, d1m = d1m, d1d = d1d, d2y = d2y, d2m = d2m, d2d = d2d, dt = dt, jrec = jrec, sc = sc, sp = sp, all_fieldcodes = get_fieldcodes(), cpu_time = cpu_time, ) def print_results_overview(req, colls, results_final_nb_total, results_final_nb, cpu_time, ln=cdslang, ec=[]): """Prints results overview box with links to particular collections below.""" out = "" new_colls = [] for coll in colls: new_colls.append({ 'id': get_colID(coll), 'code': coll, 'name': get_coll_i18nname(coll, ln), }) return websearch_templates.tmpl_print_results_overview( ln = ln, weburl = weburl, results_final_nb_total = results_final_nb_total, results_final_nb = results_final_nb, cpu_time = cpu_time, colls = new_colls, ec = ec, ) def sort_records(req, recIDs, sort_field='', sort_order='d', sort_pattern='', verbose=0, of='hb', ln=cdslang): """Sort records in 'recIDs' list according sort field 'sort_field' in order 'sort_order'. If more than one instance of 'sort_field' is found for a given record, try to choose that that is given by 'sort pattern', for example "sort by report number that starts by CERN-PS". Note that 'sort_field' can be field code like 'author' or MARC tag like '100__a' directly.""" _ = gettext_set_language(ln) ## check arguments: if not sort_field: return recIDs if len(recIDs) > CFG_WEBSEARCH_NB_RECORDS_TO_SORT: if of.startswith('h'): print_warning(req, _("Sorry, sorting is allowed on sets of up to %d records only. 
Using default sort order.") % CFG_WEBSEARCH_NB_RECORDS_TO_SORT, "Warning") return recIDs sort_fields = string.split(sort_field, ",") recIDs_dict = {} recIDs_out = [] ## first deduce sorting MARC tag out of the 'sort_field' argument: tags = [] for sort_field in sort_fields: if sort_field and str(sort_field[0:2]).isdigit(): # sort_field starts by two digits, so this is probably a MARC tag already tags.append(sort_field) else: # let us check the 'field' table query = """SELECT DISTINCT(t.value) FROM tag AS t, field_tag AS ft, field AS f WHERE f.code='%s' AND ft.id_field=f.id AND t.id=ft.id_tag ORDER BY ft.score DESC""" % sort_field res = run_sql(query) if res: for row in res: tags.append(row[0]) else: if of.startswith('h'): print_warning(req, _("Sorry, %s does not seem to be a valid sort option. Choosing title sort instead.") % sort_field, "Error") tags.append("245__a") if verbose >= 3: print_warning(req, "Sorting by tags %s." % tags) if sort_pattern: print_warning(req, "Sorting preferentially by %s." % sort_pattern) ## check if we have sorting tag defined: if tags: # fetch the necessary field values: for recID in recIDs: val = "" # will hold value for recID according to which sort vals = [] # will hold all values found in sorting tag for recID for tag in tags: vals.extend(get_fieldvalues(recID, tag)) if sort_pattern: # try to pick that tag value that corresponds to sort pattern bingo = 0 for v in vals: if v.lower().startswith(sort_pattern.lower()): # bingo! bingo = 1 val = v break if not bingo: # sort_pattern not present, so add other vals after spaces val = sort_pattern + " " + string.join(vals) else: # no sort pattern defined, so join them all together val = string.join(vals) val = strip_accents(val.lower()) # sort values regardless of accents and case if recIDs_dict.has_key(val): recIDs_dict[val].append(recID) else: recIDs_dict[val] = [recID] # sort them: recIDs_dict_keys = recIDs_dict.keys() recIDs_dict_keys.sort() # now that keys are sorted, create output array: for k in recIDs_dict_keys: for s in recIDs_dict[k]: recIDs_out.append(s) # ascending or descending? if sort_order == 'a': recIDs_out.reverse() # okay, we are done return recIDs_out else: # good, no sort needed return recIDs def print_records(req, recIDs, jrec=1, rg=10, format='hb', ot='', ln=cdslang, relevances=[], relevances_prologue="(", relevances_epilogue="%%)", decompress=zlib.decompress, search_pattern='', print_records_prologue_p=True, print_records_epilogue_p=True, verbose=0, tab=''): """ Prints list of records 'recIDs' formatted accoding to 'format' in groups of 'rg' starting from 'jrec'. Assumes that the input list 'recIDs' is sorted in reverse order, so it counts records from tail to head. A value of 'rg=-9999' means to print all records: to be used with care. Print also list of RELEVANCES for each record (if defined), in between RELEVANCE_PROLOGUE and RELEVANCE_EPILOGUE. Print prologue and/or epilogue specific to 'format' if 'print_records_prologue_p' and/or print_records_epilogue_p' are True. 
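    Example of the windowing arithmetic (a sketch): with nb_found=25,
    jrec=11 and rg=10, the code below computes irec_max=14 and irec_min=4,
    so recIDs[14] down to recIDs[5] are printed, i.e. the second page of
    ten records counted from the tail of the reversed list.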
""" # load the right message language _ = gettext_set_language(ln) # sanity checking: if req is None: return # get user id (for formatting based on priviledge) uid = getUid(req) if len(recIDs): nb_found = len(recIDs) if rg == -9999: # print all records rg = nb_found else: rg = abs(rg) if jrec < 1: # sanity checks jrec = 1 if jrec > nb_found: jrec = max(nb_found-rg+1, 1) # will print records from irec_max to irec_min excluded: irec_max = nb_found - jrec irec_min = nb_found - jrec - rg if irec_min < 0: irec_min = -1 if irec_max >= nb_found: irec_max = nb_found - 1 #req.write("%s:%d-%d" % (recIDs, irec_min, irec_max)) if format.startswith('x'): # print header if needed if print_records_prologue_p: print_records_prologue(req, format) # print records recIDs_to_print = [recIDs[x] for x in range(irec_max, irec_min, -1)] format_records(recIDs_to_print, format, ln=ln, search_pattern=search_pattern, record_separator="\n", uid=uid, req=req) # print footer if needed if print_records_epilogue_p: print_records_epilogue(req, format) elif format.startswith('t') or str(format[0:3]).isdigit(): # we are doing plain text output: for irec in range(irec_max, irec_min, -1): x = print_record(recIDs[irec], format, ot, ln, search_pattern=search_pattern, uid=uid, verbose=verbose) req.write(x) if x: req.write('\n') elif format == 'excel': recIDs_to_print = [recIDs[x] for x in range(irec_max, irec_min, -1)] create_excel(recIDs=recIDs_to_print, req=req, ln=ln) else: # we are doing HTML output: if format == 'hp' or format.startswith("hb_") or format.startswith("hd_"): # portfolio and on-the-fly formats: for irec in range(irec_max, irec_min, -1): req.write(print_record(recIDs[irec], format, ot, ln, search_pattern=search_pattern, uid=uid, verbose=verbose)) elif format.startswith("hb"): # HTML brief format: req.write(websearch_templates.tmpl_record_format_htmlbrief_header( ln = ln)) for irec in range(irec_max, irec_min, -1): row_number = jrec+irec_max-irec recid = recIDs[irec] if relevances and relevances[irec]: relevance = relevances[irec] else: relevance = '' record = print_record(recIDs[irec], format, ot, ln, search_pattern=search_pattern, uid=uid, verbose=verbose) req.write(websearch_templates.tmpl_record_format_htmlbrief_body( ln = ln, recid = recid, row_number = row_number, relevance = relevance, record = record, relevances_prologue = relevances_prologue, relevances_epilogue = relevances_epilogue, )) req.write(websearch_templates.tmpl_record_format_htmlbrief_footer( ln = ln)) elif format.startswith("hd"): # HTML detailed format: for irec in range(irec_max, irec_min, -1): unordered_tabs = get_detailed_page_tabs(get_colID(guess_primary_collection_of_a_record(recIDs[irec])), recIDs[irec], ln=ln) ordered_tabs_id = [(tab_id, values['order']) for (tab_id, values) in unordered_tabs.iteritems()] ordered_tabs_id.sort(lambda x,y: cmp(x[1],y[1])) link_ln = '' if ln != cdslang: link_ln = '?ln=%s' % ln tabs = [(unordered_tabs[tab_id]['label'], \ '%s/record/%s/%s%s' % (weburl, recIDs[irec], tab_id, link_ln), \ tab_id == tab, unordered_tabs[tab_id]['enabled']) \ for (tab_id, order) in ordered_tabs_id if unordered_tabs[tab_id]['visible'] == True] content = '' # load content if tab == 'statistics': r = calculate_reading_similarity_list(recIDs[irec], "downloads") downloadsimilarity = None downloadhistory = None #if r: # downloadsimilarity = r if CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS: downloadhistory = create_download_history_graph_and_box(recIDs[irec], ln) r = calculate_reading_similarity_list(recIDs[irec], "pageviews") viewsimilarity = 
None if r: viewsimilarity = r content = websearch_templates.tmpl_detailed_record_statistics(recIDs[irec], ln, downloadsimilarity=downloadsimilarity, downloadhistory=downloadhistory, viewsimilarity=viewsimilarity) req.write(webstyle_templates.detailed_record_container(content, recIDs[irec], tabs, ln)) elif tab == 'citations': citinglist = None citationhistory = None recid = recIDs[irec] selfcited = get_self_cited_by(recid) r = calculate_cited_by_list(recid) if r: citinglist = r citationhistory = create_citation_history_graph_and_box(recid, ln) r = calculate_co_cited_with_list(recid) cociting = None if r: cociting = r content = websearch_templates.tmpl_detailed_record_citations(recid, ln, citinglist=citinglist, citationhistory=citationhistory, cociting=cociting, selfcited=selfcited) req.write(webstyle_templates.detailed_record_container(content, recid, tabs, ln)) elif tab == 'references': content = format_record(recIDs[irec], 'HDREF', ln=ln, uid=uid, verbose=verbose) req.write(webstyle_templates.detailed_record_container(content, recIDs[irec], tabs, ln)) else: # Metadata tab content = print_record(recIDs[irec], format, ot, ln, search_pattern=search_pattern, uid=uid, verbose=verbose) creationdate = None modifydate = None if record_exists(recIDs[irec]) == 1: creationdate = get_creation_date(recIDs[irec]) modifydate = get_modification_date(recIDs[irec]) content = websearch_templates.tmpl_detailed_record_metadata( recID = recIDs[irec], ln = ln, format = format, creationdate = creationdate, modifydate = modifydate, content = content) req.write(webstyle_templates.detailed_record_container(content, recIDs[irec], tabs, ln=ln, creationdate=creationdate, modifydate=modifydate, show_notice_p=False)) if len(tabs) > 0: # Add the mini box at bottom of the page if CFG_WEBCOMMENT_ALLOW_REVIEWS: from invenio.webcomment import get_mini_reviews reviews = get_mini_reviews(recid = recIDs[irec], ln=ln) else: reviews = '' actions = format_record(recIDs[irec], 'HDACT', ln=ln, uid=uid, verbose=verbose) files = format_record(recIDs[irec], 'HDFILE', ln=ln, uid=uid, verbose=verbose) req.write(webstyle_templates.detailed_record_mini_panel(recIDs[irec], ln, format, files=files, reviews=reviews, actions=actions)) else: # Other formats for irec in range(irec_max, irec_min, -1): req.write(print_record(recIDs[irec], format, ot, ln, search_pattern=search_pattern, uid=uid, verbose=verbose)) else: print_warning(req, _("Use different search terms.")) def print_records_prologue(req, format): """ Print the appropriate prologue for list of records in the given format. """ prologue = "" # no prologue needed for HTML or Text formats if format.startswith('xm'): prologue = websearch_templates.tmpl_xml_marc_prologue() elif format.startswith('xn'): prologue = websearch_templates.tmpl_xml_nlm_prologue() elif format.startswith('xr'): prologue = websearch_templates.tmpl_xml_rss_prologue() elif format.startswith('x'): prologue = websearch_templates.tmpl_xml_default_prologue() req.write(prologue) def print_records_epilogue(req, format): """ Print the appropriate epilogue for list of records in the given format. 
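For illustration, a hedged sketch of the intended prologue/epilogue pairing for XML outputs, as used by perform_request_search below (names from this file; 'results' is a hypothetical per-collection dict; the per-call wrappers are disabled so the pair wraps all collections once):

    print_records_prologue(req, 'xm')   # e.g. the MARCXML collection opening
    for coll in colls_to_search:
        print_records(req, results[coll], format='xm', print_records_prologue_p=False, print_records_epilogue_p=False)
    print_records_epilogue(req, 'xm')   # the matching closing tag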
""" epilogue = "" # no epilogue needed for HTML or Text formats if format.startswith('xm'): epilogue = websearch_templates.tmpl_xml_marc_epilogue() elif format.startswith('xn'): epilogue = websearch_templates.tmpl_xml_nlm_epilogue() elif format.startswith('xr'): epilogue = websearch_templates.tmpl_xml_rss_epilogue() elif format.startswith('x'): epilogue = websearch_templates.tmpl_xml_default_epilogue() req.write(epilogue) def print_record(recID, format='hb', ot='', ln=cdslang, decompress=zlib.decompress, search_pattern=None, uid=None, verbose=0): """Prints record 'recID' formatted accoding to 'format'.""" _ = gettext_set_language(ln) out = "" # sanity check: record_exist_p = record_exists(recID) if record_exist_p == 0: # doesn't exist return out # New Python BibFormat procedure for formatting # Old procedure follows further below # We must still check some special formats, but these # should disappear when BibFormat improves. if not (CFG_BIBFORMAT_USE_OLD_BIBFORMAT \ or format.lower().startswith('t') \ or format.lower().startswith('hm') \ or str(format[0:3]).isdigit() \ or ot): # Unspecified format is hd if format == '': format = 'hd' if record_exist_p == -1 and get_output_format_content_type(format) == 'text/html': # HTML output displays a default value for deleted records. # Other format have to deal with it. out += _("The record has been deleted.") else: out += call_bibformat(recID, format, ln, search_pattern=search_pattern, uid=uid, verbose=verbose) # at the end of HTML brief mode, print the "Detailed record" functionality: if format.lower().startswith('hb') and \ format.lower() != 'hb_p': out += websearch_templates.tmpl_print_record_brief_links( ln = ln, recID = recID, weburl = weburl ) return out # Old PHP BibFormat procedure for formatting # print record opening tags, if needed: if format == "marcxml" or format == "oai_dc": out += " \n" out += "

\n" for oai_id in get_fieldvalues(recID, CFG_OAI_ID_FIELD): out += " %s\n" % oai_id out += " %s\n" % get_modification_date(recID) out += "
\n" out += " \n" if format.startswith("xm") or format == "marcxml": # look for detailed format existence: query = "SELECT value FROM bibfmt WHERE id_bibrec='%s' AND format='%s'" % (recID, format) res = run_sql(query, None, 1) if res and record_exist_p == 1: # record 'recID' is formatted in 'format', so print it out += "%s" % decompress(res[0][0]) else: # record 'recID' is not formatted in 'format' -- they are not in "bibfmt" table; so fetch all the data from "bibXXx" tables: if format == "marcxml": out += """ \n""" out += " %d\n" % int(recID) elif format.startswith("xm"): out += """ \n""" out += " %d\n" % int(recID) if record_exist_p == -1: # deleted record, so display only OAI ID and 980: oai_ids = get_fieldvalues(recID, CFG_OAI_ID_FIELD) if oai_ids: out += "%s\n" % \ (CFG_OAI_ID_FIELD[0:3], CFG_OAI_ID_FIELD[3:4], CFG_OAI_ID_FIELD[4:5], CFG_OAI_ID_FIELD[5:6], oai_ids[0]) out += "DELETED\n" else: # controlfields query = "SELECT b.tag,b.value,bb.field_number FROM bib00x AS b, bibrec_bib00x AS bb "\ "WHERE bb.id_bibrec='%s' AND b.id=bb.id_bibxxx AND b.tag LIKE '00%%' "\ "ORDER BY bb.field_number, b.tag ASC" % recID res = run_sql(query) for row in res: field, value = row[0], row[1] value = encode_for_xml(value) out += """ %s\n""" % \ (encode_for_xml(field[0:3]), value) # datafields i = 1 # Do not process bib00x and bibrec_bib00x, as # they are controlfields. So start at bib01x and # bibrec_bib00x (and set i = 0 at the end of # first loop) for digit1 in range(0, 10): for digit2 in range(i, 10): bx = "bib%d%dx" % (digit1, digit2) bibx = "bibrec_bib%d%dx" % (digit1, digit2) query = "SELECT b.tag,b.value,bb.field_number FROM %s AS b, %s AS bb "\ "WHERE bb.id_bibrec='%s' AND b.id=bb.id_bibxxx AND b.tag LIKE '%s%%' "\ "ORDER BY bb.field_number, b.tag ASC" % (bx, bibx, recID, str(digit1)+str(digit2)) res = run_sql(query) field_number_old = -999 field_old = "" for row in res: field, value, field_number = row[0], row[1], row[2] ind1, ind2 = field[3], field[4] if ind1 == "_" or ind1 == "": ind1 = " " if ind2 == "_" or ind2 == "": ind2 = " " # print field tag if field_number != field_number_old or field[:-1] != field_old[:-1]: if field_number_old != -999: out += """ \n""" out += """ \n""" % \ (encode_for_xml(field[0:3]), encode_for_xml(ind1), encode_for_xml(ind2)) field_number_old = field_number field_old = field # print subfield value value = encode_for_xml(value) out += """ %s\n""" % \ (encode_for_xml(field[-1:]), value) # all fields/subfields printed in this run, so close the tag: if field_number_old != -999: out += """ \n""" i = 0 # Next loop should start looking at bib%0 and bibrec_bib00x # we are at the end of printing the record: out += " \n" elif format == "xd" or format == "oai_dc": # XML Dublin Core format, possibly OAI -- select only some bibXXx fields: out += """ \n""" if record_exist_p == -1: out += "" else: for f in get_fieldvalues(recID, "041__a"): out += " %s\n" % f for f in get_fieldvalues(recID, "100__a"): out += " %s\n" % encode_for_xml(f) for f in get_fieldvalues(recID, "700__a"): out += " %s\n" % encode_for_xml(f) for f in get_fieldvalues(recID, "245__a"): out += " %s\n" % encode_for_xml(f) for f in get_fieldvalues(recID, "65017a"): out += " %s\n" % encode_for_xml(f) for f in get_fieldvalues(recID, "8564_u"): out += " %s\n" % encode_for_xml(f) for f in get_fieldvalues(recID, "520__a"): out += " %s\n" % encode_for_xml(f) out += " %s\n" % get_creation_date(recID) out += " \n" elif str(format[0:3]).isdigit(): # user has asked to print some fields only if format == "001": out += "%s\n" 
% (format, recID, format) else: vals = get_fieldvalues(recID, format) for val in vals: out += "%s\n" % (format, val, format) elif format.startswith('t'): ## user directly asked for some tags to be displayed only if record_exist_p == -1: out += get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"]) else: out += get_fieldvalues_alephseq_like(recID, ot) elif format == "hm": if record_exist_p == -1: out += "
" + cgi.escape(get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"])) + "
" else: out += "
" + cgi.escape(get_fieldvalues_alephseq_like(recID, ot)) + "
" elif format.startswith("h") and ot: ## user directly asked for some tags to be displayed only if record_exist_p == -1: out += "
" + get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"]) + "
" else: out += "
" + get_fieldvalues_alephseq_like(recID, ot) + "
" elif format == "hd": # HTML detailed format if record_exist_p == -1: out += _("The record has been deleted.") else: # look for detailed format existence: query = "SELECT value FROM bibfmt WHERE id_bibrec='%s' AND format='%s'" % (recID, format) res = run_sql(query, None, 1) if res: # record 'recID' is formatted in 'format', so print it out += "%s" % decompress(res[0][0]) else: # record 'recID' is not formatted in 'format', so try to call BibFormat on the fly or use default format: out_record_in_format = call_bibformat(recID, format, ln, search_pattern=search_pattern, uid=uid, verbose=verbose) if out_record_in_format: out += out_record_in_format else: out += websearch_templates.tmpl_print_record_detailed( ln = ln, recID = recID, weburl = weburl, ) elif format.startswith("hb_") or format.startswith("hd_"): # underscore means that HTML brief/detailed formats should be called on-the-fly; suitable for testing formats if record_exist_p == -1: out += _("The record has been deleted.") else: out += call_bibformat(recID, format, ln, search_pattern=search_pattern, uid=uid, verbose=verbose) elif format.startswith("hx"): # BibTeX format, called on the fly: if record_exist_p == -1: out += _("The record has been deleted.") else: out += call_bibformat(recID, format, ln, search_pattern=search_pattern, uid=uid, verbose=verbose) elif format.startswith("hs"): # for citation/download similarity navigation links: if record_exist_p == -1: out += _("The record has been deleted.") else: out += '' % websearch_templates.build_search_url(recid=recID, ln=ln) # firstly, title: titles = get_fieldvalues(recID, "245__a") if titles: for title in titles: out += "%s" % title else: # usual title not found, try conference title: titles = get_fieldvalues(recID, "111__a") if titles: for title in titles: out += "%s" % title else: # just print record ID: out += "%s %d" % (get_field_i18nname("record ID", ln), recID) out += "" # secondly, authors: authors = get_fieldvalues(recID, "100__a") + get_fieldvalues(recID, "700__a") if authors: out += " - %s" % authors[0] if len(authors) > 1: out += " et al" # thirdly publication info: publinfos = get_fieldvalues(recID, "773__s") if not publinfos: publinfos = get_fieldvalues(recID, "909C4s") if not publinfos: publinfos = get_fieldvalues(recID, "037__a") if not publinfos: publinfos = get_fieldvalues(recID, "088__a") if publinfos: out += " - %s" % publinfos[0] else: # fourthly publication year (if not publication info): years = get_fieldvalues(recID, "773__y") if not years: years = get_fieldvalues(recID, "909C4y") if not years: years = get_fieldvalues(recID, "260__c") if years: out += " (%s)" % years[0] else: # HTML brief format by default if record_exist_p == -1: out += _("The record has been deleted.") else: query = "SELECT value FROM bibfmt WHERE id_bibrec='%s' AND format='%s'" % (recID, format) res = run_sql(query) if res: # record 'recID' is formatted in 'format', so print it out += "%s" % decompress(res[0][0]) else: # record 'recID' is not formatted in 'format', so try to call BibFormat on the fly: or use default format: if CFG_WEBSEARCH_CALL_BIBFORMAT: out_record_in_format = call_bibformat(recID, format, ln, search_pattern=search_pattern, uid=uid, verbose=verbose) if out_record_in_format: out += out_record_in_format else: out += websearch_templates.tmpl_print_record_brief( ln = ln, recID = recID, weburl = weburl, ) else: out += websearch_templates.tmpl_print_record_brief( ln = ln, recID = recID, weburl = weburl, ) # at the end of HTML brief mode, print the "Detailed record" 
functionality: if format == 'hp' or format.startswith("hb_") or format.startswith("hd_"): pass # do nothing for portfolio and on-the-fly formats else: out += websearch_templates.tmpl_print_record_brief_links( ln = ln, recID = recID, weburl = weburl, ) # print record closing tags, if needed: if format == "marcxml" or format == "oai_dc": out += "
\n" out += " \n" return out def encode_for_xml(s): "Encode special chars in string so that it would be XML-compliant." s = string.replace(s, '&', '&') s = string.replace(s, '<', '<') return s def call_bibformat(recID, format="HD", ln=cdslang, search_pattern=None, uid=None, verbose=0): """ Calls BibFormat and returns formatted record. BibFormat will decide by itself if old or new BibFormat must be used. """ keywords = [] if search_pattern is not None: units = create_basic_search_units(None, str(search_pattern), None) keywords = [unit[1] for unit in units if unit[0] != '-'] return format_record(recID, of=format, ln=ln, search_pattern=keywords, uid=uid, verbose=verbose) def log_query(hostname, query_args, uid=-1): """ Log query into the query and user_query tables. Return id_query or None in case of problems. """ id_query = None if uid > 0: # log the query only if uid is reasonable res = run_sql("SELECT id FROM query WHERE urlargs=%s", (query_args,), 1) try: id_query = res[0][0] except: id_query = run_sql("INSERT INTO query (type, urlargs) VALUES ('r', %s)", (query_args,)) if id_query: run_sql("INSERT INTO user_query (id_user, id_query, hostname, date) VALUES (%s, %s, %s, %s)", (uid, id_query, hostname, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))) return id_query def log_query_info(action, p, f, colls, nb_records_found_total=-1): """Write some info to the log file for later analysis.""" try: log = open(logdir + "/search.log", "a") log.write(time.strftime("%Y%m%d%H%M%S#", time.localtime())) log.write(action+"#") log.write(p+"#") log.write(f+"#") for coll in colls[:-1]: log.write("%s," % coll) log.write("%s#" % colls[-1]) log.write("%d" % nb_records_found_total) log.write("\n") log.close() except: pass return def wash_url_argument(var, new_type): """Wash list argument into 'new_type', that can be 'list', 'str', or 'int'. Useful for washing mod_python passed arguments, that are all lists of strings (URL args may be multiple), but we sometimes want only to take the first value, and sometimes to represent it as string or numerical value.""" out = [] if new_type == 'list': # return lst if type(var) is list: out = var else: out = [var] elif new_type == 'str': # return str if type(var) is list: try: out = "%s" % var[0] except: out = "" elif type(var) is str: out = var else: out = "%s" % var elif new_type == 'int': # return int if type(var) is list: try: out = string.atoi(var[0]) except: out = 0 elif type(var) is int: out = var elif type(var) is str: try: out = string.atoi(var) except: out = 0 else: out = 0 return out ### CALLABLES def perform_request_search(req=None, cc=cdsname, c=None, p="", f="", rg=10, sf="", so="d", sp="", rm="", of="id", ot="", as=0, p1="", f1="", m1="", op1="", p2="", f2="", m2="", op2="", p3="", f3="", m3="", sc=0, jrec=0, recid=-1, recidb=-1, sysno="", id=-1, idb=-1, sysnb="", action="", d1="", d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0, dt="", verbose=0, ap=0, ln=cdslang, ec=None, tab=""): """Perform search or browse request, without checking for authentication. Return list of recIDs found, if of=id. Otherwise create web page. The arguments are as follows: req - mod_python Request class instance. cc - current collection (e.g. "ATLAS"). The collection the user started to search/browse from. c - collection list (e.g. ["Theses", "Books"]). The collections user may have selected/deselected when starting to search from 'cc'. p - pattern to search for (e.g. "ellis and muon or kaon"). f - field to search within (e.g. "author"). rg - records in groups of (e.g. "10"). 
sf - sort field (e.g. "title"). so - sort order ("a"=ascending, "d"=descending). sp - sort pattern (e.g. "CERN-") -- in case there are more values in a sort field, this argument tells which one to prefer. rm - ranking method (e.g. "jif"). Defines whether results should be ranked by some known ranking method. of - output format (e.g. "hb"). Usually starting "h" means HTML output (and "hb" for HTML brief, "hd" for HTML detailed), "x" means XML output, "t" means plain text output, "id" means no output at all but to return list of recIDs found. (Suitable for high-level API.) ot - output only these MARC tags (e.g. "100,700,909C0b"). Useful if only some fields are to be shown in the output, e.g. for the library to control some fields. as - advanced search ("0" means no, "1" means yes). Whether search was called from within the advanced search interface. p1 - first pattern to search for in the advanced search interface. Much like 'p'. f1 - first field to search within in the advanced search interface. Much like 'f'. m1 - first matching type in the advanced search interface. ("a" all of the words, "o" any of the words, "e" exact phrase, "p" partial phrase, "r" regular expression). op1 - first operator, to join the first and the second unit in the advanced search interface. ("a" add, "o" or, "n" not). p2 - second pattern to search for in the advanced search interface. Much like 'p'. f2 - second field to search within in the advanced search interface. Much like 'f'. m2 - second matching type in the advanced search interface. ("a" all of the words, "o" any of the words, "e" exact phrase, "p" partial phrase, "r" regular expression). op2 - second operator, to join the second and the third unit in the advanced search interface. ("a" add, "o" or, "n" not). p3 - third pattern to search for in the advanced search interface. Much like 'p'. f3 - third field to search within in the advanced search interface. Much like 'f'. m3 - third matching type in the advanced search interface. ("a" all of the words, "o" any of the words, "e" exact phrase, "p" partial phrase, "r" regular expression). sc - split by collection ("0" no, "1" yes). Governs whether we want to present the results in a single huge list, or split by collection. jrec - jump to record (e.g. "234"). Used for navigation inside the search results. recid - display record ID (e.g. "20000"). Do not search/browse but go straight away to the Detailed record page for the given recID. recidb - display record ID bis (e.g. "20010"). If greater than 'recid', then display records from recid to recidb. Useful for example for dumping records from the database for reformatting. sysno - display old system SYS number (e.g. ""). If you migrate to CDS Invenio from another system, and store your old SYS call numbers, you can use them instead of recid if you wish. id - the same as recid, in case recid is not set. For backwards compatibility. idb - the same as recidb, in case recidb is not set. For backwards compatibility. sysnb - the same as sysno, in case sysno is not set. For backwards compatibility. action - action to do. "SEARCH" for searching, "Browse" for browsing. Default is to search. d1 - first datetime in full YYYY-mm-dd HH:MM:SS format (e.g. "1998-08-23 12:34:56"). Useful for search limits on creation/modification date (see 'dt' argument below). Note that 'd1' takes precedence over d1y, d1m, d1d if these are defined. d1y - first date's year (e.g. "1998"). Useful for search limits on creation/modification date. d1m - first date's month (e.g. "08"). Useful for search limits on creation/modification date. d1d - first date's day (e.g. "23"). Useful for search limits on creation/modification date. d2 - second datetime in full YYYY-mm-dd HH:MM:SS format (e.g. "1998-09-02 12:34:56"). Useful for search limits on creation/modification date (see 'dt' argument below). Note that 'd2' takes precedence over d2y, d2m, d2d if these are defined. d2y - second date's year (e.g. "1998"). Useful for search limits on creation/modification date. d2m - second date's month (e.g. "09"). Useful for search limits on creation/modification date. d2d - second date's day (e.g. "02"). Useful for search limits on creation/modification date. dt - first and second date's type (e.g. "c"). Specifies whether to search in creation dates ("c") or in modification dates ("m"). When dt is not set and d1* and d2* are set, the default is "c". verbose - verbose level (0=min, 9=max). Useful to print some internal information on the searching process in case something goes wrong. ap - alternative patterns (0=no, 1=yes). In case no exact match is found, the search engine can try alternative patterns, e.g. to replace non-alphanumeric characters with a boolean query. 'ap' defines whether this is wanted. ln - language of the search interface (e.g. "en"). Useful for internationalization. ec - list of external search engines to search as well (e.g. "SPIRES HEP"). """ selected_external_collections_infos = None # wash all arguments requiring special care try: (cc, colls_to_display, colls_to_search) = wash_colls(cc, c, sc) # which colls to search and to display? except InvenioWebSearchUnknownCollectionError, exc: colname = exc.colname if of.startswith("h"): page_start(req, of, cc, as, ln, getUid(req), websearch_templates.tmpl_collection_not_found_page_title(colname, ln)) req.write(websearch_templates.tmpl_collection_not_found_page_body(colname, ln)) return page_end(req, of, ln) elif of == "id": return [] + elif of.startswith("x"): + # Print empty, but valid XML + print_records_prologue(req, of) + print_records_epilogue(req, of) else: return page_end(req, of, ln) p = wash_pattern(p) f = wash_field(f) p1 = wash_pattern(p1) f1 = wash_field(f1) p2 = wash_pattern(p2) f2 = wash_field(f2) p3 = wash_pattern(p3) f3 = wash_field(f3) datetext1, datetext2 = wash_dates(d1, d1y, d1m, d1d, d2, d2y, d2m, d2d) _ = gettext_set_language(ln) # backwards compatibility: id, idb, sysnb -> recid, recidb, sysno (if applicable) if sysnb != "" and sysno == "": sysno = sysnb if id > 0 and recid == -1: recid = id if idb > 0 and recidb == -1: recidb = idb # TODO deduce passed search limiting criteria (if applicable) pl, pl_in_url = "", "" # no limits by default if action != "browse" and req and req.args: # we do not want to add options while browsing or while calling via command-line fieldargs = cgi.parse_qs(req.args) for fieldcode in get_fieldcodes(): if fieldargs.has_key(fieldcode): for val in fieldargs[fieldcode]: pl += "+%s:\"%s\" " % (fieldcode, val) pl_in_url += "&%s=%s" % (urllib.quote(fieldcode), urllib.quote(val)) # deduce recid from sysno argument (if applicable): if sysno: # ALEPH SYS number was passed, so deduce DB recID for the record: recid = get_mysql_recid_from_aleph_sysno(sysno) # deduce collection we are in (if applicable): if recid > 0: cc = guess_primary_collection_of_a_record(recid) # deduce user id (if applicable): try: uid = getUid(req) except: uid = 0 ## 0 - start output if recid > 0: ## 1 - detailed record
display title, description, keywords = \ websearch_templates.tmpl_record_page_header_content(req, recid, ln) page_start(req, of, cc, as, ln, uid, title, description, keywords, recid, tab) # Default format is hb but we are in detailed -> change 'of' if of == "hb": of = "hd" if record_exists(recid): if recidb <= recid: # sanity check recidb = recid + 1 if of == "id": return [recidx for recidx in range(recid, recidb) if record_exists(recidx)] else: print_records(req, range(recid, recidb), -1, -9999, of, ot, ln, search_pattern=p, verbose=verbose, tab=tab) if req and of.startswith("h"): # register detailed record page view event client_ip_address = str(req.get_remote_host(apache.REMOTE_NOLOOKUP)) register_page_view_event(recid, uid, client_ip_address) else: # record does not exist if of == "id": return [] + elif of.startswith("x"): + # Print empty, but valid XML + print_records_prologue(req, of) + print_records_epilogue(req, of) elif of.startswith("h"): print_warning(req, "Requested record does not seem to exist.") + elif action == "browse": ## 2 - browse needed page_start(req, of, cc, as, ln, uid, _("Browse")) if of.startswith("h"): req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, as, ln, p1, f1, m1, op1, p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action)) try: if as == 1 or (p1 or p2 or p3): browse_pattern(req, colls_to_search, p1, f1, rg) browse_pattern(req, colls_to_search, p2, f2, rg) browse_pattern(req, colls_to_search, p3, f3, rg) else: browse_pattern(req, colls_to_search, p, f, rg) except: if of.startswith("h"): req.write(create_error_box(req, verbose=verbose, ln=ln)) + elif of.startswith("x"): + # Print empty, but valid XML + print_records_prologue(req, of) + print_records_epilogue(req, of) return page_end(req, of, ln) elif rm and p.startswith("recid:"): ## 3-ter - similarity search needed page_start(req, of, cc, as, ln, uid, _("Search Results")) if of.startswith("h"): req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, as, ln, p1, f1, m1, op1, p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action)) if record_exists(p[6:]) != 1: # record does not exist if of.startswith("h"): print_warning(req, "Requested record does not seem to exist.") if of == "id": return [] + elif of.startswith("x"): + # Print empty, but valid XML + print_records_prologue(req, of) + print_records_epilogue(req, of) else: # record well exists, so find similar ones to it t1 = os.times()[4] results_similar_recIDs, results_similar_relevances, results_similar_relevances_prologue, results_similar_relevances_epilogue, results_similar_comments = \ rank_records(rm, 0, get_collection_reclist(cdsname), string.split(p), verbose) if results_similar_recIDs: t2 = os.times()[4] cpu_time = t2 - t1 if of.startswith("h"): req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, cdsname, len(results_similar_recIDs), jrec, rg, as, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2, sc, pl_in_url, d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time)) print_warning(req, results_similar_comments) print_records(req, results_similar_recIDs, jrec, rg, of, ot, ln, results_similar_relevances, results_similar_relevances_prologue, results_similar_relevances_epilogue, search_pattern=p, verbose=verbose) elif of=="id": return results_similar_recIDs + elif of.startswith("x"): + print_records(req, results_similar_recIDs, jrec, rg, of, ot, ln, + results_similar_relevances, results_similar_relevances_prologue, 
results_similar_relevances_epilogue, search_pattern=p, verbose=verbose) else: # rank_records failed and returned some error message to display: if of.startswith("h"): print_warning(req, results_similar_relevances_prologue) print_warning(req, results_similar_relevances_epilogue) print_warning(req, results_similar_comments) if of == "id": return [] + elif of.startswith("x"): + # Print empty, but valid XML + print_records_prologue(req, of) + print_records_epilogue(req, of) elif p.startswith("cocitedwith:"): #WAS EXPERIMENTAL ## 3-terter - cited by search needed page_start(req, of, cc, as, ln, uid, _("Search Results")) if of.startswith("h"): req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, as, ln, p1, f1, m1, op1, p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action)) recID = p[12:] if record_exists(recID) != 1: # record does not exist if of.startswith("h"): print_warning(req, "Requested record does not seem to exist.") if of == "id": return [] + elif of.startswith("x"): + # Print empty, but valid XML + print_records_prologue(req, of) + print_records_epilogue(req, of) else: # record well exists, so find co-cited ones: t1 = os.times()[4] results_cocited_recIDs = map(lambda x: x[0], calculate_co_cited_with_list(int(recID))) if results_cocited_recIDs: t2 = os.times()[4] cpu_time = t2 - t1 if of.startswith("h"): req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, cdsname, len(results_cocited_recIDs), jrec, rg, as, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2, sc, pl_in_url, d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time)) print_records(req, results_cocited_recIDs, jrec, rg, of, ot, ln, search_pattern=p, verbose=verbose) elif of=="id": return results_cocited_recIDs + elif of.startswith("x"): + print_records(req, results_cocited_recIDs, jrec, rg, of, ot, ln, search_pattern=p, verbose=verbose) + else: # cited rank_records failed and returned some error message to display: if of.startswith("h"): print_warning(req, "nothing found") if of == "id": return [] + elif of.startswith("x"): + # Print empty, but valid XML + print_records_prologue(req, of) + print_records_epilogue(req, of) else: ## 3 - common search needed page_start(req, of, cc, as, ln, uid, _("Search Results")) if of.startswith("h"): req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, as, ln, p1, f1, m1, op1, p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action)) t1 = os.times()[4] results_in_any_collection = HitSet() if as == 1 or (p1 or p2 or p3): ## 3A - advanced search try: results_in_any_collection = search_pattern(req, p1, f1, m1, ap=ap, of=of, verbose=verbose, ln=ln) if len(results_in_any_collection) == 0: if of.startswith("h"): perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos) + elif of.startswith("x"): + # Print empty, but valid XML + print_records_prologue(req, of) + print_records_epilogue(req, of) return page_end(req, of, ln) if p2: results_tmp = search_pattern(req, p2, f2, m2, ap=ap, of=of, verbose=verbose, ln=ln) if op1 == "a": # add results_in_any_collection.intersection_update(results_tmp) elif op1 == "o": # or results_in_any_collection.union_update(results_tmp) elif op1 == "n": # not results_in_any_collection.difference_update(results_tmp) else: if of.startswith("h"): print_warning(req, "Invalid set operation %s." 
% op1, "Error") if len(results_in_any_collection) == 0: if of.startswith("h"): perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos) + elif of.startswith("x"): + # Print empty, but valid XML + print_records_prologue(req, of) + print_records_epilogue(req, of) return page_end(req, of, ln) if p3: results_tmp = search_pattern(req, p3, f3, m3, ap=ap, of=of, verbose=verbose, ln=ln) if op2 == "a": # add results_in_any_collection.intersection_update(results_tmp) elif op2 == "o": # or results_in_any_collection.union_update(results_tmp) elif op2 == "n": # not results_in_any_collection.difference_update(results_tmp) else: if of.startswith("h"): print_warning(req, "Invalid set operation %s." % op2, "Error") except: if of.startswith("h"): req.write(create_error_box(req, verbose=verbose, ln=ln)) perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos) + elif of.startswith("x"): + # Print empty, but valid XML + print_records_prologue(req, of) + print_records_epilogue(req, of) + return page_end(req, of, ln) else: ## 3B - simple search try: results_in_any_collection = search_pattern(req, p, f, ap=ap, of=of, verbose=verbose, ln=ln) except: if of.startswith("h"): req.write(create_error_box(req, verbose=verbose, ln=ln)) perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos) return page_end(req, of, ln) if len(results_in_any_collection) == 0: if of.startswith("h"): perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos) + elif of.startswith("x"): + # Print empty, but valid XML + print_records_prologue(req, of) + print_records_epilogue(req, of) return page_end(req, of, ln) # search_cache_key = p+"@"+f+"@"+string.join(colls_to_search,",") # if search_cache.has_key(search_cache_key): # is the result in search cache? # results_final = search_cache[search_cache_key] # else: # results_final = search_pattern(req, p, f, colls_to_search) # search_cache[search_cache_key] = results_final # if len(search_cache) > CFG_WEBSEARCH_SEARCH_CACHE_SIZE: # is the cache full? (sanity cleaning) # search_cache.clear() # search stage 4: intersection with collection universe: try: results_final = intersect_results_with_collrecs(req, results_in_any_collection, colls_to_search, ap, of, verbose, ln) except: if of.startswith("h"): req.write(create_error_box(req, verbose=verbose, ln=ln)) perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos) return page_end(req, of, ln) if results_final == {}: if of.startswith("h"): perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos) + if of.startswith("x"): + # Print empty, but valid XML + print_records_prologue(req, of) + print_records_epilogue(req, of) return page_end(req, of, ln) # search stage 5: apply search option limits and restrictions: if datetext1 != "": if verbose and of.startswith("h"): print_warning(req, "Search stage 5: applying time limits, from %s until %s..." 
% (datetext1, datetext2)) try: results_final = intersect_results_with_hitset(req, results_final, search_unit_in_bibrec(datetext1, datetext2, dt), ap, aptext= _("No match within your time limits, " "discarding this condition..."), of=of) except: if of.startswith("h"): req.write(create_error_box(req, verbose=verbose, ln=ln)) perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos) return page_end(req, of, ln) if results_final == {}: if of.startswith("h"): perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos) return page_end(req, of, ln)
if pl: pl = wash_pattern(pl) if verbose and of.startswith("h"): print_warning(req, "Search stage 5: applying search pattern limit %s..." % (pl,)) try: results_final = intersect_results_with_hitset(req, results_final, search_pattern(req, pl, ap=0, ln=ln), ap, aptext=_("No match within your search limits, " "discarding this condition..."), of=of) except: if of.startswith("h"): req.write(create_error_box(req, verbose=verbose, ln=ln)) perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos) return page_end(req, of, ln) if results_final == {}: if of.startswith("h"): perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos) + if of.startswith("x"): + # Print empty, but valid XML + print_records_prologue(req, of) + print_records_epilogue(req, of) return page_end(req, of, ln)
t2 = os.times()[4] cpu_time = t2 - t1 ## search stage 6: display results: results_final_nb_total = 0 results_final_nb = {} # will hold number of records found in each collection # (in simple dict to display overview more easily) for coll in results_final.keys(): results_final_nb[coll] = len(results_final[coll]) #results_final_nb_total += results_final_nb[coll] # Now let us calculate results_final_nb_total more precisely, # in order to get the total number of "distinct" hits across # searched collections; this is useful because a record might # have been attributed to more than one primary collection; so # we have to avoid counting it multiple times. The price to # pay for this accuracy of results_final_nb_total is somewhat # increased CPU time. if len(results_final.keys()) == 1: # only one collection; no need to union them results_final_for_all_selected_colls = results_final.values()[0] results_final_nb_total = results_final_nb.values()[0] else: # okay, some work ahead to union hits across collections: results_final_for_all_selected_colls = HitSet() for coll in results_final.keys(): results_final_for_all_selected_colls.union_update(results_final[coll]) results_final_nb_total = len(results_final_for_all_selected_colls) if results_final_nb_total == 0: if of.startswith('h'): print_warning(req, "No match found, please enter different search terms.") + elif of.startswith("x"): + # Print empty, but valid XML + print_records_prologue(req, of) + print_records_epilogue(req, of) else: # yes, some hits found: good! # collection list may have changed due to not-exact-match-found policy so check it out: for coll in results_final.keys(): if coll not in colls_to_search: colls_to_search.append(coll) # print results overview: if of == "id": # we have been asked to return list of recIDs recIDs = list(results_final_for_all_selected_colls) if sf: # do we have to sort? recIDs = sort_records(req, recIDs, sf, so, sp, verbose, of) elif rm: # do we have to rank?
results_final_for_all_colls_rank_records_output = rank_records(rm, 0, results_final_for_all_selected_colls, string.split(p) + string.split(p1) + string.split(p2) + string.split(p3), verbose) if results_final_for_all_colls_rank_records_output[0]: recIDs = results_final_for_all_colls_rank_records_output[0] return recIDs elif of.startswith("h"): req.write(print_results_overview(req, colls_to_search, results_final_nb_total, results_final_nb, cpu_time, ln, ec)) selected_external_collections_infos = print_external_results_overview(req, cc, [p, p1, p2, p3], f, ec, verbose, ln) # print number of hits found for XML outputs: if of.startswith("x"): req.write("\n" % results_final_nb_total) # print records: if len(colls_to_search)>1: cpu_time = -1 # we do not want to have search time printed on each collection - print_records_prologue(req, of) for coll in colls_to_search: if results_final.has_key(coll) and len(results_final[coll]): if of.startswith("h"): req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, coll, results_final_nb[coll], jrec, rg, as, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2, sc, pl_in_url, d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time)) results_final_recIDs = list(results_final[coll]) results_final_relevances = [] results_final_relevances_prologue = "" results_final_relevances_epilogue = "" if sf: # do we have to sort? results_final_recIDs = sort_records(req, results_final_recIDs, sf, so, sp, verbose, of) elif rm: # do we have to rank? results_final_recIDs_ranked, results_final_relevances, results_final_relevances_prologue, results_final_relevances_epilogue, results_final_comments = \ rank_records(rm, 0, results_final[coll], string.split(p) + string.split(p1) + string.split(p2) + string.split(p3), verbose) if of.startswith("h"): print_warning(req, results_final_comments) if results_final_recIDs_ranked: results_final_recIDs = results_final_recIDs_ranked else: # rank_records failed and returned some error message to display: print_warning(req, results_final_relevances_prologue) print_warning(req, results_final_relevances_epilogue) print_records(req, results_final_recIDs, jrec, rg, of, ot, ln, results_final_relevances, results_final_relevances_prologue, results_final_relevances_epilogue, search_pattern=p, print_records_prologue_p=False, print_records_epilogue_p=False, verbose=verbose) if of.startswith("h"): req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, coll, results_final_nb[coll], jrec, rg, as, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2, sc, pl_in_url, d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1)) print_records_epilogue(req, of) if f == "author" and of.startswith("h"): req.write(create_similarly_named_authors_link_box(p, ln)) # log query: try: id_query = log_query(req.get_remote_host(), req.args, uid) if of.startswith("h") and id_query: # Alert/RSS teaser: req.write(websearch_templates.tmpl_alert_rss_teaser_box_for_query(id_query, ln=ln)) except: # do not log query if req is None (used by CLI interface) pass log_query_info("ss", p, f, colls_to_search, results_final_nb_total) # External searches if of.startswith("h"): perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos) return page_end(req, of, ln) def perform_request_cache(req, action="show"): """Manipulates the search engine cache.""" global search_cache global collection_reclist_cache global collection_reclist_cache_timestamp global field_i18nname_cache global field_i18nname_cache_timestamp global collection_i18nname_cache global 
collection_i18nname_cache_timestamp
    req.content_type = "text/html"
    req.send_http_header()
    out = ""
    out += "<h1>Search Cache</h1>"
    # clear cache if requested:
    if action == "clear":
        search_cache = {}
        collection_reclist_cache = create_collection_reclist_cache()
    # show collection reclist cache:
    out += "<h3>Collection reclist cache</h3>"
    out += "- collection table last updated: %s" % get_table_update_time('collection')
    out += "<br>- reclist cache timestamp: %s" % collection_reclist_cache_timestamp
    out += "<br>- reclist cache contents:"
    out += "<blockquote>"
    for coll in collection_reclist_cache.keys():
        if collection_reclist_cache[coll]:
            out += "%s (%d)<br>" % (coll, len(get_collection_reclist(coll)))
    out += "</blockquote>"
    # show search cache:
    out += "<h3>Search Cache</h3>"
    out += "<blockquote>"
    if len(search_cache):
        out += """<table border="1">"""
        out += "<tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>" % \
               ("Pattern", "Field", "Collection", "Number of Hits")
        for search_cache_key in search_cache.keys():
            p, f, c = string.split(search_cache_key, "@", 2)
            # find out about length of cached data:
            l = 0
            for coll in search_cache[search_cache_key]:
                l += len(search_cache[search_cache_key][coll])
            out += "<tr><td>%s</td><td>%s</td><td>%s</td><td>%d</td></tr>" % (p, f, c, l)
        out += "</table>"
    else:
        out += "<p>Search cache is empty."
    out += "</blockquote>"
    out += """<p><a href="%s/search/cache?action=clear">clear cache</a>""" % weburl
    # show field i18nname cache:
    out += "<h3>Field I18N names cache</h3>"
    out += "- fieldname table last updated: %s" % get_table_update_time('fieldname')
    out += "<br>- i18nname cache timestamp: %s" % field_i18nname_cache_timestamp
    out += "<br>- i18nname cache contents:"
    out += "<blockquote>"
    for field in field_i18nname_cache.keys():
        for ln in field_i18nname_cache[field].keys():
            out += "%s, %s = %s<br>" % (field, ln, field_i18nname_cache[field][ln])
    out += "</blockquote>"
    # show collection i18nname cache:
    out += "<h3>Collection I18N names cache</h3>"
    out += "- collectionname table last updated: %s" % get_table_update_time('collectionname')
    out += "<br>- i18nname cache timestamp: %s" % collection_i18nname_cache_timestamp
    out += "<br>- i18nname cache contents:"
    out += "<blockquote>"
    for coll in collection_i18nname_cache.keys():
        for ln in collection_i18nname_cache[coll].keys():
            out += "%s, %s = %s<br>" % (coll, ln, collection_i18nname_cache[coll][ln])
    out += "</blockquote>"
    req.write("<html>")
    req.write(out)
    req.write("</html>")
    return "\n"

def perform_request_log(req, date=""):
    """Display search log information for given date."""
    req.content_type = "text/html"
    req.send_http_header()
    req.write("<html>")
    req.write("<h1>Search Log</h1>")
    if date: # case A: display stats for a day
        yyyymmdd = string.atoi(date)
        req.write("<p>Date: %d</p>" % yyyymmdd)
        req.write("""<table border="1">""")
        req.write("<tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>" % \
                  ("No.", "Time", "Pattern", "Field", "Collection", "Number of Hits"))
        # read file:
        p = os.popen("grep ^%d %s/search.log" % (yyyymmdd, logdir), 'r')
        lines = p.readlines()
        p.close()
        # process lines:
        i = 0
        for line in lines:
            try:
                datetime, as, p, f, c, nbhits = string.split(line, "#")
                i += 1
                req.write("<tr><td>#%d</td><td>%s:%s:%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>" \
                          % (i, datetime[8:10], datetime[10:12], datetime[12:], p, f, c, nbhits))
            except:
                pass # ignore eventual wrong log lines
        req.write("</table>")
    else: # case B: display summary stats per day
        yyyymm01 = int(time.strftime("%Y%m01", time.localtime()))
        yyyymmdd = int(time.strftime("%Y%m%d", time.localtime()))
        req.write("""<table border="1">""")
        req.write("<tr><td>%s</td><td>%s</td></tr>" % ("Day", "Number of Queries"))
        for day in range(yyyymm01, yyyymmdd + 1):
            p = os.popen("grep -c ^%d %s/search.log" % (day, logdir), 'r')
            for line in p.readlines():
                req.write("""<tr><td>%s</td><td><a href="%s/search/log?date=%d">%s</a></td></tr>""" % \
                          (day, weburl, day, line))
            p.close()
        req.write("</table>
") req.write("") return "\n" def profile(p="", f="", c=cdsname): """Profile search time.""" import profile import pstats profile.run("perform_request_search(p='%s',f='%s', c='%s')" % (p, f, c), "perform_request_search_profile") p = pstats.Stats("perform_request_search_profile") p.strip_dirs().sort_stats("cumulative").print_stats() return 0 ## test cases: #print wash_colls(cdsname,"Library Catalogue", 0) #print wash_colls("Periodicals & Progress Reports",["Periodicals","Progress Reports"], 0) #print wash_field("wau") #print print_record(20,"tm","001,245") #print create_opft_search_units(None, "PHE-87-13","reportnumber") #print ":"+wash_pattern("* and % doo * %")+":\n" #print ":"+wash_pattern("*")+":\n" #print ":"+wash_pattern("ellis* ell* e*%")+":\n" #print run_sql("SELECT name,dbquery from collection") #print get_index_id("author") #print get_coll_ancestors("Theses") #print get_coll_sons("Articles & Preprints") #print get_coll_real_descendants("Articles & Preprints") #print get_collection_reclist("Theses") #print log(sys.stdin) #print search_unit_in_bibrec('2002-12-01','2002-12-12') #print type(wash_url_argument("-1",'int')) #print get_nearest_terms_in_bibxxx("ellis", "author", 5, 5) #print call_bibformat(68, "HB_FLY") #print create_collection_i18nname_cache() #print get_fieldvalues(10, "980__a") #print get_fieldvalues_alephseq_like(10,"001___") #print get_fieldvalues_alephseq_like(10,"980__a") #print get_fieldvalues_alephseq_like(10,"foo") #print get_fieldvalues_alephseq_like(10,"-1") #print get_fieldvalues_alephseq_like(10,"99") #print get_fieldvalues_alephseq_like(10,["001", "980"]) ## profiling: #profile("of the this") #print perform_request_search(p="ellis") diff --git a/modules/websearch/lib/websearch_templates.py b/modules/websearch/lib/websearch_templates.py index 601484da0..972c96156 100644 --- a/modules/websearch/lib/websearch_templates.py +++ b/modules/websearch/lib/websearch_templates.py @@ -1,2683 +1,2725 @@ # -*- coding: utf-8 -*- ## $Id$ ## This file is part of CDS Invenio. ## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 CERN. ## ## CDS Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## CDS Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with CDS Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 
# pylint: disable-msg=C0301 __revision__ = "$Id$" import urllib import time import cgi import gettext import string import locale from invenio.config import \ CFG_WEBSEARCH_ADVANCEDSEARCH_PATTERN_BOX_WIDTH, \ CFG_WEBSEARCH_AUTHOR_ET_AL_THRESHOLD, \ CFG_WEBSEARCH_USE_ALEPH_SYSNOS, \ CFG_BIBRANK_SHOW_READING_STATS, \ CFG_BIBRANK_SHOW_DOWNLOAD_STATS, \ CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS, \ CFG_BIBRANK_SHOW_CITATION_LINKS, \ CFG_BIBRANK_SHOW_CITATION_STATS, \ CFG_BIBRANK_SHOW_CITATION_GRAPHS, \ + CFG_WEBSEARCH_RSS_TTL, \ cdslang, \ cdsname, \ cdsnameintl, \ version, \ weburl, \ supportemail from invenio.dbquery import run_sql from invenio.messages import gettext_set_language #from invenio.search_engine_config import CFG_EXPERIMENTAL_FEATURES from invenio.urlutils import make_canonical_urlargd, drop_default_urlargd, create_html_link, create_url from invenio.htmlutils import nmtoken_from_string +from invenio.webinterface_handler import wash_urlargd from invenio.websearch_external_collections import external_collection_get_state def get_fieldvalues(recID, tag): """Return list of field values for field TAG inside record RECID. FIXME: should be imported commonly for search_engine too.""" out = [] if tag == "001___": # we have asked for recID that is not stored in bibXXx tables out.append(str(recID)) else: # we are going to look inside bibXXx tables digit = tag[0:2] bx = "bib%sx" % digit bibx = "bibrec_bib%sx" % digit query = "SELECT bx.value FROM %s AS bx, %s AS bibx WHERE bibx.id_bibrec='%s' AND bx.id=bibx.id_bibxxx AND bx.tag LIKE '%s'" \ "ORDER BY bibx.field_number, bx.tag ASC" % (bx, bibx, recID, tag) res = run_sql(query) for row in res: out.append(row[0]) return out class Template: # This dictionary maps CDS Invenio language code to locale codes (ISO 639) tmpl_localemap = { 'bg': 'bg_BG', 'ca': 'ca_ES', 'de': 'de_DE', 'el': 'el_GR', 'en': 'en_US', 'es': 'es_ES', 'pt': 'pt_BR', 'fr': 'fr_FR', 'it': 'it_IT', 'ru': 'ru_RU', 'sk': 'sk_SK', 'cs': 'cs_CZ', 'no': 'no_NO', 'sv': 'sv_SE', 'uk': 'uk_UA', 'ja': 'ja_JA', 'pl': 'pl_PL', 'hr': 'hr_HR', 'zh_CN': 'zh_CN', 'zh_TW': 'zh_TW', } tmpl_default_locale = "en_US" # which locale to use by default, useful in case of failure # Type of the allowed parameters for the web interface for search results search_results_default_urlargd = { 'cc': (str, cdsname), 'c': (list, []), 'p': (str, ""), 'f': (str, ""), 'rg': (int, 10), 'sf': (str, ""), 'so': (str, "d"), 'sp': (str, ""), 'rm': (str, ""), 'of': (str, "hb"), 'ot': (list, []), 'as': (int, 0), 'p1': (str, ""), 'f1': (str, ""), 'm1': (str, ""), 'op1':(str, ""), 'p2': (str, ""), 'f2': (str, ""), 'm2': (str, ""), 'op2':(str, ""), 'p3': (str, ""), 'f3': (str, ""), 'm3': (str, ""), 'sc': (int, 0), 'jrec': (int, 0), 'recid': (int, -1), 'recidb': (int, -1), 'sysno': (str, ""), 'id': (int, -1), 'idb': (int, -1), 'sysnb': (str, ""), 'action': (str, "search"), 'action_search': (str, ""), 'action_browse': (str, ""), 'd1': (str, ""), 'd1y': (int, 0), 'd1m': (int, 0), 'd1d': (int, 0), 'd2': (str, ""), 'd2y': (int, 0), 'd2m': (int, 0), 'd2d': (int, 0), 'dt': (str, ""), 'ap': (int, 1), 'verbose': (int, 0), 'ec': (list, []), } # ...and for search interfaces search_interface_default_urlargd = { 'as': (int, 0), 'verbose': (int, 0)} + # ...and for RSS feeds + rss_default_urlargd = {'c' : (list, []), + 'cc' : (str, ""), + 'p' : (str, ""), + 'f' : (str, ""), + 'p1' : (str, ""), + 'f1' : (str, ""), + 'm1' : (str, ""), + 'op1': (str, ""), + 'p2' : (str, ""), + 'f2' : (str, ""), + 'm2' : (str, ""), + 'op2': (str, ""), + 'p3' : (str, 
""), + 'f3' : (str, ""), + 'm3' : (str, "")} + def build_search_url(self, known_parameters={}, **kargs): """ Helper for generating a canonical search url. 'known_parameters' is the list of query parameters you inherit from your current query. You can then pass keyword arguments to modify this query. build_search_url(known_parameters, of="xm") The generated URL is absolute. """ parameters = {} parameters.update(known_parameters) parameters.update(kargs) # Now, we only have the arguments which have _not_ their default value parameters = drop_default_urlargd(parameters, self.search_results_default_urlargd) # Asking for a recid? Return a /record/ URL if 'recid' in parameters: target = "%s/record/%d" % (weburl, parameters['recid']) del parameters['recid'] target += make_canonical_urlargd(parameters, self.search_results_default_urlargd) return target return "%s/search%s" % (weburl, make_canonical_urlargd(parameters, self.search_results_default_urlargd)) def build_search_interface_url(self, known_parameters={}, **kargs): """ Helper for generating a canonical search interface URL.""" parameters = {} parameters.update(known_parameters) parameters.update(kargs) c = parameters['c'] del parameters['c'] # Now, we only have the arguments which have _not_ their default value if c and c != cdsname: base = weburl + '/collection/' + urllib.quote(c) else: base = weburl return create_url(base, drop_default_urlargd(parameters, self.search_results_default_urlargd)) + def build_rss_url(self, known_parameters, **kargs): + """Helper for generating a canonical RSS URL""" + + parameters = {} + parameters.update(known_parameters) + parameters.update(kargs) + + # Keep only interesting parameters + argd = wash_urlargd(parameters, self.rss_default_urlargd) + + if argd: + # Handle 'c' differently since it is a list + c = argd.get('c', []) + del argd['c'] + # Create query, and drop empty params + args = make_canonical_urlargd(argd, self.rss_default_urlargd) + if c != []: + # Add collections + c = [urllib.quote(coll) for coll in c] + args += '&c=' + '&c='.join(c) + + return weburl + '/rss' + args + def tmpl_record_page_header_content(self, req, recid, ln): """ Provide extra information in the header of /record pages """ _ = gettext_set_language(ln) title = get_fieldvalues(recid, "245__a") if title: title = _("Record") + '#%d: %s' %(recid, cgi.escape(title[0])) else: title = _("Record") + ' #%d' % recid keywords = ', '.join(get_fieldvalues(recid, "6531_a")) description = ' '.join(get_fieldvalues(recid, "520__a")) description += "\n" description += '; '.join(get_fieldvalues(recid, "100__a") + get_fieldvalues(recid, "700__a")) return [cgi.escape(x, True) for x in (title, description, keywords)] def tmpl_navtrail_links(self, as, ln, dads): """ Creates the navigation bar at top of each search page (*Home > Root collection > subcollection > ...*) Parameters: - 'as' *bool* - Should we display an advanced search box? 
- 'ln' *string* - The language to display - 'dads' *list* - A list of parent links, each one being a dictionary of ('name', 'longname') """ out = [] for url, name in dads: out.append(create_html_link(self.build_search_interface_url(c=url, as=as, ln=ln), {}, cgi.escape(name), {'class': 'navtrail'})) return ' > '.join(out) def tmpl_webcoll_body(self, ln, collection, te_portalbox, searchfor, np_portalbox, narrowsearch, focuson, instantbrowse, ne_portalbox): """ Creates the body of the main search page. Parameters: - 'ln' *string* - language of the page being generated - 'collection' *string* - collection id of the page being generated - 'te_portalbox' *string* - The HTML code for the portalbox on top of search - 'searchfor' *string* - The HTML code for the search options - 'np_portalbox' *string* - The HTML code for the portalbox on bottom of search - 'narrowsearch' *string* - The HTML code for the search categories (left bottom of page) - 'focuson' *string* - The HTML code for the "focuson" categories (right bottom of page) - 'instantbrowse' *string* - The HTML code for the latest additions box, used instead of 'narrowsearch' when the latter is empty - 'ne_portalbox' *string* - The HTML code for the bottom of the page """ if not narrowsearch: narrowsearch = instantbrowse body = '''

%(searchfor)s %(np_portalbox)s ''' % { 'weburl' : weburl, 'searchfor' : searchfor, 'np_portalbox' : np_portalbox, 'narrowsearch' : narrowsearch } if focuson: body += """""" body += """
%(narrowsearch)s""" + focuson + """
%(ne_portalbox)s
""" % {'ne_portalbox' : ne_portalbox} return body def tmpl_portalbox(self, title, body): """Creates portalboxes based on the parameters Parameters: - 'title' *string* - The title of the box - 'body' *string* - The HTML code for the body of the box """ out = """
%(title)s
%(body)s
""" % {'title' : cgi.escape(title), 'body' : body} return out def tmpl_searchfor_simple(self, ln, collection_id, collection_name, record_count, middle_option): """Produces simple *Search for* box for the current collection. Parameters: - 'ln' *string* - The language to display - 'header' *string* - header of search form - 'middle_option' *string* - HTML code for the options (any field, specific fields ...) """ # load the right message language _ = gettext_set_language(ln) out = ''' ''' argd = drop_default_urlargd({'ln': ln, 'cc': collection_id, 'sc': 1}, self.search_results_default_urlargd) # Only add non-default hidden values for field, value in argd.items(): out += self.tmpl_input_hidden(field, value) header = _("Search %s records for:") % \ self.tmpl_nbrecs_info(record_count, "","") asearchurl = self.build_search_interface_url(c=collection_id, as=1, ln=ln) # print commentary start: out += ''' ''' % {'ln' : ln, 'langlink': ln != cdslang and '?ln=' + ln or '', 'weburl' : weburl, 'asearch' : create_html_link(asearchurl, {}, _('Advanced Search')), 'header' : header, 'middle_option' : middle_option, 'msg_search' : _('Search'), 'msg_browse' : _('Browse'), 'msg_search_tips' : _('Search Tips')} return out def tmpl_searchfor_advanced(self, ln, # current language collection_id, collection_name, record_count, middle_option_1, middle_option_2, middle_option_3, searchoptions, sortoptions, rankoptions, displayoptions, formatoptions ): """ Produces advanced *Search for* box for the current collection. Parameters: - 'ln' *string* - The language to display - 'weburl' *string* - The base URL for the site - 'ssearchurl' *string* - The URL to simple search form - 'header' *string* - header of search form - 'middle_option_1' *string* - HTML code for the first row of options (any field, specific fields ...) - 'middle_option_2' *string* - HTML code for the second row of options (any field, specific fields ...) - 'middle_option_3' *string* - HTML code for the third row of options (any field, specific fields ...) 
- 'searchoptions' *string* - HTML code for the search options - 'sortoptions' *string* - HTML code for the sort options - 'rankoptions' *string* - HTML code for the rank options - 'displayoptions' *string* - HTML code for the display options - 'formatoptions' *string* - HTML code for the format options """ # load the right message language _ = gettext_set_language(ln) out = ''' ''' argd = drop_default_urlargd({'ln': ln, 'as': 1, 'cc': collection_id, 'sc': 1}, self.search_results_default_urlargd) # Only add non-default hidden values for field, value in argd.items(): out += self.tmpl_input_hidden(field, value) header = _("Search %s records for") % \ self.tmpl_nbrecs_info(record_count, "","") header += ':' ssearchurl = self.build_search_interface_url(c=collection_id, as=0, ln=ln) out += ''' ''' % {'ln' : ln, 'langlink': ln != cdslang and '?ln=' + ln or '', 'weburl' : weburl, 'ssearch' : create_html_link(ssearchurl, {}, _("Simple Search")), 'header' : header, 'matchbox_m1' : self.tmpl_matchtype_box('m1', ln=ln), 'middle_option_1' : middle_option_1, 'andornot_op1' : self.tmpl_andornot_box('op1', ln=ln), 'matchbox_m2' : self.tmpl_matchtype_box('m2', ln=ln), 'middle_option_2' : middle_option_2, 'andornot_op2' : self.tmpl_andornot_box('op2', ln=ln), 'matchbox_m3' : self.tmpl_matchtype_box('m3', ln=ln), 'middle_option_3' : middle_option_3, 'msg_search' : _("Search"), 'msg_browse' : _("Browse"), 'msg_search_tips' : _("Search Tips")} if (searchoptions): out += """""" % { 'searchheader' : _("Search options:"), 'searchoptions' : searchoptions } out += """ """ % { 'added' : _("Added/modified since:"), 'until' : _("until:"), 'added_or_modified': self.tmpl_inputdatetype(ln=ln), 'date_added' : self.tmpl_inputdate("d1", ln=ln), 'date_until' : self.tmpl_inputdate("d2", ln=ln), 'msg_sort' : _("Sort by:"), 'msg_display' : _("Display results:"), 'msg_format' : _("Output format:"), 'sortoptions' : sortoptions, 'rankoptions' : rankoptions, 'displayoptions' : displayoptions, 'formatoptions' : formatoptions } return out def tmpl_matchtype_box(self, name='m', value='', ln='en'): """Returns HTML code for the 'match type' selection box. Parameters: - 'name' *string* - The name of the produced select - 'value' *string* - The selected value (if any value is already selected) - 'ln' *string* - the language to display """ # load the right message language _ = gettext_set_language(ln) out = """ """ % {'name' : name, 'sela' : self.tmpl_is_selected('a', value), 'opta' : _("All of the words:"), 'selo' : self.tmpl_is_selected('o', value), 'opto' : _("Any of the words:"), 'sele' : self.tmpl_is_selected('e', value), 'opte' : _("Exact phrase:"), 'selp' : self.tmpl_is_selected('p', value), 'optp' : _("Partial phrase:"), 'selr' : self.tmpl_is_selected('r', value), 'optr' : _("Regular expression:") } return out def tmpl_is_selected(self, var, fld): """ Checks if *var* and *fld* are equal, and if yes, returns ' selected="selected"'. Useful for select boxes. Parameters: - 'var' *string* - First value to compare - 'fld' *string* - Second value to compare """ if var == fld: return ' selected="selected"' else: return "" def tmpl_andornot_box(self, name='op', value='', ln='en'): """ Returns HTML code for the AND/OR/NOT selection box. 
Parameters: - 'name' *string* - The name of the produced select - 'value' *string* - The selected value (if any value is already selected) - 'ln' *string* - the language to display """ # load the right message language _ = gettext_set_language(ln) out = """ """ % {'name' : name, 'sela' : self.tmpl_is_selected('a', value), 'opta' : _("AND"), 'selo' : self.tmpl_is_selected('o', value), 'opto' : _("OR"), 'seln' : self.tmpl_is_selected('n', value), 'optn' : _("AND NOT") } return out def tmpl_inputdate(self, name, ln, sy = 0, sm = 0, sd = 0): """ Produces *From Date*, *Until Date* kind of selection box. Suitable for search options. Parameters: - 'name' *string* - The base name of the produced selects - 'ln' *string* - the language to display """ # load the right message language _ = gettext_set_language(ln) box = """ """ # month box += """ """ # year box += """ """ return box def tmpl_inputdatetype(self, dt='', ln=cdslang): """ Produces input date type selection box to choose added-or-modified date search option. Parameters: - 'dt' *string - date type (c=created, m=modified) - 'ln' *string* - the language to display """ # load the right message language _ = gettext_set_language(ln) box = """ """ % { 'added': _("Added since:"), 'modified': _("Modified since:"), 'sel': self.tmpl_is_selected(dt, 'm'), } return box def tmpl_narrowsearch(self, as, ln, type, father, has_grandchildren, sons, display_grandsons, grandsons): """ Creates list of collection descendants of type *type* under title *title*. If as==1, then links to Advanced Search interfaces; otherwise Simple Search. Suitable for 'Narrow search' and 'Focus on' boxes. Parameters: - 'as' *bool* - Should we display an advanced search box? - 'ln' *string* - The language to display - 'type' *string* - The type of the produced box (virtual collections or normal collections) - 'father' *collection* - The current collection - 'has_grandchildren' *bool* - If the current collection has grand children - 'sons' *list* - The list of the sub-collections (first level) - 'display_grandsons' *bool* - If the grand children collections should be displayed (2 level deep display) - 'grandsons' *list* - The list of sub-collections (second level) """ # load the right message language _ = gettext_set_language(ln) title = {'r': _("Narrow by collection:"), 'v': _("Focus on:")}[type] if has_grandchildren: style_prolog = "" style_epilog = "" else: style_prolog = "" style_epilog = "" out = """""" % {'title' : title, 'narrowsearchbox': {'r': 'narrowsearchbox', 'v': 'focusonsearchbox'}[type]} # iterate through sons: i = 0 for son in sons: out += """""" % {'name' : cgi.escape(son.name) } else: out += """ """ % {'name' : cgi.escape(son.name) } else: out += '' out += """""" i += 1 out += "
%(title)s
""" % \ { 'narrowsearchbox': {'r': 'narrowsearchbox', 'v': 'focusonsearchbox'}[type]} if type == 'r': if son.restricted_p() and son.restricted_p() != father.restricted_p(): out += """ %(link)s%(recs)s """ % { 'link': create_html_link(self.build_search_interface_url(c=son.name, ln=ln, as=as), {}, style_prolog + cgi.escape(son.get_name(ln)) + style_epilog), 'recs' : self.tmpl_nbrecs_info(son.nbrecs, ln=ln)} if son.restricted_p(): out += """ [%(msg)s] """ % { 'msg' : _("restricted") } if display_grandsons and len(grandsons[i]): # iterate trough grandsons: out += """
""" for grandson in grandsons[i]: out += """ %(link)s%(nbrec)s """ % { 'link': create_html_link(self.build_search_interface_url(c=grandson.name, ln=ln, as=as), {}, cgi.escape(grandson.get_name(ln))), 'nbrec' : self.tmpl_nbrecs_info(grandson.nbrecs, ln=ln)} out += """
" return out def tmpl_searchalso(self, ln, engines_list, collection_id): _ = gettext_set_language(ln) box_name = _("Search also:") html = """
""" % locals() for engine in engines_list: internal_name = engine.name name = _(internal_name) base_url = engine.base_url if external_collection_get_state(engine, collection_id) == 3: checked = ' checked="checked"' else: checked = '' html += """""" % \ { 'checked': checked, 'base_url': base_url, 'internal_name': internal_name, 'name': cgi.escape(name), 'id': "extSearch" + nmtoken_from_string(name), 'weburl': weburl,} html += """
%(box_name)s
%(name)s
""" return html def tmpl_nbrecs_info(self, number, prolog=None, epilog=None, ln=cdslang): """ Return information on the number of records. Parameters: - 'number' *string* - The number of records - 'prolog' *string* (optional) - An HTML code to prefix the number (if **None**, will be '(') - 'epilog' *string* (optional) - An HTML code to append to the number (if **None**, will be ')') """ if number is None: number = 0 if prolog is None: prolog = ''' (''' if epilog is None: epilog = ''')''' return prolog + self.tmpl_nice_number(number, ln) + epilog def tmpl_box_restricted_content(self, ln): """ Displays a box containing a *restricted content* message Parameters: - 'ln' *string* - The language to display """ # load the right message language _ = gettext_set_language(ln) return _("The contents of this collection is restricted.") def tmpl_box_no_records(self, ln): """ Displays a box containing a *no content* message Parameters: - 'ln' *string* - The language to display """ # load the right message language _ = gettext_set_language(ln) return _("This collection does not contain any document yet.") def tmpl_instant_browse(self, as, ln, recids, more_link = None): """ Formats a list of records (given in the recids list) from the database. Parameters: - 'as' *int* - Advanced Search interface or not (0 or 1) - 'ln' *string* - The language to display - 'recids' *list* - the list of records from the database - 'more_link' *string* - the "More..." link for the record. If not given, will not be displayed """ # load the right message language _ = gettext_set_language(ln) body = '''''' for recid in recids: body += ''' ''' % {'date': recid['date'], 'body': recid['body'] } body += "
%(date)s %(body)s
" if more_link: body += '
' + \ create_html_link(more_link, {}, '[>> %s]' % _("more")) + \ '
' return '''
%(header)s
%(body)s
''' % {'header' : _("Latest additions:"), 'body' : body, } def tmpl_searchwithin_select(self, ln, fieldname, selected, values): """ Produces 'search within' selection box for the current collection. Parameters: - 'ln' *string* - The language to display - 'fieldname' *string* - the name of the select box produced - 'selected' *string* - which of the values is selected - 'values' *list* - the list of values in the select """ out = '""" return out def tmpl_select(self, fieldname, values, selected=None, css_class=''): """ Produces a generic select box Parameters: - 'css_class' *string* - optional, a css class to display this select with - 'fieldname' *list* - the name of the select box produced - 'selected' *string* - which of the values is selected - 'values' *list* - the list of values in the select """ if css_class != '': class_field = ' class="%s"' % css_class else: class_field = '' out = '""" return out def tmpl_record_links(self, weburl, recid, ln): """ Displays the *More info* and *Find similar* links for a record Parameters: - 'ln' *string* - The language to display - 'weburl' *string* - The base URL for the site - 'recid' *string* - the id of the displayed record """ # load the right message language _ = gettext_set_language(ln) out = '''
%(detailed)s - %(similar)s''' % { 'detailed': create_html_link(self.build_search_url(recid=recid, ln=ln), {}, _("Detailed record"), {'class': "moreinfo"}), 'similar': create_html_link(self.build_search_url(p="recid:%d" % recid, rm='wrd', ln=ln), {}, _("Similar records"), {'class': "moreinfo"})} if CFG_BIBRANK_SHOW_CITATION_LINKS: out += ''' - %s ''' % \ create_html_link(self.build_search_url(p='recid:%d' % recid, rm='citation', ln=ln), {}, _("Cited by"), {'class': "moreinfo"}) return out def tmpl_record_body(self, weburl, titles, authors, dates, rns, abstracts, urls_u, urls_z, ln): """ Displays the "HTML basic" format of a record Parameters: - 'weburl' *string* - The base URL for the site - 'titles' *list* - the titles of the record - 'authors' *list* - the authors (as strings) - 'dates' *list* - the dates of publication - 'rns' *list* - the report numbers of the record - 'abstracts' *list* - the abstracts for the record - 'urls_u' *list* - URLs to the fulltext versions of the record - 'urls_z' *list* - Not used """ out = "" for title in titles: out += "%(title)s " % { 'title' : cgi.escape(title) } if authors: out += " / " for author in authors[:CFG_WEBSEARCH_AUTHOR_ET_AL_THRESHOLD]: out += '%s; ' % \ create_html_link(self.build_search_url(p=author, f='author', ln=ln), {}, cgi.escape(author)) if len(authors) > CFG_WEBSEARCH_AUTHOR_ET_AL_THRESHOLD: out += "et al." for date in dates: out += " %s." % cgi.escape(date) for rn in rns: out += """ [%(rn)s]""" % {'rn' : cgi.escape(rn)} for abstract in abstracts: out += "
%(abstract)s [...]" % {'abstract' : cgi.escape(abstract[:1+string.find(abstract, '.')]) } for idx in range(0, len(urls_u)): out += """
%(name)s""" % { 'url' : urls_u[idx], 'name' : urls_u[idx] } return out def tmpl_search_in_bibwords(self, p, f, ln, nearest_box): """ Displays the *Words like current ones* links for a search Parameters: - 'p' *string* - Current search words - 'f' *string* - the fields in which the search was done - 'nearest_box' *string* - the HTML code for the "nearest_terms" box - most probably from a create_nearest_terms_box call """ # load the right message language _ = gettext_set_language(ln) out = '

' if f: out += _("Words nearest to %(x_word)s inside %(x_field)s in any collection are:") % {'x_word': '' + cgi.escape(p) + '', 'x_field': '' + cgi.escape(f) + ''} else: out += _("Words nearest to %(x_word)s in any collection are:") % {'x_word': '' + cgi.escape(p) + ''} out += '
' + nearest_box + '

' return out def tmpl_nearest_term_box(self, p, ln, f, terminfo, intro): """ Displays the *Nearest search terms* box Parameters: - 'p' *string* - Current search words - 'f' *string* - the field in which the search was done (if any) - 'ln' *string* - The language to display - 'terminfo': tuple (term, hits, argd) for each near term - 'intro' *string* - the intro HTML to prefix the box with """ out = '''''' for term, hits, argd in terminfo: if hits: hitsinfo = str(hits) else: hitsinfo = '-' term = cgi.escape(term) if term == p: # print search word for orientation: nearesttermsboxbody_class = "nearesttermsboxbodyselected" if hits > 0: term = create_html_link(self.build_search_url(argd), {}, term, {'class': "nearesttermsselected"}) else: nearesttermsboxbody_class = "nearesttermsboxbody" term = create_html_link(self.build_search_url(argd), {}, term, {'class': "nearestterms"}) out += '''\
%(hits)s   %(term)s
" return intro + "
" + out + "
" def tmpl_browse_pattern(self, f, ln, browsed_phrases_in_colls, colls): """ Displays the *Nearest search terms* box Parameters: - 'f' *string* - a field name (i18nized) - 'ln' *string* - The language to display - 'weburl' *string* - The base URL for the site - 'browsed_phrases_in_colls' *array* - the phrases to display - 'colls' *array* - the list of collection parameters of the search (c's) """ # load the right message language _ = gettext_set_language(ln) out = """""" % { 'hits' : _("Hits"), 'f' : cgi.escape(f) } if len(browsed_phrases_in_colls) == 1: # one hit only found: phrase, nbhits = browsed_phrases_in_colls[0][0], browsed_phrases_in_colls[0][1] query = {'c': colls, 'ln': ln, 'p': '"%s"' % phrase, 'f': f} out += """""" % {'nbhits': nbhits, 'link': create_html_link(self.build_search_url(query), {}, cgi.escape(phrase))} elif len(browsed_phrases_in_colls) > 1: # first display what was found but the last one: for phrase, nbhits in browsed_phrases_in_colls[:-1]: query = {'c': colls, 'ln': ln, 'p': '"%s"' % phrase, 'f': f} out += """""" % {'nbhits' : nbhits, 'link': create_html_link(self.build_search_url(query), {}, cgi.escape(phrase))} # now display last hit as "next term": phrase, nbhits = browsed_phrases_in_colls[-1] query = {'c': colls, 'ln': ln, 'p': phrase, 'f': f} out += """""" % {'link': create_html_link(self.build_search_url(query, action='browse'), {}, _("next")), 'weburl' : weburl} out += """
%(hits)s   %(f)s
%(nbhits)s   %(link)s
%(nbhits)s   %(link)s
  %(link)s
""" return out def tmpl_search_box(self, ln, as, cc, cc_intl, ot, sp, action, fieldslist, f1, f2, f3, m1, m2, m3, p1, p2, p3, op1, op2, rm, p, f, coll_selects, d1y, d2y, d1m, d2m, d1d, d2d, dt, sort_fields, sf, so, ranks, sc, rg, formats, of, pl, jrec, ec): """ Displays the *Nearest search terms* box Parameters: - 'ln' *string* - The language to display - 'weburl' *string* - The base URL for the site - 'as' *bool* - Should we display an advanced search box? - 'cc_intl' *string* - the i18nized current collection name - 'cc' *string* - the internal current collection name - 'ot', 'sp' *string* - hidden values - 'action' *string* - the action demanded by the user - 'fieldslist' *list* - the list of all fields available, for use in select within boxes in advanced search - 'p, f, f1, f2, f3, m1, m2, m3, p1, p2, p3, op1, op2, op3, rm' *strings* - the search parameters - 'coll_selects' *array* - a list of lists, each containing the collections selects to display - 'd1y, d2y, d1m, d2m, d1d, d2d' *int* - the search between dates - 'dt' *string* - the dates' types (creation dates, modification dates) - 'sort_fields' *array* - the select information for the sort fields - 'sf' *string* - the currently selected sort field - 'so' *string* - the currently selected sort order ("a" or "d") - 'ranks' *array* - ranking methods - 'rm' *string* - selected ranking method - 'sc' *string* - split by collection or not - 'rg' *string* - selected results/page - 'formats' *array* - available output formats - 'of' *string* - the selected output format - 'pl' *string* - `limit to' search pattern """ # load the right message language _ = gettext_set_language(ln) # These are hidden fields the user does not manipulate # directly argd = drop_default_urlargd({ 'ln': ln, 'as': as, 'cc': cc, 'ot': ot, 'sp': sp, 'ec': ec, }, self.search_results_default_urlargd) out = '''

%(ccname)s

''' % {'ccname' : cgi.escape(cc_intl), 'weburl' : weburl} # Only add non-default hidden values for field, value in argd.items(): out += self.tmpl_input_hidden(field, value) leadingtext = _("Search") if action == 'browse': leadingtext = _("Browse") if as == 1: # print Advanced Search form: # define search box elements: out += ''' ''' % { 'simple_search': create_html_link(self.build_search_url(p=p1, f=f1, rm=rm, cc=cc, ln=ln, jrec=jrec, rg=rg), {}, _("Simple Search")), 'leading' : leadingtext, 'sizepattern' : CFG_WEBSEARCH_ADVANCEDSEARCH_PATTERN_BOX_WIDTH, 'matchbox1' : self.tmpl_matchtype_box('m1', m1, ln=ln), 'p1' : cgi.escape(p1,1), 'searchwithin1' : self.tmpl_searchwithin_select( ln = ln, fieldname = 'f1', selected = f1, values = self._add_mark_to_field(value = f1, fields = fieldslist, ln = ln) ), 'andornot1' : self.tmpl_andornot_box( name = 'op1', value = op1, ln = ln ), 'matchbox2' : self.tmpl_matchtype_box('m2', m2, ln=ln), 'p2' : cgi.escape(p2,1), 'searchwithin2' : self.tmpl_searchwithin_select( ln = ln, fieldname = 'f2', selected = f2, values = self._add_mark_to_field(value = f2, fields = fieldslist, ln = ln) ), 'andornot2' : self.tmpl_andornot_box( name = 'op2', value = op2, ln = ln ), 'matchbox3' : self.tmpl_matchtype_box('m3', m3, ln=ln), 'p3' : cgi.escape(p3,1), 'searchwithin3' : self.tmpl_searchwithin_select( ln = ln, fieldname = 'f3', selected = f3, values = self._add_mark_to_field(value = f3, fields = fieldslist, ln = ln) ), 'search' : _("Search"), 'browse' : _("Browse"), 'weburl' : weburl, 'ln' : ln, 'langlink': ln != cdslang and '?ln=' + ln or '', 'search_tips': _("Search Tips") } else: # print Simple Search form: out += ''' ''' % { 'advanced_search': create_html_link(self.build_search_url(p1=p, f1=f, rm=rm, as=1, cc=cc, jrec=jrec, ln=ln, rg=rg), {}, _("Advanced Search")), 'leading' : leadingtext, 'sizepattern' : CFG_WEBSEARCH_ADVANCEDSEARCH_PATTERN_BOX_WIDTH, 'p' : cgi.escape(p, 1), 'searchwithin' : self.tmpl_searchwithin_select( ln = ln, fieldname = 'f', selected = f, values = self._add_mark_to_field(value=f, fields=fieldslist, ln=ln) ), 'search' : _("Search"), 'browse' : _("Browse"), 'weburl' : weburl, 'ln' : ln, 'langlink': ln != cdslang and '?ln=' + ln or '', 'search_tips': _("Search Tips") } ## secondly, print Collection(s) box: selects = '' for sel in coll_selects: selects += self.tmpl_select(fieldname='c', values=sel) out += """ """ % { 'leading' : leadingtext, 'msg_coll' : _("collections"), 'colls' : selects, } ## thirdly, print search limits, if applicable: if action != _("Browse") and pl: out += """""" % { 'limitto' : _("Limit to:"), 'sizepattern' : CFG_WEBSEARCH_ADVANCEDSEARCH_PATTERN_BOX_WIDTH, 'pl' : cgi.escape(pl, 1), } ## fourthly, print from/until date boxen, if applicable: if action == _("Browse") or (d1y==0 and d1m==0 and d1d==0 and d2y==0 and d2m==0 and d2d==0): pass # do not need it else: cell_6_a = self.tmpl_inputdatetype(dt, ln) + self.tmpl_inputdate("d1", ln, d1y, d1m, d1d) cell_6_b = self.tmpl_inputdate("d2", ln, d2y, d2m, d2d) out += """""" % { 'added' : _("Added/modified since:"), 'until' : _("until:"), 'added_or_modified': self.tmpl_inputdatetype(dt, ln), 'date1' : self.tmpl_inputdate("d1", ln, d1y, d1m, d1d), 'date2' : self.tmpl_inputdate("d2", ln, d2y, d2m, d2d), } ## fifthly, print Display results box, including sort/rank, formats, etc: if action != _("Browse"): rgs = [] for i in [10, 25, 50, 100, 250, 500]: rgs.append({ 'value' : i, 'text' : "%d %s" % (i, _("results"))}) # sort by: out += """""" % { 'sort_by' : _("Sort :"), 'display_res' : 
_("Display results:"), 'out_format' : _("Output format:"), 'select_sf' : self.tmpl_select(fieldname = 'sf', values = sort_fields, selected = sf, css_class = 'address'), 'select_so' : self.tmpl_select(fieldname = 'so', values = [{ 'value' : 'a', 'text' : _("asc.") }, { 'value' : 'd', 'text' : _("desc.") }], selected = so, css_class = 'address'), 'select_rm' : self.tmpl_select(fieldname = 'rm', values = ranks, selected = rm, css_class = 'address'), 'select_rg' : self.tmpl_select(fieldname = 'rg', values = rgs, selected = rg, css_class = 'address'), 'select_sc' : self.tmpl_select(fieldname = 'sc', values = [{ 'value' : 0, 'text' : _("single list") }, { 'value' : 1, 'text' : _("split by collection") }], selected = sc, css_class = 'address'), 'select_of' : self.tmpl_searchwithin_select( ln = ln, fieldname = 'of', selected = of, values = self._add_mark_to_field(value = of, fields = formats, chars = 3, ln = ln) ), } ## last but not least, print end of search box: out += """
""" return out def tmpl_input_hidden(self, name, value): "Produces the HTML code for a hidden field " if isinstance(value, list): list_input = [self.tmpl_input_hidden(name, val) for val in value] return "\n".join(list_input) return """""" % { 'name' : cgi.escape(str(name), 1), 'value' : cgi.escape(str(value), 1), } def _add_mark_to_field(self, value, fields, ln, chars = 1): """Adds the current value as a MARC tag in the fields array Useful for advanced search""" # load the right message language _ = gettext_set_language(ln) out = fields if value and str(value[0:chars]).isdigit(): out.append({'value' : value, 'text' : str(value) + " " + _("MARC tag") }) return out def tmpl_search_pagestart(self, ln) : "page start for search page. Will display after the page header" return """
""" def tmpl_search_pageend(self, ln) : "page end for search page. Will display just before the page footer" return """
""" def tmpl_print_warning(self, msg, type, prologue, epilogue): """Prints warning message and flushes output. Parameters: - 'msg' *string* - The message string - 'type' *string* - the warning type - 'prologue' *string* - HTML code to display before the warning - 'epilogue' *string* - HTML code to display after the warning """ out = '\n%s' % (prologue) if type: out += '%s: ' % type out += '%s%s' % (msg, epilogue) return out def tmpl_print_search_info(self, ln, weburl, middle_only, collection, collection_name, collection_id, as, sf, so, rm, rg, nb_found, of, ot, p, f, f1, f2, f3, m1, m2, m3, op1, op2, p1, p2, p3, d1y, d1m, d1d, d2y, d2m, d2d, dt, all_fieldcodes, cpu_time, pl_in_url, jrec, sc, sp): """Prints stripe with the information on 'collection' and 'nb_found' results and CPU time. Also, prints navigation links (beg/next/prev/end) inside the results set. If middle_only is set to 1, it will only print the middle box information (beg/netx/prev/end/etc) links. This is suitable for displaying navigation links at the bottom of the search results page. Parameters: - 'ln' *string* - The language to display - 'weburl' *string* - The base URL for the site - 'middle_only' *bool* - Only display parts of the interface - 'collection' *string* - the collection name - 'collection_name' *string* - the i18nized current collection name - 'as' *bool* - if we display the advanced search interface - 'sf' *string* - the currently selected sort format - 'so' *string* - the currently selected sort order ("a" or "d") - 'rm' *string* - selected ranking method - 'rg' *int* - selected results/page - 'nb_found' *int* - number of results found - 'of' *string* - the selected output format - 'ot' *string* - hidden values - 'p' *string* - Current search words - 'f' *string* - the fields in which the search was done - 'f1, f2, f3, m1, m2, m3, p1, p2, p3, op1, op2' *strings* - the search parameters - 'jrec' *int* - number of first record on this page - 'd1y, d2y, d1m, d2m, d1d, d2d' *int* - the search between dates - 'dt' *string* the dates' type (creation date, modification date) - 'all_fieldcodes' *array* - all the available fields - 'cpu_time' *float* - the time of the query in seconds """ # load the right message language _ = gettext_set_language(ln) out = "" # left table cells: print collection name if not middle_only: out += '''
''' % { 'collection_id': collection_id, 'weburl' : weburl, 'collection_link': create_html_link(self.build_search_interface_url(c=collection, as=as, ln=ln), {}, cgi.escape(collection_name)) } else: out += """
""" % { 'weburl' : weburl } # middle table cell: print beg/next/prev/end arrows: if not middle_only: out += """
" else: out += "" # right table cell: cpu time info if not middle_only: if cpu_time > -1: out += """""" % { 'time' : _("Search took %s seconds.") % ('%.2f' % cpu_time), } out += "
%(collection_link)s %(recs_found)s  """ % { 'recs_found' : _("%s records found") % ('' + self.tmpl_nice_number(nb_found, ln) + '') } else: out += "" if nb_found > rg: out += "" + cgi.escape(collection_name) + " : " + _("%s records found") % ('' + self.tmpl_nice_number(nb_found, ln) + '') + "   " if nb_found > rg: # navig.arrows are needed, since we have many hits query = {'p': p, 'f': f, 'cc': collection, 'sf': sf, 'so': so, 'sp': sp, 'rm': rm, 'of': of, 'ot': ot, 'as': as, 'ln': ln, 'p1': p1, 'p2': p2, 'p3': p3, 'f1': f1, 'f2': f2, 'f3': f3, 'm1': m1, 'm2': m2, 'm3': m3, 'op1': op1, 'op2': op2, 'sc': 0, 'd1y': d1y, 'd1m': d1m, 'd1d': d1d, 'd2y': d2y, 'd2m': d2m, 'd2d': d2d, 'dt': dt, } # @todo here def img(gif, txt): return '%(txt)s' % { 'txt': txt, 'gif': gif, 'weburl': weburl} if jrec-rg > 1: out += create_html_link(self.build_search_url(query, jrec=1, rg=rg), {}, img('sb', _("begin")), {'class': 'img'}) if jrec > 1: out += create_html_link(self.build_search_url(query, jrec=max(jrec-rg, 1), rg=rg), {}, img('sp', _("previous")), {'class': 'img'}) if jrec+rg-1 < nb_found: out += "%d - %d" % (jrec, jrec+rg-1) else: out += "%d - %d" % (jrec, nb_found) if nb_found >= jrec+rg: out += create_html_link(self.build_search_url(query, jrec=jrec+rg, rg=rg), {}, img('sn', _("next")), {'class':'img'}) if nb_found >= jrec+rg+rg: out += create_html_link(self.build_search_url(query, jrec=nb_found-rg+1, rg=rg), {}, img('se', _("end")), {'class': 'img'}) # still in the navigation part cc = collection sc = 0 for var in ['p', 'cc', 'f', 'sf', 'so', 'of', 'rg', 'as', 'ln', 'p1', 'p2', 'p3', 'f1', 'f2', 'f3', 'm1', 'm2', 'm3', 'op1', 'op2', 'sc', 'd1y', 'd1m', 'd1d', 'd2y', 'd2m', 'd2d', 'dt']: out += self.tmpl_input_hidden(name = var, value = vars()[var]) for var in ['ot', 'sp', 'rm']: if vars()[var]: out += self.tmpl_input_hidden(name = var, value = vars()[var]) if pl_in_url: fieldargs = cgi.parse_qs(pl_in_url) for fieldcode in all_fieldcodes: # get_fieldcodes(): if fieldargs.has_key(fieldcode): for val in fieldargs[fieldcode]: out += self.tmpl_input_hidden(name = fieldcode, value = val) out += """  %(jump)s """ % { 'jump' : _("jump to record:"), 'jrec' : jrec, } if not middle_only: out += "%(time)s 
" else: out += "" out += "
" return out def tmpl_nice_number(self, number, ln=cdslang, thousands_separator=','): """ Return nicely printed number NUMBER in language LN using given THOUSANDS_SEPARATOR character. This version does not pay attention to locale. See tmpl_nice_number_via_locale(). """ if type(number) is float: int_part, frac_part = str(number).split('.') return '%s.%s' % (self.tmpl_nice_number(int(int_part), ln, thousands_separator), frac_part) else: chars_in = list(str(number)) number = len(chars_in) chars_out = [] for i in range(0, number): if i % 3 == 0 and i != 0: chars_out.append(thousands_separator) chars_out.append(chars_in[number-i-1]) chars_out.reverse() return ''.join(chars_out) def tmpl_nice_number_via_locale(self, number, ln=cdslang): """ Return nicely printed number NUM in language LN using the locale. See also version tmpl_nice_number(). """ if number is None: return None # Temporarily switch the numeric locale to the requested one, and format the number # In case the system has no locale definition, use the vanilla form ol = locale.getlocale(locale.LC_NUMERIC) try: locale.setlocale(locale.LC_NUMERIC, self.tmpl_localemap.get(ln, self.tmpl_default_locale)) except locale.Error: return str(number) try: number = locale.format('%d', number, True) except TypeError: return str(number) locale.setlocale(locale.LC_NUMERIC, ol) return number def tmpl_record_format_htmlbrief_header(self, ln): """Returns the header of the search results list when output is html brief. Note that this function is called for each collection results when 'split by collection' is enabled. See also: tmpl_record_format_htmlbrief_footer(..), tmpl_record_format_htmlbrief_body(..) Parameters: - 'ln' *string* - The language to display """ # load the right message language _ = gettext_set_language(ln) out = """
""" % { 'weburl' : weburl, } return out def tmpl_record_format_htmlbrief_footer(self, ln): """Returns the footer of the search results list when output is html brief. Note that this function is called for each collection results when 'split by collection' is enabled. See also: tmpl_record_format_htmlbrief_header(..), tmpl_record_format_htmlbrief_body(..) Parameters: - 'ln' *string* - The language to display """ # load the right message language _ = gettext_set_language(ln) out = """

""" % { 'basket' : _("ADD TO BASKET") } return out def tmpl_record_format_htmlbrief_body(self, ln, recid, row_number, relevance, record, relevances_prologue, relevances_epilogue): """Returns the html brief format of one record. Used in the search results list for each record. See also: tmpl_record_format_htmlbrief_header(..), tmpl_record_format_htmlbrief_footer(..) Parameters: - 'ln' *string* - The language to display - 'row_number' *int* - The position of this record in the list - 'recid' *int* - The recID - 'relevance' *string* - The relevance of the record - 'record' *string* - The formatted record - 'relevances_prologue' *string* - HTML code to prepend the relevance indicator - 'relevances_epilogue' *string* - HTML code to append to the relevance indicator (used mostly for formatting) """ # load the right message language _ = gettext_set_language(ln) out = """ %(number)s. """ % {'recid': recid, 'number': row_number} if relevance: out += """
""" % { 'prologue' : relevances_prologue, 'epilogue' : relevances_epilogue, 'relevance' : relevance } out += """%s""" % record return out def tmpl_print_results_overview(self, ln, weburl, results_final_nb_total, cpu_time, results_final_nb, colls, ec): """Prints results overview box with links to particular collections below. Parameters: - 'ln' *string* - The language to display - 'weburl' *string* - The base URL for the site - 'results_final_nb_total' *int* - The total number of hits for the query - 'colls' *array* - The collections with hits, in the format: - 'coll[code]' *string* - The code of the collection (canonical name) - 'coll[name]' *string* - The display name of the collection - 'results_final_nb' *array* - The number of hits, indexed by the collection codes: - 'cpu_time' *string* - The time the query took - 'url_args' *string* - The rest of the search query - 'ec' *array* - selected external collections """ if len(colls) == 1 and not ec: # if one collection only and no external collections, print nothing: return "" # load the right message language _ = gettext_set_language(ln) # first find total number of hits: out = """
%(founds)s
""" % { 'founds' : _("%(x_fmt_open)sResults overview:%(x_fmt_close)s Found %(x_nb_records)s records in %(x_nb_seconds)s seconds.") %\ {'x_fmt_open': '', 'x_fmt_close': '', 'x_nb_records': '' + self.tmpl_nice_number(results_final_nb_total, ln) + '', 'x_nb_seconds': '%.2f' % cpu_time} } # then print hits per collection: for coll in colls: if results_final_nb.has_key(coll['code']) and results_final_nb[coll['code']] > 0: out += '''%(coll_name)s, %(number)s
''' % { 'coll' : coll['id'], 'coll_name' : cgi.escape(coll['name']), 'number' : _("%s records found") % ('' + self.tmpl_nice_number(results_final_nb[coll['code']], ln) + '') } out += "
" return out def tmpl_search_no_boolean_hits(self, ln, nearestterms): """No hits found, proposes alternative boolean queries Parameters: - 'ln' *string* - The language to display - 'weburl' *string* - The base URL for the site - 'nearestterms' *array* - Parts of the interface to display, in the format: - 'nearestterms[nbhits]' *int* - The resulting number of hits - 'nearestterms[url_args]' *string* - The search parameters - 'nearestterms[p]' *string* - The search terms """ # load the right message language _ = gettext_set_language(ln) out = _("Boolean query returned no hits. Please combine your search terms differently.") out += '''
''' for term, hits, argd in nearestterms: out += '''\ ''' % {'hits' : hits, 'link': create_html_link(self.build_search_url(argd), {}, cgi.escape(term), {'class': "nearestterms"})} out += """
%(hits)s   %(link)s
""" return out def tmpl_similar_author_names(self, authors, ln): """No hits found, proposes alternative boolean queries Parameters: - 'authors': a list of (name, hits) tuples - 'ln' *string* - The language to display """ # load the right message language _ = gettext_set_language(ln) out = ''' ''' % { 'similar' : _("See also: similar author names") } for author, hits in authors: out += '''\ ''' % {'link': create_html_link( self.build_search_url(p=author, f='author', ln=ln), {}, cgi.escape(author), {'class':"google"}), 'nb' : hits} out += """
%(similar)s
%(nb)d %(link)s
""" return out def tmpl_print_record_detailed(self, recID, ln, weburl): """Displays a detailed on-the-fly record Parameters: - 'ln' *string* - The language to display - 'weburl' *string* - The base URL for the site - 'recID' *int* - The record id """ # okay, need to construct a simple "Detailed record" format of our own: out = "

 " # secondly, title: titles = get_fieldvalues(recID, "245__a") for title in titles: out += "

%s

" % cgi.escape(title) # thirdly, authors: authors = get_fieldvalues(recID, "100__a") + get_fieldvalues(recID, "700__a") if authors: out += "

" for author in authors: out += '%s; ' % create_html_link(self.build_search_url( ln=ln, p=author, f='author'), {}, cgi.escape(author)) out += "

" # fourthly, date of creation: dates = get_fieldvalues(recID, "260__c") for date in dates: out += "

%s

" % date # fifthly, abstract: abstracts = get_fieldvalues(recID, "520__a") for abstract in abstracts: out += """

Abstract: %s

""" % abstract # fifthly bis, keywords: keywords = get_fieldvalues(recID, "6531_a") if len(keywords): out += """

Keyword(s):""" for keyword in keywords: out += '%s; ' % create_html_link( self.build_search_url(ln=ln, p=keyword, f='keyword'), {}, cgi.escape(keyword)) out += '

' # fifthly bis bis, published in: prs_p = get_fieldvalues(recID, "909C4p") prs_v = get_fieldvalues(recID, "909C4v") prs_y = get_fieldvalues(recID, "909C4y") prs_n = get_fieldvalues(recID, "909C4n") prs_c = get_fieldvalues(recID, "909C4c") for idx in range(0, len(prs_p)): out += """

Publ. in: %s""" % prs_p[idx] if prs_v and prs_v[idx]: out += """%s""" % prs_v[idx] if prs_y and prs_y[idx]: out += """(%s)""" % prs_y[idx] if prs_n and prs_n[idx]: out += """, no.%s""" % prs_n[idx] if prs_c and prs_c[idx]: out += """, p.%s""" % prs_c[idx] out += """.

""" # sixthly, fulltext link: urls_z = get_fieldvalues(recID, "8564_z") urls_u = get_fieldvalues(recID, "8564_u") for idx in range(0, len(urls_u)): link_text = "URL" try: if urls_z[idx]: link_text = urls_z[idx] except IndexError: pass out += """

%s: %s

""" % (link_text, urls_u[idx], urls_u[idx]) # print some white space at the end: out += "

" return out def tmpl_print_record_list_for_similarity_boxen(self, title, recID_score_list, ln=cdslang): """Print list of records in the "hs" (HTML Similarity) format for similarity boxes. RECID_SCORE_LIST is a list of (recID1, score1), (recID2, score2), etc. """ from invenio.search_engine import print_record, record_public_p recID_score_list_to_be_printed = [] # firstly find 5 first public records to print: nb_records_to_be_printed = 0 nb_records_seen = 0 while nb_records_to_be_printed < 5 and nb_records_seen < len(recID_score_list) and nb_records_seen < 50: # looking through first 50 records only, picking first 5 public ones (recID, score) = recID_score_list[nb_records_seen] nb_records_seen += 1 if record_public_p(recID): nb_records_to_be_printed += 1 recID_score_list_to_be_printed.append([recID, score]) # secondly print them: out = '''
%(title)s
''' % { 'title': cgi.escape(title) } for recid, score in recID_score_list_to_be_printed: out += ''' ''' % { 'score': score, 'info' : print_record(recid, format="hs", ln=ln), } out += """
(%(score)s)  %(info)s
""" return out def tmpl_print_record_brief(self, ln, recID, weburl): """Displays a brief record on-the-fly Parameters: - 'ln' *string* - The language to display - 'weburl' *string* - The base URL for the site - 'recID' *int* - The record id """ out = "" # record 'recID' does not exist in format 'format', so print some default format: # firstly, title: titles = get_fieldvalues(recID, "245__a") # secondly, authors: authors = get_fieldvalues(recID, "100__a") + get_fieldvalues(recID, "700__a") # thirdly, date of creation: dates = get_fieldvalues(recID, "260__c") # thirdly bis, report numbers: rns = get_fieldvalues(recID, "037__a") rns = get_fieldvalues(recID, "088__a") # fourthly, beginning of abstract: abstracts = get_fieldvalues(recID, "520__a") # fifthly, fulltext link: urls_z = get_fieldvalues(recID, "8564_z") urls_u = get_fieldvalues(recID, "8564_u") return self.tmpl_record_body( weburl = weburl, titles = titles, authors = authors, dates = dates, rns = rns, abstracts = abstracts, urls_u = urls_u, urls_z = urls_z, ln=ln) def tmpl_print_record_brief_links(self, ln, recID, weburl): """Displays links for brief record on-the-fly Parameters: - 'ln' *string* - The language to display - 'weburl' *string* - The base URL for the site - 'recID' *int* - The record id """ # load the right message language _ = gettext_set_language(ln) out = "" if CFG_WEBSEARCH_USE_ALEPH_SYSNOS: alephsysnos = get_fieldvalues(recID, "970__a") if len(alephsysnos)>0: alephsysno = alephsysnos[0] out += '
%s' % \ create_html_link(self.build_search_url(sysno=alephsysno, ln=ln), {}, _("Detailed record"), {'class': "moreinfo"}) else: out += '
%s' % \ create_html_link(self.build_search_url(recid=recID, ln=ln), {}, _("Detailed record"), {'class': "moreinfo"}) else: out += '
%s' % \ create_html_link(self.build_search_url(recid=recID, ln=ln), {}, _("Detailed record"), {'class': "moreinfo"}) out += ' - %s' % \ create_html_link(self.build_search_url(p="recid:%d" % recID, rm="wrd", ln=ln), {}, _("Similar records"), {'class': "moreinfo"}) if CFG_BIBRANK_SHOW_CITATION_LINKS: out += ' - %s' % \ create_html_link(self.build_search_url(p="recid:%d" % recID, rm="citation", ln=ln), {}, _("Cited by"), {'class': "moreinfo"}) return out def tmpl_xml_rss_prologue(self): """Creates XML RSS 2.0 prologue.""" out = """ %(cdsname)s %(weburl)s %(cdsname)s latest documents %(cdslang)s %(timestamp)s CDS Invenio %(version)s %(supportemail)s - 1440 + %(timetolive)s %(weburl)s/img/cds.png %(cdsname)s %(weburl)s Search Search this site: p %(weburl)s/search """ % {'cdsname': cdsname, 'weburl': weburl, 'cdslang': cdslang, 'timestamp': time.strftime("%a, %d %b %Y %H:%M:%S %Z", time.localtime()), 'version': version, 'supportemail': supportemail, + 'timetolive': CFG_WEBSEARCH_RSS_TTL } return out def tmpl_xml_rss_epilogue(self): """Creates XML RSS 2.0 epilogue.""" out = """\ \n""" return out def tmpl_xml_nlm_prologue(self): """Creates XML NLM prologue.""" out = """\n""" return out def tmpl_xml_nlm_epilogue(self): """Creates XML NLM epilogue.""" out = """\n""" return out def tmpl_xml_marc_prologue(self): """Creates XML MARC prologue.""" out = """\n""" return out def tmpl_xml_marc_epilogue(self): """Creates XML MARC epilogue.""" out = """\n""" return out def tmpl_xml_default_prologue(self): """Creates XML default format prologue. (Sanity calls only.)""" out = """\n""" return out def tmpl_xml_default_epilogue(self): """Creates XML default format epilogue. (Sanity calls only.)""" out = """\n""" return out def tmpl_collection_not_found_page_title(self, colname, ln=cdslang): """ Create page title for cases when unexisting collection was asked for. """ _ = gettext_set_language(ln) out = _("Collection %s Not Found") % cgi.escape(colname) return out def tmpl_collection_not_found_page_body(self, colname, ln=cdslang): """ Create page body for cases when unexisting collection was asked for. """ _ = gettext_set_language(ln) out = """

%(title)s

%(sorry)s

%(you_may_want)s

""" % { 'title': self.tmpl_collection_not_found_page_title(colname, ln), 'sorry': _("Sorry, collection %s does not seem to exist.") % \ ('' + cgi.escape(colname) + ''), 'you_may_want': _("You may want to start browsing from %s.") % \ ('' + \ cgi.escape(cdsnameintl.get(ln, cdsname)) + '')} return out def tmpl_alert_rss_teaser_box_for_query(self, id_query, ln): """Propose teaser for setting up this query as alert or RSS feed. Parameters: - 'id_query' *int* - ID of the query we make teaser for - 'ln' *string* - The language to display """ # load the right message language _ = gettext_set_language(ln) # get query arguments: res = run_sql("SELECT urlargs FROM query WHERE id=%s", (id_query,)) + argd = {} if res: - rssurl = weburl + '/search?of=xr&' + cgi.escape(res[0][0]) - else: - # cannot detect query arguments, use generic RSS URL - rssurl = weburl + '/rss/' + argd = cgi.parse_qs(res[0][0]) + rssurl = self.build_rss_url(argd) alerturl = weburl + '/youralerts/input?ln=%s&idq=%s' % (ln, id_query) out = '''
%(similar)s
%(msg_alert)s
''' % { 'similar' : _("Interested in being notified about new results for this query?"), 'msg_alert': _("""Set up a personal %(x_url1_open)semail alert%(x_url1_close)s or subscribe to the %(x_url2_open)sRSS feed%(x_url2_close)s.""") % \ {'x_url1_open': ' ' % (alerturl, weburl) + ' ' % (alerturl), 'x_url1_close': '', 'x_url2_open': ' ' % (rssurl, weburl) + ' ' % rssurl, 'x_url2_close': '', }} return out def tmpl_detailed_record_metadata(self, recID, ln, format, content, creationdate=None, modifydate=None): """Returns the main detailed page of a record Parameters: - 'recID' *int* - The ID of the printed record - 'ln' *string* - The language to display - 'format' *string* - The format in used to print the record - 'content' *string* - The main content of the page - 'creationdate' *string* - The creation date of the printed record - 'modifydate' *string* - The last modification date of the printed record """ _ = gettext_set_language(ln) out = content return out def tmpl_detailed_record_statistics(self, recID, ln, downloadsimilarity, downloadhistory, viewsimilarity): """Returns the statistics page of a record Parameters: - 'recID' *int* - The ID of the printed record - 'ln' *string* - The language to display - downloadsimilarity *string* - downloadsimilarity box - downloadhistory *string* - downloadhistory box - viewsimilarity *string* - viewsimilarity box """ # load the right message language _ = gettext_set_language(ln) out = '' if CFG_BIBRANK_SHOW_DOWNLOAD_STATS and downloadsimilarity is not None: similar = self.tmpl_print_record_list_for_similarity_boxen ( _("People who downloaded this document also downloaded:"), downloadsimilarity, ln) out = '' out += ''' ''' % { 'weburl': weburl, 'recid': recID, 'ln': ln, 'similar': similar, 'more': _("more"), 'graph': downloadsimilarity } out += '
%(graph)s
%(similar)s
' out += '
' if CFG_BIBRANK_SHOW_READING_STATS and viewsimilarity is not None: out += self.tmpl_print_record_list_for_similarity_boxen ( _("People who viewed this page also viewed:"), viewsimilarity, ln) if CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS and downloadhistory is not None: out += downloadhistory + '
' return out def tmpl_detailed_record_citations(self, recID, ln, citinglist, citationhistory, cociting,selfcited): """Returns the citations page of a record Parameters: - 'recID' *int* - The ID of the printed record - 'ln' *string* - The language to display - citinglist *list* - a list of tuples [(x1,y1),(x2,y2),..] where x is doc id and y is number of citations - citationhistory *string* - citationhistory box - cociting *string* - cociting box - selfcited list - a list of self-citations for recID """ # load the right message language _ = gettext_set_language(ln) out = '' if CFG_BIBRANK_SHOW_CITATION_STATS and citinglist is not None: similar = self.tmpl_print_record_list_for_similarity_boxen( _("Cited by: %s records") % len (citinglist), citinglist, ln) out += ''' ''' % { 'more': create_html_link( self.build_search_url(p='recid:%d' % \ recID, #XXXX rm='citation', ln=ln), {}, _("more")), 'similar': similar} if CFG_BIBRANK_SHOW_CITATION_GRAPHS and selfcited is not None: sc_scorelist = [] #a score list for print.. for s in selfcited: #copy weight from citations weight = 0 for c in citinglist: (crec,score) = c if crec == s: weight = score tmp = [s,weight] sc_scorelist.append(tmp) scite = self.tmpl_print_record_list_for_similarity_boxen ( _(".. of which self-citations: %s records") % len (selfcited), sc_scorelist, ln) out += '' if CFG_BIBRANK_SHOW_CITATION_STATS and cociting is not None: similar = self.tmpl_print_record_list_for_similarity_boxen ( _("Co-cited with: %s records") % len (cociting), cociting, ln) out += ''' ''' % { 'more': create_html_link(self.build_search_url(p='cocitedwith:%d' % recID, ln=ln), {}, _("more")), 'similar': similar} if CFG_BIBRANK_SHOW_CITATION_GRAPHS and citationhistory is not None: out += '' % citationhistory out += '
%(similar)s %(more)s

'+scite+'
%(similar)s %(more)s
%s
' return out def tmpl_detailed_record_references(self, recID, ln, content): """Returns the references page of a record Parameters: - 'recID' *int* - The ID of the printed record - 'ln' *string* - The language to display - 'content' *string* - The main content of the page """ # load the right message language _ = gettext_set_language(ln) out = '' if content is not None: out += content return out diff --git a/modules/websearch/lib/websearch_webinterface.py b/modules/websearch/lib/websearch_webinterface.py index 28dafb469..91ec3ca95 100644 --- a/modules/websearch/lib/websearch_webinterface.py +++ b/modules/websearch/lib/websearch_webinterface.py @@ -1,645 +1,714 @@ ## $Id$ ## ## This file is part of CDS Invenio. ## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 CERN. ## ## CDS Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## CDS Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with CDS Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """WebSearch URL handler.""" __revision__ = "$Id$" import cgi +import os +import datetime from urllib import quote from mod_python import apache -from invenio.config import weburl, cdsname, cachedir, cdslang, adminemail, sweburl +from invenio.config import \ + weburl, \ + cdsname, \ + cachedir, \ + cdslang, \ + adminemail, \ + sweburl, \ + CFG_WEBSEARCH_INSTANT_BROWSE_RSS, \ + CFG_WEBSEARCH_RSS_TTL from invenio.dbquery import Error from invenio.webinterface_handler import wash_urlargd, WebInterfaceDirectory from invenio.urlutils import redirect_to_url, make_canonical_urlargd, drop_default_urlargd from invenio.webuser import getUid, page_not_authorized, get_user_preferences, \ collect_user_info, http_check_credentials from invenio import search_engine from invenio.websubmit_webinterface import WebInterfaceFilesPages from invenio.webcomment_webinterface import WebInterfaceCommentsPages from invenio.webpage import page, create_error_box from invenio.messages import gettext_set_language from invenio.search_engine import get_colID, get_coll_i18nname, collection_restricted_p from invenio.access_control_engine import acc_authorize_action from invenio.access_control_config import VIEWRESTRCOLL from invenio.access_control_mailcookie import mail_cookie_create_authorize_action +from invenio.bibformat import format_records +from invenio.websearch_webcoll import mymkdir import invenio.template websearch_templates = invenio.template.load('websearch') search_results_default_urlargd = websearch_templates.search_results_default_urlargd search_interface_default_urlargd = websearch_templates.search_interface_default_urlargd output_formats = ['xm', 'xd', 'hm', 'hx', 'hd', 'hb', 'xe', 'xn'] def wash_search_urlargd(form): """ Create canonical search arguments from those passed via web form. """ argd = wash_urlargd(form, search_results_default_urlargd) # Sometimes, users pass ot=245,700 instead of # ot=245&ot=700. Normalize that.
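    # A hedged illustration with hypothetical values: a request such as
    # /search?ot=245,700&ot=100 arrives here as argd['ot'] == ['245,700', '100']
    # and leaves the loop below as ['245', '700', '100'].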
ots = [] for ot in argd['ot']: ots += ot.split(',') argd['ot'] = ots # We can either get the mode of function as # action=, or by setting action_browse or # action_search. if argd['action_browse']: argd['action'] = 'browse' elif argd['action_search']: argd['action'] = 'search' else: if argd['action'] not in ('browse', 'search'): argd['action'] = 'search' del argd['action_browse'] del argd['action_search'] return argd class WebInterfaceRecordPages(WebInterfaceDirectory): """ Handling of a /record/ URL fragment """ _exports = ['', 'files', 'reviews', 'comments', 'statistics', 'references', 'export', 'citations'] #_exports.extend(output_formats) def __init__(self, recid, tab, format=None): self.recid = recid self.tab = tab self.format = format self.export = self self.files = WebInterfaceFilesPages(self.recid) self.reviews = WebInterfaceCommentsPages(self.recid, reviews=1) self.comments = WebInterfaceCommentsPages(self.recid) self.statistics = self self.references = self self.citations = self self.export = WebInterfaceRecordExport(self.recid, self.format) return def __call__(self, req, form): argd = wash_search_urlargd(form) argd['recid'] = self.recid argd['tab'] = self.tab if self.format is not None: argd['of'] = self.format req.argd = argd uid = getUid(req) if uid == -1: return page_not_authorized(req, "../", text="You are not authorized to view this record.", navmenuid='search') elif uid > 0: pref = get_user_preferences(uid) try: argd['rg'] = int(pref['websearch_group_records']) except (KeyError, ValueError): pass # Check if the record belongs to a restricted primary # collection. If yes, redirect to the authenticated URL. record_primary_collection = search_engine.guess_primary_collection_of_a_record(self.recid) if collection_restricted_p(record_primary_collection): user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=record_primary_collection) if auth_code and user_info['email'] == 'guest': target = '/youraccount/login' + \ make_canonical_urlargd({'action': VIEWRESTRCOLL, 'ln' : argd['ln'], 'referer' : \ weburl + '/record/' + str(self.recid) + make_canonical_urlargd(argd, \ search_results_default_urlargd)}, {'ln' : cdslang}) return redirect_to_url(req, target) elif auth_code: return page_not_authorized(req, "../", \ text = auth_msg,\ navmenuid='search') #del argd['recid'] # not wanted argument for detailed record page #target = '/record-restricted/' + str(self.recid) + '/' + \ #make_canonical_urlargd(argd, search_results_default_urlargd) #return redirect_to_url(req, target) # mod_python does not like to return [] in case when of=id: out = search_engine.perform_request_search(req, **argd) if out == []: return str(out) else: return out # Return the same page whether we ask for /record/123 or /record/123/ index = __call__ class WebInterfaceRecordRestrictedPages(WebInterfaceDirectory): """ Handling of a /record-restricted/ URL fragment """ _exports = ['', 'files', 'reviews', 'comments', 'statistics', 'references', 'export', 'citations'] #_exports.extend(output_formats) def __init__(self, recid, tab, format=None): self.recid = recid self.tab = tab self.format = format self.files = WebInterfaceFilesPages(self.recid) self.reviews = WebInterfaceCommentsPages(self.recid, reviews=1) self.comments = WebInterfaceCommentsPages(self.recid) self.statistics = self self.references = self self.citations = self self.export = WebInterfaceRecordExport(self.recid, self.format) return def __call__(self, req, form): argd = wash_search_urlargd(form)
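    # At this point argd holds only washed values: every key declared in
    # search_results_default_urlargd is present, cast to its declared type,
    # with defaults (e.g. of='hb', rg=10) filled in; the record id and
    # output format are then forced below.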
argd['recid'] = self.recid if self.format is not None: argd['of'] = self.format req.argd = argd uid = getUid(req) user_info = collect_user_info(req) if uid == -1: return page_not_authorized(req, "../", text="You are not authorized to view this record.", navmenuid='search') elif uid > 0: pref = get_user_preferences(uid) try: argd['rg'] = int(pref['websearch_group_records']) except (KeyError, ValueError): pass record_primary_collection = search_engine.guess_primary_collection_of_a_record(self.recid) if collection_restricted_p(record_primary_collection): (auth_code, dummy) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=record_primary_collection) if auth_code: return page_not_authorized(req, "../", text="You are not authorized to view this record.", navmenuid='search') # Keep all the arguments, they might be reused in the # record page itself to derivate other queries req.argd = argd # mod_python does not like to return [] in case when of=id: out = search_engine.perform_request_search(req, **argd) if out == []: return str(out) else: return out # Return the same page wether we ask for /record/123 or /record/123/ index = __call__ class WebInterfaceSearchResultsPages(WebInterfaceDirectory): """ Handling of the /search URL and its sub-pages. """ _exports = ['', 'authenticate', 'cache', 'log'] def __call__(self, req, form): """ Perform a search. """ argd = wash_search_urlargd(form) _ = gettext_set_language(argd['ln']) if req.method == 'POST': raise apache.SERVER_RETURN, apache.HTTP_METHOD_NOT_ALLOWED uid = getUid(req) user_info = collect_user_info(req) if uid == -1: return page_not_authorized(req, "../", text = _("You are not authorized to view this area."), navmenuid='search') elif uid > 0: pref = get_user_preferences(uid) try: argd['rg'] = int(pref['websearch_group_records']) except (KeyError, ValueError): pass # If any of the collection requires authentication, redirect # to the authentication form. 
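        # argd['cc'] is the current collection and argd['c'] the list of extra
        # collections selected in the search form; each of them must be readable.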
for coll in argd['c'] + [argd['cc']]: if collection_restricted_p(coll): (auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=coll) if auth_code and user_info['email'] == 'guest': cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : coll}) target = '/youraccount/login' + \ make_canonical_urlargd({'action' : cookie, 'ln' : argd['ln'], 'referer' : \ weburl + '/search' + make_canonical_urlargd(argd, \ search_results_default_urlargd)}, {'ln' : cdslang}) return redirect_to_url(req, target) elif auth_code: return page_not_authorized(req, "../", \ text = auth_msg,\ navmenuid='search') # Keep all the arguments, they might be reused in the # search_engine itself to derivate other queries req.argd = argd # mod_python does not like to return [] in case when of=id: out = search_engine.perform_request_search(req, **argd) if out == []: return str(out) else: return out def cache(self, req, form): """Search cache page.""" argd = wash_urlargd(form, {'action': (str, 'show')}) return search_engine.perform_request_cache(req, action=argd['action']) def log(self, req, form): """Search log page.""" argd = wash_urlargd(form, {'date': (str, '')}) return search_engine.perform_request_log(req, date=argd['date']) def authenticate(self, req, form): """Restricted search results pages.""" argd = wash_search_urlargd(form) user_info = collect_user_info(req) for coll in argd['c'] + [argd['cc']]: if collection_restricted_p(coll): (auth_code, dummy) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=coll) if auth_code: return page_not_authorized(req, "../", text="You are not authorized to view this collection.", navmenuid='search') # Keep all the arguments, they might be reused in the # search_engine itself to derivate other queries req.argd = argd uid = getUid(req) if uid > 0: pref = get_user_preferences(uid) try: argd['rg'] = int(pref['websearch_group_records']) except (KeyError, ValueError): pass # mod_python does not like to return [] in case when of=id: out = search_engine.perform_request_search(req, **argd) if out == []: return str(out) else: return out # Parameters for the legacy URLs, of the form /?c=ALEPH legacy_collection_default_urlargd = { 'as': (int, 0), 'verbose': (int, 0), 'c': (str, cdsname)} class WebInterfaceSearchInterfacePages(WebInterfaceDirectory): """ Handling of collection navigation.""" _exports = [('index.py', 'legacy_collection'), ('', 'legacy_collection'), ('search.py', 'legacy_search'), 'search'] search = WebInterfaceSearchResultsPages() def _lookup(self, component, path): """ This handler is invoked for the dynamic URLs (for collections and records)""" if component == 'collection': c = '/'.join(path) def answer(req, form): """Accessing collections cached pages.""" # Accessing collections: this is for accessing the # cached page on top of each collection. 
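            # 'c' was assembled above as '/'.join(path), so a URL such as
            # /collection/Foo/Bar resolves to the collection named "Foo/Bar".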
argd = wash_urlargd(form, search_interface_default_urlargd) # We simply return the cached page of the collection argd['c'] = c if not argd['c']: # collection argument not present; display # home collection by default argd['c'] = cdsname return display_collection(req, **argd) return answer, [] elif component == 'record' or component == 'record-restricted': try: recid = int(path[0]) except IndexError: # display record #1 for URL /record without a number recid = 1 except ValueError: if path[0] == '': # display record #1 for URL /record/ without a number recid = 1 else: # display page not found for URLs like /record/foo return None, [] if recid <= 0: # display page not found for URLs like /record/-5 or /record/0 return None, [] format = None tab = '' try: if path[1] in ['', 'files', 'reviews', 'comments', 'statistics', 'references', 'citations']: tab = path[1] elif path[1] == 'export': tab = '' format = path[2] # format = None # elif path[1] in output_formats: # tab = '' # format = path[1] else: # display page not found for URLs like /record/references # for a collection where 'references' tabs is not visible return None, [] except IndexError: # Keep normal url if tabs is not specified pass if component == 'record-restricted': return WebInterfaceRecordRestrictedPages(recid, tab, format), path[1:] else: return WebInterfaceRecordPages(recid, tab, format), path[1:] return None, [] def legacy_collection(self, req, form): """Collection URL backward compatibility handling.""" accepted_args = dict(legacy_collection_default_urlargd) accepted_args.update({'referer' : (str, '%s/youraccount/your'), 'realm' : (str, '')}) argd = wash_urlargd(form, accepted_args) # Apache authentication stuff if argd['realm']: http_check_credentials(req, argd['realm']) return redirect_to_url(req, argd['referer'] or '%s/youraccount/youradminactivities' % sweburl) del argd['referer'] del argd['realm'] # If we specify no collection, then we don't need to redirect # the user, so that accessing returns the # default collection. if not form.has_key('c'): return display_collection(req, **argd) # make the collection an element of the path, and keep the # other query elements as is. If the collection is cdsname, # however, redirect to the main URL. c = argd['c'] del argd['c'] if c == cdsname: target = '/' else: target = '/collection/' + quote(c) target += make_canonical_urlargd(argd, legacy_collection_default_urlargd) return redirect_to_url(req, target) def legacy_search(self, req, form): """Search URL backward compatibility handling.""" argd = wash_search_urlargd(form) # We either jump into the generic search form, or the specific # /record/... display if a recid is requested if argd['recid'] != -1: target = '/record/%d' % argd['recid'] del argd['recid'] else: target = '/search' target += make_canonical_urlargd(argd, search_results_default_urlargd) return redirect_to_url(req, target) def display_collection(req, c, as, verbose, ln): "Display search interface page for collection c by looking in the collection cache." 
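    # The snippets read below (navtrail, body, portalboxes, last-updated) are
    # pre-rendered by webcoll under <cachedir>/collections/<colID>/ and are
    # selected according to the 'as' and 'ln' arguments.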
_ = gettext_set_language(ln) req.argd = drop_default_urlargd({'as': as, 'verbose': verbose, 'ln': ln}, search_interface_default_urlargd) # get user ID: try: uid = getUid(req) user_preferences = {} if uid == -1: return page_not_authorized(req, "../", text="You are not authorized to view this collection", navmenuid='search') elif uid > 0: user_preferences = get_user_preferences(uid) except Error: return page(title=_("Internal Error"), body = create_error_box(req, verbose=verbose, ln=ln), description="%s - Internal Error" % cdsname, keywords="%s, Internal Error" % cdsname, language=ln, req=req, navmenuid='search') # start display: req.content_type = "text/html" req.send_http_header() # deduce collection id: colID = get_colID(c) if type(colID) is not int: page_body = '

' + (_("Sorry, collection %s does not seem to exist.") % ('' + str(c) + '')) + '

' page_body = '

' + (_("You may want to start browsing from %s.") % ('' + get_coll_i18nname(cdsname, ln) + '')) + '

' return page(title=_("Collection %s Not Found") % cgi.escape(c), body=page_body, description=(cdsname + ' - ' + _("Not found") + ': ' + cgi.escape(str(c))), keywords="%s" % cdsname, uid=uid, language=ln, req=req, navmenuid='search') # display collection interface page: try: filedesc = open("%s/collections/%d/navtrail-as=%d-ln=%s.html" % (cachedir, colID, as, ln), "r") c_navtrail = filedesc.read() filedesc.close() filedesc = open("%s/collections/%d/body-as=%d-ln=%s.html" % (cachedir, colID, as, ln), "r") c_body = filedesc.read() filedesc.close() filedesc = open("%s/collections/%d/portalbox-tp-ln=%s.html" % (cachedir, colID, ln), "r") c_portalbox_tp = filedesc.read() filedesc.close() filedesc = open("%s/collections/%d/portalbox-te-ln=%s.html" % (cachedir, colID, ln), "r") c_portalbox_te = filedesc.read() filedesc.close() filedesc = open("%s/collections/%d/portalbox-lt-ln=%s.html" % (cachedir, colID, ln), "r") c_portalbox_lt = filedesc.read() filedesc.close() # show help boxes (usually located in "tr", "top right") # if users have not banned them in their preferences: c_portalbox_rt = "" if user_preferences.get('websearch_helpbox', 1) > 0: filedesc = open("%s/collections/%d/portalbox-rt-ln=%s.html" % (cachedir, colID, ln), "r") c_portalbox_rt = filedesc.read() filedesc.close() filedesc = open("%s/collections/%d/last-updated-ln=%s.html" % (cachedir, colID, ln), "r") c_last_updated = filedesc.read() filedesc.close() + title = get_coll_i18nname(c, ln) + rssurl = weburl + '/rss' + if c != cdsname: + rssurl += '?cc=' + quote(c) return page(title=title, body=c_body, navtrail=c_navtrail, description="%s - %s" % (cdsname, c), keywords="%s, %s" % (cdsname, c), uid=uid, language=ln, req=req, cdspageboxlefttopadd=c_portalbox_lt, cdspageboxrighttopadd=c_portalbox_rt, titleprologue=c_portalbox_tp, titleepilogue=c_portalbox_te, lastupdated=c_last_updated, - navmenuid='search') + navmenuid='search', + rssurl=rssurl) except: if verbose >= 9: req.write("
c=%s" % c) req.write("
as=%s" % as) req.write("
ln=%s" % ln) req.write("
colID=%s" % colID) req.write("
uid=%s" % uid) return page(title=_("Internal Error"), body = create_error_box(req, ln=ln), description="%s - Internal Error" % cdsname, keywords="%s, Internal Error" % cdsname, uid=uid, language=ln, req=req, navmenuid='search') return "\n" class WebInterfaceRSSFeedServicePages(WebInterfaceDirectory): """RSS 2.0 feed service pages.""" def __call__(self, req, form): """RSS 2.0 feed service.""" - # FIXME: currently searching live, should put cache in place via webcoll - return search_engine.perform_request_search(req, of="xr") + + # Keep only interesting parameters for the search + argd = wash_urlargd(form, websearch_templates.rss_default_urlargd) + + # Create a standard filename with these parameters + args = websearch_templates.build_rss_url(argd).split('/')[-1] + + req.content_type = "application/rss+xml" + req.send_http_header() + try: + # Try to read from cache + path = "%s/rss/%s.xml" % (cachedir, args) + filedesc = open(path, "r") + # Check if cache needs refresh + last_update_time = datetime.datetime.fromtimestamp(os.stat(os.path.abspath(path)).st_mtime) + assert(datetime.datetime.now() < last_update_time + datetime.timedelta(minutes=CFG_WEBSEARCH_RSS_TTL)) + c_rss = filedesc.read() + filedesc.close() + req.write(c_rss) + return + except Exception, e: + # do it live and cache + rss_prologue = '\n' + \ + websearch_templates.tmpl_xml_rss_prologue() + '\n' + req.write(rss_prologue) + + recIDs = search_engine.perform_request_search(req, of="id", + c=argd['c'], cc=argd['cc'], + p=argd['p'], f=argd['f'], + p1=argd['p1'], f1=argd['f1'], + m1=argd['m1'], op1=argd['op1'], + p2=argd['p2'], f2=argd['f2'], + m2=argd['m2'], op2=argd['op2'], + p3=argd['p3'], f3=argd['f3'], + m3=argd['m3'])[:-(CFG_WEBSEARCH_INSTANT_BROWSE_RSS+1):-1] + rss_body = format_records(recIDs, + of='xr', + record_separator="\n", + req=req, epilogue="\n") + rss_epilogue = websearch_templates.tmpl_xml_rss_epilogue() + '\n' + req.write(rss_epilogue) + + # update cache + dirname = "%s/rss" % (cachedir) + mymkdir(dirname) + fullfilename = "%s/rss/%s.xml" % (cachedir, args) + try: + os.umask(022) + f = open(fullfilename, "w") + except IOError, v: + raise v + + f.write(rss_prologue + rss_body + rss_epilogue) + f.close() index = __call__ class WebInterfaceRecordExport(WebInterfaceDirectory): """ Handling of a /record//export/ URL fragment """ _exports = output_formats def __init__(self, recid, format=None): self.recid = recid self.format = format for output_format in output_formats: self.__dict__[output_format] = self return def __call__(self, req, form): argd = wash_search_urlargd(form) argd['recid'] = self.recid if self.format is not None: argd['of'] = self.format req.argd = argd uid = getUid(req) if uid == -1: return page_not_authorized(req, "../", text="You are not authorized to view this record.", navmenuid='search') elif uid > 0: pref = get_user_preferences(uid) try: argd['rg'] = int(pref['websearch_group_records']) except (KeyError, ValueError): pass # Check if the record belongs to a restricted primary # collection. If yes, redirect to the authenticated URL. 
record_primary_collection = search_engine.guess_primary_collection_of_a_record(self.recid) if collection_restricted_p(record_primary_collection): user_info = collect_user_info(req) (auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=record_primary_collection) if auth_code and user_info['email'] == 'guest': cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : coll}) target = '/youraccount/login' + \ make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : \ weburl + '/record/' + str(self.recid) + make_canonical_urlargd(argd, \ search_results_default_urlargd)}, {'ln' : cdslang}) return redirect_to_url(req, target) elif auth_code: return page_not_authorized(req, "../", \ text = auth_msg,\ navmenuid='search') #del argd['recid'] # not wanted argument for detailed record page #target = '/record-restricted/' + str(self.recid) + '/' + \ #make_canonical_urlargd(argd, search_results_default_urlargd) #return redirect_to_url(req, target) # mod_python does not like to return [] in case when of=id: out = search_engine.perform_request_search(req, **argd) if out == []: return str(out) else: return out # Return the same page wether we ask for /record/123/export/xm or /record/123/export/xm/ index = __call__ diff --git a/modules/webstyle/lib/webpage.py b/modules/webstyle/lib/webpage.py index 7c5fcd0e2..a3f1812b6 100644 --- a/modules/webstyle/lib/webpage.py +++ b/modules/webstyle/lib/webpage.py @@ -1,241 +1,246 @@ ## $Id$ ## This file is part of CDS Invenio. ## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 CERN. ## ## CDS Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## CDS Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with CDS Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """CDS Invenio Web Page Functions""" __revision__ = "$Id$" from invenio.config import \ CFG_WEBSTYLE_CDSPAGEBOXLEFTBOTTOM, \ CFG_WEBSTYLE_CDSPAGEBOXLEFTTOP, \ CFG_WEBSTYLE_CDSPAGEBOXRIGHTBOTTOM, \ CFG_WEBSTYLE_CDSPAGEBOXRIGHTTOP, \ CFG_WEBSTYLE_CDSPAGEFOOTER, \ CFG_WEBSTYLE_CDSPAGEHEADER, \ cdslang, \ - supportemail + supportemail, \ + weburl from invenio.messages import gettext_set_language from invenio.webuser import create_userinfobox_body from invenio.errorlib import get_msgs_for_code_list, register_errors import invenio.template webstyle_templates = invenio.template.load('webstyle') from xml.dom.minidom import parseString, getDOMImplementation def create_navtrailbox_body(title, previous_links, prolog="", separator=""" > """, epilog="", language=cdslang): """Create navigation trail box body input: title = page title; previous_links = the trail content from site title until current page (both ends exlusive). 
output: text containing the navtrail """ return webstyle_templates.tmpl_navtrailbox_body(ln = language, title = title, previous_links = \ previous_links, separator = separator, prolog = prolog, epilog = epilog) def page(title, body, navtrail="", description="", keywords="", uid=0, cdspageheaderadd="", cdspageboxlefttopadd="", cdspageboxleftbottomadd="", cdspageboxrighttopadd="", cdspageboxrightbottomadd="", cdspagefooteradd="", lastupdated="", language=cdslang, verbose=1, titleprologue="", titleepilogue="", secure_page_p=0, req=None, errors=[], warnings=[], navmenuid="admin", - navtrail_append_title_p=1, of=""): + navtrail_append_title_p=1, of="", rssurl=weburl+"/rss"): """page(): display CDS web page input: title of the page body of the page in html format description goes to the metadata in the header of the HTML page keywords goes to the metadata in the header of the html page cdspageheaderadd is a message to be displayed just under the page header cdspageboxlefttopadd is a message to be displayed in the page body on left top cdspageboxleftbottomadd is a message to be displayed in the page body on left bottom cdspageboxrighttopadd is a message to be displayed in the page body on right top cdspageboxrightbottomadd is a message to be displayed in the page body on right bottom cdspagefooteradd is a message to be displayed on the top of the page footer lastupdated is a text containing the info on last update (optional) language is the language version of the page verbose is verbosity of the page (useful for debugging) titleprologue is to be printed right before page title titleepilogue is to be printed right after page title req is the mod_python request object errors is the list of error codes as defined in the moduleName_config.py file of the calling module log is the string of data that should be appended to the log file (errors automatically logged) secure_page_p is 0 or 1 and tells whether we are to use HTTPS friendly page elements or not navmenuid the section of the website this page belongs (search, submit, baskets, etc.) navtrail_append_title_p is 0 or 1 and tells whether page title is appended to navtrail of is an output format (use xx for xml output (e.g. AJAX)) + rssfeed is the url of the RSS feed for this page output: the final cds page with header, footer, etc. """ _ = gettext_set_language(language) if of == 'xx': #xml output (e.g. 
AJAX calls) => of=xx req.content_type = 'text/xml' impl = getDOMImplementation() output = impl.createDocument(None, "invenio-message", None) root = output.documentElement body_node = output.createElement('body') body_text = output.createCDATASection(unicode(body, 'utf_8')) body_node.appendChild(body_text) root.appendChild(body_node) if errors: errors_node = output.createElement('errors') errors = get_msgs_for_code_list(errors, 'error', language) register_errors(errors, 'error', req) for (error_code, error_msg) in errors: error_node = output.createElement('error') error_node.setAttribute('code', error_code) error_text = output.createTextNode(error_msg) error_node.appendChild(error_text) errors_node.appendChild(error_node) root.appendChild(errors_node) if warnings: warnings_node = output.createElement('warnings') warnings = get_msgs_for_code_list(warnings, 'warning', language) register_errors(warnings, 'warning') for (warning_code, warning_msg) in warnings: warning_node = output.createElement('warning') warning_node.setAttribute('code', warning_code) warning_text = output.createTextNode(warning_msg) warning_node.appendChild(warning_text) warnings_node.appendChild(warning_node) root.appendChild(warnings_node) return output.toprettyxml(encoding="utf-8" ) else: #usual output # if there are event if warnings: warnings = get_msgs_for_code_list(warnings, 'warning', language) register_errors(warnings, 'warning') # if there are errors if errors: errors = get_msgs_for_code_list(errors, 'error', language) register_errors(errors, 'error', req) body = create_error_box(req, errors=errors, ln=language) return webstyle_templates.tmpl_page(req, ln=language, description = description, keywords = keywords, userinfobox = create_userinfobox_body(req, uid, language), navtrailbox = create_navtrailbox_body(navtrail_append_title_p \ and title or '', navtrail, language=language), uid = uid, secure_page_p = secure_page_p, # pageheader = CFG_WEBSTYLE_CDSPAGEHEADER, pageheaderadd = cdspageheaderadd, boxlefttop = CFG_WEBSTYLE_CDSPAGEBOXLEFTTOP, boxlefttopadd = cdspageboxlefttopadd, boxleftbottomadd = cdspageboxleftbottomadd, boxleftbottom = CFG_WEBSTYLE_CDSPAGEBOXLEFTBOTTOM, boxrighttop = CFG_WEBSTYLE_CDSPAGEBOXRIGHTTOP, boxrighttopadd = cdspageboxrighttopadd, boxrightbottomadd = cdspageboxrightbottomadd, boxrightbottom = CFG_WEBSTYLE_CDSPAGEBOXRIGHTBOTTOM, titleprologue = titleprologue, title = title, titleepilogue = titleepilogue, body = body, # pagefooter = CFG_WEBSTYLE_CDSPAGEFOOTER, lastupdated = lastupdated, pagefooteradd = cdspagefooteradd, - navmenuid = navmenuid) + navmenuid = navmenuid, + rssurl = rssurl) def pageheaderonly(title, navtrail="", description="", keywords="", uid=0, cdspageheaderadd="", language=cdslang, req=None, secure_page_p=0, verbose=1, navmenuid="admin", - navtrail_append_title_p=1, metaheaderadd="", ): + navtrail_append_title_p=1, metaheaderadd="", + rssurl=weburl+"/rss"): """Return just the beginning of page(), with full headers. 
Suitable for the search results page and any long-taking scripts.""" return webstyle_templates.tmpl_pageheader(req, ln = language, headertitle = title, description = description, keywords = keywords, metaheaderadd = metaheaderadd, userinfobox = create_userinfobox_body(req, uid, language), navtrailbox = create_navtrailbox_body(navtrail_append_title_p \ and title or '', navtrail, language=language), uid = uid, secure_page_p = secure_page_p, # pageheader = CFG_WEBSTYLE_CDSPAGEHEADER, pageheaderadd = cdspageheaderadd, - navmenuid = navmenuid) + navmenuid = navmenuid, + rssurl = rssurl) def pagefooteronly(cdspagefooteradd="", lastupdated="", language=cdslang, req=None, verbose=1): """Return just the ending of page(), with full footer. Suitable for the search results page and any long-taking scripts.""" return webstyle_templates.tmpl_pagefooter(req, ln=language, lastupdated = lastupdated, pagefooteradd = cdspagefooteradd) def create_error_box(req, title=None, verbose=1, ln=cdslang, errors=None): """Analyse the req object and the sys traceback and return a text message box with internal information that would be suitful to display when something bad has happened. """ _ = gettext_set_language(ln) return webstyle_templates.tmpl_error_box(title = title, ln = ln, verbose = verbose, req = req, supportemail = supportemail, errors = errors) def adderrorbox(header='', datalist=[]): """used to create table around main data on a page, row based""" try: perc = str(100 // len(datalist)) + '%' except ZeroDivisionError: perc = 1 output = '' output += '' % (len(datalist), header) output += '' for row in [datalist]: output += '' for data in row: output += '' output += '' output += '
<td style="vertical-align: top; width: %s;">' % (perc, ) output += data output += '</td>' output += '</tr>' output += '</tbody></table>' return output
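The new 'rssurl' keyword threads through page() and pageheaderonly() into the webstyle templates, so callers can point a page's feed link at a collection-specific RSS query. A minimal sketch of such a call, mirroring what display_collection() does above (the collection name "Theses" and the wrapper function are illustrative, not part of this patch):

    from urllib import quote
    from invenio.config import weburl
    from invenio.webpage import page

    def render_theses_page(req, body):
        # Point this page's RSS link at the feed restricted to the
        # (made-up) "Theses" collection, i.e. /rss?cc=Theses.
        return page(title="Theses",
                    body=body,
                    req=req,
                    navmenuid='search',
                    rssurl=weburl + '/rss?cc=' + quote('Theses'))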