Index: fixes/fix_boxes.py
===================================================================
--- fixes/fix_boxes.py (revision 105)
+++ fixes/fix_boxes.py (revision 106)
@@ -1,215 +1,215 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This program can be used to process words after they have been merged with faksimile data.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see 1}}}
from colorama import Fore, Style
from deprecated import deprecated
from functools import cmp_to_key
import getopt
import inspect
import lxml.etree as ET
import re
import shutil
import string
from svgpathtools import svg2paths2, svg_to_paths
from svgpathtools.path import Path as SVGPath
from svgpathtools.path import Line
import sys
import tempfile
from operator import attrgetter
import os
from os import listdir, sep, path, setpgrp, devnull
from os.path import exists, isfile, isdir, dirname, basename
from progress.bar import Bar
import warnings
from fix_old_data import save_page
sys.path.append('svgscripts')
from convert_wordPositions import HTMLConverter
from datatypes.box import Box
from datatypes.faksimile import FaksimilePage
-from datatypes.manuscript import ArchivalManuscriptUnity
+from datatypes.archival_manuscript import ArchivalManuscriptUnity
from datatypes.mark_foreign_hands import MarkForeignHands
from datatypes.page import Page, STATUS_MERGED_OK, STATUS_POSTMERGED_OK
from datatypes.path import Path
from datatypes.text_connection_mark import TextConnectionMark
from datatypes.transkriptionField import TranskriptionField
from datatypes.transkription_position import TranskriptionPosition
from datatypes.word import Word, update_transkription_position_ids
from join_faksimileAndTranskription import sort_words
from util import back_up, back_up_svg_file, copy_faksimile_svg_file
from process_files import update_svgposfile_status
from process_words_post_merging import update_faksimile_line_positions, MERGED_DIR
sys.path.append('shared_util')
from myxmlwriter import write_pretty, xml_has_type, FILE_TYPE_SVG_WORD_POSITION, FILE_TYPE_XML_MANUSCRIPT
from main_util import create_function_dictionary
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
UNITTESTING = False
MAX_SVG_XY_THRESHOLD = 10
BOX_ERROR_STATUS = 'box error'
DEBUG_MSG = 'TODO: should have a box'
class WordWithBoxes(Word):
    # Word subclass that restores box information recorded in <debug> child
    # nodes of a word element (written during merging).

    @classmethod
    def create_cls(cls, word_node):
        """Creates a word from a (lxml.Element) node.
        [:return:] WordWithBoxes
        """
        word = super(WordWithBoxes,cls).create_cls(word_node)
        # NOTE(review): initialized but never appended to below — confirm intent
        word.missing_boxes = []
        for index, debug_node in enumerate(word_node.xpath('./debug')):
            missing_text = debug_node.get('text')
            # 'earlier-version' is a string attribute; only the literal 'true' counts
            is_earlier_version = bool(debug_node.get('earlier-version'))\
                    and debug_node.get('earlier-version') == 'true'
            overwritten_by = debug_node.get('overwritten-by')
            if overwritten_by is not None:
                # the box belongs to the word part whose text == overwritten_by
                split_into_parts_and_attach_box(word, index, missing_text, is_earlier_version, overwritten_by)
            else:
                # no target text -> attach a single box to the whole word
                attach_box(word, 0, missing_text, False)
        word.create_correction_history()
        if len(word.corrections) > 0:
            # corrected words: the parts no longer overwrite another word
            for wp in word.word_parts:
                wp.overwrites_word = None
        return word
def attach_box(target_word, box_index, earlier_text, is_earlier_version):
    """Attach a Box (built from the word's transkription position(s)) to target_word.

    :param target_word: word that receives the box as target_word.word_box
    :param box_index: id of the created Box
    :param earlier_text: text recorded as the box's earlier text
    :param is_earlier_version: whether the box represents an earlier version
    """
    if len(target_word.transkription_positions) > 1:
        # merge the positional word parts of all positions into a single position
        merged_pwps = []
        for position in target_word.transkription_positions:
            merged_pwps += position.positional_word_parts
        base_position = TranskriptionPosition(positional_word_parts=merged_pwps)
    else:
        base_position = target_word.transkription_positions[0]
    box_path = Path.create_path_from_transkription_position(base_position).path
    target_word.word_box = Box(id=box_index, path=box_path,
                               earlier_text=earlier_text, earlier_version=is_earlier_version)
def split_into_parts_and_attach_box(target_word, box_index, missing_text, is_earlier_version, overwritten_by, child_process=False)->list:
    """Split word into word parts and attach a box to the part with text == overwritten_by.

    :param child_process: True on recursive calls; changes the return value to the
        list of newly created parts instead of mutating target_word.word_parts
    [:return:] list of word parts created by the split ([] if nothing matched)
    """
    if len(target_word.word_parts) > 0:
        # start after the last part that already carries a box
        index = 0
        if True in [ wp.word_box is not None for wp in target_word.word_parts ]:
            latest_word_with_box = [ wp for wp in target_word.word_parts if wp.word_box is not None ][-1]
            index = target_word.word_parts.index(latest_word_with_box)+1
        child_word_parts = []
        for wp in target_word.word_parts[index:]:
            # recurse into each remaining part
            word_parts = split_into_parts_and_attach_box(wp, box_index, missing_text, is_earlier_version, overwritten_by, child_process=True)
            if child_process:
                child_word_parts += word_parts
            elif len(word_parts) > 0:
                # replace wp by the parts it was split into
                old_index = target_word.word_parts.index(wp)
                target_word.word_parts[old_index] = word_parts[0]
                for new_wp in word_parts[1:]:
                    # NOTE(review): inserting at the fixed position old_index+1
                    # reverses the order of multiple trailing parts — confirm intent
                    target_word.word_parts.insert(old_index+1, new_wp)
                if overwritten_by in [ new_wp.text for new_wp in word_parts ]:
                    break
        if child_process:
            return child_word_parts
        return target_word.word_parts
    elif overwritten_by in target_word.text:
        # split the word and attach the box to the part matching overwritten_by
        new_words_triple = target_word.split(overwritten_by)
        word_with_box = [ wp for wp in new_words_triple if wp is not None and wp.text == overwritten_by ][0]
        attach_box(word_with_box, box_index, missing_text, is_earlier_version)
        if not child_process:
            if len(new_words_triple) > 1:
                target_word.word_parts = [ i for i in new_words_triple if i is not None ]
                target_word.transkription_positions = []
            else:
                # word was not actually split -> box belongs to the word itself
                target_word.word_box = word_with_box.word_box
        return [ i for i in new_words_triple if i is not None ]
    return []
def fix_boxes(page)->int:
    """Fix boxes and return exit code

    Recreates every word that carries a '{DEBUG_MSG}' debug node as a
    WordWithBoxes and replaces it in page.words; saves the page on success.
    [:return:] 0 on success, 2 if a word could not be matched in page.words
    """
    exit_status = 0
    xpath_expr = '//' + Word.XML_TAG + f'/debug[@msg="{DEBUG_MSG}"]'
    word_nodes = set(node.getparent() for node in page.page_tree.xpath(xpath_expr))
    for word_node in word_nodes:
        word = WordWithBoxes.create_cls(word_node)
        matches = [ w for w in page.words if w.id == word.id and w.text == word.text ]
        if not matches:
            # no matching word on the page -> abort with error code
            return 2
        page.words[page.words.index(matches[0])] = word
    if not UNITTESTING:
        save_page(page, attach_first=True)
    return exit_status
def usage():
    """prints information on how to use the script
    """
    # main's docstring doubles as the usage text
    print(main.__doc__)
def main(argv):
    """This program can be used to fix boxes.
    svgscripts/fix_boxes.py [OPTIONS]
    a xml file about a manuscript, containing information about its pages.
    a xml file about a page, containing information about svg word positions.
    OPTIONS:
    -h|--help show help
    :return: exit code (int)
    """
    try:
        opts, args = getopt.getopt(argv, "h", ["help"])
    except getopt.GetoptError:
        usage()
        return 2
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage()
            return 0
    if len(args) < 1:
        usage()
        return 2
    exit_status = 0
    xml_file = args[0]
    if isfile(xml_file):
        counter = 0
        for page in Page.get_pages_from_xml_file(xml_file, status_contains=BOX_ERROR_STATUS):
            # BUGFIX: counter used to be reset to 0 inside this loop, so the
            # final "[N pages changed]" report could never exceed 1.
            if not UNITTESTING:
                print(Fore.CYAN + f'Fixing boxes of {page.title}, {page.number} ...' + Style.RESET_ALL)
                back_up(page, page.xml_file)
            if fix_boxes(page) == 0:
                counter += 1
            else:
                # propagate a failing page as the script's exit code
                exit_status = 2
        if not UNITTESTING:
            print(Style.RESET_ALL + f'[{counter} pages changed]')
    else:
        raise FileNotFoundError('File {} does not exist!'.format(xml_file))
    return exit_status

if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
Index: fixes/server.py
===================================================================
--- fixes/server.py (revision 105)
+++ fixes/server.py (revision 106)
@@ -1,172 +1,175 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This program can be used to send xml data as json over http.
"""
# Copyright (C) University of Basel 2020 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see 1}}}
import getopt
from http.server import BaseHTTPRequestHandler, HTTPServer, SimpleHTTPRequestHandler
import http.client
import simplejson as json
from os.path import exists, isfile, isdir, dirname, basename
import cgi
import sys
from interactive_editor import ResponseOrganizer
sys.path.append('svgscripts')
from convert_wordPositions import JSONConverter
from datatypes.page import Page
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
UNITTESTING = False
class Server(BaseHTTPRequestHandler):
    # HTTP handler that serves local xml page data as json;
    # configured via the '.local_variables' file (one "key=filepath" per line).
    CONTENT_TYPE = 'Content-Type'
    CONTENT_LENGTH = 'Content-Length'
    CONFIG_FILE = '.local_variables'
    # keys expected in CONFIG_FILE
    XML = 'xml'
    SVG = 'svg'
    MANUSCRIPT = 'manuscript'

    @classmethod
    def get_local_file_dictionary(cls) ->dict:
        """Return a dictionary about local files with keys: XML, SVG, MANUSCRIPT.
        """
        local_file_dictionary = {}
        if isfile(cls.CONFIG_FILE):
            with open(cls.CONFIG_FILE, 'r') as reader:
                for raw_line in reader.readlines():
                    line = raw_line.replace('\n', '')
                    # config line format: key=filepath; only existing files are kept
                    line_content = line.split('=')
                    if len(line_content) == 2\
                    and isfile(line_content[1]):
                        local_file_dictionary.update({line_content[0]: line_content[1]})
        return local_file_dictionary

    def _set_headers(self, response_code):
        """Send response_code plus json content-type and permissive CORS headers."""
        self.send_response(response_code)
        self.send_header('Content-type', 'application/json')
        #self.send_header('Access-Control-Allow-Credentials', 'true')
        self.send_header("Cache-Control", "no-cache")
        self.send_header("Access-Control-Allow-Origin", "*")
        self.send_header("Access-Control-Allow-Methods", "POST, GET, OPTIONS")
        self.send_header("Access-Control-Allow-Headers", "X-Requested-With")
        self.send_header("Access-Control-Allow-Headers", "Content-Type")
        self.end_headers()

    def do_HEAD(self):
        """Process HEAD."""
        self._set_headers(200)

    def do_OPTIONS(self):
        """Process OPTIONS.
        """
        # CORS preflight response
        self.send_response(200, "ok")
        self.send_header('Access-Control-Allow-Origin', '*')
        self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
        self.send_header("Access-Control-Allow-Headers", "X-Requested-With")
        self.send_header("Access-Control-Allow-Headers", "Content-Type")
        self.end_headers()

    def do_GET(self):
        """Process GET.
        """
        self._set_headers(200)
        local_file_dictionary = Server.get_local_file_dictionary()
        if self.XML in local_file_dictionary.keys():
            response_organizer = ResponseOrganizer(local_file_dictionary.get(self.MANUSCRIPT))
            json_dict = response_organizer.create_json_dict(local_file_dictionary[self.XML])
            # NOTE(review): the following lines are an unapplied diff hunk —
            # '-'/'+' prefixes preserved verbatim from the patch
-            self.wfile.write(str.encode(json.dumps(json_dict)))
+            try:
+                self.wfile.write(str.encode(json.dumps(json_dict)))
+            except Exception:
+                print(json_dict)

    def _parse_header(self, key) ->str:
        """Return content of header for key.
        """
        headers = [ header for header in self.headers._headers if key in header ]
        if len(headers) > 0:
            return headers[0][1]
        return ''

    def do_POST(self):
        """Process POST.
        """
        ctype = self._parse_header(self.CONTENT_TYPE)
        if ctype != 'application/json':
            # NOTE(review): 'length' is read but unused in this error branch
            length = int(self._parse_header(self.CONTENT_LENGTH))
            self._send_error()
            return
        # read the message and convert it into a python dictionary
        length = int(self._parse_header(self.CONTENT_LENGTH))
        response = json.loads(self.rfile.read(length))
        local_file_dictionary = Server.get_local_file_dictionary()
        response_organizer = ResponseOrganizer(local_file_dictionary.get(self.MANUSCRIPT))
        json_dict = response_organizer.handle_response(response)
        self._set_headers(200)
        self.wfile.write(str.encode(json.dumps(json_dict)))

    def _send_error(self):
        """Send error msg.
        """
        self._set_headers(400)
        self.end_headers()
def run(port=8008):
    """Start an HTTPServer with the Server handler and serve until interrupted.

    :param port: TCP port to listen on (default 8008)
    """
    httpd = HTTPServer(('', port), Server)
    print(f'Starting httpd on port {port}...')
    httpd.serve_forever()
def usage():
    """prints information on how to use the script
    """
    # main's docstring doubles as the usage text
    print(main.__doc__)
def main(argv):
    """This program can be used to send xml data as json over http.
    fixes/server.py OPTIONS
    OPTIONS:
    -h|--help: show help
    :return: exit code (int)
    """
    try:
        opts, args = getopt.getopt(argv, "h", ["help"])
    except getopt.GetoptError:
        usage()
        return 2
    for opt, arg in opts:
        if opt in ('-h', '--help') or not args:
            usage()
            return 0
    # blocks until the server is interrupted
    run()
    # BUGFIX: previously returned the undefined name 'exit_code' (NameError)
    return 0

if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
Index: fixes/fix_old_data.py
===================================================================
--- fixes/fix_old_data.py (revision 105)
+++ fixes/fix_old_data.py (revision 106)
@@ -1,540 +1,540 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This program can be used to fix old data.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see 1}}}
from colorama import Fore, Style
from deprecated import deprecated
from functools import cmp_to_key
import getopt
import inspect
import lxml.etree as ET
import re
import shutil
import string
from svgpathtools import svg2paths2, svg_to_paths
from svgpathtools.path import Path as SVGPath
from svgpathtools.path import Line
import sys
import tempfile
from operator import attrgetter
import os
from os import listdir, sep, path, setpgrp, devnull
from os.path import exists, isfile, isdir, dirname, basename
from progress.bar import Bar
import warnings
sys.path.append('svgscripts')
from convert_wordPositions import HTMLConverter
from datatypes.box import Box
from datatypes.faksimile import FaksimilePage
-from datatypes.manuscript import ArchivalManuscriptUnity
+from datatypes.archival_manuscript import ArchivalManuscriptUnity
from datatypes.mark_foreign_hands import MarkForeignHands
from datatypes.matrix import Matrix
from datatypes.page import Page, STATUS_MERGED_OK, STATUS_POSTMERGED_OK, FILE_TYPE_SVG_WORD_POSITION, FILE_TYPE_XML_MANUSCRIPT
from datatypes.positional_word_part import PositionalWordPart
from datatypes.path import Path
from datatypes.word import Word
from datatypes.text_connection_mark import TextConnectionMark
from datatypes.transkriptionField import TranskriptionField
from datatypes.transkription_position import TranskriptionPosition
from datatypes.word import Word, update_transkription_position_ids
from join_faksimileAndTranskription import sort_words
from util import back_up, back_up_svg_file, copy_faksimile_svg_file, reset_tp_with_matrix
from process_files import update_svgposfile_status
from process_words_post_merging import update_faksimile_line_positions, MERGED_DIR
sys.path.append('shared_util')
from myxmlwriter import write_pretty, xml_has_type, FILE_TYPE_SVG_WORD_POSITION, FILE_TYPE_XML_MANUSCRIPT
from main_util import create_function_dictionary, get_manuscript_files
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
UNITTESTING = False
MAX_SVG_XY_THRESHOLD = 10
#TODO: fix all svg graphical files: change xlink:href to href!!!!
def convert_old_matrix(tp, xmin, ymin) ->(Matrix, float, float):
    """Return new matrix, x and y for old transkription_position.

    The clone of tp's transformation matrix is shifted by (xmin, ymin);
    all values are rounded to 3 decimals.
    """
    new_matrix = tp.transform.clone_transformation_matrix()
    new_matrix.matrix[Matrix.XINDEX] = round(tp.transform.matrix[Matrix.XINDEX] + xmin, 3)
    new_matrix.matrix[Matrix.YINDEX] = round(tp.transform.matrix[Matrix.YINDEX] + ymin, 3)
    if tp.left > 0:
        new_x = round(tp.left - tp.transform.matrix[Matrix.XINDEX], 3)
    else:
        new_x = 0
    new_y = round((tp.height-1.5)*-1, 3)
    return new_matrix, new_x, new_y
def save_page(page, attach_first=False, backup=False, script_name=None):
    """Write page to xml file

    :param page: Page whose tree is written back to its own xml file
    :param attach_first: update and attach the page's words to the tree before writing
    :param backup: back up the xml file first
    :param script_name: recorded in the file's metadata; defaults to
        "<this file>:<name of the direct caller>" via frame introspection,
        so the default depends on who calls save_page
    """
    if backup:
        back_up(page, page.xml_file)
    if attach_first:
        page.update_and_attach_words2tree()
    if script_name is None:
        # f_back -> the function that called save_page
        script_name = f'{__file__}:{inspect.currentframe().f_back.f_code.co_name}'
    write_pretty(xml_element_tree=page.page_tree, file_name=page.page_tree.docinfo.URL,\
            script_name=script_name, file_type=FILE_TYPE_SVG_WORD_POSITION)
def page_already_changed(page) -> bool:
    """Return whether page has alreadybeen changed by function

    Uses frame introspection: checks for a modifiedBy entry whose script is
    "<this file>:<name of the function that called page_already_changed>".
    """
    return len(\
            page.page_tree.xpath(f'//metadata/modifiedBy[@script="{__file__}:{inspect.currentframe().f_back.f_code.co_name}"]')\
            ) > 0
def fix_faksimile_line_position(page, redo=False) -> bool:
    """Create a faksimile line position.

    :param redo: rerun even if this page was already processed
    [:return:] True if the page was (re)processed, False otherwise
    """
    already_done = not redo and page_already_changed(page)
    if already_done:
        return False
    update_faksimile_line_positions(page)
    if not UNITTESTING:
        save_page(page)
    return True
def check_faksimile_positions(page, redo=False) -> bool:
    """Check faksimile line position.

    Compares the faksimile positions of page.words against the rects of the
    page's source svg file and copies left/top (and derives bottom) where
    they differ.
    [:return:] whether the page was changed
    """
    if len(page.page_tree.xpath('//data-source/@file')) > 0:
        svg_file = page.page_tree.xpath('//data-source/@file')[0]
        svg_tree = ET.parse(svg_file)
        positions_are_equal_counter = 0
        page_changed = False
        for faksimile_page in FaksimilePage.GET_FAKSIMILEPAGES(svg_tree):
            # only the faksimile page matching this page's title and number
            if page.title == faksimile_page.title\
            and page.number == faksimile_page.page_number:
                #print([fp.id for fp in faksimile_page.word_positions ])
                for word in page.words:
                    for fp in word.faksimile_positions:
                        # find the svg rect position with the same id
                        rect_fps = [ rfp for rfp in faksimile_page.word_positions if rfp.id == fp.id ]
                        if len(rect_fps) > 0:
                            rfp = rect_fps[0]
                            if fp.left != rfp.left or fp.top != rfp.top:
                                #print(f'{fp.id}: {fp.left}/{rfp.left} {fp.top}/{rfp.top}')
                                fp.left = rfp.left
                                fp.top = rfp.top
                                # bottom follows from the svg rect's height
                                fp.bottom = fp.top + rfp.height
                                word.attach_word_to_tree(page.page_tree)
                                page_changed = True
                            else:
                                positions_are_equal_counter += 1
                # NOTE(review): counts positions but is reported against the
                # number of words — confirm intended denominator
                print(f'{positions_are_equal_counter}/{len(page.words)} are equal')
        if page_changed and not UNITTESTING:
            save_page(page)
        return page_changed
def fix_faksimile_positions(page, redo=False) -> bool:
    """Set faksimile positions to absolute values.
    [:return:] fixed
    """
    modified_xpath = f'//metadata/modifiedBy[@script="{__file__}"]'
    if not redo and len(page.page_tree.xpath(modified_xpath)) > 0:
        # already processed by this script and no rerun requested
        return False
    offset_x = page.text_field.xmin
    offset_y = page.text_field.ymin
    for word in page.words:
        # shift every faksimile position by the text field's origin
        for position in word.faksimile_positions:
            position.left += offset_x
            position.top += offset_y
            position.bottom += offset_y
        word.attach_word_to_tree(page.page_tree)
    if not UNITTESTING:
        print(f'writing to {page.page_tree.docinfo.URL}')
        write_pretty(xml_element_tree=page.page_tree, file_name=page.page_tree.docinfo.URL,\
                script_name=__file__, file_type=FILE_TYPE_SVG_WORD_POSITION)
    return True
def _fix_tp_of_word(page, word, text_field):
    """Fix transkription positions ->set relative to 0,0 instead of text_field.left,text_field.top

    Shifts all transkription positions of word by the text field's offset and
    recurses into its word parts and Word-valued attributes.
    """
    for tp in word.transkription_positions:
        tp.left += text_field.left
        tp.top += text_field.top
    reset_tp_with_matrix(word.transkription_positions)
    if type(word) == Word:
        # BUGFIX: was word.__dict__.items(), which yields (key, value) tuples,
        # so 'type(item) == Word' never matched and Word-valued attributes
        # were silently skipped; iterate the values instead.
        words_in_word = word.word_parts + [ item for item in word.__dict__.values() if type(item) == Word ]
        for wp in words_in_word:
            _fix_tp_of_word(page, wp, text_field)
def fix_tp_with_matrix(page, redo=False) -> bool:
    """Fix transkription positions with rotation matrix ->set left to 0 and top to -5.
    [:return:] fixed
    """
    has_text_field = page.svg_image is not None and page.svg_image.text_field is not None
    # fall back to 0/0 when the page has no svg image text field
    xmin = page.svg_image.text_field.left if has_text_field else 0
    ymin = page.svg_image.text_field.top if has_text_field else 0
    for word in page.words:
        reset_tp_with_matrix(word.transkription_positions, tr_xmin=xmin, tr_ymin=ymin)
        for word_part in word.word_parts:
            reset_tp_with_matrix(word_part.transkription_positions, tr_xmin=xmin, tr_ymin=ymin)
    if not UNITTESTING:
        print(f'writing to {page.page_tree.docinfo.URL}')
        save_page(page, attach_first=True)
    return True
def _fix_old_transkription_positions(page, redo=False) -> bool:
    """Fix transkription positions ->set relative to 0,0 instead of text_field.left,text_field.top
    [:return:] fixed
    """
    if page.svg_image is not None\
    and page.svg_image.text_field is None:
        # NOTE(review): the 'svg_image is None' branch below is unreachable under
        # the outer condition; kept for safety, but its latent NameErrors are
        # fixed (was: undefined 'tf' and 'svg_file').
        if page.svg_image is None:
            if page.svg_file is not None:
                transkription_field = TranskriptionField(page.svg_file)
                width = round(transkription_field.documentWidth, 3)
                height = round(transkription_field.documentHeight, 3)
                # TODO(review): SVGImage is not imported in this module — confirm
                # the proper import before relying on this branch.
                page.svg_image = SVGImage(file_name=page.svg_file, width=width,\
                        height=height, text_field=transkription_field.convert_to_text_field())
                page.svg_image.attach_object_to_tree(page.page_tree)
            else:
                raise Exception(f'ERROR page {page.page_tree.docinfo.URL} does not have a svg_file!')
        elif page.svg_image.text_field is None:
            # derive the text field from the svg file and persist it on the tree
            page.svg_image.text_field = TranskriptionField(page.svg_image.file_name).convert_to_text_field()
            page.svg_image.attach_object_to_tree(page.page_tree)
        # shift line numbers and all words/marks by the text field's offset
        for line_number in page.line_numbers:
            line_number.top += page.svg_image.text_field.top
            line_number.bottom += page.svg_image.text_field.top
            line_number.attach_object_to_tree(page.page_tree)
        for word in page.words:
            _fix_tp_of_word(page, word, page.svg_image.text_field)
        for mark in page.mark_foreign_hands:
            _fix_tp_of_word(page, mark, page.svg_image.text_field)
        for tcm in page.text_connection_marks:
            _fix_tp_of_word(page, tcm, page.svg_image.text_field)
        if not UNITTESTING:
            print(f'writing to {page.page_tree.docinfo.URL}')
            save_page(page, attach_first=True)
        return True
    return False
def _fix_old_pwps(page, old_tps):
    """Adjust positional_word_parts to corrected transkription_positions.

    Shifts left/top/bottom of every positional word part node inside the
    given transkription position nodes by the svg image's text field offset.
    """
    x_offset = page.svg_image.text_field.left
    y_offset = page.svg_image.text_field.top
    for tp in old_tps:
        for pwp_node in tp.xpath(f'./{PositionalWordPart.XML_TAG}'):
            pwp_node.set('left', str(float(pwp_node.get('left')) + x_offset))
            pwp_node.set('top', str(float(pwp_node.get('top')) + y_offset))
            pwp_node.set('bottom', str(float(pwp_node.get('bottom')) + y_offset))
def _fix_quotation_mark_tps(page, old_tps):
    """Fix the height of transkription_positions of words with quotation marks.

    Grows each position's height by the vertical distance between its tallest
    and its topmost positional word part.
    """
    for tp in old_tps:
        pwp_nodes = tp.xpath(f'./{PositionalWordPart.XML_TAG}')
        tallest = max(pwp_nodes, key=lambda pwp: float(pwp.get('height')))
        topmost = min(pwp_nodes, key=lambda pwp: float(pwp.get('top')))
        height_delta = abs(float(tallest.get('top')) - float(topmost.get('top')))
        tp.set('height', str(float(tp.get('height')) + height_delta))
def fix_transkription_positions(page, redo=False) -> bool:
    """Fix transkription positions ->set relative to 0,0 instead of text_field.left,text_field.top
    [:return:] fixed
    """
    THRESHOLD = 10
    # pages that still lack a svg_image text field need the old-style fix first
    if page.svg_image is not None\
    and page.svg_image.text_field is None:
        if not _fix_old_transkription_positions(page):
            return False
    # re-anchor positional word parts drifted > THRESHOLD from their parent position
    _fix_old_pwps(page, [ pwp.getparent() for pwp in page.page_tree.xpath(f'//{PositionalWordPart.XML_TAG}[@id="0"]')\
            if abs(float(pwp.get('left')) - float(pwp.getparent().get('left'))) > THRESHOLD ])
    # fix heights of positions whose tallest and topmost positional word part differ
    _fix_quotation_mark_tps(page, [ tp for tp in page.page_tree.xpath(f'//{TranskriptionPosition.XML_TAG}')\
            if len(tp.xpath(f'./{PositionalWordPart.XML_TAG}')) > 0\
            and sorted(tp.xpath(f'./{PositionalWordPart.XML_TAG}'), key=lambda pwp: float(pwp.get('height')), reverse=True)[0]\
            != sorted(tp.xpath(f'./{PositionalWordPart.XML_TAG}'), key=lambda pwp: float(pwp.get('top')))[0] ])
    if not UNITTESTING:
        print(f'writing to {page.page_tree.docinfo.URL}')
        save_page(page)
    return True
def fix_styles(page, redo=False):
    """Remove unused styles from tree.

    Keeps only the first //style node of the page tree and saves the page.
    """
    style_nodes = page.page_tree.xpath('//style')
    if len(style_nodes) > 1:
        # drop every style node except the first one
        for extra_node in style_nodes[1:]:
            extra_node.getparent().remove(extra_node)
    if not UNITTESTING:
        print(f'writing to {page.page_tree.docinfo.URL}')
        save_page(page)
    return True
def merge_transkription_positions(page, redo=False) -> bool:
    """Fix transkription positions of merged words
    [:return:] fixed
    """
    # requires the pre-merge version of this file in the MERGED_DIR subdirectory
    if not isdir(dirname(page.page_tree.docinfo.URL) + sep + MERGED_DIR)\
    or not isfile(dirname(page.page_tree.docinfo.URL) + sep + MERGED_DIR + sep + basename(page.page_tree.docinfo.URL)):
        return False
    merged_page = Page(dirname(page.page_tree.docinfo.URL) + sep + MERGED_DIR + sep + basename(page.page_tree.docinfo.URL))
    # map each word of the merged page to its corresponding word(s) on this page
    sync_dictionary = sync_words_linewise(merged_page.words, page.words, merged_page.line_numbers)
    words = []
    for source_word in merged_page.words:
        words.append(source_word)
        if bool(sync_dictionary.get(source_word)):
            _sync_transkriptions_with_words(source_word, sync_dictionary)
        if source_word.text != ''.join([ t.get_text() for t in source_word.transkription_positions ]):
            # word text and position text disagree: ask the user whether to
            # re-sync this single word on its line (interactive)
            text = ''.join([ t.get_text() for t in source_word.transkription_positions ])
            print(f'{source_word.line_number}: {source_word.text} has transkription_positions with text "{text}".')
            response = input('Change? [Y/n]>')
            if not response.startswith('n'):
                new_sync_dictionary = sync_words_linewise(merged_page.words, page.words,\
                        [ line for line in merged_page.line_numbers if line.id == source_word.line_number ], force_sync_on_word=source_word)
                if bool(new_sync_dictionary.get(source_word)):
                    _sync_transkriptions_with_words(source_word, new_sync_dictionary)
                else:
                    raise Exception(f'Could not find sourc_word {source_word.text} in {new_sync_dictionary}!')
    page.words = words
    page.update_and_attach_words2tree()
    if not UNITTESTING:
        print(f'writing to {page.page_tree.docinfo.URL}')
        save_page(page)
    return True
def fix_graphical_svg_file(page, redo=False) -> bool:
    """Fix glyphs of word for which there is a /changed-word in page.page_tree

    Hides the glyphs of /deleted-word nodes and shifts the glyphs of
    /changed-word nodes in the page's graphical svg file, then writes the
    svg file back.
    NOTE(review): annotated '-> bool' but no value is returned (implicitly None).
    """
    svg_tree = ET.parse(page.svg_file)
    transkription_field = TranskriptionField(page.source)
    # map the default namespace to 'ns' so it is usable in xpath expressions
    namespaces = { k if k is not None else 'ns': v for k, v in svg_tree.getroot().nsmap.items() }
    back_up_svg_file(svg_tree, namespaces=namespaces)
    # older files position glyphs relative to the transkription field
    tr_xmin = transkription_field.xmin if (page.svg_image is None or page.svg_image.text_field is None) else 0
    tr_ymin = transkription_field.ymin if (page.svg_image is None or page.svg_image.text_field is None) else 0
    for deleted_word_node in page.page_tree.xpath('//deleted-word'):
        # hide all glyph nodes of deleted words
        deleted_word = Word.create_cls(deleted_word_node)
        _run_function_on_nodes_for_word(svg_tree, namespaces, deleted_word, tr_xmin, tr_ymin, _set_node_attribute_to, 'visibility', 'hidden')
    for changed_word_node in page.page_tree.xpath('//changed-word'):
        changed_word = Word.create_cls(changed_word_node)
        try:
            word = [ word for word in page.words if word.id == changed_word.id and word.text == changed_word.text ][0]
            # shift glyphs horizontally by the difference of the first positions
            left_difference = word.transkription_positions[0].left - changed_word.transkription_positions[0].left
            _run_function_on_nodes_for_word(svg_tree, namespaces, word, tr_xmin, tr_ymin, _add_value2attribute, 'x', left_difference)
        except IndexError:
            warnings.warn(f'There is no word for changed_word {changed_word.id}: "{changed_word.text}" in {page.page_tree.docinfo.URL}!')
    copy_faksimile_svg_file(target_file=page.svg_file, faksimile_tree=svg_tree, namespaces=namespaces)
def _add_value2attribute(node, attribute, value):
"""Add left_difference to x of node.
"""
node.set(attribute, str(float(node.get(attribute)) + value))
node.set('changed', 'true')
def _get_nodes_with_symbol_id(svg_tree, namespaces, symbol_id, svg_x, svg_y, threshold=0.1) -> list:
    """Return nodes with symbol_id n x = svg_x and y = svg_y.

    Looks for unchanged <use> nodes referencing symbol_id within +-threshold
    of (svg_x, svg_y); the window is widened by 1 per retry until a node is
    found or threshold reaches MAX_SVG_XY_THRESHOLD.
    """
    while True:
        nodes = [ node for node in svg_tree.xpath(\
                f'//ns:use[@xlink:href="#{symbol_id}" and @x > {svg_x-threshold} and @x < {svg_x+threshold} and @y > {svg_y-threshold} and @y < {svg_y+threshold} ]',\
                namespaces=namespaces) if not bool(node.get('changed')) ]
        if len(nodes) > 0 or threshold >= MAX_SVG_XY_THRESHOLD:
            return nodes
        # widen the search window and retry
        threshold += 1
def _run_function_on_nodes_for_word(svg_tree, namespaces, word, tr_xmin, tr_ymin, function_on_node, attribute, value):
    """Run function on nodes for words.

    For every positional word part of word, finds the matching glyph node in
    the svg tree and applies function_on_node(node, attribute, value) to the
    first match.
    """
    for position in word.transkription_positions:
        for pwp in position.positional_word_parts:
            matching_nodes = _get_nodes_with_symbol_id(svg_tree, namespaces, pwp.symbol_id,\
                    pwp.left + tr_xmin, pwp.bottom + tr_ymin)
            if len(matching_nodes) > 0:
                # only the first matching node is updated
                function_on_node(matching_nodes[0], attribute, value)
def _set_node_attribute_to(node, attribute, value):
"""Set attribute of node to value.
"""
node.set(attribute, str(value))
node.set('changed', 'true')
def sync_words_linewise(source_words, target_words, lines, force_sync_on_word=None) -> dict:
    """Sync words an create a dictionary with source_words as keys, refering to a list of corresponding words.
    """
    result_dict = {}
    # reset processing state on every word before syncing
    for word in target_words + source_words: word.processed = False
    for line in lines:
        # words of one line, ordered by their horizontal position
        source_words_on_line = sorted([ word for word in source_words if word.line_number == line.id ], key=lambda word: word.transkription_positions[0].left)
        target_words_on_line = sorted([ word for word in target_words if word.line_number == line.id ], key=lambda word: word.transkription_positions[0].left)
        if len(target_words_on_line) == len(source_words_on_line):
            _sync_same_length(result_dict, source_words_on_line, target_words_on_line, force_sync_on_word=force_sync_on_word)
        elif len(source_words_on_line) < len(target_words_on_line):
            _sync_more_target_words(result_dict, source_words_on_line, target_words_on_line, force_sync_on_word=force_sync_on_word)
        else:
            # NOTE(review): more source than target words is not handled — confirm intent
            print('okey dokey')
    return result_dict
def _force_sync_on_word(force_sync_on_word, target_words_on_line, result_dict):
    """Force sync on word.

    Interactively asks the user which of the still unprocessed target words
    belong to force_sync_on_word and records them in result_dict.
    :raises Exception: if no unprocessed target words remain on the line
    """
    unprocessed_target_words = [t_word for t_word in target_words_on_line if not t_word.processed]
    if len(unprocessed_target_words) > 0:
        print([ (i, t_word.text) for i, t_word in enumerate(unprocessed_target_words)])
        response = input(f'Please specify indices of words to sync {force_sync_on_word.text} with: [default:0-{len(unprocessed_target_words)-1}]>')
        # default: use all unprocessed target words
        indices = [ i for i in range(0, len(unprocessed_target_words)) ]
        if re.match(r'\d+-\d+', response):
            # a range like '2-5' (inclusive)
            index_strings = response.split('-')
            indices = [ i for i in range(int(index_strings[0]), int(index_strings[1])+1) ]
        elif response != '':
            # space separated indices like '0 2 3'
            indices = [ int(i) for i in response.split(' ') ]
        target_words = []
        for i in indices: target_words.append(unprocessed_target_words[i])
        result_dict.update({ force_sync_on_word: target_words })
    else:
        raise Exception(f'There are no unprocessed target_words for {force_sync_on_word.text} on line {force_sync_on_word.line_number}!')
def _sync_transkriptions_with_words(word, sync_dictionary):
"""Sync transkription_positions of word with syncronized words.
"""
word.transkription_positions = []
for target_word in sync_dictionary[word]:
word.transkription_positions += target_word.transkription_positions
def _sync_more_target_words(result_dict, source_words_on_line, target_words_on_line, force_sync_on_word=None):
    """Sync if there are more target words.

    Pairs each target word with a source word; a source word whose text is the
    concatenation of several target words collects them one by one via
    current_source_word.
    """
    current_source_word = None
    for target_word in target_words_on_line:
        if current_source_word is not None\
        and current_source_word.text.startswith(''.join([ w.text for w in result_dict[current_source_word]]) + target_word.text):
            # target_word continues the text of the currently collected source word
            result_dict[current_source_word].append(target_word)
            target_word.processed = True
            if current_source_word.text == ''.join([ w.text for w in result_dict[current_source_word]]):
                # source word is fully covered -> stop collecting
                current_source_word = None
        elif len([ s_word for s_word in source_words_on_line if not s_word.processed and s_word.text == target_word.text ]) > 0:
            # exact text match with an unprocessed source word
            source_word = [ s_word for s_word in source_words_on_line if not s_word.processed and s_word.text == target_word.text ][0]
            target_word.processed = True
            source_word.processed = True
            result_dict.update({ source_word: [ target_word ] })
        elif len([ s_word for s_word in source_words_on_line if not s_word.processed and s_word.text.startswith(target_word.text) ]) > 0:
            # target_word starts the text of an unprocessed source word -> start collecting
            current_source_word = [ s_word for s_word in source_words_on_line if not s_word.processed and s_word.text.startswith(target_word.text) ][0]
            current_source_word.processed = True
            target_word.processed = True
            result_dict.update({ current_source_word: [ target_word ] })
        else:
            msg = f'On line {target_word.line_number}: target_word "{target_word.text}" does not have a sibling in {[ s.text for s in source_words_on_line if not s.processed ]}'
            warnings.warn(msg)
    if force_sync_on_word is not None:
        _force_sync_on_word(force_sync_on_word, target_words_on_line, result_dict)
def _sync_same_length(result_dict, source_words_on_line, target_words_on_line, force_sync_on_word=None):
"""Sync same length
"""
for i, word in enumerate(source_words_on_line):
if word.text == target_words_on_line[i].text:
word.processed = True
target_words_on_line[i].processed = True
result_dict.update({ word: [ target_words_on_line[i] ] })
elif len([ t_word for t_word in target_words_on_line if not t_word.processed and t_word.text == word.text ]) > 0:
target_word = [ t_word for t_word in target_words_on_line if not t_word.processed and t_word.text == word.text ][0]
word.processed = True
target_word.processed = True
result_dict.update({ word: [ target_word ] })
else:
msg = f'On line {word.line_number}: source_word "{word.text}" does not have a sibling in {[ s.text for s in target_words_on_line]}'
warnings.warn(msg)
if force_sync_on_word is not None:
_force_sync_on_word(force_sync_on_word, target_words_on_line, result_dict)
def usage():
    """Print usage information for this script (main's docstring).
    """
    print(main.__doc__)
def main(argv):
    # NOTE(review): the usage text names 'svgscripts/fix_old_data.py' but this chunk
    # belongs to fixes/fix_boxes.py according to the revision header — confirm intended name.
    """This program can be used to fix old data.
    svgscripts/fix_old_data.py [OPTIONS]
    a xml file about a manuscript, containing information about its pages.
    a xml file about a page, containing information about svg word positions.
    OPTIONS:
    -h|--help show help
    -c|--check-faksimile-positions check whether faksimile positions have been updated
    -l|--faksimile-line-position create faksimile line positions
    -m|--merge-positions merge transkription positions
    -p|--faksimile-positions fix old faksimile positions
    -r|--redo rerun
    -s|--fix-graphical-svg fix use position of glyphs for words changed by 'changed-word' and 'deleted-word' in xml file.
    -S|--fix-styles fix styles (default)
    -t|--transkription-positions fix old transkription positions
    -M|--matrix fix old transkription positions with transform matrix
    :return: exit code (int)
    """
    function_list = []
    # map command-line flags to fix functions; the '-S' entry doubles as the default
    function_dict = create_function_dictionary(['-c', '--check-faksimile-positions'], check_faksimile_positions)
    function_dict = create_function_dictionary(['-l', '--faksimile-line-position'], fix_faksimile_line_position, function_dictionary=function_dict)
    function_dict = create_function_dictionary(['-p', '--faksimile-positions'], fix_faksimile_positions, function_dictionary=function_dict)
    function_dict = create_function_dictionary(['-m', '--merge-positions'], merge_transkription_positions, function_dictionary=function_dict)
    function_dict = create_function_dictionary(['-s', '--fix-graphical-svg'], fix_graphical_svg_file, function_dictionary=function_dict)
    function_dict = create_function_dictionary(['-M', '--matrix'], fix_tp_with_matrix, function_dictionary=function_dict)
    function_dict = create_function_dictionary(['-t', '--transkription-positions'], fix_transkription_positions, function_dictionary=function_dict)
    function_dict = create_function_dictionary(['default', '-S', '--fix-styles'], fix_styles, function_dictionary=function_dict)
    redo = False;
    try:
        opts, args = getopt.getopt(argv, "hcplrmsStM", ["help", "check-faksimile-positions", "faksimile-positions", "faksimile-line-position",\
                "redo", "merge-positions", "fix-graphical-svg", "fix-styles", "transkription-positions", 'matrix' ])
    except getopt.GetoptError:
        usage()
        return 2
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage()
            return 0
        elif opt in ('-r', '--redo'):
            redo = True;
        elif opt in function_dict.keys():
            function_list.append(function_dict[opt])
    if len(function_list) == 0:
        # no fix flag given: fall back to the default function ('-S'/fix_styles)
        function_list.append(function_dict['default'])
    if len(args) < 1:
        usage()
        return 2
    exit_status = 0
    for xml_file in get_manuscript_files(args):
        if isfile(xml_file):
            # count, per function, how many pages it actually changed
            counters = { f.__name__: 0 for f in function_list }
            for current_function in function_list:
                # faksimile fixes require fully merged pages; others accept any 'OK' status
                status_contains = STATUS_MERGED_OK if 'faksimile' in current_function.__name__ else 'OK'
                for page in Page.get_pages_from_xml_file(xml_file, status_contains=status_contains):
                    if not UNITTESTING:
                        print(Fore.CYAN + f'Processing {page.title}, {page.number} with function {current_function.__name__} ...' + Style.RESET_ALL)
                        back_up(page, page.xml_file)
                    counters[current_function.__name__] += 1 if current_function(page, redo=redo) else 0
            if not UNITTESTING:
                for function_name, counter in counters.items():
                    print(Style.RESET_ALL + f'[{counter} pages changed by {function_name}]')
        else:
            raise FileNotFoundError('File {} does not exist!'.format(xml_file))
    return exit_status
if __name__ == "__main__":
    # script entry point: forward CLI arguments (without the program name) and exit with main's status
    sys.exit(main(sys.argv[1:]))
Index: fixes/test_checker_handler.py
===================================================================
--- fixes/test_checker_handler.py (revision 0)
+++ fixes/test_checker_handler.py (revision 106)
@@ -0,0 +1,29 @@
+import lxml.etree as ET
+from os import sep, path, remove
+from os.path import isdir, isfile, dirname, basename
+import shutil
+import sys
+import tempfile
+import unittest
+import warnings
+
+from checker_handler import CheckerHandler
+
+sys.path.append('svgscripts')
+from datatypes.page import Page
+
+
+class TestCheckerHandler(unittest.TestCase):
+ def setUp(self):
+ DATADIR = path.dirname(__file__) + sep + 'test_data'
+ self.fix_boxes = DATADIR + sep + 'Mp_XIV_page416.xml'
+
+ def test_check_get_set(self):
+ page = Page(self.fix_boxes)
+ checker = CheckerHandler(page)
+ checker.set_task_done(CheckerHandler.CHECKS[0])
+ self.assertTrue(len(checker.get_todos()) < len(CheckerHandler.CHECKS))
+ print(ET.dump(page.page_tree.xpath(f'//metadata/{CheckerHandler.XML_TAG}')[0]))
+
+if __name__ == "__main__":
+ unittest.main()
Index: fixes/checker_handler.py
===================================================================
--- fixes/checker_handler.py (revision 0)
+++ fixes/checker_handler.py (revision 106)
@@ -0,0 +1,79 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+""" This program can be used to handle the manual check status of xml files.
+"""
+# Copyright (C) University of Basel 2021 {{{1
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see 1}}}
+
+from datetime import datetime
+import lxml.etree as ET
+import sys
+
+
+sys.path.append('svgscripts')
+from datatypes.page import Page
+
+__author__ = "Christian Steiner"
+__maintainer__ = __author__
+__copyright__ = 'University of Basel'
+__email__ = "christian.steiner@unibas.ch"
+__status__ = "Development"
+__license__ = "GPL v3"
+__version__ = "0.0.1"
+
+UNITTESTING = False
+
+class CheckerHandler:
+ """This class can be used to handle the manual check status of xml files.
+ """
+ XML_TAG = 'manual-checks'
+ CHECKS = [ 'transkription positions', 'hyphenation', 'boxes/correction history',\
+ 'mark foreign hands', 'line assignement', 'deletion paths', 'faksimile/transkription word correspondance' ]
+
+
+ def __init__(self, page: Page):
+ self.page = page
+
+ def get_todos(self) ->list:
+ """Return todos as a list
+ """
+ todos = self.page.page_tree.xpath(f'//metadata/{self.XML_TAG}/todo/text()')
+ if len(todos) > 0\
+ or len(self.page.page_tree.xpath(f'//metadata/{self.XML_TAG}/done/text()')) == len(self.CHECKS):
+ return todos
+ return self.CHECKS
+
+ def set_task_done(self, task: str):
+ """Set task as done.
+ """
+ date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+ checks = self.page.page_tree.xpath(f'//metadata/{self.XML_TAG}')[0]\
+ if len(self.page.page_tree.xpath(f'//metadata/{self.XML_TAG}')) > 0\
+ else ET.SubElement(self.page.page_tree.xpath('//metadata')[0], self.XML_TAG)
+ if len(checks.xpath('./todo')) == 0:
+ for todo in self.CHECKS:
+ if todo != task:
+ ET.SubElement(checks, 'todo').text = todo
+ else:
+ ET.SubElement(checks, 'done', attrib={'date': date}).text = task
+ elif len(checks.xpath(f'./todo[text()="{task}"]')) > 0:
+ done = checks.xpath(f'./todo[text()="{task}"]')[0]
+ done.tag = 'done'
+ done.set('date', date)
+ else:
+ ET.SubElement(checks, 'done', attrib={'date': date}).text = task
+
+
Index: fixes/interactive_editor.py
===================================================================
--- fixes/interactive_editor.py (revision 105)
+++ fixes/interactive_editor.py (revision 106)
@@ -1,944 +1,967 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This program can be used to process words after they have been merged with faksimile data.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see 1}}}
from colorama import Fore, Style
from datetime import datetime
from deprecated import deprecated
from functools import cmp_to_key
import getopt
import inspect
import lxml.etree as ET
import re
import shutil
import string
from svgpathtools import svg2paths2, svg_to_paths
from svgpathtools.path import Path as SVGPath
from svgpathtools.path import Line
from svgpathtools.parser import parse_path
import sys
import tempfile
from operator import attrgetter
import os
from os import listdir, sep, path, setpgrp, devnull
from os.path import exists, isfile, isdir, dirname, basename
from progress.bar import Bar
import warnings
-
+from checker_handler import CheckerHandler
from fix_old_data import save_page
from fix_boxes import attach_box, split_into_parts_and_attach_box
sys.path.append('svgscripts')
from convert_wordPositions import HTMLConverter, JSONConverter
from datatypes.box import Box
from datatypes.faksimile import FaksimilePage
-from datatypes.manuscript import ArchivalManuscriptUnity
+from datatypes.archival_manuscript import ArchivalManuscriptUnity
from datatypes.mark_foreign_hands import MarkForeignHands
from datatypes.page import Page, STATUS_MERGED_OK, STATUS_POSTMERGED_OK
from datatypes.path import Path
from datatypes.text_connection_mark import TextConnectionMark
from datatypes.transkriptionField import TranskriptionField
from datatypes.word import Word, update_transkription_position_ids
from datatypes.word_deletion_path import WordDeletionPath
from join_faksimileAndTranskription import sort_words
from util import back_up, back_up_svg_file, copy_faksimile_svg_file
from process_files import update_svgposfile_status
from process_words_post_merging import update_faksimile_line_positions, MERGED_DIR
sys.path.append('shared_util')
from myxmlwriter import write_pretty, xml_has_type, FILE_TYPE_SVG_WORD_POSITION, FILE_TYPE_XML_MANUSCRIPT
from main_util import create_function_dictionary
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
UNITTESTING = False
MAX_SVG_XY_THRESHOLD = 10
class ResponseHandler:
    """Base class for the interactive editor's response handlers.

    A handler decides whether it is responsible for a user response
    (interactive shell) or a json request (converter front end) and runs
    the corresponding change on a page. Subclasses typically override
    run_change and one of the handle_*_response methods.
    """
    def __init__(self, response_starts_with=None, dialog_string=None, action_name=None, description=None):
        self.action_name = action_name
        self.dialog_string = dialog_string
        self.description = description
        # prefix used by match() to decide whether this handler applies
        self.response_starts_with = response_starts_with
    def create_requirement_list(self) ->list:
        """Create a requirement dictionary.

        Subclasses return a list of dicts describing extra inputs they need
        (see SplitWords); the base implementation needs none.
        """
        return []
    def create_json_dict(self)->dict:
        """Create a json dictionary.
        """
        json_dict = { 'action_name': self.action_name, 'description': self.description }
        requirements = self.create_requirement_list()
        if len(requirements) > 0:
            json_dict.update({ 'requirements': requirements })
        return json_dict
    def get_transkription_words(self, json_dict: dict) ->list:
        """Return words with transkription positions only.

        Filters the json word dicts down to those carrying a 'tp_id'.
        """
        words = json_dict['words']\
                if bool(json_dict.get('words'))\
                else []
        return [ w for w in words if bool(w.get('tp_id')) ]
    def get_requirement(self, json_dict: dict, index=0) ->tuple:
        """Return requirement tuple (name, input).

        Returns (None, None) when the json request does not contain a
        requirement at the given index.
        """
        name = requirement = None
        if dict_contains_keys(json_dict, ['response_handler','requirements'])\
        and index < len(json_dict['response_handler']['requirements']):
            requirement_dict = json_dict['response_handler']['requirements'][index]
            if dict_contains_keys(requirement_dict, ['name'])\
            and dict_contains_keys(requirement_dict, ['input']):
                name = requirement_dict['name']
                requirement = requirement_dict['input']
        return name, requirement
    def match(self, response: str) ->bool:
        """Return whether response matchs with handler.
        """
        if self.response_starts_with is not None:
            return response.startswith(self.response_starts_with)
        # handlers constructed without a prefix match any response
        return True
    def print_dialog(self):
        """Print dialog.
        """
        if self.dialog_string is not None:
            print(f'[{self.dialog_string}]')
    def handle_response(self, page: Page, json_dict: dict) -> int:
        """Handle response and return exit code.

        Collects the page words referenced by the json request plus any
        declared requirements into an action dictionary and runs the change.
        """
        transkription_words = self.get_transkription_words(json_dict)
        json_word_ids = [ jw.get('id') for jw in transkription_words ]
        action_dictionary = { 'words': [ word for word in page.words if word.id in json_word_ids ] }
        for index, item in enumerate(self.create_requirement_list()):
            name, requirement = self.get_requirement(json_dict, index=index)
            action_dictionary.update({name: requirement})
        return self.run_change(page, action_dictionary)
    def handle_interactive_response(self, page: Page, response: str, shell) -> int:
        """Handle response and return exit code.
        """
        return self.run_change(page, {})
    def run_change(self, page: Page, action_dictionary: dict) -> int:
        """Run changes on page and return exit code.

        Base implementation is a success no-op; subclasses override it with
        the actual page modification.
        """
        exit_code = 0
        return exit_code
-
class JoinWords(ResponseHandler):
    """Handler that joins several words on a page into one word.
    """
    def handle_interactive_response(self, page: Page, response: str, shell) -> int:
        """Handle response interactively and return exit code.

        A leading non-digit prefix in the response requests that a white
        space be kept between the joined words.
        """
        action_dictionary = { 'words' : shell._get_words_from_response(re.compile('^\D+\s').sub('', response), page.words),\
                'add_white_space_between_words': re.match(r'^\D+\s', response) }
        if self.run_change(page, action_dictionary) == 0:
            return shell.run_interactive_editor(page)
        return 2
    def run_change(self, page: Page, action_dictionary: dict) -> int:
        """Run changes on page and return exit code.

        Joins the words in action_dictionary['words'] in place (same line
        and same deletion status) or via Word.join_words otherwise, then
        saves the page. Returns 2 when no words were given.
        """
        exit_code = 0
        add_white_space_between_words = action_dictionary['add_white_space_between_words']\
                if bool(action_dictionary.get('add_white_space_between_words'))\
                else False
        words = action_dictionary['words']\
                if bool(action_dictionary.get('words'))\
                else []
        if len(words) > 0:
            if len(set([ word.line_number for word in words ])) == 1\
            and len(set([ word.deleted for word in words ])) == 1:
                # simple case: all words share line and deletion status, join into the first
                new_word = words[0]
                for word2join in words[1:]:
                    page.words.remove(word2join)
                    new_word.join(word2join, add_white_space_between_words=add_white_space_between_words)
            else:
                new_word = Word.join_words(words, add_white_space_between_words=add_white_space_between_words)
                # insert the joined word where the first original word (or its containing word) was
                index = len(page.words)
                if words[0] in page.words:
                    index = page.words.index(words[0])
                elif len([ word for word in page.words if words[0] in word.word_parts ]) > 0:
                    index = page.words.index([ word for word in page.words if words[0] in word.word_parts ][0])
                for word2join in words:
                    if word2join in page.words:
                        page.words.remove(word2join)
                    elif len([ word for word in page.words if word2join in word.word_parts ]) > 0:
                        page.words.remove([ word for word in page.words if word2join in word.word_parts ][0])
                page.words.insert(index, new_word)
            if not UNITTESTING:
                print(f'writing to {page.page_tree.docinfo.URL}')
                save_page(page, backup=True, attach_first=True, script_name=f'{__file__}:{inspect.currentframe().f_back.f_code.co_name}')
                page = Page(page.page_tree.docinfo.URL)
        else:
            exit_code = 2
        return exit_code
class SimpleJoinWords(JoinWords):
    """JoinWords handler selected when the response is simply a list of
    word ids (i.e. starts with a digit).
    """
    def match(self, response: str) ->bool:
        """Return whether response matchs with handler.
        """
        # re.match returns Optional[Match]; convert it so the declared
        # bool return type actually holds for callers comparing with is True/False.
        return re.match(r'\d+', response) is not None
class SaveChanges(ResponseHandler):
    """Handler that applies word property changes from a json request and
    saves the page.
    """
    # indices into the RELEVANT_PROPERTIES tuples
    WORD_INDEX = 0
    WDICT_INDEX = 1
    # (word attribute name, json word-dict key) pairs that may be updated
    RELEVANT_PROPERTIES = [ ('deleted','deleted'), ('line_number','line') ]
    def handle_interactive_response(self, page: Page, response: str, shell) -> int:
        """Handle response and return exit code.
        """
        self.run_change(page, {})
        return shell.run_interactive_editor(page)
    def _update_transkription_word(self, word, word_dict) ->int:
        """Update properites of word according to word_dict,
        return exit_code

        For words with parts, the part is addressed by the middle component
        of a three-part 'tp_id' ('...:w<index>:...'); returns 2 when the id
        does not address a part.
        """
        exit_code = 0
        for relevant_property in self.RELEVANT_PROPERTIES:
            if len(word.word_parts) > 0:
                if len(word_dict['tp_id'].split(':')) == 3:
                    wp_index = int(word_dict['tp_id'].split(':')[1].replace('w',''))
                    # set the attribute directly via __dict__ keyed by the property name
                    word.word_parts[wp_index].__dict__[relevant_property[self.WORD_INDEX]] = word_dict[relevant_property[self.WDICT_INDEX]]
                else:
                    return 2
            else:
                word.__dict__[relevant_property[self.WORD_INDEX]] = word_dict[relevant_property[self.WDICT_INDEX]]
        return exit_code
    def _update_faksimile_word(self, word, word_dict, words) ->int:
        """Update properites of word according to word_dict,
        return exit_code

        Moves the faksimile position identified by word_dict['fp_id'] from
        the word with word_dict['old_id'] (or one of its parts) to word.
        Returns 2 when the position, 3 when the old word cannot be found.
        """
        exit_code = 0
        if word_dict.get('old_id') is not None:
            fp_id = word_dict['fp_id']
            old_id = int(word_dict['old_id'])
            if len([w for w in words if w.id == old_id ]) > 0:
                old_word = [w for w in words if w.id == old_id ][0]
                faksimile_position = None
                if len([ fp for fp in old_word.faksimile_positions if fp.id == fp_id ]) > 0:
                    faksimile_position = [ fp for fp in old_word.faksimile_positions if fp.id == fp_id ][0]
                    old_word.faksimile_positions.remove(faksimile_position)
                elif len([ fp for w in old_word.word_parts for fp in w.faksimile_positions if fp.id == fp_id ]) > 0:
                    for w in old_word.word_parts:
                        for fp in w.faksimile_positions:
                            if fp.id == fp_id:
                                faksimile_position = fp
                                w.faksimile_positions.remove(faksimile_position)
                                break
                if faksimile_position is not None:
                    word.faksimile_positions.append(faksimile_position)
                else:
                    return 2
            else:
                return 3
        return exit_code
    def _update_word(self, word, word_dict, words) ->int:
        """Update properites of word according to word_dict,
        return exit_code

        Dispatches on whether the json word dict addresses a transkription
        ('tp_id') or a faksimile ('fp_id') position; returns 2 on bad input.
        """
        exit_code = 0
        if bool(word_dict.get('tp_id')):
            exit_code = self._update_transkription_word(word, word_dict)
            if exit_code > 0:
                return exit_code
        elif bool(word_dict.get('fp_id')):
            exit_code = self._update_faksimile_word(word, word_dict, words)
            if exit_code > 0:
                print(exit_code)
                return exit_code
        else:
            return 2
        return exit_code
    def handle_response(self, page: Page, json_dict: dict) -> int:
        """Handle response and return exit code.
        """
        json_word_ids = [ int(jw.get('id')) for jw in json_dict['words'] ]
        # NOTE(review): debug prints left in — consider removing or routing through logging
        print('updating word', json_dict, json_word_ids, page.words[0].id)
        for word in page.words:
            if word.id in json_word_ids:
                print('updating word', word.id, word.text)
                word_dict = [ jw for jw in json_dict['words'] if int(jw.get('id')) == word.id ][0]
                if self._update_word(word, word_dict, page.words) > 0:
                    return 2
        return self.run_change(page, {})
    def run_change(self, page: Page, action_dictionary: dict) -> int:
        """Run changes on page and return exit code.

        Saves the page (with backup) unless running under unit tests.
        """
        exit_code = 0
        if not UNITTESTING:
            print(f'writing to {page.page_tree.docinfo.URL}')
            save_page(page, backup=True, attach_first=True, script_name=f'{__file__}:{inspect.currentframe().f_back.f_code.co_name}')
            page = Page(page.page_tree.docinfo.URL)
        return exit_code
class SavePositions(SaveChanges):
    """Handler that updates the geometry (left/top/bottom) of transkription
    and faksimile positions from a json request and saves the page.
    """
    def _update_word(self, word, word_dict_list) ->int:
        """Update properites of word according to word_dict,
        return exit_code
        """
        exit_code = 0
        for word_dict in word_dict_list:
            if bool(word_dict.get('tp_id')):
                exit_code = self._update_transkription_position(word, word_dict)
                if exit_code > 0:
                    return exit_code
            elif bool(word_dict.get('fp_id')):
                exit_code = self._update_faksimile_position(word, word_dict)
                if exit_code > 0:
                    return exit_code
        return exit_code
    def _update_transkription_position(self, word, word_dict) ->int:
        """Update transkription position properites of word according to word_dict,
        return exit_code

        'tp_id' has the form '...:w<part>:tp<pos>' (word with parts) or
        '...:tp<pos>' (plain word); bottom is recomputed from top + height.
        Returns 2 on an unresolvable id.
        """
        tp_id_list = word_dict['tp_id'].split(':')
        if len(tp_id_list) == 3 and len(word.word_parts) > 0:
            wp_index = int(tp_id_list[1].replace('w',''))
            tp_index = int(tp_id_list[2].replace('tp',''))
            if wp_index < len(word.word_parts) and tp_index < len(word.word_parts[wp_index].transkription_positions):
                word.word_parts[wp_index].transkription_positions[tp_index].left = float(word_dict['left'])
                word.word_parts[wp_index].transkription_positions[tp_index].top = float(word_dict['top'])
                word.word_parts[wp_index].transkription_positions[tp_index].bottom = word.word_parts[wp_index].transkription_positions[tp_index].top\
                        + word.word_parts[wp_index].transkription_positions[tp_index].height
            else:
                return 2
        elif len(tp_id_list) == 2:
            tp_index = int(tp_id_list[1].replace('tp',''))
            if tp_index < len(word.transkription_positions):
                word.transkription_positions[tp_index].left = float(word_dict['left'])
                word.transkription_positions[tp_index].top = float(word_dict['top'])
                word.transkription_positions[tp_index].bottom = word.transkription_positions[tp_index].top\
                        + word.transkription_positions[tp_index].height
            else:
                return 2
        else:
            return 2
        return 0
    def _update_faksimile_position(self, word, word_dict) ->int:
        """Update faksimile position properites of word according to word_dict,
        return exit_code

        NOTE(review): the second lookup is an 'if', not 'elif' — a match in a
        word part overrides a match on the word itself; confirm this is intended.
        """
        exit_code = 0
        fp_id = word_dict['fp_id']
        faksimile_position = None
        if len([ fp for fp in word.faksimile_positions if fp.id == fp_id ]) > 0:
            faksimile_position = [ fp for fp in word.faksimile_positions if fp.id == fp_id ][0]
        if len([ fp for w in word.word_parts for fp in w.faksimile_positions if fp.id == fp_id ]) > 0:
            faksimile_position = [ fp for w in word.word_parts for fp in w.faksimile_positions if fp.id == fp_id ][0]
        if faksimile_position is not None:
            faksimile_position.left = float(word_dict['left'])
            faksimile_position.top = float(word_dict['top'])
            faksimile_position.bottom = faksimile_position.top + faksimile_position.height
        else:
            return 2
        return exit_code
    def handle_response(self, page: Page, json_dict: dict) -> int:
        """Handle response and return exit code.
        """
        json_word_ids = [ jw.get('id') for jw in json_dict['words'] ]
        for word in page.words:
            if word.id in json_word_ids:
                word_dict_list = [ jw for jw in json_dict['words'] if jw.get('id') == word.id ]
                if self._update_word(word, word_dict_list) > 0:
                    return 2
        return self.run_change(page, {})
class AddDeletionPath(SaveChanges):
    """Handler that attaches deletion paths from a json request to words
    and saves the page.
    """
    def _add_deletion_path(self, page, word, word_dict_list) ->int:
        """Update properites of word according to word_dict,
        return exit_code

        Appends the page's deletion path with the given d attribute to the
        word unless the word already has it. Returns 2 when the page does
        not know the path.
        """
        exit_code = 0
        for word_dict in word_dict_list:
            if len([ path for path in word.deletion_paths if path.d_attribute == word_dict['deletion_path']]) == 0:
                dpath = page.get_word_deletion_path(d_attribute=word_dict['deletion_path'])
                if dpath is not None:
                    word.deletion_paths.append(dpath)
                else:
                    exit_code = 2
        return exit_code
    def handle_response(self, page: Page, json_dict: dict) -> int:
        """Handle response and return exit code.
        """
        transkription_words = self.get_transkription_words(json_dict)
        # only words that actually carry a deletion path in the request
        json_word_ids = [ jw.get('id') for jw in transkription_words if bool(jw.get('deletion_path')) ]
        for word in page.words:
            if word.id in json_word_ids:
                word_dict_list = [ jw for jw in transkription_words if jw.get('id') == word.id ]
                if self._add_deletion_path(page, word, word_dict_list) > 0:
                    return 2
        return self.run_change(page, {})
class RemoveDeletionPath(SaveChanges):
    """Handler that removes deletion paths named in a json request from
    words (and from the page tree) and saves the page.
    """
    def _remove_deletion_path(self, page, word, word_dict_list) ->int:
        """Update properites of word according to word_dict,
        return exit_code

        Recurses into word parts; returns 0 as soon as at least one path was
        removed anywhere, 2 when nothing matched.
        """
        exit_code = 2
        if len(word.word_parts) > 0:
            exit_code = 2
            for wpart in word.word_parts:
                result = self._remove_deletion_path(page, wpart, word_dict_list)
                if result == 0:
                    exit_code = 0
        deletion_paths = [ path for path in word.deletion_paths if path.d_attribute in\
                [ word_dict['deletion_path'] for word_dict in word_dict_list ] ]
        if len(deletion_paths) > 0:
            for path in deletion_paths:
                if path in word.deletion_paths:
                    word.deletion_paths.remove(path)
                # also drop the corresponding node from the xml tree
                for node in page.page_tree.xpath(f'./{WordDeletionPath.XML_TAG}[@d="{path.d_attribute}"]'):
                    node.getparent().remove(node)
            exit_code = 0
        return exit_code
    def handle_response(self, page: Page, json_dict: dict) -> int:
        """Handle response and return exit code.
        """
        transkription_words = self.get_transkription_words(json_dict)
        json_word_ids = [ jw.get('id') for jw in transkription_words if bool(jw.get('deletion_path')) ]
        for word in page.words:
            if word.id in json_word_ids:
                word_dict_list = [ jw for jw in transkription_words if jw.get('id') == word.id ]
                if self._remove_deletion_path(page, word, word_dict_list) > 0:
                    return 2
        return self.run_change(page, {})
class JoinDeletionPath(SaveChanges):
    """Handler that joins several deletion paths of a word into one path
    and saves the page.
    """
    def _join_deletion_path(self, page, word, word_dict_list) ->int:
        """Update properites of word according to word_dict,
        return exit_code

        Concatenates the d attributes of the selected paths into one svg
        path (replacing 'M' with 'L' in all but the first so the subpaths
        connect — presumably intended for simple line paths; TODO confirm),
        replaces the originals on the word and page, and rewrites the page
        tree nodes. Returns 2 when fewer than two paths were selected.
        """
        deletion_paths = [ path for path in word.deletion_paths if path.d_attribute in\
                [ word_dict['deletion_path'] for word_dict in word_dict_list ] ]
        if len(deletion_paths) > 1:
            path_string = ''
            for p in deletion_paths:
                path_string = path_string + ' ' + p.d_attribute.replace('M', 'L')\
                        if path_string != ''\
                        else p.d_attribute
                word.deletion_paths.remove(p)
                if p in page.word_deletion_paths:
                    page.word_deletion_paths.remove(p)
            new_path = parse_path(path_string)
            # the joined path inherits id and style from the first original path
            word.deletion_paths.append(WordDeletionPath(Path(id=deletion_paths[0].id, path=new_path), deletion_paths[0].style))
            page.word_deletion_paths.append(word.deletion_paths[-1])
            for node in page.page_tree.xpath(f'./{WordDeletionPath.XML_TAG}'): node.getparent().remove(node)
            for p in page.word_deletion_paths: p.attach_object_to_tree(page.page_tree)
            return 0
        return 2
    def handle_response(self, page: Page, json_dict: dict) -> int:
        """Handle response and return exit code.
        """
        transkription_words = self.get_transkription_words(json_dict)
        json_word_ids = [ jw.get('id') for jw in transkription_words if bool(jw.get('deletion_path')) ]
        for word in page.words:
            if word.id in json_word_ids:
                word_dict_list = [ jw for jw in transkription_words if jw.get('id') == word.id ]
                if self._join_deletion_path(page, word, word_dict_list) > 0:
                    return 2
        return self.run_change(page, {})
class RequestPathsNearWords(SaveChanges):
    def handle_response(self, page: Page, json_dict: dict) -> int:
        """Flag every requested word (those carrying a deletion path in the
        json request) for later path lookup, then save the page.
        """
        flagged_ids = [ jw.get('id')\
                        for jw in self.get_transkription_words(json_dict)\
                        if bool(jw.get('deletion_path')) ]
        flag = 'add_paths_near_words'
        for candidate in page.words:
            if candidate.id in flagged_ids and flag not in candidate.process_flags:
                candidate.process_flags.append(flag)
        return self.run_change(page, {})
+class SetTaskDone(SaveChanges):
+ def handle_response(self, page: Page, json_dict: dict) -> int:
+ """Handle response and return exit code.
+ """
+ if not bool(json_dict.get('task')):
+ return 2
+ task = json_dict.get('task')
+ checker = CheckerHandler(page)
+ checker.set_task_done(task)
+ return self.run_change(page, {})
+
class Reload(ResponseHandler):
    def handle_interactive_response(self, page: Page, response: str, shell) -> int:
        """Reload the page from its xml file and restart the editor on it.
        """
        reloaded_page = Page(page.page_tree.docinfo.URL)
        return shell.run_interactive_editor(reloaded_page)
class RestoreBackup(ResponseHandler):
    def handle_interactive_response(self, page: Page, response: str, shell) -> int:
        """Restart the editor on the page's backup file, if one exists.
        """
        if page.bak_file is None:
            # no backup recorded on the page, nothing we can restore
            print('Could not restore backup file, please restore manually!')
            return 2
        return shell.run_interactive_editor(Page(page.bak_file))
class ChangeLine2Value(ResponseHandler):
    """Handler that assigns a new line number to a set of words.
    """
    def handle_interactive_response(self, page: Page, response: str, shell) -> int:
        """Handle response and return exit code.

        Accepts 'l:<line> <id> <id> ...' in one go, or asks interactively
        for the missing line number and/or word ids.
        """
        words = []
        line_number = -1
        if re.match(r'l:\d+\s\d+', response):
            # both the line number and the word ids were supplied
            line_number = int(response.replace('l:', '').split(' ')[0])
            words = shell._get_words_from_response(re.compile(r'l:\d+\s').sub('', response), page.words)
        else:
            if not re.match(r'l:\d+$', response):
                new_response_line = input('Specify new line number>')
                if re.match(r'^\d+$', new_response_line):
                    line_number = int(new_response_line)
            else:
                line_number = int(response.replace('l:', ''))
            new_response = input(f'Specify ids of words for which line number should be changed to {line_number}>')
            if re.match(r'\d+', new_response):
                # fixed: used to call the undefined name 'shell_get_words_from_response'
                words = shell._get_words_from_response(new_response, page.words)
        action_dictionary = { 'words': words, 'line_number' : line_number }
        if self.run_change(page, action_dictionary) == 0:
            return shell.run_interactive_editor(page)
        return 2
    def run_change(self, page: Page, action_dictionary: dict) -> int:
        """Run changes on page and return exit code.

        Sets the line number on every word and saves the page; returns 2
        when no valid line number was given.
        """
        exit_code = 0
        line_number = action_dictionary['line_number']\
                if bool(action_dictionary.get('line_number'))\
                else -1
        words = action_dictionary['words']\
                if bool(action_dictionary.get('words'))\
                else []
        if line_number != -1:
            for word in words: word.line_number = line_number
            if not UNITTESTING:
                print(f'writing to {page.page_tree.docinfo.URL}')
                save_page(page, backup=True, attach_first=True, script_name=f'{__file__}:{inspect.currentframe().f_back.f_code.co_name}')
                page = Page(page.page_tree.docinfo.URL)
        else:
            exit_code = 2
        return exit_code
class CreateCorrectionHistory(ResponseHandler):
    """Handler that creates a correction history for a set of words.
    """
    def handle_interactive_response(self, page: Page, response: str, shell) -> int:
        """Handle response and return exit code.
        """
        # fixed: 'words' used to be unbound (NameError) when neither the
        # response nor the interactive input matched
        words = []
        if re.match(r'c\w*\s\d+', response):
            words = shell._get_words_from_response(re.compile(r'c\w*\s').sub('', response), page.words)
        else:
            new_response = input(f'Specify ids of words to create a correction history. >')
            if re.match(r'\d+', new_response):
                words = shell._get_words_from_response(new_response, page.words)
        action_dictionary = { 'words': words }
        if self.run_change(page, action_dictionary) == 0:
            return shell.run_interactive_editor(page)
        return 2
    def run_change(self, page: Page, action_dictionary: dict) -> int:
        """Run changes on page and return exit code.

        Creates a correction history on every word and saves the page;
        returns 2 when no words were given.
        """
        exit_code = 0
        words = action_dictionary['words']\
                if bool(action_dictionary.get('words'))\
                else []
        if len(words) > 0:
            for word in words: word.create_correction_history()
            if not UNITTESTING:
                print(f'writing to {page.page_tree.docinfo.URL}')
                save_page(page, backup=True, attach_first=True, script_name=f'{__file__}:{inspect.currentframe().f_back.f_code.co_name}')
                page = Page(page.page_tree.docinfo.URL)
        else:
            exit_code = 2
        return exit_code
class DeleteCorrectionHistory(ResponseHandler):
    """Handler that deletes the correction history of a set of words.
    """
    def handle_interactive_response(self, page: Page, response: str, shell) -> int:
        """Handle response interactively and return exit code.
        """
        # fixed: 'words' used to be unbound (NameError) when neither the
        # response nor the interactive input matched
        words = []
        if re.match(r'D\w*\s\d+', response):
            words = shell._get_words_from_response(re.compile(r'D\w*\s').sub('', response), page.words)
        else:
            new_response = input(f'Specify ids of words to delete their correction history. >')
            if re.match(r'\d+', new_response):
                words = shell._get_words_from_response(new_response, page.words)
        action_dictionary = { 'words' : words }
        if self.run_change(page, action_dictionary) == 0:
            return shell.run_interactive_editor(page)
        return 2
    def run_change(self, page: Page, action_dictionary: dict) -> int:
        """Run changes on page and return exit code.

        Clears earlier_version and corrections on every word and saves the
        page; returns 2 when no words were given.
        """
        exit_code = 0
        words = action_dictionary['words']\
                if bool(action_dictionary.get('words'))\
                else []
        if len(words) > 0:
            for word in words:
                print(word.text)
                word.earlier_version = None
                word.corrections = []
            if not UNITTESTING:
                print(f'writing to {page.page_tree.docinfo.URL}')
                save_page(page, backup=True, attach_first=True, script_name=f'{__file__}:{inspect.currentframe().f_back.f_code.co_name}')
                page = Page(page.page_tree.docinfo.URL)
        else:
            exit_code = 2
        return exit_code
class ChangeDeletionStatus(ResponseHandler):
    """Handler that marks words as deleted ('d...') or undeleted ('u...').
    """
    def handle_interactive_response(self, page: Page, response: str, shell) -> int:
        """Handle response and return exit code.
        """
        # fixed: 'words' used to be unbound (NameError) when neither the
        # response nor the interactive input matched
        words = []
        if re.match(r'[du]\w*\s\d+', response):
            words = shell._get_words_from_response(re.compile(r'[du]\w*\s').sub('', response), page.words)
        else:
            deletion_target = 'delete' if response.startswith('d') else 'undelete'
            new_response = input(f'Specify ids of words to {deletion_target}. >')
            if re.match(r'\d+', new_response):
                words = shell._get_words_from_response(new_response, page.words)
        action_dictionary = { 'words': words, 'deleted': response.startswith('d') }
        if self.run_change(page, action_dictionary) == 0:
            return shell.run_interactive_editor(page)
        return 2
    def run_change(self, page: Page, action_dictionary: dict) -> int:
        """Run changes on page and return exit code.

        Sets the deleted flag on every word and saves the page; returns 2
        when no words were given.
        """
        exit_code = 0
        words = action_dictionary['words']\
                if bool(action_dictionary.get('words'))\
                else []
        word_should_be_deleted = bool(action_dictionary.get('deleted'))
        if len(words) > 0:
            for word in words: word.deleted = word_should_be_deleted
            if not UNITTESTING:
                print(f'writing to {page.page_tree.docinfo.URL}')
                save_page(page, backup=True, attach_first=True, script_name=f'{__file__}:{inspect.currentframe().f_back.f_code.co_name}')
                page = Page(page.page_tree.docinfo.URL)
        else:
            exit_code = 2
        return exit_code
class SplitWords(ResponseHandler):
def _split_word(self, page, word, split_text):
"""Split word.
"""
index = page.words.index(word)
_, left, right = word.split(split_text)
page.words[index] = left
page.words.insert(index+1, right)
def create_requirement_list(self) ->list:
"""Create a requirement dictionary.
"""
return [{ 'name': 'split_text', 'type': 'string', 'input': None }]
def handle_interactive_response(self, page: Page, response: str, shell) -> int:
"""Handle response and return exit code.
"""
if re.match(r's\s\w+\s\d+', response):
words = shell._get_words_from_response(re.compile('s\s\w+\s').sub('', response), page.words)
split_text = response.split(' ')[1]
else:
split_text = input('Input split text>')
new_response = input(f'Specify ids of words to split. >')
if re.match(r'\d+', new_response):
words = shell._get_words_from_response(new_response, page.words)
action_dictionary = { 'words': words, 'split_text': split_text }
if self.run_change(page, action_dictionary) == 0:
return shell.run_interactive_editor(page)
return 2
def run_change(self, page: Page, action_dictionary: dict) -> int:
"""Run changes on page and return exit code.
"""
exit_code = 0
words = action_dictionary['words']\
if bool(action_dictionary.get('words'))\
else []
split_text = action_dictionary['split_text']\
if bool(action_dictionary.get('split_text'))\
else ''
if len(words) > 0 and split_text != '':
for word in words: self._split_word(page, word, split_text)
if not UNITTESTING:
print(f'writing to {page.page_tree.docinfo.URL}')
save_page(page, backup=True, attach_first=True, script_name=f'{__file__}:{inspect.currentframe().f_back.f_code.co_name}')
page = Page(page.page_tree.docinfo.URL)
else:
exit_code = 2
return exit_code
class AddBox(ResponseHandler):
def create_requirement_list(self) ->list:
"""Create a requirement dictionary.
"""
return [{ 'name': 'box_text', 'type': 'string', 'input': None },\
{ 'name': 'overwritten_by', 'type': 'string', 'input': None },\
{ 'name': 'is_earlier_version', 'type': 'boolean', 'input': False }]
def run_change(self, page: Page, action_dictionary: dict) -> int:
"""Run changes on page and return exit code.
"""
exit_code = 0
words = action_dictionary['words']\
if bool(action_dictionary.get('words'))\
else []
missing_text = action_dictionary.get('box_text')
is_earlier_version = action_dictionary.get('is_earlier_version')
overwritten_by = action_dictionary.get('overwritten_by')
if len(words) > 0 and missing_text is not None:
for word in words:
if overwritten_by is not None:
split_into_parts_and_attach_box(word, 0, missing_text, is_earlier_version, overwritten_by)
else:
attach_box(word, 0, missing_text, False)
word.create_correction_history()
if len(word.corrections) > 0:
for wp in word.word_parts:
wp.overwrites_word = None
if not UNITTESTING:
print(f'writing to {page.page_tree.docinfo.URL}')
save_page(page, backup=True, attach_first=True, script_name=f'{__file__}:{inspect.currentframe().f_back.f_code.co_name}')
page = Page(page.page_tree.docinfo.URL)
else:
exit_code = 2
return exit_code
class ResponseOrganizer:
RESULT = 'result'
TIMESTAMP_NOT_SET = -1
def __init__(self, manuscript=None):
self.manuscript = manuscript
+ self.do_not_send = []
self.response_handler_dictionary = {}
self._add_response_handler(JoinWords(action_name='join words', description='join words'))
self._add_response_handler(SplitWords(action_name='split words', description='split word according to split text'))
self._add_response_handler(CreateCorrectionHistory(action_name='create correction history', description='creates a correction history for selected words'))
self._add_response_handler(DeleteCorrectionHistory(action_name='delete correction history', description='deletes the correction history of selected words'))
self._add_response_handler(AddBox(action_name='add box', description='add box with overwritten text'))
self._add_response_handler(SaveChanges(action_name='save changes', description='save change to line number/deletion status for word(s)' ))
self._add_response_handler(SavePositions(action_name='save positions', description='save new transkription position(s)' ))
self._add_response_handler(AddDeletionPath(action_name='add deletion paths', description='add new deletion paths to word' ))
self._add_response_handler(JoinDeletionPath(action_name='join deletion paths', description='join deletion paths of selected words' ))
self._add_response_handler(RemoveDeletionPath(action_name='remove deletion paths', description='remove deletion paths of selected words' ))
self._add_response_handler(RequestPathsNearWords(action_name='request paths near words', description='request paths near selected words' ))
self._add_response_handler(Reload(action_name='reload', description='reload page from file' ))
+ self._add_response_handler(SetTaskDone(action_name='set task done', description='reload page from file' ), add_to_do_not_send=True)
+
- def _add_response_handler(self, response_handler: ResponseHandler):
+ def _add_response_handler(self, response_handler: ResponseHandler, add_to_do_not_send=False):
"""Add response_handler to response_handler_dictionary.
"""
+ if add_to_do_not_send:
+ self.do_not_send.append(response_handler)
self.response_handler_dictionary.update({response_handler.action_name: response_handler})
+
+ def _get_response_handlers(self) ->list:
+ """Return a list of response_handlers.
+ """
+ return [ response_handler for response_handler in self.response_handler_dictionary.values()\
+ if response_handler not in self.do_not_send ]
def create_json_dict(self, xml_file: str, last_operation_result=None) ->dict:
"""Return a json dict of page with information about action.
"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
page = Page(xml_file, add_paths_near_words=True, warn=True)
+ checker = CheckerHandler(page)
replace_ligatures(page)
converter = JSONConverter(page)
json_dict = converter.create_json_dict()
pages = []
if self.manuscript is not None and isfile(self.manuscript):
manuscript_tree = ET.parse(self.manuscript)
pages = [ p.replace('./', '') for p in manuscript_tree.xpath('//page/@output') if isfile(p) ]
action_dict = { 'target_file': xml_file,\
'pages': pages,\
- 'date_stamp': os.path.getmtime(xml_file) }
+ 'date_stamp': os.path.getmtime(xml_file),\
+ 'tasks': checker.get_todos() }
if last_operation_result is not None:
action_dict.update({self.RESULT: last_operation_result })
if len(w) > 0:
msg = str(w[-1].message)\
if last_operation_result is None\
else last_operation_result + '\n' + str(w[-1].message)
action_dict.update({self.RESULT: msg })
response_handlers = []
- for response_handler in self.response_handler_dictionary.values():
+ for response_handler in self._get_response_handlers():
response_handlers.append(response_handler.create_json_dict())
action_dict.update({ 'response_handlers': response_handlers })
json_dict.update({ 'actions': action_dict})
return json_dict
def handle_response(self, json_dict: dict) ->dict:
"""Handle response in json_dict and return new data json_dict.
"""
if bool(json_dict.get('target_file')):
target_file = json_dict['target_file']
if bool(json_dict.get('date_stamp')):
if json_dict['date_stamp'] == self.TIMESTAMP_NOT_SET\
or os.path.getmtime(target_file) <= json_dict['date_stamp']:
exit_code = 2
operation = 'unknown'
if bool(json_dict.get('response_handler'))\
and bool(self.response_handler_dictionary.get(json_dict['response_handler']['action_name'])):
operation = json_dict['response_handler']['action_name']
response_handler = self.response_handler_dictionary[operation]
exit_code = response_handler.handle_response(Page(target_file), json_dict)
message = f'Operation "{operation}" succeeded!' if exit_code == 0 else f'Operation "{operation}" failed'
return self.create_json_dict(target_file, last_operation_result=message)
else:
return self.create_json_dict(target_file,\
last_operation_result=f'FAIL: file {target_file} was changed between operations!')
else:
return self.create_json_dict(target_file,\
last_operation_result='ERROR: there was no key "date_stamp" in json')
else:
return { 'actions': { self.RESULT: 'ERROR: there was no key "target_file" in json!' }}
class InteractiveShell:
def __init__(self):
self.response_handlers = []
self.response_handlers.append(SimpleJoinWords(dialog_string='specify ids of words to join [default]'))
self.response_handlers.append(RestoreBackup(response_starts_with='b', dialog_string='b=restore backup'))
self.response_handlers.append(CreateCorrectionHistory(response_starts_with='c', dialog_string='c=create correction history [+ ids]'))
self.response_handlers.append(DeleteCorrectionHistory(response_starts_with='D', dialog_string='D=delete correction history [+ ids]'))
self.response_handlers.append(ChangeDeletionStatus(response_starts_with='d', dialog_string='d=mark deleted [+ ids]'))
self.response_handlers.append(SaveChanges(response_starts_with='i', dialog_string='i=fix ids' ))
self.response_handlers.append(ChangeLine2Value(response_starts_with='l', dialog_string='l[:value]=change line to value for ids' ))
self.response_handlers.append(Reload(response_starts_with='r', dialog_string='r=reload xml file'))
self.response_handlers.append(SplitWords(response_starts_with='s', dialog_string='s=split and join word ("s splittext id")'))
self.response_handlers.append(ChangeDeletionStatus(response_starts_with='u', dialog_string='u=undelete [+ ids]'))
self.response_handlers.append(JoinWords(response_starts_with='w', dialog_string='w=join words with whitespace between them [+ ids]'))
self.response_handlers.append(ResponseHandler())
def _get_words_from_response(self, response, words) ->list:
"""Return a list of word that correspond to indices
"""
if re.match(r'\d+-\d+', response)\
or re.match(r'\d+\+', response):
index_boundaries = []
if response[-1] == '+':
index_boundaries.append(int(response[:response.index('+')]))
index_boundaries.append(index_boundaries[0]+1)
else:
index_boundaries = [ int(i) for i in response.split('-') ]
index_boundaries_length_diff = len(response.split('-')[0]) - len(response.split('-')[1])
if index_boundaries_length_diff > 0:
index_boundaries[1] = int(response.split('-')[0][0-index_boundaries_length_diff-1] + response.split('-')[1])
indices = [ i for i in range(index_boundaries[0], index_boundaries[1]+1) ]
if index_boundaries[0] > index_boundaries[1]:
indices = [ index_boundaries[0] ]
while indices[-1] > index_boundaries[1]:
indices.append(indices[-1]-1)
else:
indices = [ int(i) for i in response.split(' ') ]
result_words = []
for index in indices:
if len([ word for word in words if word.id == index ]) > 0:
result_words += [ word for word in words if word.id == index ]
return result_words
def run_interactive_editor(self, page) -> int:
"""Run interactive shell.
"""
replace_ligatures(page)
HTMLConverter(page).convert()
for response_handler in self.response_handlers: response_handler.print_dialog()
response = input('>')
for response_handler in self.response_handlers:
if response_handler.match(response):
return response_handler.handle_interactive_response(page, response, self)
def replace_ligatures(page):
"""Replace ligatures
"""
if len([ word for word in page.words if re.match(r'.*[flfi]', word.text) ]) > 0:
for word in [ word for word in page.words if re.match(r'.*[fi]', word.text) ]:
word.text = word.text.replace('fi', 'fi')
for word in [ word for word in page.words if re.match(r'.*[fl]', word.text) ]:
word.text = word.text.replace('fl', 'fl')
def dict_contains_keys(a_dict, key_list)->bool:
"""Return whether dict a_dict contains key path given by key_list.
"""
if len(key_list) == 0:
return True
else:
if key_list[0] in a_dict.keys():
return dict_contains_keys(a_dict[key_list[0]], key_list[1:])
return False
def usage():
"""prints information on how to use the script
"""
print(main.__doc__)
def main(argv):
"""This program can be used to fix faksimile position ->set them to their absolute value.
fixes/interactive_editor.py [OPTIONS]
a xml file about a manuscript, containing information about its pages.
a xml file about a page, containing information about svg word positions.
OPTIONS:
-h|--help show help
:return: exit code (int)
"""
try:
opts, args = getopt.getopt(argv, "h", ["help"])
except getopt.GetoptError:
usage()
return 2
for opt, arg in opts:
if opt in ('-h', '--help'):
usage()
return 0
if len(args) < 1:
usage()
return 2
exit_status = 0
xml_file = args[0]
if isfile(xml_file):
counter = 0
shell = InteractiveShell()
for page in Page.get_pages_from_xml_file(xml_file, status_contains=STATUS_MERGED_OK):
if not UNITTESTING:
print(Fore.CYAN + f'Processing {page.title}, {page.number} with interactive editor ...' + Style.RESET_ALL)
back_up(page, page.xml_file)
counter += 1 if shell.run_interactive_editor(page) == 0 else 0
if not UNITTESTING:
print(Style.RESET_ALL + f'[{counter} pages changed by interactive shell]')
else:
raise FileNotFoundError('File {} does not exist!'.format(xml_file))
return exit_status
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
Index: py2ttl/py2ttl_data.py
===================================================================
--- py2ttl/py2ttl_data.py (revision 105)
+++ py2ttl/py2ttl_data.py (revision 106)
@@ -1,143 +1,143 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This program can be used to convert py objects to data in turtle format.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see 1}}}
from colorama import Fore, Style
import getopt
import lxml.etree as ET
from os import sep, path, listdir
from os.path import isfile, isdir, dirname, basename
from progress.bar import Bar
import re
import sys
sys.path.append('svgscripts')
-from datatypes.manuscript import ArchivalManuscriptUnity
+from datatypes.archival_manuscript import ArchivalManuscriptUnity
from datatypes.super_page import SuperPage
if dirname(__file__) not in sys.path:
sys.path.append(dirname(__file__))
from class_spec import SemanticClass
from config import check_config_files_exist, get_datatypes_dir, PROJECT_NAME, PROJECT_ONTOLOGY_FILE, PROJECT_URL
from data_handler import RDFDataHandler
sys.path.append('shared_util')
from myxmlwriter import xml2dict
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
class Py2TTLDataConverter:
"""This class can be used convert py objects to rdf data in turtle format.
"""
UNITTESTING = False
def __init__(self, manuscript_file, xml_dictionary_file=None, mapping_dictionary=None):
if mapping_dictionary is None and xml_dictionary_file is not None:
if not Py2TTLDataConverter.UNITTESTING:
print(Fore.CYAN + 'initializing mapping dictionary from file "{}" ...'.format(xml_dictionary_file))
self.mapping_dictionary = xml2dict(xml_dictionary_file)
if not Py2TTLDataConverter.UNITTESTING:
print(Fore.GREEN + '[{} classes added]'.format(str(len(self.mapping_dictionary['classes']))))
elif mapping_dictionary is not None:
self.mapping_dictionary = mapping_dictionary
else:
raise Exception('Error: Py2TTLDataConverter init expects either a xml_dictionary_file or a mapping_dictionary!')
self.manuscript_file = manuscript_file
def convert(self, page_status_list=None):
"""Convert manuscript instantiated with manuscript_file to rdf data and write to target_file.
"""
if page_status_list is None or len(page_status_list) < 1:
page_status_list = ['OK', SuperPage.STATUS_MERGED_OK]
not Py2TTLDataConverter.UNITTESTING and print(Fore.CYAN + 'initializing python objects with file "{}" ...'.format(self.manuscript_file))
manuscript = ArchivalManuscriptUnity.create_cls(self.manuscript_file, page_status_list=page_status_list, update_page_styles=True)
target_data_file = manuscript.title.replace(' ', '_') + '_DATA.ttl'
data_handler = RDFDataHandler(target_data_file, self.mapping_dictionary)
if not Py2TTLDataConverter.UNITTESTING:
print(Fore.GREEN + '[{} pages added]'.format(str(len([ page for page in manuscript.pages if 'xml_file' in page.__dict__.keys()]))))
print(Fore.CYAN + 'adding triples to rdf graph ... ')
data_handler.add_data(manuscript, '')
if not Py2TTLDataConverter.UNITTESTING:
print(Fore.GREEN + '[{} statements added]'.format(str(len(data_handler.data_graph))))
print(Fore.CYAN + 'writing graph to file "{}" ...'.format(target_data_file))
data_handler.write()
if not Py2TTLDataConverter.UNITTESTING:
print(Fore.GREEN + '[OK]')
print(Style.RESET_ALL)
def usage():
"""prints information on how to use the script
"""
print(main.__doc__)
def main(argv):
"""This program can be used to convert py objects to rdf data in turtle format.
py2ttl/py2ttl_data.py [OPTIONS]
xml file of type shared_util.myxmlwriter.FILE_TYPE_XML_MANUSCRIPT.
OPTIONS:
-h|--help: show help
-i|--include-status=STATUS include pages with status = STATUS. STATUS is a ':' seperated string of status, e.g. 'OK:faksimile merged'.
-m|--mapping=mapping_dict.xml xml file generated by py2ttl/py2ttl.py containing mapping information for each property of a class.
:return: exit code (int)
"""
check_config_files_exist()
datatypes_dir = get_datatypes_dir()
target_ontology_file = '.{0}{1}-ontology_autogenerated.ttl'.format(sep, PROJECT_NAME)
xml_dictionary_file = 'mapping_file4' + datatypes_dir.replace(sep, '.') + '2' + target_ontology_file.replace('.' + sep, '').replace(sep, '.').replace('.ttl', '.xml')
manuscript_file = None
page_status_list = None
try:
opts, args = getopt.getopt(argv, "hi:m:", ["help", "include-status=", "mapping="])
except getopt.GetoptError:
usage()
return 2
for opt, arg in opts:
if opt in ('-h', '--help'):
usage()
return 0
elif opt in ('-i', '--include-status'):
page_status_list = arg.split(':')
elif opt in ('-m', '--mapping'):
xml_dictionary_file = arg
if len(args) < 1 :
usage()
return 2
manuscript_file = args[0]
if not isfile(xml_dictionary_file) or not isfile(manuscript_file):
usage()
return 2
converter = Py2TTLDataConverter(manuscript_file, xml_dictionary_file=xml_dictionary_file)
converter.convert(page_status_list=page_status_list)
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
Index: py2ttl/convert.py
===================================================================
--- py2ttl/convert.py (revision 105)
+++ py2ttl/convert.py (revision 106)
@@ -1,115 +1,115 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This program can be used to convert py objects to ontology and data in turtle format.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see 1}}}
from colorama import Fore, Style
import getopt
import lxml.etree as ET
from os import sep, path, listdir
from os.path import isfile, isdir, dirname, basename
from progress.bar import Bar
import re
import sys
sys.path.append('svgscripts')
-from datatypes.manuscript import ArchivalManuscriptUnity
+from datatypes.archival_manuscript import ArchivalManuscriptUnity
if dirname(__file__) not in sys.path:
sys.path.append(dirname(__file__))
from class_spec import SemanticClass
from config import check_config_files_exist, get_datatypes_dir, PROJECT_NAME, PROJECT_ONTOLOGY_FILE, PROJECT_URL
from py2ttl_data import Py2TTLDataConverter
from py2ttl_ontology import Py2TTLOntologyConverter
sys.path.append('shared_util')
from myxmlwriter import xml2dict
from main_util import get_manuscript_files
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
FILE_TYPE_XML_PROJECT = "xmlProjectFile"
def usage():
"""prints information on how to use the script
"""
print(main.__doc__)
def main(argv):
"""This program can be used to convert py objects to a owl:Ontology and rdf data in turtle format.
py2ttl/py2ttl_data.py [OPTIONS] [ ...]
xml file of type shared_util.myxmlwriter.FILE_TYPE_XML_MANUSCRIPT.
OPTIONS:
-h|--help: show help
-i|--include-status=STATUS include pages with status = STATUS. STATUS is a ':' seperated string of status, e.g. 'OK:faksimile merged'.
:return: exit code (int)
"""
check_config_files_exist()
datatypes_dir = get_datatypes_dir()
source_ontology_file = PROJECT_ONTOLOGY_FILE
target_ontology_file = '.{0}{1}-ontology_autogenerated.ttl'.format(sep, PROJECT_NAME)
manuscript_file = None
page_status_list = [ 'OK', 'faksimile merged' ]
try:
opts, args = getopt.getopt(argv, "hi:", ["help", "include-status="])
except getopt.GetoptError:
usage()
return 2
for opt, arg in opts:
if opt in ('-h', '--help'):
usage()
return 0
elif opt in ('-i', '--include-status'):
page_status_list = arg.split(':')
if len(args) < 1 :
usage()
return 2
ontology_created = False
ontology_converter = Py2TTLOntologyConverter(project_ontology_file=source_ontology_file)
output = 2
for manuscript_file in get_manuscript_files(args):
if not isfile(manuscript_file):
usage()
return 2
if not ontology_created:
print(Fore.CYAN + 'Create ontology from "{}" ...'.format(manuscript_file))
if ontology_converter.create_ontology(datatypes_dir, target_ontology_file) == 0:
print(Fore.GREEN + '[Ontology file {0} created]'.format(target_ontology_file))
ontology_created = True
else:
return 2
print(Fore.CYAN + 'Create data from "{}" ...'.format(manuscript_file))
data_converter = Py2TTLDataConverter(manuscript_file, mapping_dictionary=ontology_converter.uri_mapping4cls_and_properties)
output = data_converter.convert(page_status_list=page_status_list)
return output
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
Index: tests_shared_util/test_main.py
===================================================================
--- tests_shared_util/test_main.py (revision 0)
+++ tests_shared_util/test_main.py (revision 106)
@@ -0,0 +1,104 @@
+import unittest
+import os
+from os.path import isfile, isdir, dirname, sep, realpath
+from datetime import datetime
+import shutil
+import tempfile
+import xml.etree.ElementTree as ET
+import lxml.etree as LET
+from rdflib import Graph, URIRef, Literal, BNode, OWL, RDF, RDFS, XSD
+from xmldiff import main
+import sys
+
+sys.path.append('svgscripts')
+from datatypes.page import Page
+
+sys.path.append('shared_util')
+try:
+ from myxmlwriter import attach_dict_to_xml_node, dict2xml, lock_xml_tree, update_metadata, write_pretty, test_lock, xml_has_type,\
+ FILE_TYPE_SVG_WORD_POSITION, FILE_TYPE_XML_DICT, get_dictionary_from_node, xml2dict, parse_xml_of_type
+except ImportError:
+ sys.path.append(dirname(dirname(realpath(__file__))))
+ from shared_util.myxmlwriter import attach_dict_to_xml_node, dict2xml, lock_xml_tree, update_metadata, write_pretty, test_lock, xml_has_type,\
+ FILE_TYPE_SVG_WORD_POSITION, FILE_TYPE_XML_DICT, get_dictionary_from_node, xml2dict, parse_xml_of_type
+
+class TestPrettyWriter(unittest.TestCase):
+ def setUp(self):
+ self.test_dir = tempfile.mkdtemp()
+ self.title = 'ASDF'
+ DATADIR = dirname(__file__) + sep + 'test_data'
+ self.page = DATADIR + sep + 'N_VII_1_page001.xml'
+ self.mydict = { 'asdf': { 'b': { 'a': 1, 'b': 'c' , 'c': URIRef('adf')}},\
+ 'str': 'test' }
+
+ def test_attach_dict_to_xml_node(self):
+ xml_tree = LET.Element('root')
+ attach_dict_to_xml_node(self.mydict, LET.SubElement(xml_tree, 'dict'))
+ #print(LET.dump(xml_tree))
+ self.assertEqual(xml_tree.xpath('//asdf/b/a/@type')[0], 'int')
+ self.assertEqual(xml_tree.xpath('//asdf/b/b/@type')[0], 'str')
+ self.assertEqual(xml_tree.xpath('//asdf/b/c/@type')[0], URIRef.__name__)
+
+ def test_dict2xml(self):
+ test_file = self.test_dir + sep + 'new_test.xml'
+ dict2xml(self.mydict, test_file)
+ self.assertEqual(isfile(test_file), True)
+
+ def test_get_dictionary_from_node(self):
+ test_file = self.test_dir + sep + 'source.xml'
+ dict2xml(self.mydict, test_file)
+ xml_tree = LET.parse(test_file)
+ self.assertEqual(len(xml_tree.xpath('/root/dict')[0].getchildren()), len(self.mydict.keys()))
+ for index, key in enumerate(self.mydict.keys()):
+ mydict = get_dictionary_from_node(xml_tree.xpath('/root/dict')[0].getchildren()[index])
+ self.assertEqual(key in mydict.keys(), True)
+ if type(self.mydict[key]) == dict:
+ self.assertEqual(mydict[key].keys(), self.mydict[key].keys())
+
+ def test_update_metadata(self):
+ test_tree = LET.ElementTree(LET.Element('page', attrib={"title": self.title}))
+ update_metadata(test_tree, __file__)
+ self.assertEqual(test_tree.find('./metadata').find('./createdBy').find('./script').text, __file__)
+ update_metadata(test_tree, __file__)
+ self.assertEqual(len(test_tree.find('./metadata').findall('./modifiedBy[@script="{}"]'.format(__file__))), 1)
+ update_metadata(test_tree, __file__)
+ self.assertEqual(len(test_tree.find('./metadata').findall('./modifiedBy[@script="{}"]'.format(__file__))), 1)
+
+
+ def test_write_pretty(self):
+ et_file = self.test_dir + os.sep + 'et_file.xml'
+ pretty_file = self.test_dir + os.sep + 'pretty_file.xml'
+ manuscript_tree = ET.ElementTree(ET.Element('page', attrib={"title": self.title}))
+ metadata = ET.SubElement(manuscript_tree.getroot(), 'metadata')
+ ET.SubElement(metadata, 'type').text = 'xmlManuscriptFile'
+ createdBy = ET.SubElement(metadata, 'createdBy')
+ manuscript_tree.write(et_file, xml_declaration=True, encoding='utf-8')
+ write_pretty(xml_string=ET.tostring(manuscript_tree.getroot()), file_name=pretty_file)
+ self.assertEqual(main.diff_files(et_file, pretty_file), [])
+ write_pretty(xml_element_tree=manuscript_tree, file_name=pretty_file)
+ self.assertEqual(main.diff_files(et_file, pretty_file), [])
+
+ def test_lock(self):
+ page = Page(self.page)
+ locker_dict = { 'reference_file': 'asdf.txt', 'message': 'locked on this file'}
+ lock_xml_tree(page.page_tree, **locker_dict)
+ self.assertEqual(page.is_locked(), True)
+ #test_lock(page.page_tree)
+
+ def test_xml2dict(self):
+ test_file = self.test_dir + sep + 'source.xml'
+ dict2xml(self.mydict, test_file)
+ mydict = xml2dict(test_file)
+ self.assertEqual(mydict, self.mydict)
+
+ def test_xml_has_type(self):
+ self.assertEqual(xml_has_type(FILE_TYPE_SVG_WORD_POSITION, xml_source_file=self.page), True)
+ self.assertEqual(xml_has_type(FILE_TYPE_XML_DICT, xml_source_file=self.page), False)
+ with self.assertRaises(Exception):
+ parse_xml_of_type(self.page, FILE_TYPE_XML_DICT)
+
+ def tearDown(self):
+ isdir(self.test_dir) and shutil.rmtree(self.test_dir)
+
+if __name__ == "__main__":
+ unittest.main()
Index: svgscripts/process_footnotes.py
===================================================================
--- svgscripts/process_footnotes.py (revision 105)
+++ svgscripts/process_footnotes.py (revision 106)
@@ -1,277 +1,277 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This program can be used to process words after they have been merged with faksimile data.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see 1}}}
from colorama import Fore, Style
import getopt
import lxml.etree as ET
import os
from os import listdir, sep, path, setpgrp, devnull
from os.path import exists, isfile, isdir, dirname, basename
from pathlib import Path as PathlibPath
from progress.bar import Bar
import re
import shutil
import sys
import warnings
if dirname(__file__) not in sys.path:
sys.path.append(dirname(__file__))
-from datatypes.manuscript import ArchivalManuscriptUnity
+from datatypes.archival_manuscript import ArchivalManuscriptUnity
from datatypes.page import Page, STATUS_MERGED_OK, STATUS_POSTMERGED_OK
from datatypes.atypical_writing import AtypicalWriting
from datatypes.clarification import Clarification
from datatypes.editor_comment import EditorComment
from datatypes.editor_correction import EditorCorrection
from datatypes.footnotes import extract_footnotes
from datatypes.line_continuation import LineContinuation
from datatypes.standoff_tag import StandoffTag
from datatypes.text import Text
from datatypes.text_connection_mark import TextConnectionMark
from datatypes.uncertain_decipherment import UncertainDecipherment
from util import back_up
from process_files import update_svgposfile_status
sys.path.append('shared_util')
from myxmlwriter import write_pretty, xml_has_type, FILE_TYPE_SVG_WORD_POSITION, FILE_TYPE_XML_MANUSCRIPT
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
UNITTESTING = False
ATYPICAL_GROUP = re.compile(r'(.*:.*]\s*)(¿)(.*)')
CLARIFICATION_GROUP = re.compile(r'(.*:.*]\s*)(Vk)(.*)')
CONTINUATION_GROUP = re.compile(r'(.*:\s*)(Fortsetzung\s*)')
COMMENT_GROUP = re.compile(r'(.*:.*])')
EDITOR_CORRECTION_GROUP = re.compile(r'(.*:.*]\s*)(>[?]*)(.*)')
LINE_REFERENCE_GROUP = re.compile(r'(\d+-|\d/(\d+/)*)*([0-9]+)(:.*)')
LINE_REFERENCE_GROUP_START_INDEX = 1
LINE_REFERENCE_GROUP_MID_INDEX = 2
LINE_REFERENCE_GROUP_END_INDEX = 3
LINE_COMMENT_GROUP = re.compile(r'(.*\d+:)')
UNCERTAINTY_WORD_GROUP = re.compile(r'(.*:.*]\s*)([>]*\?)(.*)')
UNCERTAINTY_EDITOR_GROUP = re.compile(r'(.*)(\?)')
WORD_REFERENCE_GROUP = re.compile(r'(.*[0-9]+:\s*)(.*)(].*)')
DEBUG = False
def categorize_footnotes(page, footnotes=None, debug=False, skip_after=-1.0, find_content=False):
    """Categorize the footnotes of a page and attach the results to its words and lines.

    :param page: Page whose footnotes should be processed.
    :param footnotes: footnotes to process; extracted from ``page`` when None.
    :param debug: turn the module-level DEBUG flag on while processing.
    :param skip_after: forwarded to extract_footnotes (skip nodes above this position).
    :param find_content: also resolve the content of the page's text connection marks.
    """
    # BUGFIX: the original plain assignment created a *local* DEBUG, so the
    # module-level flag was never actually switched on/off.
    global DEBUG
    DEBUG = debug
    if footnotes is None:
        footnotes = extract_footnotes(page, skip_after=skip_after)
    for footnote in footnotes:
        line_match = re.match(LINE_REFERENCE_GROUP, footnote.content)
        if line_match is not None:
            _process_line_match(page, footnote, line_match)
        else:
            warnings.warn(f'Unknown editor comment without a line reference: <{footnote}>')
    if find_content and len(page.text_connection_marks) > 0:
        TextConnectionMark.find_content_in_footnotes(page, footnotes=footnotes)
    # Persist the categorization on the page's xml tree.
    page.update_and_attach_words2tree()
    for line in page.lines:
        line.attach_object_to_tree(page.page_tree)
    DEBUG = False
    if not UNITTESTING:
        write_pretty(xml_element_tree=page.page_tree, file_name=page.page_tree.docinfo.URL,\
                script_name=__file__, file_type=FILE_TYPE_SVG_WORD_POSITION)
def _is_uncertain(footnote) -> bool:
    """Return whether the footnote carries the editor's sign for uncertainty.

    True when the content matches UNCERTAINTY_EDITOR_GROUP (a trailing '?') and
    that position falls inside an italic standoff markup of the footnote.
    """
    match = re.match(UNCERTAINTY_EDITOR_GROUP, footnote.content)
    if match is None:
        return False
    end_index = match.end()
    for markup in footnote.standoff_markups:
        # the '?' counts only if it is rendered in italics
        if markup.css_string.endswith('italic;')\
        and markup.startIndex <= end_index <= markup.endIndex:
            return True
    return False
def _process_line_match(page, footnote, line_match):
    """Process a footnote whose content starts with a line reference.

    Resolves the referenced line id(s) ('7:', '4-7:' or '1/2/7:'), then
    delegates to _process_word_match (word reference) or
    _process_line_reference (per matched line); warns when no line matches.
    """
    word_match = re.match(WORD_REFERENCE_GROUP, footnote.content)
    end_line_number = int(line_match.group(LINE_REFERENCE_GROUP_END_INDEX))
    lines = []
    if line_match.group(LINE_REFERENCE_GROUP_START_INDEX) is not None:
        if line_match.group(LINE_REFERENCE_GROUP_MID_INDEX) is not None:
            # 'N/M/.../E:' -> explicit list of line ids
            line_ids = [ int(line_id) for line_id in\
                    line_match.group(LINE_REFERENCE_GROUP_START_INDEX).split('/')\
                    if line_id != '' ] + [ end_line_number ]
            lines = [ line for line in page.lines if line.id in line_ids ]
        else:
            # 'N-E:' -> inclusive range of line ids
            start_line_number = int(line_match.group(1)[0:-1])
            lines = [ line for line in page.lines if start_line_number <= line.id <= end_line_number ]
    else:
        # plain 'E:' -> a single line
        lines = [ line for line in page.lines if line.id == end_line_number ]
    if word_match is not None:
        _process_word_match(page, footnote, line_match, word_match.group(2), end_line_number)
    elif len(lines) > 0:
        for line in lines:
            _process_line_reference(page, footnote, line, _is_uncertain(footnote))
    else:
        # BUGFIX: the original referenced an undefined name 'line_number' here,
        # raising NameError instead of warning; use end_line_number.
        warnings.warn(f'Footnote refers to missing line {end_line_number}: {footnote}')
def _process_line_reference(page, footnote, line, is_uncertain):
    """Process footnote if there is a line reference.

    Appends either a LineContinuation or an EditorComment to
    line.editor_comments, or warns if the footnote cannot be categorized.
    """
    continuation_match = re.match(CONTINUATION_GROUP, footnote.content)
    if continuation_match is not None:
        # 'Fortsetzung ...' -> the rest of the content names the continuation target
        reference_string = footnote.content[continuation_match.end():]
        if is_uncertain:
            # drop the trailing '?' that signalled the uncertainty
            reference_string = reference_string[:-1]
        line.editor_comments.append(LineContinuation.create_cls(reference_string=reference_string, is_uncertain=is_uncertain))
    else:
        comment_match = re.match(LINE_COMMENT_GROUP, footnote.content)
        if comment_match is not None:
            is_uncertain = _is_uncertain(footnote)
            # when uncertain, strip the trailing '?' from the comment text
            comment = footnote.content[comment_match.end():-1].strip()\
                    if is_uncertain\
                    else footnote.content[comment_match.end():].strip()
            line.editor_comments.append(EditorComment(comment=comment, is_uncertain=is_uncertain))
        else:
            warnings.warn(f'Unknown editor comment for line "{line.id}": <{footnote}>')
def _process_word_match(page, footnote, line_match, word_text, line_number, parent_word_composition=None):
    """Process footnote if there is a word reference.

    Finds the word on the given line that the footnote refers to and attaches
    an editor comment to it (EditorCorrection, Clarification, AtypicalWriting,
    UncertainDecipherment or plain EditorComment). If word_text contains
    whitespace, each space-separated part is processed recursively.
    """
    # words whose text (or edited text) matches the referenced word text
    referred_words = [ word for word in page.words\
            if word.line_number == line_number\
            and (word.text == word_text\
                or re.match(rf'\W*{word_text}\W', word.text)\
                or word.edited_text == word_text) ]
    # word-part lists of words that contain a part with the referenced text
    referred_word_parts = [ word.word_parts for word in page.words\
            if word.line_number == line_number\
            and len(word.word_parts) > 0\
            and word_text in [ wp.text for wp in word.word_parts ] ]
    # words with a part that overwrites an earlier word with the referenced text
    overwritten_word_matches = [ word for word in page.words\
            if word.line_number == line_number\
            and len(word.word_parts) > 0\
            and len([word_part for word_part in word.word_parts\
                if word_part.overwrites_word is not None\
                and word_part.overwrites_word.text == word_text]) > 0]
    if len(referred_words) > 0\
       or len(overwritten_word_matches) > 0\
       or len(referred_word_parts) > 0:
        word = None
        if len(referred_words) == 1:
            word = referred_words[0]
        elif len(overwritten_word_matches) > 0:
            # target the overwritten (earlier) word, not the overwriting one
            word = [ word_part.overwrites_word for word_part in overwritten_word_matches[0].word_parts\
                    if word_part.overwrites_word is not None and word_part.overwrites_word.text == word_text][0]
        elif len(referred_word_parts) > 0:
            word = [ word_part for word_part in referred_word_parts[0] if word_part.text == word_text ][0]
        else:
            # several candidate words: prefer an exact text match
            word = [ better_word for better_word in referred_words if better_word.text == word_text][0]
        atypical_match = re.match(ATYPICAL_GROUP, footnote.content)
        correction_match = re.match(EDITOR_CORRECTION_GROUP, footnote.content)
        clarification_match = re.match(CLARIFICATION_GROUP, footnote.content)
        is_uncertain = re.match(UNCERTAINTY_WORD_GROUP, footnote.content) is not None
        if correction_match is not None:
            correction = correction_match.group(3).strip()
            word.editor_comment = EditorCorrection(correction_text=correction, is_uncertain=is_uncertain)
            if not is_uncertain:
                # a certain correction also becomes the word's edited text
                word.edited_text = correction
        elif clarification_match is not None:
            word.editor_comment = Clarification(text=footnote.extract_part(word_text, css_filter='bold;'))
        elif atypical_match is not None:
            text = footnote.extract_part(word_text, css_filter='bold;')\
                    if footnote.markup_contains_css_filter('bold;')\
                    else None
            word.editor_comment = AtypicalWriting(text=text)
        elif is_uncertain:
            word.editor_comment = UncertainDecipherment()
        else:
            comment_match = re.match(COMMENT_GROUP, footnote.content)
            if comment_match is not None:
                is_uncertain = _is_uncertain(footnote)
                # when uncertain, strip the trailing '?' from the comment text
                comment = footnote.content[comment_match.end():-1].strip()\
                        if is_uncertain\
                        else footnote.content[comment_match.end():].strip()
                word.editor_comment = EditorComment(comment=comment, is_uncertain=is_uncertain)
            else:
                warnings.warn(f'Unknown editor comment for word "{word.text}": <{footnote}>')
    elif re.match(r'.*\s.*', word_text):
        # reference is a composition of several words: process each part
        for word_part in word_text.split(' '):
            _process_word_match(page, footnote, line_match, word_part, line_number, parent_word_composition=word_text)
    else:
        warnings.warn(f'No word found with text "{word_text}" on line {line_number}: <{footnote}>')
def usage():
    """Print this script's usage information (main's docstring) to stdout."""
    sys.stdout.write(str(main.__doc__) + '\n')
def main(argv):
    """This program can be used to process the footnotes of a page.
    svgscripts/process_footnotes.py [OPTIONS]
    a xml file about a manuscript, containing information about its pages.
    a xml file about a page, containing information about svg word positions.
    OPTIONS:
    -h|--help show help
    -s|--skip-until=left skip all nodes.get('X') < left
    :return: exit code (int)
    """
    skip_after=-1.0
    try:
        opts, args = getopt.getopt(argv, "hs:", ["help", "skip-until=" ])
    except getopt.GetoptError:
        usage()
        return 2
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage()
            return 0
        elif opt in ('-s', '--skip-until'):
            # extraction skips everything above this position
            skip_after = float(arg)
    if len(args) < 1:
        usage()
        return 2
    exit_status = 0
    file_a = args[0]
    if isfile(file_a):
        # whether file_a is a manuscript file (as opposed to a single page file)
        manuscript_file = file_a\
                if xml_has_type(FILE_TYPE_XML_MANUSCRIPT, xml_source_file=file_a)\
                else None
        counter = 0
        for page in Page.get_pages_from_xml_file(file_a, status_contains=STATUS_MERGED_OK):
            if not UNITTESTING:
                print(Fore.CYAN + f'Processing {page.title}, {page.number} ...' + Style.RESET_ALL)
                # back up the page's xml file before modifying it
                back_up(page, page.xml_file)
            categorize_footnotes(page, skip_after=skip_after, find_content=True)
            counter += 1
        not UNITTESTING and print(Style.RESET_ALL + f'[{counter} pages processed]')
    else:
        raise FileNotFoundError('File {} does not exist!'.format(file_a))
    return exit_status
if __name__ == "__main__":
    # Script entry point: forward the CLI arguments (without the program name).
    sys.exit(main(sys.argv[1:]))
Index: svgscripts/convert_wordPositions.py
===================================================================
--- svgscripts/convert_wordPositions.py (revision 105)
+++ svgscripts/convert_wordPositions.py (revision 106)
@@ -1,737 +1,740 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This program can be used to convert the word positions to HTML for testing purposes.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see 1}}}
import cairosvg
import getopt
import json
from lxml.html import builder as E
from lxml.html import open_in_browser
import lxml
from pathlib import Path as PathLibPath
from os import sep, listdir, mkdir, path, remove
from os.path import exists, isfile, isdir, dirname
import re
import sys
from svgpathtools import svg_to_paths
import xml.etree.ElementTree as ET
if dirname(__file__) not in sys.path:
sys.path.append(dirname(__file__))
from datatypes.matrix import Matrix
from datatypes.page import Page
from datatypes.page_creator import PageCreator
from datatypes.transkriptionField import TranskriptionField
from datatypes.text_field import TextField
from datatypes.writing_process import WritingProcess
from datatypes.word import Word
sys.path.append('shared_util')
from main_util import extract_paths_on_tf, get_paths_near_position
# Module metadata.
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
# Base URL of the exist-db instance storing the manuscript data.
EXIST_DB = 'http://existdb-test.dasch.swiss/exist/rest/db/storage/nietzsche/'
# Local development server serving the faksimile images.
LOCAL_SERVER = 'http://localhost:8000/'
class Converter:
    """The converter super class: prints the page's words as plain text.

    Subclasses override convert() to produce other output formats; they are
    looked up by name through CREATE_CONVERTER.
    """
    def __init__(self, page, non_testing=True, show_word_insertion_mark=False):
        self.page = page
        self.non_testing = non_testing
        self.show_word_insertion_mark = show_word_insertion_mark
    def _get_transkription_positions(self, transkription_positions, stage_version=''):
        """Returns the transkription_positions of the indicated stage_version.

        stage_version may be '' (all), 'N' (exactly stage N), 'N+' (stage N and
        later) or 'N-M' (inclusive range of stages).
        """
        convertable_transkription_positions = transkription_positions
        if stage_version != '':
            convertable_transkription_positions = []
            if re.match(r'^\d$', stage_version):
                # single writing process id
                writing_process_id = int(stage_version)
                for transkription_position in transkription_positions:
                    if transkription_position.writing_process_id == writing_process_id:
                        convertable_transkription_positions.append(transkription_position)
            elif re.match(r'^\d\+$', stage_version):
                # open-ended range 'N+'
                version_range = [ *range(int(stage_version.replace('+','')), len(WritingProcess.VERSION_DESCRIPTION)) ]
                for transkription_position in transkription_positions:
                    if transkription_position.writing_process_id in version_range:
                        convertable_transkription_positions.append(transkription_position)
            elif re.match(r'^\d\-\d$', stage_version):
                # bounded range 'N-M' (inclusive)
                start_stop = [ int(i) for i in re.split(r'-', stage_version) ]
                version_range = [ *range(start_stop[0], start_stop[1]+1) ]
                for transkription_position in transkription_positions:
                    if transkription_position.writing_process_id in version_range:
                        convertable_transkription_positions.append(transkription_position)
        return convertable_transkription_positions
    def _get_words(self, words, highlighted_words=None):
        """Return the words that will be hightlighted.
        """
        return highlighted_words if highlighted_words is not None else words
    def convert(self, output_file=None, stage_version='', highlighted_words=None):
        """Print all words, one line of the manuscript per text line.

        Writes to output_file when given, to stdout otherwise.
        :return: exit code (int)
        """
        first_word_of_line = None
        out = sys.stdout
        if output_file is not None:
            out = open(output_file, 'w')
        try:
            for word in self.page.words:
                if first_word_of_line is None or first_word_of_line.line_number != word.line_number:
                    out.write('\n')
                    first_word_of_line = word
                    # even line numbers are printed as a zero-padded prefix
                    if word.line_number % 2 == 0:
                        out.write(str(word.line_number).zfill(2) + ' ')
                    else:
                        out.write(' ')
                if stage_version == '' or len(self._get_transkription_positions(word.transkription_positions, stage_version=stage_version)) > 0:
                    if word.text is not None:
                        out.write(word.text + ' ')
        finally:
            # BUGFIX: the original unconditionally called out.close(), closing
            # sys.stdout when no output file was given. Only close files we opened.
            if output_file is not None:
                out.close()
        return 0
    @classmethod
    def CREATE_CONVERTER(cls, page, non_testing=True, converter_type='', show_word_insertion_mark=False, key=''):
        """Returns a converter of type converter_type.
        [:return:] SVGConverter for 'SVG', HTMLConverter for 'HTML', Converter for None
        """
        # dispatch on subclass name '<converter_type>Converter'
        cls_dict = { subclass.__name__: subclass for subclass in cls.__subclasses__() }
        cls_key = converter_type + 'Converter'
        if bool(cls_dict.get(cls_key)):
            converter_cls = cls_dict[cls_key]
            if converter_cls == JSONConverter:
                return converter_cls(page, non_testing, key=key)
            return converter_cls(page, non_testing, show_word_insertion_mark)
        else:
            return Converter(page, non_testing, show_word_insertion_mark)
class JSONConverter(Converter):
    """This class can be used to convert a 'svgWordPositions' xml file to a json file.
    """
    # NOTE(review): structure reconstructed from a whitespace-stripped patch at
    # revision 106 state — verify nesting against the repository.
    def __init__(self, page, non_testing=True, key=''):
        Converter.__init__(self, page, non_testing, False)
    def _add_word_to_list(self, words, word, text, text_field=None, edited_text=None, earlier_version=None, overwrites_word=None, parent_id=-1, faksimile_positions=None):
        """Add word to list.

        Appends one dict per transkription position of the word (and, when
        faksimile_positions is a list, one dict per faksimile position), then
        recurses into word.word_parts with this word's id as parent_id.
        """
        # a word part keeps its parent's id
        id = word.id\
                if parent_id == -1\
                else parent_id
        edited_text = word.edited_text\
                if edited_text is None\
                else edited_text
        earlier_version = word.earlier_version\
                if earlier_version is None\
                else earlier_version
        overwrites_word = word.overwrites_word\
                if overwrites_word is None\
                else overwrites_word
        line_number = word.line_number
        for tp in word.transkription_positions:
            tp_id = f'w{word.id}:tp{tp.id}'\
                    if parent_id == -1\
                    else f'w{parent_id}:w{word.id}:tp{tp.id}'
            if text_field is not None:
                # shift coordinates by the text field's offset
                word_dict = { 'id': id, 'text': text, 'left': tp.left + text_field.left, 'top': tp.top + text_field.top,\
                        'width': tp.width, 'height': tp.height, 'line': line_number, 'tp_id': tp_id, 'deleted': word.deleted }
                if tp.transform is not None:
                    matrix = tp.transform.clone_transformation_matrix()
                    xmin = text_field.left
                    ymin = text_field.top
                    matrix.matrix[Matrix.XINDEX] = round(tp.transform.matrix[Matrix.XINDEX] + xmin, 3)
                    matrix.matrix[Matrix.YINDEX] = round(tp.transform.matrix[Matrix.YINDEX] + ymin, 3)
                    word_dict.update({ 'transform': matrix.toString() })
                    if tp.left > 0:
                        word_dict.update({ 'left': round(tp.left - tp.transform.matrix[Matrix.XINDEX], 3)})
                    else:
                        word_dict.update({ 'left': 0})
                    word_dict.update({ 'top': round((tp.height-1.5)*-1, 3)})
            else:
                word_dict = { 'id': id, 'text': text, 'left': tp.left, 'top': tp.top, 'width': tp.width,\
                        'height': tp.height, 'line': line_number, 'tp_id': tp_id, 'deleted': word.deleted }
                if tp.transform is not None:
                    word_dict.update({ 'transform': tp.transform.toString() })
            if edited_text is not None:
                word_dict.update({'edited_text': edited_text})
            if earlier_version is not None:
                word_dict.update({'earlier_version': earlier_version.text })
            if overwrites_word is not None:
                word_dict.update({'overwrites_word': overwrites_word.text })
            if parent_id > -1:
                word_dict.update({'part_text': word.text })
            if len(word.deletion_paths) > 0:
                # one entry per deletion path; copy the dict so each entry
                # carries its own 'deletion_path'
                for dp_index, dp in enumerate(word.deletion_paths):
                    if bool(word_dict.get('deletion_path')):
                        word_dict = word_dict.copy()
                    word_dict.update({'deletion_path': dp.d_attribute})
                    words.append(word_dict)
                if len(word.deletion_paths_near_word) > 0:
                    word_dict.update({'paths_near_word': word.deletion_paths_near_word })
                    words.append(word_dict)
            else:
                words.append(word_dict)
        if faksimile_positions is not None:
            faksimile_dict = {}
            for fp in word.faksimile_positions:
                faksimile_dict = { 'id': id, 'text': text, 'left': fp.left, 'top': fp.top,\
                        'width': fp.width, 'height': fp.height, 'line': line_number, 'fp_id': fp.id, 'deleted': word.deleted }
                if fp.transform is not None:
                    faksimile_dict.update({ 'transform': fp.transform.toString() })
                if len(faksimile_dict) > 0:
                    if edited_text is not None:
                        faksimile_dict.update({'edited_text': edited_text})
                    if earlier_version is not None:
                        faksimile_dict.update({'earlier_version': earlier_version.text })
                    if overwrites_word is not None:
                        faksimile_dict.update({'overwrites_word': overwrites_word.text })
                    if parent_id > -1:
                        faksimile_dict.update({'part_text': word.text })
                    faksimile_positions.append(faksimile_dict)
        # recurse into the word's parts, keeping this word as the parent
        for wp in word.word_parts:
            self._add_word_to_list(words, wp, text, text_field=text_field, edited_text=edited_text,\
                    earlier_version=earlier_version, overwrites_word=overwrites_word, parent_id=word.id, faksimile_positions=faksimile_positions)
    def create_json_dict(self) ->dict:
        """Create and return a json dictionary.
        """
        words = []
        faksimile_positions = []
        text_field = None
        if self.page.svg_image is not None:
            if self.page.svg_image.text_field is None:
                text_field = self.page.svg_image.text_field = TranskriptionField(self.page.svg_image.file_name).convert_to_text_field()
            #self.page.svg_image.decontextualize_file_name(update_url=EXIST_DB)
        for word in self.page.words:
            self._add_word_to_list(words, word, word.text, text_field=text_field, faksimile_positions=faksimile_positions)
        lines = []
        faksimile_lines = []
        # vertical offset of lines when a text field is present
        offset = 0 if text_field is None else text_field.ymin
        svg_image = self.add_object2dict(self.page.svg_image)
        if svg_image is not None:
            svg_image.update({ 'URL': self.page.svg_image.primaryURL })
            svg_image.update({ 'x': self.page.svg_image.text_field.left })
            svg_image.update({ 'y': self.page.svg_image.text_field.top })
        faksimile_image = self.add_object2dict(self.page.faksimile_image)
        if faksimile_image is not None:
            faksimile_image.update({ 'secondaryURL': LOCAL_SERVER + "faksimiles/" + self.page.faksimile_image.file_name })
            faksimile_image.update({ 'x': 0 })
            faksimile_image.update({ 'y': 0 })
        for line in self.page.lines:
            lines.append({ 'id': line.id, 'number': line.id, 'top': line.top + offset, 'bottom': line.bottom })
            faksimile_lines.append({ 'id': line.id, 'number': line.id, 'top': line.faksimile_inner_top, 'bottom': line.faksimile_inner_bottom })
        return { 'title': self.page.title, 'number': self.page.number, 'words': words, 'svg': svg_image, 'lines': lines,\
                'faksimile': faksimile_image, 'faksimile_positions': faksimile_positions, 'faksimile_lines': faksimile_lines }
    def convert(self, output_file=None, stage_version='', highlighted_words=None):
        """Converts Page to JSON.
        """
        if output_file is None:
            output_file = 'output.json'
        json_file = open(output_file, "w+")
        try:
            json.dump(self.create_json_dict(), json_file)
        except Exception:
            raise Exception('Error in json.dump')
        json_file.close()
        return 0
    def add_object2dict(self, object_instance):
        """Add an object to json_dict and generate json data and interfaces.
        [:return:] json dict or object_instance
        """
        json_dict = {}
        object_type = type(object_instance)
        if object_type.__module__ == 'builtins':
            # plain values are returned as-is; lists are converted element-wise
            if object_type != list:
                return object_instance
            else:
                items = []
                for item in object_instance:
                    items.append(self.add_object2dict(item))
                if len(items) > 0:
                    return items
                else:
                    # NOTE(review): self.key is never set in this class's
                    # __init__ — presumably inherited from an older version;
                    # an empty list input would raise AttributeError. Verify.
                    return { self.key: [] }
        # project datatypes expose their serializable properties via a
        # semantic dictionary
        semantic_dictionary = object_type.get_semantic_dictionary()
        for key, content_type in [ (key, content.get('class')) for key, content in semantic_dictionary['properties'].items()]:
            content = object_instance.__dict__.get(key)
            if content_type == list\
               and content is not None\
               and len(content) > 0\
               and type(content[0]).__module__ != 'builtins':
                content_list = []
                for content_item in content:
                    content_list.append(self.add_object2dict(content_item))
                json_dict.update({key: content_list})
            elif content_type.__module__ == 'builtins':
                if content is not None:
                    json_dict.update({key: content})
            else:
                if content is not None and type(content) == list:
                    content_list = []
                    for content_item in content:
                        content_list.append(self.add_object2dict(content_item))
                    json_dict.update({key: content_list})
                else:
                    if content is not None:
                        json_dict.update({key: self.add_object2dict(content)})
        return json_dict
class oldJSONConverter(Converter):
    """This class can be used to convert a 'svgWordPositions' xml file to a json file.
    """
    # Mapping from Python builtin types to their TypeScript equivalents.
    PY2TS_DICT = { float: 'number', int: 'number', bool: 'boolean', str: 'string' }
    def __init__(self, page, non_testing=True, key=''):
        Converter.__init__(self, page, non_testing, False)
        self.key = key
        # directory where the generated TypeScript interfaces are written
        self.interface_output_dir = PathLibPath('ts_interfaces')
        if not self.interface_output_dir.is_dir():
            self.interface_output_dir.mkdir()
        elif len(list(self.interface_output_dir.glob('*.ts'))) > 0:
            # clear stale interfaces from a previous run
            for ts_file in self.interface_output_dir.glob('*.ts'):
                remove(ts_file)
    def convert(self, output_file=None, stage_version='', highlighted_words=None):
        """Converts Page to JSON.

        When self.key is set, only page.<key> is serialized; otherwise the
        whole page. Also writes TypeScript interfaces for the encountered types.
        """
        if output_file is None:
            output_file = 'output.json'
        class_dict = {}
        if self.key != '':
            object_instance = self.page.__dict__.get(self.key)
            if object_instance is not None:
                json_dict = self.add_object2dict(object_instance, class_dict)
                if type(json_dict) == list:
                    json_dict = { self.key : json_dict }
            else:
                print(f'Page initialized from {self.page.page_tree.docinfo.URL} does not have an object at "{self.key}"!')
                return 2
        else:
            json_dict = self.add_object2dict(self.page, class_dict)
        json_file = open(output_file, "w+")
        try:
            json.dump(json_dict, json_file)
        except Exception:
            raise Exception('Error in json.dump')
        json_file.close()
        # write the ts_imports.ts file referencing all generated interfaces
        self.create_imports(class_dict)
        return 0
    def add_object2dict(self, object_instance, class_dict):
        """Add an object to json_dict and generate json data and interfaces.
        [:return:] json dict or object_instance
        """
        json_dict = {}
        interface_list = []
        object_type = type(object_instance)
        if object_type.__module__ == 'builtins':
            # plain values pass through; lists are converted element-wise
            if object_type != list:
                return object_instance
            else:
                items = []
                for item in object_instance:
                    items.append(self.add_object2dict(item, class_dict))
                if len(items) > 0:
                    return { self.key: items }
                else:
                    return { self.key: 'null' }
        semantic_dictionary = object_type.get_semantic_dictionary()
        for key, content_type in [ (key, content.get('class')) for key, content in semantic_dictionary['properties'].items()]:
            content = object_instance.__dict__.get(key)
            if content_type == list\
               and content is not None\
               and len(content) > 0\
               and type(content[0]).__module__ != 'builtins':
                content_list = []
                for content_item in content:
                    content_list.append(self.add_object2dict(content_item, class_dict))
                json_dict.update({key: content_list})
                interface_list.append(f'{key}: {type(content[0]).__name__}[];')
            elif content_type.__module__ == 'builtins':
                if content_type != list:
                    # map the builtin type to its TS name, defaulting to string
                    ts_type = self.PY2TS_DICT[content_type]\
                            if content_type in self.PY2TS_DICT.keys()\
                            else 'string'
                    interface_list.append(f'{key}: {ts_type};')
                    json_dict.update({key: content})
            else:
                if content is not None and type(content) == list:
                    interface_list.append(f'{key}: {content_type.__name__}[];')
                    content_list = []
                    for content_item in content:
                        content_list.append(self.add_object2dict(content_item, class_dict))
                    json_dict.update({key: content_list})
                else:
                    interface_list.append(f'{key}: {content_type.__name__};')
                    if content is not None:
                        json_dict.update({key: self.add_object2dict(content, class_dict)})
        # generate the TS interface for this type once
        if object_type not in class_dict.keys():
            class_dict.update({object_type: self.create_interface(object_type.__name__, interface_list)})
        return json_dict
    def create_imports(self, class_dict):
        """Create an ts interface from a list of key and content_types.
        [:return:] file_name of interface
        """
        ts_file = PathLibPath('ts_imports.ts')
        file = open(ts_file, "w+")
        file.write(f'//import all interfaces from {self.interface_output_dir} ' + '\n')
        for interface_name, path_name in class_dict.items() :
            file.write('import {' + interface_name.__name__ + '} from \'./' + str(self.interface_output_dir.joinpath(path_name.stem)) + '\';\n')
        file.close()
        return ts_file
    def create_interface(self, class_name, interface_list) -> PathLibPath:
        """Create an ts interface from a list of key and content_types.
        [:return:] file_name of interface
        """
        ts_file = self.interface_output_dir.joinpath(PathLibPath(f'{class_name.lower()}.ts'))
        # non-builtin field types need their own import lines
        import_list = [ import_class_name for import_class_name in\
                [ import_class_name.split(': ')[1].replace(';','').replace('[]','') for import_class_name in interface_list ]\
                if import_class_name not in set(self.PY2TS_DICT.values()) ]
        file = open(ts_file, "w")
        for import_class_name in set(import_list):
            file.write('import {' + import_class_name + '} from \'./' + import_class_name.lower() + '\';\n')
        file.write(f'export interface {class_name} ' + '{\n')
        for interace_string in interface_list:
            file.write(f'\t' + interace_string + '\n')
        file.write('}')
        file.close()
        return ts_file
class SVGConverter(Converter):
    """This class can be used to convert a 'svgWordPositions' xml file to a svg file that combines text as path and text-as-text.
    """
    BG_COLOR = 'yellow'  # default highlight color
    OPACITY = '0.2'      # default highlight opacity
    def __init__(self, page, non_testing=True, show_word_insertion_mark=False, bg_color=BG_COLOR, opacity=OPACITY):
        Converter.__init__(self, page, non_testing, show_word_insertion_mark)
        self.bg_color = bg_color
        self.opacity = opacity
    def convert(self, output_file=None, stage_version='', highlighted_words=None):
        """Converts Page to SVG

        Adds a 'Transkription' group of highlight rects (one per transkription
        position) on top of the page's svg file and writes it to output_file.
        """
        title = self.page.title if(self.page.title is not None) else 'Test Page'
        title = '{}, S. {}'.format(title, self.page.number) if (self.page.number is not None) else title
        svg_file = self.page.svg_file
        if svg_file is None and self.page.svg_image is not None:
            svg_file = self.page.svg_image.file_name
        elif svg_file is None:
            msg = f'ERROR: xml_source_file {self.page.docinfo.URL} does neither have a svg_file nor a svg_image!'
            raise Exception(msg)
        transkription_field = TranskriptionField(svg_file)
        # preserve the original namespaces of the svg file
        if bool(transkription_field.get_svg_attributes('xmlns')):
            ET.register_namespace('', transkription_field.get_svg_attributes('xmlns'))
        if bool(transkription_field.get_svg_attributes('xmlns:xlink')):
            ET.register_namespace('xlink', transkription_field.get_svg_attributes('xmlns:xlink'))
        svg_tree = ET.parse(svg_file)
        transkription_node = ET.SubElement(svg_tree.getroot(), 'g', attrib={'id': 'Transkription'})
        # alternate two colors per word unless highlighting specific words
        colors = [ 'yellow', 'orange' ] if self.bg_color == self.BG_COLOR else [ self.bg_color ]
        if highlighted_words is not None:
            colors = ['yellow']
        else:
            highlighted_words = []
        color_index = 0
        for word in self.page.words:
            word_id = 'word_' + str(word.id)
            for transkription_position in self._get_transkription_positions(word.transkription_positions, stage_version=stage_version):
                transkription_position_id = word_id + '_' + str(transkription_position.id)
                color = colors[color_index] if word not in highlighted_words else self.bg_color
                rect_node = ET.SubElement(transkription_node, 'rect',\
                        attrib={'id': transkription_position_id, 'x': str(transkription_position.left + transkription_field.xmin),\
                        'y': str(transkription_position.top + transkription_field.ymin), 'width': str(transkription_position.width),\
                        'height': str(transkription_position.height), 'fill': color, 'opacity': self.opacity})
                if transkription_position.transform is not None:
                    # shift the transform into absolute page coordinates
                    matrix = transkription_position.transform.clone_transformation_matrix()
                    matrix.matrix[Matrix.XINDEX] = round(transkription_position.transform.matrix[Matrix.XINDEX] + transkription_field.xmin, 3)
                    matrix.matrix[Matrix.YINDEX] = round(transkription_position.transform.matrix[Matrix.YINDEX] + transkription_field.ymin, 3)
                    rect_node.set('transform', matrix.toString())
                    rect_node.set('x', str(round(transkription_position.left - transkription_position.transform.matrix[Matrix.XINDEX], 3)))
                    rect_node.set('y', str(round((transkription_position.height-1.5)*-1, 3)))
                # hovering a rect shows the word's text
                ET.SubElement(rect_node, 'title').text = word.text
            color_index = (color_index + 1) % len(colors)
        if output_file is not None:
            svg_tree.write(output_file)
        return 0
class HTMLConverter(Converter):
    """This class can be used to convert a 'svgWordPositions' xml file to a test HTML file.
    """
    CSS = """ .highlight0 { background-color: yellow; opacity: 0.2; }
        .highlight1 { background-color: pink; opacity: 0.2; }
        .highlight2 { background-color: red; opacity: 0.2; }
        .foreign { background-color: blue; opacity: 0.4; }
        .overwritten { background-color: green; opacity: 0.4; }
        .word-insertion-mark { background-color: orange; opacity: 0.2; }
        .deleted { background-color: grey; opacity: 0.2; }
        """
    def __init__(self, page, non_testing=True, show_word_insertion_mark=False):
        Converter.__init__(self, page, non_testing, show_word_insertion_mark)
        self.text_field = TextField()
    def convert(self, output_file=None, stage_version='', highlighted_words=None):
        """Converts Page to HTML

        Builds an absolutely-positioned <a> element per transkription position
        over the page's svg as background image; opens the result in a browser
        when non_testing is truthy and writes it to output_file when given.
        """
        title = self.page.title if(self.page.title is not None) else 'Test Page'
        title = '{}, S. {}'.format(title, self.page.number) if (self.page.number is not None) else title
        if stage_version != '':
            title = title + ', Schreibstufe: ' + stage_version
        if self.page.svg_image is not None:
            width = self.page.svg_image.width
            height = self.page.svg_image.height
            svg_file = self.page.svg_image.file_name
            if self.page.svg_image.text_field is not None:
                # positions are relative to the text field -> adjust in _append2transkription
                self.text_field = self.page.svg_image.text_field
                print('Textfield found ->adjusting data')
        elif self.page.svg_file is not None:
            svg_file = self.page.svg_file
            transkription_field = TranskriptionField(svg_file)
            width = transkription_field.getWidth()
            height = transkription_field.getHeight()
        style_content = ' position: relative; width: {}px; height: {}px; background-image: url("{}"); background-size: {}px {}px '\
                .format(width, height, path.abspath(svg_file), width, height)
        style = E.STYLE('#transkription {' + style_content + '}', HTMLConverter.CSS)
        head = E.HEAD(E.TITLE(title),E.META(charset='UTF-8'), style)
        transkription = E.DIV(id="transkription")
        counter = 0
        for word in self.page.words:
            highlight_class = 'highlight' + str(counter)\
                    if not word.deleted else 'deleted'
            if highlighted_words is not None\
               and word in highlighted_words:
                highlight_class = 'highlight2'
            # tooltip shows earlier version (stage 0) and current text (stage 1)
            earlier_text = '' if word.earlier_version is None else word.earlier_version.text
            if earlier_text == '' and len(word.word_parts) > 0:
                earlier_versions = [ word for word in word.word_parts if word.earlier_version is not None ]
                earlier_text = earlier_versions[0].text if len(earlier_versions) > 0 else ''
            if earlier_text != '':
                word_title = 'id: {}/line: {}\n0: {}\n1: {}'.format(str(word.id), str(word.line_number), earlier_text, word.text)
            else:
                word_title = 'id: {}/line: {}\n{}'.format(str(word.id), str(word.line_number), word.text)
            if word.edited_text is not None:
                word_title += f'\n>{word.edited_text}'
            for transkription_position in self._get_transkription_positions(word.transkription_positions, stage_version=stage_version):
                self._append2transkription(transkription, highlight_class, word_title, transkription_position)
            if word.overwrites_word is not None:
                overwritten_title = f'{word.text} overwrites {word.overwrites_word.text}'
                for overwritten_transkription_position in word.overwrites_word.transkription_positions:
                    self._append2transkription(transkription, 'overwritten', overwritten_title, overwritten_transkription_position)
            for part_word in word.word_parts:
                highlight_class = 'highlight' + str(counter)\
                        if not part_word.deleted else 'deleted'
                for part_transkription_position in self._get_transkription_positions(part_word.transkription_positions, stage_version=stage_version):
                    self._append2transkription(transkription, highlight_class, word_title, part_transkription_position)
                if part_word.overwrites_word is not None:
                    overwritten_title = f'{word.text} overwrites {part_word.overwrites_word.text}'
                    for overwritten_transkription_position in part_word.overwrites_word.transkription_positions:
                        self._append2transkription(transkription, 'overwritten', overwritten_title, overwritten_transkription_position)
            counter = (counter + 1) % 2
        word_insertion_mark_class = 'word-insertion-mark'
        counter = 0
        for mark_foreign_hands in self.page.mark_foreign_hands:
            highlight_class = 'foreign'
            title = 'id: {}/line: {}\n{} {}'.format(str(mark_foreign_hands.id), str(mark_foreign_hands.line_number),\
                    mark_foreign_hands.foreign_hands_text, mark_foreign_hands.pen)
            for transkription_position in mark_foreign_hands.transkription_positions:
                self._append2transkription(transkription, highlight_class, title, transkription_position)
        if self.show_word_insertion_mark:
            for word_insertion_mark in self.page.word_insertion_marks:
                wim_title = 'id: {}/line: {}\nword insertion mark'.format(str(word_insertion_mark.id), str(word_insertion_mark.line_number))
                style_content = 'position:absolute; top:{0}px; left:{1}px; width:{2}px; height:{3}px;'.format(\
                        word_insertion_mark.top, word_insertion_mark.left, word_insertion_mark.width, word_insertion_mark.height)
                link = E.A(' ', E.CLASS(word_insertion_mark_class), title=wim_title, style=style_content)
                transkription.append(link)
        html = E.HTML(head,E.BODY(transkription))
        bool(self.non_testing) and open_in_browser(html)
        if output_file is not None:
            with open(output_file, 'wb') as f:
                f.write(lxml.html.tostring(html, pretty_print=True, include_meta_content_type=True, encoding='utf-8'))
                f.closed  # NOTE(review): no-op attribute access; the with-block already closes f
        return 0
    def _append2transkription(self, transkription, highlight_class, title, transkription_position):
        """Append content to transkription-div.
        """
        # positions are shifted by the text field's origin
        style_content = 'position:absolute; top:{0}px; left:{1}px; width:{2}px; height:{3}px;'.format(\
                transkription_position.top - self.text_field.top, transkription_position.left - self.text_field.left, transkription_position.width, transkription_position.height)
        if transkription_position.transform is not None:
            style_content = style_content + ' transform: {}; '.format(transkription_position.transform.toCSSTransformString())
            transform_origin_x = (transkription_position.left-round(transkription_position.transform.getX(), 1))*-1\
                    if (transkription_position.left-round(transkription_position.transform.getX(), 1))*-1 < 0 else 0
            style_content = style_content + ' transform-origin: {}px {}px; '.format(transform_origin_x, transkription_position.height)
        link = E.A(' ', E.CLASS(highlight_class), title=title, style=style_content)
        transkription.append(link)
def create_pdf_with_highlighted_words(xml_source_file=None, page=None, highlighted_words=None, pdf_file_name='output.pdf', bg_color=SVGConverter.BG_COLOR):
"""Creates a pdf file highlighting some words.
"""
if not pdf_file_name.endswith('pdf'):
pdf_file_name = pdf_file_name + '.pdf'
tmp_svg_file = pdf_file_name.replace('.pdf', '.svg')
create_svg_with_highlighted_words(xml_source_file=xml_source_file, page=page, highlighted_words=highlighted_words,\
svg_file_name=tmp_svg_file, bg_color=bg_color)
if isfile(tmp_svg_file):
cairosvg.svg2pdf(url=tmp_svg_file, write_to=pdf_file_name)
remove(tmp_svg_file)
def create_svg_with_highlighted_words(xml_source_file=None, page=None, highlighted_words=None, svg_file_name='output.svg', bg_color=SVGConverter.BG_COLOR):
"""Creates a svg file highlighting some words.
"""
if page is None and xml_source_file is not None:
page = Page(xml_source_file)
converter = SVGConverter(page, bg_color=bg_color)
if not svg_file_name.endswith('svg'):
svg_file_name = svg_file_name + '.svg'
converter.convert(output_file=svg_file_name, highlighted_words=highlighted_words)
def usage():
"""prints information on how to use the script
"""
print(main.__doc__)
def main(argv):
"""This program can be used to convert the word positions to HTML, SVG or TEXT for testing purposes.
svgscripts/convert_wordPositions.py OPTIONS
OPTIONS:
-h|--help: show help
-H|--HTML [default] convert to HTML test file
-k|--key=key option for json converter:
only convert object == page.__dict__[key]
-o|--output=outputFile save output to file outputFile
-P|--PDF convert to PDF test file
-S|--SVG convert to SVG test file
-s|--svg=svgFile: svg web file
-T|--TEXT convert to TEXT output
-t|--text=text highlight word
-w|--word-insertion-mark show word insertion mark on HTML
-v|--version=VERSION show words that belong to writing process VERSION: { 0, 1, 2, 0-1, 0+, etc. }
-x|--testing execute in test mode, do not write to file or open browser
:return: exit code (int)
"""
convert_to_type = None
key = ''
non_testing = True
output_file = None
page = None
show_word_insertion_mark = False
stage_version = ''
svg_file = None
text = None
try:
opts, args = getopt.getopt(argv, "hk:t:HPSTws:o:v:x", ["help", "key=", "text=", "HTML", "PDF", "SVG", "TEXT", "word-insertion-mark", "svg=", "output=", "version=", "testing"])
except getopt.GetoptError:
usage()
return 2
for opt, arg in opts:
if opt in ('-h', '--help') or not args:
usage()
return 0
elif opt in ('-v', '--version'):
if re.match(r'^(\d|\d\+|\d\-\d)$', arg):
stage_version = arg
else:
raise ValueError('OPTION -v|--version=VERSION does not work with "{}" as value for VERSION!'.format(arg))
elif opt in ('-w', '--word-insertion-mark'):
show_word_insertion_mark = True
elif opt in ('-P', '--PDF'):
convert_to_type = 'PDF'
elif opt in ('-S', '--SVG'):
convert_to_type = 'SVG'
elif opt in ('-T', '--TEXT'):
convert_to_type = 'TEXT'
elif opt in ('-H', '--HTML'):
convert_to_type = 'HTML'
elif opt in ('-x', '--testing'):
non_testing = False
elif opt in ('-s', '--svg'):
svg_file = arg
elif opt in ('-o', '--output'):
output_file = arg
elif opt in ('-k', '--key'):
key = arg
elif opt in ('-t', '--text'):
text = arg
print(arg)
if len(args) < 1:
usage()
return 2
if convert_to_type is None:
if output_file is not None and len(re.split(r'\.', output_file)) > 1:
output_file_part_list = re.split(r'\.', output_file)
convert_to_type = output_file_part_list[len(output_file_part_list)-1].upper()
else:
convert_to_type = 'HTML'
exit_code = 0
for word_position_file in args:
if not isfile(word_position_file):
print("'{}' does not exist!".format(word_position_file))
return 2
if convert_to_type == 'PDF':
if output_file is None:
output_file = 'output.pdf'
highlighted_words = None
if text is not None:
page = Page(word_position_file)
highlighted_words = [ word for word in page.words if word.text == text ]
create_pdf_with_highlighted_words(word_position_file, pdf_file_name=output_file, highlighted_words=highlighted_words)
else:
if svg_file is not None:
if isfile(svg_file):
page = PageCreator(word_position_file, svg_file=svg_file)
else:
print("'{}' does not exist!".format(word_position_file))
return 2
else:
page = Page(word_position_file)
if page.svg_file is None:
print('Please specify a svg file!')
usage()
return 2
highlighted_words = None
if text is not None:
highlighted_words = [ word for word in page.words if word.text == text ]
print([ (word.id, word.text) for word in highlighted_words ])
converter = Converter.CREATE_CONVERTER(page, non_testing=non_testing, converter_type=convert_to_type, show_word_insertion_mark=show_word_insertion_mark, key=key)
exit_code = converter.convert(output_file=output_file, stage_version=stage_version, highlighted_words=highlighted_words)
return exit_code
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
Index: svgscripts/datatypes/reconstructed_konvolut.py
===================================================================
--- svgscripts/datatypes/reconstructed_konvolut.py (revision 0)
+++ svgscripts/datatypes/reconstructed_konvolut.py (revision 106)
@@ -0,0 +1,153 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+""" This class can be used to represent a reconstruction of an original manuscript (e.g. a workbook or notebook).
+"""
+# Copyright (C) University of Basel 2019 {{{1
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/> 1}}}
+
+__author__ = "Christian Steiner"
+__maintainer__ = __author__
+__copyright__ = 'University of Basel'
+__email__ = "christian.steiner@unibas.ch"
+__status__ = "Development"
+__license__ = "GPL v3"
+__version__ = "0.0.1"
+
+import abc
+from lxml import etree as ET
+from os.path import isfile
+import requests
+import sys
+
+from .description import Description
+from .faksimile_image import FaksimileImage
+from .manuscript import ManuscriptUnity
+from .page import Page, FILE_TYPE_XML_MANUSCRIPT, FILE_TYPE_SVG_WORD_POSITION
+
+
+sys.path.append('shared_util')
+from myxmlwriter import parse_xml_of_type, write_pretty, xml_has_type
+
+class NonExistentPage(Page):
+ """This class represents a page that does not exist as part of the KGW edition.
+ @label non existent page
+
+ """
+ NIETZSCHE_SOURCES_URL = 'http://www.nietzschesource.org/DFGAapi/api/fe/facsimile/'
+ def __init__(self, number=None, faksimile_image=None, status=None):
+ self.number = number
+ self.status = status
+ self.faksimile_image = faksimile_image
+
+ @classmethod
+ def create_cls(cls, page_node, faksimile_image=None):
+ """
+ Create an instance of NonExistentPage from a page_node
+
+ :return: NonExistentPage
+ """
+ number = page_node.get('title') + '_' + page_node.get('number')\
+ if bool(page_node.get('title'))\
+ else page_node.get('number')
+ return cls(number=number, status=page_node.get('status'), faksimile_image=faksimile_image)
+
+ def get_name_and_id(self):
+ """Return an identification for object as 2-tuple.
+ """
+ return type(self).__name__, self.number.replace(' ', '_')
+
+ @classmethod
+ def get_semantic_dictionary(cls):
+ """ Creates and returns a semantic dictionary as specified by SemanticClass.
+ """
+ dictionary = super(NonExistentPage,cls).get_semantic_dictionary()
+ dictionary[cls.PROPERTIES_KEY].update(cls.create_semantic_property_dictionary('status', str))
+ return cls.return_dictionary_after_updating_super_classes(dictionary)
+
+class ReconstructedKonvolut(ManuscriptUnity):
+ """
+ This class represents a reconstruction of an original manuscript (e.g. a workbook or notebook).
+ @label reconstruction of an original manuscript
+
+ Args:
+ title title for identification of the reconstruction
+ manuscript_type type of manuscript: 'Arbeitsheft' or 'Notizheft'
+ manuscript_tree lxml.ElementTree
+ """
+ XML_TAG = 'reconstructed-konvolut'
+ TYPE_DICTIONARY = { 'R_n': 'Notizheft', 'R_w': 'Arbeitsheft' }
+ UNITTESTING = False
+
+ def __init__(self, title='', manuscript_type='', manuscript_tree=None):
+ super(ReconstructedKonvolut,self).__init__(title=title, manuscript_type=manuscript_type,manuscript_tree=manuscript_tree)
+
+ @classmethod
+ def create_cls(cls, xml_manuscript_file, page_status_list=None, page_xpath=''):
+ """Create an instance of ReconstructedKonvolut from a xml file of type FILE_TYPE_XML_MANUSCRIPT.
+
+ :return: ReconstructedKonvolut
+ """
+ manuscript = super(ReconstructedKonvolut,cls).create_cls(xml_manuscript_file)
+ manuscript_tree = manuscript.manuscript_tree
+ if page_xpath == '':
+ page_status = ''
+ if page_status_list is not None\
+ and type(page_status_list) is list\
+ and len(page_status_list) > 0:
+ page_status = '[' + ' and '.join([ f'contains(@status, "{status}")' for status in page_status_list ]) + ']'
+ page_xpath = f'//pages/page{page_status}/@output'
+ included_page_list = [ page_source\
+ for page_source in manuscript_tree.xpath(page_xpath)\
+ if isfile(page_source) and xml_has_type(FILE_TYPE_SVG_WORD_POSITION, xml_source_file=page_source) ]
+ for page_node in manuscript_tree.xpath('//pages/page'):
+ if bool(page_node.get('output'))\
+ and isfile(page_node.get('output'))\
+ and xml_has_type(FILE_TYPE_SVG_WORD_POSITION, xml_source_file=page_node.get('output')):
+ manuscript.pages.append(Page.create_cls(\
+ page_node.get('output'), create_dummy_page=(page_node.get('output') not in included_page_list)))
+ else:
+ faksimile_image = get_or_update_faksimile(xml_manuscript_file, page_node)
+ manuscript.pages.append(NonExistentPage.create_cls(page_node, faksimile_image))
+ manuscript.description = Description.create_cls_from_node(manuscript_tree.xpath(Description.XML_TAG)[0])\
+ if len(manuscript_tree.xpath(Description.XML_TAG)) > 0\
+ else None
+ return manuscript
+
+def get_or_update_faksimile(xml_source_file, page_node) ->FaksimileImage:
+ """Return the faksimile image of the non existent page.
+ """
+ faksimile_image = None
+ if len(page_node.xpath(f'./{FaksimileImage.XML_TAG}')) > 0:
+ faksimile_image = FaksimileImage(node=page_node.xpath(f'./{FaksimileImage.XML_TAG}')[0])
+ elif bool(page_node.get('alias')):
+ url = NonExistentPage.NIETZSCHE_SOURCES_URL + page_node.get('alias')
+ faksimile_dict = None
+ try:
+ r = requests.get(url)
+ faksimile_dict = r.json()
+ except Exception:
+ print(f'URL does not work: {url}')
+ if faksimile_dict is not None and len(faksimile_dict) > 0:
+ width = faksimile_dict['imageWidth']
+ height = faksimile_dict['imageHeight']
+ file_name = page_node.get('alias') + '.jpg'
+ URL = FaksimileImage.NIETZSCHE_SOURCES_URL + page_node.get('alias')
+ faksimile_image = FaksimileImage(file_name=file_name, URL=URL, height=height, width=width)
+ faksimile_image.attach_object_to_tree(page_node)
+ write_pretty(xml_element_tree=page_node.getroottree(), file_name=xml_source_file, script_name=__file__,\
+ file_type=FILE_TYPE_XML_MANUSCRIPT, backup=True)
+ return faksimile_image
+
Index: svgscripts/datatypes/manuscript.py
===================================================================
--- svgscripts/datatypes/manuscript.py (revision 105)
+++ svgscripts/datatypes/manuscript.py (revision 106)
@@ -1,164 +1,99 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This class can be used to represent an archival unity of manuscript pages, i.e. workbooks, notebooks, folders of handwritten pages.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see 1}}}
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
from lxml import etree as ET
from os.path import isfile
import sys
from .color import Color
from .description import Description
from .earlier_description import EarlierDescription
from .page import Page, FILE_TYPE_XML_MANUSCRIPT, FILE_TYPE_SVG_WORD_POSITION
sys.path.append('py2ttl')
from class_spec import SemanticClass
sys.path.append('shared_util')
from myxmlwriter import parse_xml_of_type, write_pretty, xml_has_type
-class ArchivalManuscriptUnity(SemanticClass):
+class ManuscriptUnity(SemanticClass):
"""
- This class represents an archival unity of manuscript pages (workbooks, notebooks and portfolios of handwritten pages).
- @label archival unity of manuscript pages
+ This class represents a unity of manuscript pages, i.e. handwritten pages that are somehow united.
+ @label unity of manuscript pages
Args:
- title title of archival unity
- manuscript_type type of manuscript: 'Arbeitsheft', 'Notizheft', 'Mappe'
+ title title of unity
manuscript_tree lxml.ElementTree
"""
XML_TAG = 'manuscript'
- XML_COLORS_TAG = 'colors'
TYPE_DICTIONARY = { 'Mp': 'Mappe', 'N': 'Notizheft', 'W': 'Arbeitsheft' }
UNITTESTING = False
- def __init__(self, title='', manuscript_type='', manuscript_tree=None):
- self.colors = []
- self.earlier_descriptions = []
+ def __init__(self, title='', manuscript_type='',manuscript_tree=None):
self.description = None
self.manuscript_tree = manuscript_tree
self.manuscript_type = manuscript_type
self.pages = []
- self.styles = []
self.title = title
if self.manuscript_type == '' and self.title != ''\
- and self.title.split(' ')[0] in self.TYPE_DICTIONARY.keys():
+ and self.title.split(' ')[0] in self.TYPE_DICTIONARY.keys():
self.manuscript_type = self.TYPE_DICTIONARY[self.title.split(' ')[0]]
+
def get_name_and_id(self):
"""Return an identification for object as 2-tuple.
"""
return '', self.title.replace(' ', '_')
@classmethod
- def create_cls(cls, xml_manuscript_file, page_status_list=None, page_xpath='', update_page_styles=False):
- """Create an instance of ArchivalManuscriptUnity from a xml file of type FILE_TYPE_XML_MANUSCRIPT.
+ def create_cls(cls, xml_manuscript_file):
+ """Create an instance of ManuscriptUnity from a xml file of type FILE_TYPE_XML_MANUSCRIPT.
- :return: ArchivalManuscriptUnity
+ :return: ManuscriptUnity
"""
manuscript_tree = parse_xml_of_type(xml_manuscript_file, FILE_TYPE_XML_MANUSCRIPT)
title = manuscript_tree.getroot().get('title') if bool(manuscript_tree.getroot().get('title')) else ''
manuscript_type = manuscript_tree.getroot().get('type') if bool(manuscript_tree.getroot().get('type')) else ''
- manuscript = cls(title=title, manuscript_type=manuscript_type, manuscript_tree=manuscript_tree)
- manuscript.colors = [ Color.create_cls(node=color_node) for color_node in manuscript_tree.xpath('.//' + cls.XML_COLORS_TAG + '/' + Color.XML_TAG) ]
- if page_xpath == '':
- page_status = ''
- if page_status_list is not None\
- and type(page_status_list) is list\
- and len(page_status_list) > 0:
- page_status = '[' + ' and '.join([ f'contains(@status, "{status}")' for status in page_status_list ]) + ']'
- page_xpath = f'//pages/page{page_status}/@output'
- included_page_list = [ page_source\
- for page_source in manuscript_tree.xpath(page_xpath)\
- if isfile(page_source) and xml_has_type(FILE_TYPE_SVG_WORD_POSITION, xml_source_file=page_source) ]
- manuscript.pages = [ Page.create_cls(page_source, create_dummy_page=(page_source not in included_page_list))\
- for page_source in manuscript_tree.xpath('//pages/page/@output')\
- if isfile(page_source) and xml_has_type(FILE_TYPE_SVG_WORD_POSITION, xml_source_file=page_source) ]
- if update_page_styles:
- for page in manuscript.pages:
- if 'xml_file' in page.__dict__.keys():
- page.update_styles(manuscript=manuscript, add_to_parents=True, create_css=True)
- description_node = manuscript_tree.xpath(Description.XML_TAG)[0]\
- if len(manuscript_tree.xpath(Description.XML_TAG)) > 0\
- else None
- if description_node is not None:
- manuscript.description = Description.create_cls_from_node(description_node.xpath(Description.ROOT_TAG)[0])\
- if len(description_node.xpath(Description.ROOT_TAG)) > 0\
- else None
- for earlier_description_node in description_node.xpath(EarlierDescription.ROOT_TAG):
- earlier_description = EarlierDescription.create_cls_from_node(earlier_description_node)
- if earlier_description is not None:
- manuscript.earlier_descriptions.append(earlier_description)
- return manuscript
-
- def get_color(self, hex_color) -> Color:
- """Return color if it exists or None.
- """
- if hex_color in [ color.hex_color for color in self.colors ]:
- return [ color for color in self.colors if color.hex_color == hex_color ][0]
- return None
+ return cls(title=title, manuscript_type=manuscript_type, manuscript_tree=manuscript_tree)
@classmethod
def get_semantic_dictionary(cls):
""" Creates a semantic dictionary as specified by SemanticClass.
"""
dictionary = {}
class_dict = cls.get_class_dictionary()
properties = {}
properties.update(cls.create_semantic_property_dictionary('title', str, 1))
properties.update(cls.create_semantic_property_dictionary('manuscript_type', str, 1))
- properties.update(cls.create_semantic_property_dictionary('styles', list))
properties.update(cls.create_semantic_property_dictionary('pages', list))
properties.update(cls.create_semantic_property_dictionary('description', Description))
- properties.update(cls.create_semantic_property_dictionary('earlier_descriptions', EarlierDescription))
dictionary.update({cls.CLASS_KEY: class_dict})
dictionary.update({cls.PROPERTIES_KEY: properties})
return cls.return_dictionary_after_updating_super_classes(dictionary)
- def update_colors(self, color):
- """Update manuscript colors if color is not contained.
- """
- if self.get_color(color.hex_color) is None:
- self.colors.append(color)
- if self.manuscript_tree is not None:
- if len(self.manuscript_tree.xpath('.//' + self.XML_COLORS_TAG)) > 0:
- self.manuscript_tree.xpath('.//' + self.XML_COLORS_TAG)[0].getparent().remove(self.manuscript_tree.xpath('.//' + self.XML_COLORS_TAG)[0])
- colors_node = ET.SubElement(self.manuscript_tree.getroot(), self.XML_COLORS_TAG)
- for color in self.colors:
- color.attach_object_to_tree(colors_node)
- if not self.UNITTESTING:
- write_pretty(xml_element_tree=self.manuscript_tree, file_name=self.manuscript_tree.docinfo.URL,\
- script_name=__file__, backup=True,\
- file_type=FILE_TYPE_XML_MANUSCRIPT)
- def update_styles(self, *styles):
- """Update manuscript styles.
- """
- for style in styles:
- if style not in self.styles:
- #print(style.css_styles)
- self.styles.append(style)
Index: svgscripts/datatypes/archival_manuscript.py
===================================================================
--- svgscripts/datatypes/archival_manuscript.py (revision 0)
+++ svgscripts/datatypes/archival_manuscript.py (revision 106)
@@ -0,0 +1,151 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+""" This class can be used to represent an archival unity of manuscript pages, i.e. workbooks, notebooks, folders of handwritten pages.
+"""
+# Copyright (C) University of Basel 2019 {{{1
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/> 1}}}
+
+__author__ = "Christian Steiner"
+__maintainer__ = __author__
+__copyright__ = 'University of Basel'
+__email__ = "christian.steiner@unibas.ch"
+__status__ = "Development"
+__license__ = "GPL v3"
+__version__ = "0.0.1"
+
+from lxml import etree as ET
+from os.path import isfile
+import sys
+
+from .color import Color
+from .description import Description
+from .earlier_description import EarlierDescription
+from .manuscript import ManuscriptUnity
+from .page import Page, FILE_TYPE_XML_MANUSCRIPT, FILE_TYPE_SVG_WORD_POSITION
+from .reconstructed_konvolut import ReconstructedKonvolut
+
+
+sys.path.append('py2ttl')
+from class_spec import SemanticClass
+
+sys.path.append('shared_util')
+from myxmlwriter import parse_xml_of_type, write_pretty, xml_has_type
+
+
+class ArchivalManuscriptUnity(ManuscriptUnity):
+ """
+ This class represents an archival unity of manuscript pages (workbooks, notebooks and portfolios of handwritten pages).
+ @label archival unity of manuscript pages
+
+ Args:
+ title title of archival unity
+ manuscript_type type of manuscript: 'Arbeitsheft', 'Notizheft', 'Mappe'
+ manuscript_tree lxml.ElementTree
+ """
+ XML_TAG = 'manuscript'
+ XML_COLORS_TAG = 'colors'
+ UNITTESTING = False
+
+ def __init__(self, title='', manuscript_type='', manuscript_tree=None):
+ super(ArchivalManuscriptUnity,self).__init__(title=title, manuscript_type=manuscript_type,manuscript_tree=manuscript_tree)
+ self.colors = []
+ self.earlier_descriptions = []
+ self.reconstructed_konvoluts = []
+ self.styles = []
+
+ @classmethod
+ def create_cls(cls, xml_manuscript_file, page_status_list=None, page_xpath='', update_page_styles=False):
+ """Create an instance of ArchivalManuscriptUnity from a xml file of type FILE_TYPE_XML_MANUSCRIPT.
+
+ :return: ArchivalManuscriptUnity
+ """
+ manuscript = super(ArchivalManuscriptUnity,cls).create_cls(xml_manuscript_file)
+ manuscript_tree = manuscript.manuscript_tree
+ manuscript.colors = [ Color.create_cls(node=color_node) for color_node in manuscript_tree.xpath('.//' + cls.XML_COLORS_TAG + '/' + Color.XML_TAG) ]
+ if page_xpath == '':
+ page_status = ''
+ if page_status_list is not None\
+ and type(page_status_list) is list\
+ and len(page_status_list) > 0:
+ page_status = '[' + ' and '.join([ f'contains(@status, "{status}")' for status in page_status_list ]) + ']'
+ page_xpath = f'//pages/page{page_status}/@output'
+ included_page_list = [ page_source\
+ for page_source in manuscript_tree.xpath(page_xpath)\
+ if isfile(page_source) and xml_has_type(FILE_TYPE_SVG_WORD_POSITION, xml_source_file=page_source) ]
+ manuscript.pages = [ Page.create_cls(page_source, create_dummy_page=(page_source not in included_page_list))\
+ for page_source in manuscript_tree.xpath('//pages/page/@output')\
+ if isfile(page_source) and xml_has_type(FILE_TYPE_SVG_WORD_POSITION, xml_source_file=page_source) ]
+ if update_page_styles:
+ for page in manuscript.pages:
+ if 'xml_file' in page.__dict__.keys():
+ page.update_styles(manuscript=manuscript, add_to_parents=True, create_css=True)
+ description_node = manuscript_tree.xpath(Description.XML_TAG)[0]\
+ if len(manuscript_tree.xpath(Description.XML_TAG)) > 0\
+ else None
+ if description_node is not None:
+ manuscript.description = Description.create_cls_from_node(description_node.xpath(Description.ROOT_TAG)[0])\
+ if len(description_node.xpath(Description.ROOT_TAG)) > 0\
+ else None
+ for earlier_description_node in description_node.xpath(EarlierDescription.ROOT_TAG):
+ earlier_description = EarlierDescription.create_cls_from_node(earlier_description_node)
+ if earlier_description is not None:
+ manuscript.earlier_descriptions.append(earlier_description)
+ manuscript.reconstructed_konvoluts = [ ReconstructedKonvolut.create_cls(rk_node.get('output'), page_status_list=page_status_list, page_xpath=page_xpath)\
+ for rk_node in manuscript_tree.xpath(ReconstructedKonvolut.XML_TAG) ]
+ return manuscript
+
+ def get_color(self, hex_color) -> Color:
+ """Return color if it exists or None.
+ """
+ if hex_color in [ color.hex_color for color in self.colors ]:
+ return [ color for color in self.colors if color.hex_color == hex_color ][0]
+ return None
+
+ @classmethod
+ def get_semantic_dictionary(cls):
+ """ Creates a semantic dictionary as specified by SemanticClass.
+ """
+ dictionary = super(ArchivalManuscriptUnity,cls).get_semantic_dictionary()
+ dictionary[cls.PROPERTIES_KEY].update(cls.create_semantic_property_dictionary('styles', list))
+ dictionary[cls.PROPERTIES_KEY].update(cls.create_semantic_property_dictionary('reconstructed_konvoluts', ReconstructedKonvolut,\
+ name='partsBelongToReconstructedKonvolut',label='parts of manuscript belong to reconstructed convolut',\
+ comment='Some of the pages of this manuscript belong to a reconstructed convolut of pages.'))
+ dictionary[cls.PROPERTIES_KEY].update(cls.create_semantic_property_dictionary('earlier_descriptions', EarlierDescription))
+ return cls.return_dictionary_after_updating_super_classes(dictionary)
+
+ def update_colors(self, color):
+ """Update manuscript colors if color is not contained.
+ """
+ if self.get_color(color.hex_color) is None:
+ self.colors.append(color)
+ if self.manuscript_tree is not None:
+ if len(self.manuscript_tree.xpath('.//' + self.XML_COLORS_TAG)) > 0:
+ self.manuscript_tree.xpath('.//' + self.XML_COLORS_TAG)[0].getparent().remove(self.manuscript_tree.xpath('.//' + self.XML_COLORS_TAG)[0])
+ colors_node = ET.SubElement(self.manuscript_tree.getroot(), self.XML_COLORS_TAG)
+ for color in self.colors:
+ color.attach_object_to_tree(colors_node)
+ if not self.UNITTESTING:
+ write_pretty(xml_element_tree=self.manuscript_tree, file_name=self.manuscript_tree.docinfo.URL,\
+ script_name=__file__, backup=True,\
+ file_type=FILE_TYPE_XML_MANUSCRIPT)
+
+ def update_styles(self, *styles):
+ """Update manuscript styles.
+ """
+ for style in styles:
+ if style not in self.styles:
+ #print(style.css_styles)
+ self.styles.append(style)
Index: svgscripts/datatypes/page.py
===================================================================
--- svgscripts/datatypes/page.py (revision 105)
+++ svgscripts/datatypes/page.py (revision 106)
@@ -1,402 +1,406 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This class can be used to represent a page.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see 1}}}
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
from lxml import etree as ET
from os.path import isfile, basename
from progress.bar import Bar
from svgpathtools import svg2paths2, svg_to_paths
from svgpathtools.parser import parse_path
import re
import sys
import warnings
from .box import Box
from .color import Color
from .image import Image, SVGImage
from .faksimile_image import FaksimileImage
from .faksimile_position import FaksimilePosition
from .lineNumber import LineNumber
from .line import Line
from .mark_foreign_hands import MarkForeignHands
from .matrix import Matrix
from .path import Path
from .positional_word_part import PositionalWordPart
from .super_page import SuperPage
from .style import Style
from .text_connection_mark import TextConnectionMark
from .text_field import TextField
from .transkriptionField import TranskriptionField
from .writing_process import WritingProcess
from .word import Word
from .word_deletion_path import WordDeletionPath
from .word_insertion_mark import WordInsertionMark
sys.path.append('py2ttl')
from class_spec import SemanticClass
sys.path.append('shared_util')
from main_util import extract_paths_on_tf, get_paths_near_position
FILE_TYPE_SVG_WORD_POSITION = SuperPage.FILE_TYPE_SVG_WORD_POSITION
FILE_TYPE_XML_MANUSCRIPT = SuperPage.FILE_TYPE_XML_MANUSCRIPT
STATUS_MERGED_OK = SuperPage.STATUS_MERGED_OK
STATUS_POSTMERGED_OK = SuperPage.STATUS_POSTMERGED_OK
+
class Page(SemanticClass,SuperPage):
"""
This class represents a page.
Args:
xml_source_file (str): name of the xml file to be instantiated.
faksimile_image: FaksimileImage.
faksimile_svgFile: svg file containing information about word positions.
"""
UNITTESTING = False
def __init__(self, xml_source_file=None, faksimile_image=None, faksimile_svgFile=None, add_paths_near_words=False, warn=False, number=None):
if xml_source_file is not None:
super(Page,self).__init__(xml_source_file)
self.update_property_dictionary('faksimile_image', faksimile_image)
self.update_property_dictionary('faksimile_svgFile', faksimile_svgFile)
self.init_all_properties()
self.add_style(style_node=self.page_tree.getroot().find('.//style'))
self.faksimile_text_field = None
self.svg_text_field = None
self.init_node_objects()
self.warn = warn
self.add_deletion_paths_to_words(add_paths_near_words)
else:
self.number = number
def add_deletion_paths_to_words(self, add_paths_near_words=False):
"""Add deletion paths to words.
"""
words = [ word for word in self.words if (len(word.word_parts) == 0 and word.deleted and len(word.deletion_paths) == 0)\
or 'add_paths_near_words' in word.process_flags ]
words += [ word for word in self.words\
if len(word.word_parts) > 0 and True in\
[ (wp.deleted and len(wp.deletion_paths) == 0) for wp in word.word_parts ]]
if len(words) > 0 and ((self.svg_file is not None and isfile(self.svg_file))\
or (self.source is not None and isfile(self.source))):
svg_file = self.svg_file if self.svg_file is not None else self.source
transkription_field = TranskriptionField(svg_file)
tr_xmin = transkription_field.xmin if (self.svg_image is None or self.svg_image.text_field is None) else 0
tr_ymin = transkription_field.ymin if (self.svg_image is None or self.svg_image.text_field is None) else 0
word_deletion_paths = self.word_deletion_paths
index = 0
dp_updated = False
while index < len(words):
word = words[index]
word.add_deletion_paths(word_deletion_paths, tr_xmin=tr_xmin, tr_ymin=tr_ymin)
if len(word.deletion_paths) > 0 or True in [ len(w.deletion_paths) > 0 for w in word.word_parts ]:
deletion_paths = word.deletion_paths
for wp in word.word_parts: deletion_paths += wp.deletion_paths
for deletion_path in deletion_paths:
if deletion_path not in self.word_deletion_paths:
self.word_deletion_paths.append(deletion_path)
elif not dp_updated:
word_deletion_paths = extract_paths_on_tf(self)
dp_updated = True
index -= 1
if add_paths_near_words\
and ('add_paths_near_words' in word.process_flags\
or ((word.deleted and len(word.deletion_paths) == 0)\
or True in [ (w.deleted and len(w.deletion_paths) == 0) for w in word.word_parts ])):
if not dp_updated\
and 'add_paths_near_words' in word.process_flags:
word_deletion_paths = extract_paths_on_tf(self)
dp_updated = True
transform = None
tp = None
target_word = word
paths_near_word = []
if word.deleted and len(word.transkription_positions) > 0:
transform = word.transkription_positions[0].transform
for tp in word.transkription_positions:
word.deletion_paths_near_word += get_paths_near_position(tp, word_deletion_paths)
elif len(word.word_parts) > 0:
for wp in word.word_parts:
if wp.deleted and len(wp.transkription_positions) > 0:
target_word = wp
for tp in wp.transkription_positions:
wp.deletion_paths_near_word = get_paths_near_position(tp, word_deletion_paths)
if self.warn and (word.deleted and len(word.deletion_paths) == 0):
warnings.warn(\
f'WARNING: {self.title} {self.number}: {word.id} on {word.line_number}, {word.text} has no deletion paths! {target_word.deletion_paths_near_word}, {transform}')
index += 1
@classmethod
- def create_cls(cls, xml_source_file, create_dummy_page=False):
+ def create_cls(cls, xml_source_file=None, create_dummy_page=False, page_node=None):
"""Create a Page.
"""
if not create_dummy_page:
return cls(xml_source_file)
else:
m = re.match(r'(.*)(page[0]*)(.*)(\.xml)', xml_source_file)
if m is not None and len(m.groups()) > 3:
number = m.group(3)
else:
number = basename(xml_source_file).replace('.xml','')
return cls(number=number)
@classmethod
def get_pages_from_xml_file(cls, xml_file, status_contains='', status_not_contain='', word_selection_function=None):
"""Returns a list of Page instantiating a xml_file of type FILE_TYPE_SVG_WORD_POSITION
or xml_files contained in xml_file of type FILE_TYPE_XML_MANUSCRIPT.
[optional: instantiation depends on the fulfilment of a status_contains
and/or on the selection of some words by a word_selection_function].
"""
source_tree = ET.parse(xml_file)
if source_tree.getroot().find('metadata/type').text == cls.FILE_TYPE_SVG_WORD_POSITION:
page = cls(xml_file)
if word_selection_function is None or len(word_selection_function(page.words)) > 0:
return [ page ]
else:
return []
elif source_tree.getroot().find('metadata/type').text == FILE_TYPE_XML_MANUSCRIPT:
pages = []
xpath = '//page/@output'
if status_contains != '' and status_not_contain != '':
xpath = '//page[contains(@status, "{0}") and not(contains(@status, "{1}"))]/@output'.format(status_contains, status_not_contain)
elif status_contains != '':
xpath = '//page[contains(@status, "{0}")]/@output'.format(status_contains)
elif status_not_contain != '':
xpath = '//page[not(contains(@status, "{0}"))]/@output'.format(status_not_contain)
for xml_source_file in source_tree.xpath(xpath):
if isfile(xml_source_file):
pages += cls.get_pages_from_xml_file(xml_source_file, word_selection_function=word_selection_function)
return pages
else:
return []
@classmethod
def get_semantic_dictionary(cls):
""" Creates a semantic dictionary as specified by SemanticClass.
"""
dictionary = {}
class_dict = cls.get_class_dictionary()
properties = { 'number': { 'class': str, 'cardinality': 1}}
properties.update(cls.create_semantic_property_dictionary('faksimile_image', FaksimileImage, subPropertyOf=cls.HAS_IMAGE))
properties.update(cls.create_semantic_property_dictionary('faksimile_text_field', TextField,\
name='pageIsOnFaksimileTextField', label='page is on faksimile text field',\
comment='Relates a page to the text field on a svg image.', subPropertyOf=cls.PAGE_IS_ON_TEXTFIELD))
properties.update(cls.create_semantic_property_dictionary('orientation', str))
properties.update(cls.create_semantic_property_dictionary('svg_image', SVGImage, subPropertyOf=cls.HAS_IMAGE))
properties.update(cls.create_semantic_property_dictionary('svg_text_field', TextField,\
name='pageIsOnSVGTextField', label='page is on svg text field',\
comment='Relates a page to the text field on a faksimile image.', subPropertyOf=cls.PAGE_IS_ON_TEXTFIELD))
for key in [ 'lines', 'mark_foreign_hands', 'words', 'word_deletion_paths', 'word_insertion_marks']:
properties.update(cls.create_semantic_property_dictionary(key, list))
dictionary.update({cls.CLASS_KEY: class_dict})
dictionary.update({cls.PROPERTIES_KEY: properties})
return cls.return_dictionary_after_updating_super_classes(dictionary)
def get_word_deletion_path(self, path=None, d_attribute=None) ->WordDeletionPath:
"""Return a word deletion path that belongs to page.
"""
if path is None and d_attribute is None:
raise Exception('ERROR: get_word_deletion_path needs a path or a d_attribute!')
if d_attribute is None:
d_attribute = path.d_attribute
page_paths = [ dpath for dpath in self.word_deletion_paths if dpath.d_attribute == d_attribute ]
if len(page_paths) > 0:
return page_paths[0]
else:
dpath = WordDeletionPath.create_cls(self, path=path, d_attribute=d_attribute)
if dpath is not None:
dpath.id = len(self.word_deletion_paths)
self.word_deletion_paths.append(dpath)
dpath.attach_object_to_tree(self.page_tree)
return dpath
def init_node_objects(self):
"""Initialize all node objects.
"""
self.word_insertion_marks = [ WordInsertionMark(wim_node=wim_node) for wim_node in self.page_tree.getroot().xpath('//' + WordInsertionMark.XML_TAG) ]
self.words = [ Word.create_cls(word_node) for word_node in self.page_tree.getroot().xpath('./word') ]
self.mark_foreign_hands = [ MarkForeignHands.create_cls(node) for node in self.page_tree.getroot().xpath('//' + MarkForeignHands.XML_TAG) ]
self.text_connection_marks = [ TextConnectionMark.create_cls(node) for node in self.page_tree.getroot().xpath('//' + TextConnectionMark.XML_TAG) ]
self.line_numbers = [ LineNumber(xml_text_node=line_number_node) for line_number_node in self.page_tree.getroot().xpath('//' + LineNumber.XML_TAG) ]
self.lines = [ Line.create_cls_from_node(node=line_number_node) for line_number_node in self.page_tree.getroot().xpath('//' + LineNumber.XML_TAG) ]
self.writing_processes = [ WritingProcess.create_writing_process_from_xml(node, self.words) for node in self.page_tree.xpath('//' + WritingProcess.XML_TAG) ]
self.word_deletion_paths = [ WordDeletionPath.create_cls(self, node=node) for node in self.page_tree.xpath('./' + WordDeletionPath.XML_TAG) ]
if self.faksimile_image is not None and self.faksimile_image.text_field is not None:
self.faksimile_text_field = self.faksimile_image.text_field
if self.svg_image is not None and self.svg_image.text_field is not None:
self.svg_text_field = self.svg_image.text_field
for simple_word in self.words + self.mark_foreign_hands + self.text_connection_marks:
simple_word.init_word(self)
for wim in self.word_insertion_marks:
if wim.line_number > -1:
wim.line = [ line for line in self.lines if line.id == wim.line_number ][0]
def update_and_attach_words2tree(self, update_function_on_word=None, include_special_words_of_type=[]):
"""Update word ids and attach them to page.page_tree.
"""
if not self.is_locked():
update_function_on_word = [ update_function_on_word ]\
if type(update_function_on_word) != list\
else update_function_on_word
for node in self.page_tree.xpath('.//word|.//' + MarkForeignHands.XML_TAG + '|.//' + TextConnectionMark.XML_TAG):
node.getparent().remove(node)
for index, word in enumerate(self.words):
word.id = index
for func in update_function_on_word:
if callable(func):
func(word)
word.attach_word_to_tree(self.page_tree)
for index, mark_foreign_hands in enumerate(self.mark_foreign_hands):
mark_foreign_hands.id = index
if MarkForeignHands in include_special_words_of_type:
for func in update_function_on_word:
if callable(update_function_on_word):
func(mark_foreign_hands)
mark_foreign_hands.attach_word_to_tree(self.page_tree)
for index, text_connection_mark in enumerate(self.text_connection_marks):
text_connection_mark.id = index
if TextConnectionMark in include_special_words_of_type:
for func in update_function_on_word:
if callable(update_function_on_word):
func(text_connection_mark)
text_connection_mark.attach_word_to_tree(self.page_tree)
else:
print('locked')
def update_data_source(self, faksimile_svgFile=None, xml_correction_file=None):
"""Update the data source of page.
"""
if faksimile_svgFile is not None:
self.faksimile_svgFile = faksimile_svgFile
data_node = self.page_tree.xpath('.//data-source')[0]\
if len(self.page_tree.xpath('.//data-source')) > 0\
else ET.SubElement(self.page_tree.getroot(), 'data-source')
data_node.set('file', self.faksimile_svgFile)
if xml_correction_file is not None:
data_node.set('xml-corrected-words', xml_correction_file)
def update_line_number_area(self, transkription_field, svg_tree=None, set_to_text_field_zero=True):
"""Determines the width of the area where the line numbers are written in the page.source file.
"""
THRESHOLD = 0.4
if svg_tree is None:
svg_tree = ET.parse(self.source)
if len(self.line_numbers) > 1:
line_number = self.line_numbers[9]\
if transkription_field.is_page_verso() and len(self.line_numbers) > 8\
else self.line_numbers[1]
ln_nodes = [ item for item in svg_tree.iterfind('//text', svg_tree.getroot().nsmap)\
if Matrix.IS_NEARX_TRANSKRIPTION_FIELD(item.get('transform'), transkription_field)\
and LineNumber.IS_A_LINE_NUMBER(item)\
and LineNumber(raw_text_node=item).id == line_number.id ]
if len(ln_nodes) > 0:
matrix = Matrix(transform_matrix_string=ln_nodes[0].get('transform'))
if transkription_field.is_page_verso():
transkription_field.add_line_number_area_width(matrix.getX())
elif self.svg_file is not None and isfile(self.svg_file):
svg_path_tree = ET.parse(self.svg_file)
namespaces = { k if k is not None else 'ns': v for k, v in svg_path_tree.getroot().nsmap.items() }
svg_x = matrix.getX()
svg_y = self.line_numbers[1].bottom + transkription_field.ymin\
if set_to_text_field_zero\
else self.line_numbers[1].bottom
use_nodes = svg_path_tree.xpath('//ns:use[@x>="{0}" and @x<="{1}" and @y>="{2}" and @y<="{3}"]'\
.format(svg_x-THRESHOLD, svg_x+THRESHOLD,svg_y-THRESHOLD, svg_y+THRESHOLD), namespaces=namespaces)
if len(use_nodes) > 0:
symbol_id = use_nodes[0].get('{%s}href' % namespaces['xlink']).replace('#', '')
d_strings = use_nodes[0].xpath('//ns:symbol[@id="{0}"]/ns:path/@d'.format(symbol_id), namespaces=namespaces)
if len(d_strings) > 0 and d_strings[0] != '':
path = parse_path(d_strings[0])
xmin, xmax, ymin, ymax = path.bbox()
width = xmax - xmin
transkription_field.add_line_number_area_width(matrix.getX() + width)
def update_page_type(self, transkription_field=None):
"""Adds a source to page and attaches it to page_tree.
"""
if self.number.endswith('r')\
or self.number.endswith('v'):
self.page_type = Page.PAGE_VERSO\
if self.number.endswith('v')\
else Page.PAGE_RECTO
else:
if transkription_field is None:
if self.source is None or not isfile(self.source):
raise FileNotFoundError('Page does not have a source!')
transkription_field = TranskriptionField(self.source, multipage_index=self.multipage_index)
self.page_type = Page.PAGE_VERSO\
if transkription_field.is_page_verso()\
else Page.PAGE_RECTO
self.page_tree.getroot().set('pageType', self.page_type)
def update_styles(self, words=None, manuscript=None, add_to_parents=False, partition_according_to_styles=False, create_css=False):
"""Update styles of words and add them to their transkription_positions.
Args:
add_to_parents: Add styles also to word (and if not None to manuscript).
partition_according_to_styles: Partition word if its transkription_positions have different styles.
"""
style_dictionary = {}
if words is None:
words = self.words
for word in words:
if len(word.word_parts) > 0:
self.update_styles(words=word.word_parts, manuscript=manuscript, create_css=create_css,\
add_to_parents=add_to_parents, partition_according_to_styles=partition_according_to_styles)
for transkription_position in word.transkription_positions:
if len(transkription_position.positional_word_parts) > 0:
style_class = transkription_position.positional_word_parts[0].style_class
writing_process_id = -1
for font_key in [ font_key for font_key in style_class.split(' ') if font_key in self.fontsizekey2stage_mapping.keys() ]:
writing_process_id = self.fontsizekey2stage_mapping.get(font_key)
style_class_key = (Style.remove_irrelevant_style_keys(style_class, self, extended_styles=create_css), writing_process_id)
if create_css:
if style_dictionary.get((style_class_key, word.deleted)) is None:
- color = Color.create_cls()
- if len(word.deletion_paths) > 0\
- and word.deletion_paths[0].style_class is not None\
+ color = None
+ if len(word.deletion_paths) > 0:
+ if word.deletion_paths[0].style_class is not None\
and word.deletion_paths[0].style_class != ''\
and self.style_dict.get(word.deletion_paths[0].style_class) is not None:
- color = Color.create_cls_from_style_object(self.style_dict.get(word.deletion_paths[0].style_class))
+ color = Color.create_cls_from_style_object(self.style_dict.get(word.deletion_paths[0].style_class))
+ else:
+ color = Color()
style_dictionary[(style_class_key, word.deleted)] = Style.create_cls(self, style_class_key[0], manuscript=manuscript,\
create_css=create_css, deletion_color=color, writing_process_id=style_class_key[1] )
transkription_position.style = style_dictionary[(style_class_key, word.deleted)]
#print(style_dictionary[(style_class_key, word.deleted)])
else:
if style_dictionary.get(style_class_key) is None:
style_dictionary[style_class_key] = Style.create_cls(self, style_class_key[0], manuscript=manuscript, create_css=create_css)
style_dictionary[style_class_key].writing_process_id = style_class_key[1]
transkription_position.style = style_dictionary[style_class_key]
if add_to_parents and transkription_position.style not in word.styles:
word.styles.append(transkription_position.style)
if partition_according_to_styles:
word.split_according_to_status('style', splits_are_parts=True)
if manuscript is not None\
and add_to_parents:
manuscript.update_styles(*style_dictionary.values())
+
Index: svgscripts/datatypes/image.py
===================================================================
--- svgscripts/datatypes/image.py (revision 105)
+++ svgscripts/datatypes/image.py (revision 106)
@@ -1,149 +1,155 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This super class can be used to represent all image types.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see 1}}}
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
from lxml import etree as ET
from os.path import isfile
import sys
from .attachable_object import AttachableObject
from .matrix import Matrix
from .text_field import TextField
sys.path.append('py2ttl')
from class_spec import SemanticClass
class Image(AttachableObject,SemanticClass):
"""
This super class represents all types of images.
Args:
file_name (str): name of the image file.
node (lxml.etree.Element) node, containing information
URL (str): URL of image file.
height (float): height of image
width (float): width of image
text_field (.text_field.TextField) text_field on image representation
"""
stringKeys = [ 'file_name', 'URL', 'local_path' ]
floatKeys = [ 'height', 'width' ]
XML_TAG = 'image'
SECONDARY_URL = 'http://localhost:8000/'
FAKSIMILE_DIR = 'faksimiles/'
def __init__(self, node=None, file_name=None, local_path=None, URL=None, height=0.0, width=0.0, matrix=None, text_field=None, tag=XML_TAG):
self.text_field = text_field
self.tag = tag
if node is not None:
self.file_name = node.get('file-name')
self.local_path = node.get('local-path')
self.URL = node.get('URL')
self.height = float(node.get('height'))
self.width = float(node.get('width'))
self.transform = Matrix(node.get('transform')) if bool(node.get('transform')) and 'matrix(' in node.get('transform') else None
if len(node.findall(TextField.XML_TAG)) > 0:
self.text_field = TextField(node=node.find(TextField.XML_TAG))
else:
self.file_name = file_name
self.local_path = local_path
self.URL = URL
self.height = height
self.width = width
self.transform = matrix
self.primaryURL = self.URL
self.secondaryURL = None
if self.file_name is not None:
self.secondaryURL = self.SECONDARY_URL + self.file_name.replace('./','')\
if self.file_name is not None and self.file_name.endswith('svg')\
else self.SECONDARY_URL + self.FAKSIMILE_DIR + self.file_name
+ self.transform_string = self.transform.toString()\
+ if self.transform is not None\
+ else None
def attach_object_to_tree(self, target_tree):
"""Attach object to tree.
"""
- obj_node = target_tree.getroot().find('.//' + self.tag) \
- if(len(target_tree.getroot().findall('.//' + self.tag)) > 0) \
- else ET.SubElement(target_tree.getroot(), self.tag)
+ if target_tree.__class__.__name__ == '_ElementTree':
+ target_tree = target_tree.getroot()
+ obj_node = target_tree.find('.//' + self.tag) \
+ if(len(target_tree.findall('.//' + self.tag)) > 0) \
+ else ET.SubElement(target_tree, self.tag)
for key in self.floatKeys:
if self.__dict__[key] is not None:
obj_node.set(key.replace('_','-'), str(round(self.__dict__[key], 3)))
for key in self.stringKeys:
if self.__dict__[key] is not None:
obj_node.set(key.replace('_','-'), self.__dict__[key])
if self.transform is not None and self.transform.isRotationMatrix():
obj_node.set('transform', self.transform.toString())
if self.text_field is not None:
self.text_field.attach_object_to_tree(obj_node)
@classmethod
def get_semantic_dictionary(cls):
""" Creates and returns a semantic dictionary as specified by SemanticClass.
"""
dictionary = {}
class_dict = cls.get_class_dictionary()
properties = {}
for floatKey in Image.floatKeys:
properties.update(cls.create_semantic_property_dictionary(floatKey, float, cardinality=1))
properties.update(cls.create_semantic_property_dictionary('file_name', str, cardinality=1))
properties.update(cls.create_semantic_property_dictionary('text_field', TextField))
- properties.update(cls.create_semantic_property_dictionary('transform', str))
+ #properties.update(cls.create_semantic_property_dictionary('transform', str))
+ properties.update(cls.create_semantic_property_dictionary('transform_string', str, name='hasTransform'))
properties.update(cls.create_semantic_property_dictionary('primaryURL', str, cardinality=1, subPropertyOf=cls.HAS_URL))
properties.update(cls.create_semantic_property_dictionary('secondaryURL', str, cardinality=1, subPropertyOf=cls.HAS_URL))
dictionary.update({'class': class_dict})
dictionary.update({'properties': properties})
return dictionary
class SVGImage(Image):
"""This class represents a svg image.
"""
XML_TAG = 'svg-image'
URL_PREFIX = 'http://existdb-test.dasch.swiss/exist/rest/db/storage/nietzsche/'
def __init__(self, node=None, file_name=None, URL=None, height=0.0, width=0.0, text_field=None, tag=XML_TAG):
if node is not None and node.tag != self.XML_TAG:
file_name = node.get('file')
height = float(node.get('height')) if bool(node.get('height')) else 0.0
width = float(node.get('width')) if bool(node.get('width')) else 0.0
node = None
super(SVGImage, self).__init__(node=node, file_name=file_name, URL=URL,\
height=height, width=width, text_field=text_field, tag=self.XML_TAG)
self.primaryURL = self.URL_PREFIX + self.file_name.replace('./', '')
def decontextualize_file_name(self, update_url=None):
"""Decontextualize file name.
"""
self.file_name = self.file_name.replace('./', '')
if update_url is not None:
self.URL = update_url + self.file_name
# @classmethod
# def get_semantic_dictionary(cls):
# """ Creates and returns a semantic dictionary as specified by SemanticClass.
# """
# dictionary = super(SVGImage,cls).get_semantic_dictionary()
# return cls.return_dictionary_after_updating_super_classes(dictionary)
Index: svgscripts/datatypes/simple_word.py
===================================================================
--- svgscripts/datatypes/simple_word.py (revision 105)
+++ svgscripts/datatypes/simple_word.py (revision 106)
@@ -1,127 +1,124 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This super class can be used to represent a simple word.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see 1}}}
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
import abc
from lxml import etree as ET
import sys
from .line import Line
from .faksimile_position import FaksimilePosition
from .transkription_position import TranskriptionPosition
from .word_position import WordPosition
sys.path.append('py2ttl')
from class_spec import SemanticClass
class SimpleWord(SemanticClass, metaclass=abc.ABCMeta):
"""
This class represents a simple word.
"""
XML_TAG = 'simple-word'
XML_SUB_TAG = 'content'
def __init__(self, id=0, line_number=-1, line=None, text='', deleted=False, transkription_positions=None, faksimile_positions=None):
self.id = id
self.text = text
self.line_number = line_number
self.lines = []
if line is not None:
self.lines.append(line)
self.transkription_positions = transkription_positions if transkription_positions is not None else []
self.faksimile_positions = faksimile_positions if faksimile_positions is not None else []
def attach_word_to_tree(self, target_tree):
"""Attaches word to tree target_tree.
"""
if target_tree.__class__.__name__ == '_ElementTree':
target_tree = target_tree.getroot()
if len(target_tree.xpath('.//' + self.XML_TAG + '[@id="%s"]' % self.id)) > 0:
word_node = target_tree.xpath('.//' + self.XML_TAG + '[@id="%s"]' % self.id)[0]
word_node.getparent().remove(word_node)
word_node = ET.SubElement(target_tree, self.XML_TAG, attrib={'id': str(self.id)})
word_node.set('text', self.text)
if self.line_number > -1:
word_node.set('line-number', str(self.line_number))
for id, transkription_position in enumerate(self.transkription_positions):
transkription_position.id = id
transkription_position.attach_object_to_tree(word_node)
for faksimile_position in self.faksimile_positions:
faksimile_position.attach_object_to_tree(word_node)
return word_node
@classmethod
def create_cls(cls, word_node):
"""Creates a cls from a (lxml.Element) node.
[:return:] cls
"""
if word_node is not None: # init word from xml node
id = int(word_node.get('id'))
line_number = int(word_node.get('line-number')) if bool(word_node.get('line-number')) else -1
text = word_node.get('text')
transkription_positions = [ TranskriptionPosition(id=id, node=node) for id, node in enumerate(word_node.findall('./' + WordPosition.TRANSKRIPTION)) ]
faksimile_positions = [ WordPosition(node=node) for node in word_node.findall('./' + WordPosition.FAKSIMILE) ]
return cls(id=id, text=text, line_number=line_number, transkription_positions=transkription_positions,\
faksimile_positions=faksimile_positions)
else:
error_msg = 'word_node has not been defined'
raise Exception('Error: {}'.format(error_msg))
@classmethod
def get_semantic_dictionary(cls):
""" Creates and returns a semantic dictionary as specified by SemanticClass.
"""
dictionary = {}
class_dict = cls.get_class_dictionary()
properties = { 'lines': {cls.CLASS_KEY: Line,\
cls.CARDINALITY: 1,\
cls.CARDINALITY_RESTRICTION: 'minCardinality',\
cls.PROPERTY_NAME: 'wordBelongsToLine',\
cls.PROPERTY_LABEL: 'word belongs to a line',\
cls.PROPERTY_COMMENT: 'Relating a word to a line.'}}
properties.update(cls.create_semantic_property_dictionary('transkription_positions', TranskriptionPosition,\
name='hasTranskriptionPosition', cardinality=1, cardinality_restriction='minCardinality'))
properties.update(cls.create_semantic_property_dictionary('faksimile_positions', FaksimilePosition,\
name='hasFaksimilePosition', cardinality=1, cardinality_restriction='minCardinality'))
properties.update(cls.create_semantic_property_dictionary('text', str, cardinality=1,\
subPropertyOf=cls.HOMOTYPIC_HAS_TEXT_URL_STRING))
dictionary.update({cls.CLASS_KEY: class_dict})
dictionary.update({cls.PROPERTIES_KEY: properties})
return cls.return_dictionary_after_updating_super_classes(dictionary)
def init_word(self, page):
"""Initialize word with objects from page.
"""
- #for transkription_position in self.transkription_positions:
- # transkription_position.svg_image = page.svg_image
- #self.faksimile_positions = FaksimilePosition.create_list_of_cls(self.faksimile_positions, page.faksimile_image, page.text_field)
if self.line_number > -1:
self.lines += [ line for line in page.lines if line.id == self.line_number ]
elif 'word_parts' in self.__dict__.keys() and len(self.word_parts) > 0:
self.lines += [ line for line in page.lines if line.id in [ wp.line_number for wp in self.word_parts ] ]
Index: svgscripts/create_manuscript.py
===================================================================
--- svgscripts/create_manuscript.py (revision 105)
+++ svgscripts/create_manuscript.py (revision 106)
@@ -1,204 +1,204 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This program can be used to create a ArchivalManuscriptUnity.
"""
# Copyright (C) University of Basel 2020 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see 1}}}
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
from colorama import Fore, Style
import getopt
import re
import sys
from os import listdir, sep, path
from os.path import isfile, isdir, dirname, basename
import lxml.etree as ET
if dirname(__file__) not in sys.path:
sys.path.append(dirname(__file__))
-from datatypes.manuscript import ArchivalManuscriptUnity
+from datatypes.archival_manuscript import ArchivalManuscriptUnity
from datatypes.super_page import SuperPage
sys.path.append('shared_util')
from myxmlwriter import parse_xml_of_type, write_pretty, xml_has_type, FILE_TYPE_SVG_WORD_POSITION, FILE_TYPE_XML_MANUSCRIPT
UNITTESTING = False
class ManuscriptCreator:
"""This class can be used to create a ArchivalManuscriptUnity.
"""
def __init__(self, xml_target_dir):
self.xml_target_dir = xml_target_dir
def _get_or_create_element(self, node, xpath, create_id=False) ->ET.Element:
"""Return a element with name == element_name, or create if it does not exist.
"""
elements = node.xpath(xpath)
if len(elements) > 0:
return elements[0]
else:
if re.match(r'[a-z]+\[@[a-z-]+=', xpath):
element_name = re.match(r'(.+?)\[@[a-z]+.*', xpath).group(1)
num_elements = len(node.xpath(element_name))
element = ET.SubElement(node, element_name)
element_attribute = re.match(r'[a-z]+\[@(.+?)=.*', xpath).group(1)
element_value = re.match(r'[a-z]+\[@[a-z-]+="(.+?)"]', xpath).group(1)
element.set(element_attribute, element_value)
if create_id:
element.set('id', str(num_elements))
return element
else:
num_elements = len(node.xpath(xpath))
element = ET.SubElement(node, xpath)
if create_id:
element.set('id', str(num_elements))
return element
def _create_or_update_pages(self, pages_node, manuscript_page_url_mapping):
"""Create or update pages.
"""
for page_number, url in manuscript_page_url_mapping.items():
xpath = SuperPage.XML_TAG + f'[@number="{page_number}"]'
page_node = self._get_or_create_element(pages_node, xpath, create_id=True)
if not bool(page_node.get('alias')):
page_node.set('alias', basename(url))
def create_or_update_manuscripts(self, manuscript_files, page_url_mapping):
"""Create or update manuscripts.
"""
for key in page_url_mapping:
relevant_files = [ manuscript_file for manuscript_file in manuscript_files\
if basename(manuscript_file) == key.replace(' ', '_') + '.xml']
if len(relevant_files) == 0:
manuscript_files.append(key.replace(' ', '_') + '.xml')
for manuscript_file in manuscript_files:
target_file = self.xml_target_dir + sep + manuscript_file\
if dirname(manuscript_file) == ''\
else manuscript_file
title = basename(target_file).replace('.xml', '').replace('_', ' ')
manuscript = ArchivalManuscriptUnity(title=title)
if isfile(target_file):
manuscript = ArchivalManuscriptUnity.create_cls(target_file)
else:
manuscript.manuscript_tree = ET.ElementTree(ET.Element(ArchivalManuscriptUnity.XML_TAG))
manuscript.manuscript_tree.docinfo.URL = target_file
manuscript.manuscript_tree.getroot().set('title', manuscript.title)
manuscript.manuscript_tree.getroot().set('type', manuscript.manuscript_type)
if title in page_url_mapping.keys():
pages_node = self._get_or_create_element(manuscript.manuscript_tree.getroot(), 'pages')
self._create_or_update_pages(pages_node, page_url_mapping[title])
if not UNITTESTING:
write_pretty(xml_element_tree=manuscript.manuscript_tree, file_name=target_file,\
script_name=__file__, file_type=FILE_TYPE_XML_MANUSCRIPT)
def create_page_url_mapping(input_file, mapping_dictionary, default_title=''):
    """Create a page to url mapping from input file.

    File content alternates page lines and URL lines:
    TITLE PAGENUMBER\nURL
    See: 'tests_svgscripts/test_data/content.txt'

    :param input_file: text file with page lines ("TITLE, PAGENUMBER" or "PAGENUMBER") followed by URL lines
    :param mapping_dictionary: dict updated in place: { title: { page_number: url } }
    :param default_title: title used for page lines without a title part
    """
    lines = []
    with open(input_file, 'r') as f:
        lines = f.readlines()
    key = None
    url = None
    current_key = default_title
    for content in lines:
        if content.startswith('http')\
           or content.startswith('www'):
            # URL line: normalize to http URL and map it to the last seen page key
            url = content.replace('\n', '')\
                    if content.startswith('http')\
                    else 'http://' + content.replace('\n', '')
            if current_key not in mapping_dictionary.keys():
                mapping_dictionary.update({current_key: {}})
            mapping_dictionary[current_key].update({key: url})
        else:
            # page line: "TITLE, PAGENUMBER" or just "PAGENUMBER" ("S." prefix is dropped)
            key_parts = [ part.strip() for part in content.replace('\n', '').replace('S.', '').split(',') ]
            key_index = 0
            if len(key_parts) > 1:
                # BUGFIX: always switch to this line's title and always take the
                # second part as page number. Previously, when a title occurred a
                # second time, key_index stayed 0 and the title itself was used
                # as the page number.
                title = key_parts[0]
                current_key = title
                if title not in mapping_dictionary.keys():
                    mapping_dictionary.update({current_key: {}})
                key_index = 1
            key = key_parts[key_index]
def usage():
    """Print usage information, i.e. the docstring of :func:`main`.
    """
    print(main.__doc__)
def main(argv):
    """This program can be used to create or update one or more manuscripts.
    svgscripts/create_manuscript.py [OPTIONS] [, ...] [, ...]
    One or more files mapping pages to faksimile URLs, with 'txt'-suffix
    manuscript file(s) (~ArchivalManuscriptUnity).
    OPTIONS:
    -h|--help: show help
    -t|--title=title manuscript's title, e.g. "Mp XV".
    -x|--xml-target-dir directory containing xmlManuscriptFile, default "./xml"
    :return: exit code (int)
    """
    title = ''
    xml_target_dir = f'.{sep}xml'
    page_url_mapping = {}
    try:
        opts, args = getopt.getopt(argv, "ht:x:", ["help", "title=", "xml-target-dir="])
    except getopt.GetoptError:
        usage()
        return 2
    for option, value in opts:
        if option in ('-h', '--help'):
            usage()
            return 0
        if option in ('-t', '--title'):
            title = value
        elif option in ('-x', '--xml-target-dir'):
            xml_target_dir = value
    # xml manuscript files are positional args; page files ("*_page*") are excluded
    manuscript_files = [ candidate for candidate in args
                         if candidate.endswith('.xml') and '_page' not in candidate ]
    # txt files map pages to faksimile URLs
    input_files = [ candidate for candidate in args
                    if candidate.endswith('.txt') and isfile(candidate) ]
    for input_file in input_files:
        create_page_url_mapping(input_file, page_url_mapping, default_title=title)
    ManuscriptCreator(xml_target_dir=xml_target_dir).create_or_update_manuscripts(manuscript_files, page_url_mapping)
    return 0
# Script entry point: forward the CLI arguments (without the program name)
# to main and use its return value as the process exit code.
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
Index: svgscripts/process_words_post_merging.py
===================================================================
--- svgscripts/process_words_post_merging.py (revision 105)
+++ svgscripts/process_words_post_merging.py (revision 106)
@@ -1,482 +1,482 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This program can be used to process words after they have been merged with faksimile data.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see 1}}}
from colorama import Fore, Style
from deprecated import deprecated
from functools import cmp_to_key
import getopt
import inspect
import lxml.etree as ET
from operator import attrgetter
import os
from os import listdir, sep, path, setpgrp, devnull
from os.path import exists, isfile, isdir, dirname, basename
from pathlib import Path as PathlibPath
from progress.bar import Bar
import re
import shutil
import string
from svgpathtools import svg2paths2, svg_to_paths
from svgpathtools.path import Path as SVGPath
from svgpathtools.path import Line
import sys
import tempfile
import warnings
if dirname(__file__) not in sys.path:
sys.path.append(dirname(__file__))
from datatypes.box import Box
-from datatypes.manuscript import ArchivalManuscriptUnity
+from datatypes.archival_manuscript import ArchivalManuscriptUnity
from datatypes.mark_foreign_hands import MarkForeignHands
from datatypes.page import Page, STATUS_MERGED_OK, STATUS_POSTMERGED_OK
from datatypes.path import Path
from datatypes.text_connection_mark import TextConnectionMark
from datatypes.transkriptionField import TranskriptionField
from datatypes.word import Word, do_paths_intersect_saveMode, update_transkription_position_ids
from extract_line_continuation import extract_line_continuations
from util import back_up, process_warnings4status
from process_files import update_svgposfile_status
from process_footnotes import categorize_footnotes
sys.path.append('shared_util')
from myxmlwriter import write_pretty, xml_has_type, FILE_TYPE_SVG_WORD_POSITION, FILE_TYPE_XML_MANUSCRIPT
from main_util import extract_paths_on_tf
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
# Set to True by the test suite to suppress progress bars and file output.
UNITTESTING = False
# Optional word used to emit debug output for a single word in
# mark_word_if_it_intersects_with_paths_as_deleted; None disables debugging.
DEBUG_WORD = None
# Subdirectory (next to the svg_pos files) holding the first merged version of each page.
MERGED_DIR = 'merged'
# Warning messages used by post_merging_processing_and_saving to downgrade the page status.
WARNING_FOOTNOTES_ERROR = 'footnotes not processed'
WARNING_LINE_CONTINUATION = 'line continuation fail'
def categorize_paths(page, transkription_field=None):
    """Categorize all paths that are part of the transkription field.

    Paths are sorted by their bounding-box geometry into dots, boxes,
    word connectors, deletion/underline paths, text-area deletion paths
    and uncategorized paths. Deletion/underline paths are immediately
    applied to the words of the page, and box paths are processed into
    word boxes.

    :param page: Page whose svg source file is analysed
    :param transkription_field: optional TranskriptionField (created from page.source if None)
    :return: a dictionary containig a list for each category of path.
    :raise FileNotFoundError: if page.source is missing (only outside unit tests)
    """
    if page.source is not None and isfile(page.source):
        MAX_HEIGHT_LINES = 1
        # maximal line height: largest bottom-top distance of the even ("outer")
        # line numbers plus 2; falls back to 17 when the page has no line numbers
        max_line = sorted(\
                [line_number.bottom-line_number.top for line_number in page.line_numbers if line_number.id % 2 == 0],\
                reverse=True)[0] + 2 if len(page.line_numbers) > 0 else 17
        # coordinate offsets are only needed when the svg image has no own text field
        tr_xmin = 0.0
        tr_ymin = 0.0
        if (page.svg_image is None or page.svg_image.text_field is None)\
           and transkription_field is not None:
            tr_xmin = transkription_field.xmin
            tr_ymin = transkription_field.ymin
        paths, attributes = svg_to_paths.svg2paths(page.source)
        allpaths_outside_tf = []
        attributes_outside_tf = []
        if transkription_field is None:
            transkription_field = TranskriptionField(page.source, multipage_index=page.multipage_index)
        # split paths into those on the transkription field and outsiders (filled in place)
        allpaths_on_tf = extract_paths_on_tf(page, outsiders=allpaths_outside_tf, outsider_attributes=attributes_outside_tf, transkription_field=transkription_field)
        path_dict = { 'text_area_deletion_paths': [],\
                      'deletion_or_underline_paths': [],\
                      'box_paths': [],\
                      'dots_paths': [],\
                      'word_connector_paths': [],\
                      'uncategorized_paths': [] }
        for mypath in allpaths_on_tf:
            xmin, xmax, ymin, ymax = mypath.path.bbox()
            start_line_number = page.get_line_number(mypath.path.start.imag-tr_ymin)
            if abs(xmax-xmin) < 1 and abs(ymax-ymin) < 1:
                # tiny bounding box -> a dot
                path_dict.get('dots_paths').append(mypath)
            elif abs(ymax-ymin) > MAX_HEIGHT_LINES and abs(ymax-ymin) < max_line and mypath.path.iscontinuous() and mypath.path.isclosed():
                # closed continuous path within one line height -> a box
                path_dict.get('box_paths').append(mypath)
            elif abs(ymax-ymin) > MAX_HEIGHT_LINES and abs(ymax-ymin) > max_line and mypath.path.iscontinuous() and not mypath.path.isclosed():
                # open continuous path taller than a line -> connects words across lines
                path_dict.get('word_connector_paths').append(mypath)
            elif abs(ymax-ymin) < MAX_HEIGHT_LINES:
                # flat path -> strike-through or underline
                mypath.start_line_number = start_line_number
                path_dict.get('deletion_or_underline_paths').append(mypath)
            elif start_line_number != -1 and start_line_number != page.get_line_number(mypath.path.end.imag-tr_ymin):
                # Check for "ladder", i.e. a path with 3 segments (seg0 is horizontal on line x, seg1 moves to line x+1, seg2 is horizontal on line x+1)
                if start_line_number + 1 == page.get_line_number(mypath.path.end.imag-tr_ymin)\
                   and len(mypath.path._segments) == 3\
                   and abs(mypath.path._segments[0].bbox()[3]-mypath.path._segments[0].bbox()[2]) < MAX_HEIGHT_LINES\
                   and abs(mypath.path._segments[2].bbox()[3]-mypath.path._segments[2].bbox()[2]) < MAX_HEIGHT_LINES:
                    # split the ladder into its two horizontal segments, one per line
                    for index in 0, 2:
                        new_path = Path(parent_path=mypath, path=SVGPath(mypath.path._segments[index]))
                        new_path.start_line_number = page.get_line_number(new_path.path.start.imag-tr_ymin)
                        path_dict.get('deletion_or_underline_paths').append(new_path)
                else:
                    path_dict.get('text_area_deletion_paths').append(mypath)
            else:
                path_dict.get('uncategorized_paths').append(mypath)
        # apply deletion/underline paths to the words; leftovers may be underlines
        underline_path = mark_words_intersecting_with_paths_as_deleted(page, path_dict.get('deletion_or_underline_paths'), tr_xmin, tr_ymin)
        path_dict.update({'underline_path': underline_path})
        path_dict['uncategorized_paths'] += process_word_boxes(page, path_dict.get('box_paths'), transkription_field,\
                paths=allpaths_outside_tf, attributes=attributes_outside_tf, max_line=max_line)
        return path_dict
    elif not UNITTESTING:
        error_msg = 'Svg source file {} does not exist!'.format(page.source)\
                if page.source is not None else 'Page does not contain a source file!'
        raise FileNotFoundError(error_msg)
    return {}
def copy_page_to_merged_directory(page, manuscript_file=None):
    """Copy page to directory that contains the first version of all svg_pos_files that have been
    merged with the faksimile position data. MERGED_DIR is a subfolder of svg_pos_files-directory.

    :param page: Page to back up
    :param manuscript_file: optional manuscript xml file passed through to save_page
    """
    source_file = PathlibPath(page.page_tree.docinfo.URL)
    merged_dir = source_file.parent / MERGED_DIR
    # create the subfolder on first use; a pre-existing directory is fine
    merged_dir.mkdir(exist_ok=True)
    save_page(page, str(source_file), target_svg_pos_file=str(merged_dir / source_file.name),\
            status=STATUS_MERGED_OK, manuscript_file=manuscript_file)
def find_special_words(page, transkription_field=None):
    """Find special words, remove them from words, process their content.

    Single-character words that match the special characters of
    MarkForeignHands or TextConnectionMark are removed from page.words and
    collected in page.mark_foreign_hands / page.text_connection_marks; their
    content is then extracted from the svg source (or marginals source).

    :param page: Page to process (mutated in place)
    :param transkription_field: optional TranskriptionField (created from page.source if None)
    :raise FileNotFoundError: if page.source is missing
    """
    if page.source is None or not isfile(page.source):
        raise FileNotFoundError('Page does not have a source!')
    if transkription_field is None:
        transkription_field = TranskriptionField(page.source, multipage_index=page.multipage_index)
    set_to_text_field_zero = (page.svg_image is None or page.svg_image.text_field is None)
    special_char_list = MarkForeignHands.get_special_char_list()
    special_char_list += TextConnectionMark.get_special_char_list()
    # snapshot list: safe to remove from page.words while iterating this copy
    single_char_words = [ word for word in page.words if len(word.text) == 1 and word.text in special_char_list ]
    if not UNITTESTING:
        bar = Bar('find special words', max=len(single_char_words))
    for word in single_char_words:
        not bool(UNITTESTING) and bar.next()
        if word.text == MarkForeignHands.CLASS_MARK:
            id = len(page.mark_foreign_hands)
            page.mark_foreign_hands.append(MarkForeignHands.create_cls_from_word(word, id=id))
            page.words.remove(word)
        elif word.text in TextConnectionMark.SPECIAL_CHAR_LIST[0]\
             or (word.text in TextConnectionMark.SPECIAL_CHAR_LIST\
                 and any(style in page.sonderzeichen_list for style\
                         in word.transkription_positions[0].positional_word_parts[0].style_class.split(' '))):
            id = len(page.text_connection_marks)
            page.text_connection_marks.append(TextConnectionMark.create_cls_from_word(word, id=id))
            page.words.remove(word)
    not bool(UNITTESTING) and bar.finish()
    svg_tree = ET.parse(page.source)
    page.update_page_type(transkription_field=transkription_field)
    page.update_line_number_area(transkription_field, svg_tree=svg_tree, set_to_text_field_zero=set_to_text_field_zero)
    # footnote content may live in a separate marginals file
    if page.marginals_source is not None:
        svg_tree = ET.parse(page.marginals_source)
    italic_classes = [ key for key in page.style_dict\
            if bool(page.style_dict[key].get('font-family')) and page.style_dict[key]['font-family'].endswith('Italic') ]
    if len(page.mark_foreign_hands) > 0:
        MarkForeignHands.find_content(page.mark_foreign_hands, transkription_field, svg_tree, italic_classes=italic_classes,\
                SonderzeichenList=page.sonderzeichen_list, set_to_text_field_zero=set_to_text_field_zero)
    if len(page.text_connection_marks) > 0:
        TextConnectionMark.find_content_in_footnotes(page, transkription_field, svg_tree)
def mark_words_intersecting_with_paths_as_deleted(page, deletion_paths, tr_xmin=0.0, tr_ymin=0.0):
    """Mark every word (and word part) that intersects one of the deletion
    paths as deleted and register those paths as word deletion paths.

    [:return:] list of .path.Path that might be word_underline_paths
    """
    show_progress = not bool(UNITTESTING)
    if show_progress:
        bar = Bar('mark words that intersect with deletion paths', max=len(page.words))
    for word in page.words:
        if show_progress:
            bar.next()
        word = mark_word_if_it_intersects_with_paths_as_deleted(word, page, deletion_paths, tr_xmin=tr_xmin, tr_ymin=tr_ymin)
        for part in word.word_parts:
            mark_word_if_it_intersects_with_paths_as_deleted(part, page, deletion_paths, tr_xmin=tr_xmin, tr_ymin=tr_ymin)
        word.partition_according_to_deletion()
    if show_progress:
        bar.finish()
    # whatever was not consumed as a word deletion path may be an underline
    return list(set(deletion_paths) - set(page.word_deletion_paths))
def mark_word_if_it_intersects_with_paths_as_deleted(word, page, deletion_paths, tr_xmin=0.0, tr_ymin=0.0):
    """Marks word if it intersects with deletion paths as deleted
    and adds these paths to word_deletion_paths.

    Each transkription position of the word is turned into a path and tested
    against all deletion paths; intersecting positions are marked deleted and
    remember their deletion paths. Newly used deletion paths (or their parent
    paths) are attached to the page tree and appended to
    page.word_deletion_paths.

    [:return:] word
    """
    # reset: a word is only deleted if an intersection is found below
    word.deleted = False
    for transkription_position in word.transkription_positions:
        word_path = Path.create_path_from_transkription_position(transkription_position,\
                tr_xmin=tr_xmin, tr_ymin=tr_ymin)
        intersecting_paths = [ deletion_path for deletion_path in deletion_paths\
                if do_paths_intersect_saveMode(deletion_path, word_path) ]
        # debugging hook: set module-level DEBUG_WORD to inspect a single word
        if DEBUG_WORD is not None and word.text == DEBUG_WORD.text and word.line_number == DEBUG_WORD.line_number:
            relevant_paths = [ path for path in deletion_paths if path.start_line_number == DEBUG_WORD.line_number ]
            #print(word.line_number, word_path.path.bbox(), [ path.path.bbox() for path in relevant_paths])
        if len(intersecting_paths) > 0:
            #print(f'{word.line_number}: {word.id}, {word.text}: {intersecting_paths}')
            transkription_position.deleted = True
            transkription_position._deletion_paths += intersecting_paths
            for deletion_path in intersecting_paths:
                # ladder segments point to their parent path; register the parent
                if deletion_path.parent_path is not None:
                    deletion_path = deletion_path.parent_path
                if deletion_path not in page.word_deletion_paths:
                    deletion_path.tag = Path.WORD_DELETION_PATH_TAG
                    deletion_path.attach_object_to_tree(page.page_tree)
                    page.word_deletion_paths.append(deletion_path)
    return word
def post_merging_processing_and_saving(svg_pos_file=None, new_words=None, page=None, manuscript_file=None, target_svg_pos_file=None):
    """Process words after merging with faksimile word positions.

    Backs the page up to MERGED_DIR, updates faksimile line positions and
    styles, categorizes paths, then processes special words, footnotes and
    line continuations (best-effort: any failure there is converted into a
    warning and only downgrades the resulting status). Finally saves the page.

    :param svg_pos_file: xml file of the page (may be None if page is given)
    :param new_words: optional replacement list of words (sorted by id; old word nodes are dropped)
    :param page: optional already instantiated Page (may be None if svg_pos_file is given)
    :param manuscript_file: optional manuscript xml file
    :param target_svg_pos_file: optional output file (defaults to svg_pos_file)
    :raise Exception: if neither page nor svg_pos_file is given
    :raise FileNotFoundError: if the page has no existing svg source
    """
    if page is None and svg_pos_file is None:
        raise Exception('ERROR: post_merging_processing_and_saving needs either a Page or a svg_pos_file!')
    if page is None:
        page = Page(svg_pos_file)
    if page.source is None or not isfile(page.source):
        raise FileNotFoundError('Page instantiated from {} does not contain an existing source!'.format(svg_pos_file))
    if svg_pos_file is None:
        svg_pos_file = page.page_tree.docinfo.URL
    if new_words is not None:
        # replace words wholesale and drop the stale word nodes from the tree
        page.words = sorted(new_words, key=attrgetter('id'))
        for word_node in page.page_tree.xpath('.//word'):
            word_node.getparent().remove(word_node)
    manuscript = ArchivalManuscriptUnity.create_cls(manuscript_file)\
            if manuscript_file is not None\
            else None
    copy_page_to_merged_directory(page, manuscript_file=manuscript_file)
    transkription_field = TranskriptionField(page.source, multipage_index=page.multipage_index)
    update_faksimile_line_positions(page)
    status = STATUS_MERGED_OK
    page.update_styles(manuscript=manuscript, partition_according_to_styles=True)
    categorize_paths(page, transkription_field=transkription_field)
    # collect warnings so the final page status can reflect partial failures
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('default')
        try:
            find_special_words(page, transkription_field=transkription_field)
            categorize_footnotes(page)
            extract_line_continuations(page, warning_message=WARNING_LINE_CONTINUATION)
        except Exception:
            # deliberate best-effort: footnote processing must not abort saving
            warnings.warn(WARNING_FOOTNOTES_ERROR)
    status = process_warnings4status(w, [ WARNING_FOOTNOTES_ERROR, WARNING_LINE_CONTINUATION ], status, STATUS_POSTMERGED_OK)
    save_page(page, svg_pos_file, target_svg_pos_file=target_svg_pos_file, status=status, manuscript_file=manuscript_file)
def process_word_boxes(page, box_paths, transkription_field, paths=None, attributes=None, max_line=17) -> list:
    """Process word boxes: partition words according to word boxes.

    Box paths are matched by line number against paths on the margin field in
    order to create Box objects; each word is then partitioned by the boxes
    and its correction history is created.

    :param page: Page to process
    :param box_paths: candidate box paths (from categorize_paths)
    :param transkription_field: TranskriptionField of the page source
    :param paths: optional pre-extracted Path objects outside the transkription field
    :param attributes: svg attributes parallel to paths
    :param max_line: maximal line height used to filter margin paths
    [:return:] a list of paths that are not boxes
    """
    MAX_HEIGHT_LINES = 1
    not_boxes = []
    if not UNITTESTING:
        bar = Bar('process word boxes', max=len(page.words))
    svg_tree = ET.parse(page.source)
    # lxml xpath requires a prefix for the default namespace -> map it to 'ns'
    namespaces = { k if k is not None else 'ns': v for k, v in svg_tree.getroot().nsmap.items() }
    allpaths_on_margin_field = []
    # offsets are zero when the svg image has its own text field
    tr_xmin = 0 if page.svg_image is not None and page.svg_image.text_field is not None\
            else transkription_field.xmin
    tr_ymin = 0 if page.svg_image is not None and page.svg_image.text_field is not None\
            else transkription_field.ymin
    if paths is None or attributes is None:
        paths = []
        raw_paths, attributes = svg_to_paths.svg2paths(page.source)
        for index, raw_path in enumerate(raw_paths):
            paths.append(Path.create_cls(id=index, path=raw_path, style_class=attributes[index].get('class'), page=page))
    for index, mypath in enumerate(paths):
        path = mypath.path
        xmin, xmax, ymin, ymax = path.bbox()
        attribute = attributes[index]
        # margin field is left of the transkription field on verso pages,
        # right of it on recto pages (bbox() returns (xmin, xmax, ymin, ymax))
        if len(path) > 0\
           and path != transkription_field.path\
           and ((path.bbox()[1] < transkription_field.xmin and transkription_field.is_page_verso())\
               or (path.bbox()[0] > transkription_field.xmax and not transkription_field.is_page_verso()))\
           and abs(ymax-ymin) < max_line:
            allpaths_on_margin_field.append(mypath)#Path.create_cls(id=index, path=path, style_class=attribute.get('class'), page=page))
    # group the box paths by the line number of their vertical center
    box_line_number_dict = {}
    for box_path in sorted(box_paths, key=lambda path: path.get_median_y()):
        line_number = page.get_line_number(box_path.get_median_y(tr_ymin=tr_ymin))
        if line_number > 0:
            if line_number not in box_line_number_dict.keys():
                box_line_number_dict.update({ line_number: [ box_path ]})
            else:
                box_line_number_dict.get(line_number).append(box_path)
    boxes = []
    for line_number in box_line_number_dict.keys():
        box_paths_on_line = sorted(box_line_number_dict[line_number], key=lambda path: path.get_x())
        margin_boxes_on_line = sorted([ margin_box for margin_box in allpaths_on_margin_field\
                if page.get_line_number(margin_box.get_median_y(tr_ymin=tr_ymin)) == line_number ],\
                key=lambda path: path.get_x())
        # even ("outer") line numbers tolerate a larger matching threshold
        threshold = 3 if line_number % 2 == 0 else 1.5
        if len(margin_boxes_on_line) > 0:
            for box_path in box_paths_on_line:
                #print(line_number, box_path.path.d(), len(margin_boxes_on_line))
                box = Box.create_box(box_path, margin_boxes_on_line, svg_tree=svg_tree,\
                        namespaces=namespaces, threshold=threshold)
                if box is not None:
                    boxes.append(box)
        else:
            # box paths without margin counterpart are returned as non-boxes
            not_boxes += box_paths_on_line
    if len(boxes) > 0:
        for word in page.words:
            word.process_boxes(boxes, tr_xmin=tr_xmin, tr_ymin=tr_ymin)
            word.create_correction_history(page)
            if not bool(UNITTESTING):
                bar.next()
            elif word.earlier_version is not None:
                #print(f'{word.text} -> {word.earlier_version.text}')
                if word.earlier_version.earlier_version is not None:
                    print(f'{word.earlier_version.earlier_version.text}')
        not bool(UNITTESTING) and bar.finish()
    return not_boxes
def reset_page(page):
    """Reset all words that have word_parts in order to run the script a second time.

    If a first merged version of the page exists in MERGED_DIR, that version
    is loaded and returned. Otherwise the given page is reset in place:
    word partitioning is undone and missing line numbers are restored.

    :param page: the Page to reset
    :return: the page to continue working with. BUGFIX: previously the page
             reloaded from MERGED_DIR was only bound to the local variable and
             therefore lost; callers can now use the returned page.
    """
    svg_pos_file = PathlibPath(page.page_tree.docinfo.URL)
    first_merge_version = svg_pos_file.parent / MERGED_DIR / svg_pos_file.name
    if first_merge_version.exists():
        # reload the pristine merged version and hand it back to the caller
        page = Page(str(first_merge_version))
    else:
        # NOTE(review): a word with both word_parts and an earlier_version is
        # listed twice here and reset twice — presumably idempotent; confirm.
        word_with_wordparts = [ word for word in page.words if len(word.word_parts) > 0 ]
        word_with_wordparts += [ word for word in page.words if word.earlier_version is not None ]
        page_changed = False
        if len(word_with_wordparts) > 0:
            for word in word_with_wordparts:
                word.undo_partitioning()
                update_transkription_position_ids(word)
            page_changed = True
        no_line_numbers = [ word for word in page.words if word.line_number == -1 ]
        if len(no_line_numbers) > 0:
            for word in no_line_numbers:
                if len(word.transkription_positions) > 0:
                    # derive the line number from the vertical center of the first position
                    word.line_number = page.get_line_number((word.transkription_positions[0].top+word.transkription_positions[0].bottom)/2)
                else:
                    msg = f'Word {word.id} {word.text} has no transkription_position!'
                    warnings.warn(msg)
            page_changed = True
        if page_changed:
            page.update_and_attach_words2tree()
    return page
def save_page(page, svg_pos_file, target_svg_pos_file=None, status=None, manuscript_file=None):
    """Save page to target_file and update status of file.

    While unit testing, only the word tree is refreshed; nothing is written.
    """
    page.update_and_attach_words2tree()
    if UNITTESTING:
        return
    target_file = target_svg_pos_file if target_svg_pos_file is not None else svg_pos_file
    if status is not None:
        update_svgposfile_status(svg_pos_file, manuscript_file=manuscript_file, status=status)
    write_pretty(xml_element_tree=page.page_tree, file_name=target_file, script_name=__file__, file_type=FILE_TYPE_SVG_WORD_POSITION)
def update_faksimile_line_positions(page):
    """Update faksimile_positions of the lines.

    For each line, the inner top/bottom are the min/max of the first
    faksimile position of all words on that line; even line ids additionally
    get outer top/bottom relative to the text field's ymin. A second pass
    repairs lines with empty or inverted inner bounds from their neighbours.

    :param page: Page whose line_numbers are updated and re-attached to the tree
    """
    num_lines = len(page.line_numbers)
    ymin = page.text_field.ymin\
            if page.text_field is not None\
            else 0.0
    for line_number in page.line_numbers:
        # hoisted: compute the word positions of this line only once
        positions = [ word.faksimile_positions[0] for word in page.words\
                if len(word.faksimile_positions) > 0 and word.line_number == line_number.id ]
        if len(positions) > 0:
            line_number.faksimile_inner_top = min(position.top for position in positions)
            line_number.faksimile_inner_bottom = max(position.bottom for position in positions)
            if line_number.id % 2 == 0:
                line_number.faksimile_outer_top = line_number.faksimile_inner_top - ymin
                line_number.faksimile_outer_bottom = line_number.faksimile_inner_bottom - ymin
    for index, line_number in enumerate(page.line_numbers):
        if line_number.faksimile_inner_bottom == 0.0\
           or line_number.faksimile_inner_bottom < line_number.faksimile_inner_top:
            if index == 0 and num_lines > 1:
                # NOTE(review): uses the next line's .top while the other branches
                # use faksimile_inner_* — confirm this asymmetry is intended.
                line_number.faksimile_inner_bottom = page.line_numbers[index+1].top
            elif index == num_lines-1 and page.text_field is not None:
                line_number.faksimile_inner_bottom = round(page.text_field.height + page.text_field.ymin, 3)
            elif index > 0 and index < num_lines-1:
                line_number.faksimile_inner_bottom = page.line_numbers[index+1].faksimile_inner_top\
                        if page.line_numbers[index+1].faksimile_inner_top > page.line_numbers[index-1].faksimile_inner_bottom\
                        else page.line_numbers[index-1].faksimile_inner_bottom
        line_number.attach_object_to_tree(page.page_tree)
def update_writing_process_ids(page):
    """Assign writing process ids to each word's transkription positions and
    split the word accordingly.

    :param page: Page whose words are updated in place
    """
    for current_word in page.words:
        current_word.set_writing_process_id_to_transkription_positions(page)
        current_word.partition_according_to_writing_process_id()
def usage():
    """Print usage information, i.e. the docstring of :func:`main`.
    """
    print(main.__doc__)
def main(argv):
    """This program can be used to process words after they have been merged with faksimile data.
    svgscripts/process_words_post_merging.py [OPTIONS]
    a xml file about a manuscript, containing information about its pages.
    a xml file about a page, containing information about svg word positions.
    OPTIONS:
    -h|--help show help
    -i|--include-missing-line-number run script on files that contain words without line numbers
    -r|--rerun rerun script on a svg_pos_file that has already been processed
    :return: exit code (int)
    """
    # by default, skip pages already post-merged; '--rerun' clears this filter
    status_not_contain = STATUS_POSTMERGED_OK
    include_missing_line_number = False
    try:
        opts, args = getopt.getopt(argv, "hir", ["help", "include-missing-line-number", "rerun" ])
    except getopt.GetoptError:
        usage()
        return 2
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage()
            return 0
        elif opt in ('-i', '--include-missing-line-number'):
            include_missing_line_number = True
        elif opt in ('-r', '--rerun'):
            status_not_contain = ''
    if len(args) < 1:
        usage()
        return 2
    exit_status = 0
    file_a = args[0]
    if isfile(file_a):
        # the positional file may be a manuscript file (process all its pages)
        # or a single page file
        manuscript_file = file_a\
                if xml_has_type(FILE_TYPE_XML_MANUSCRIPT, xml_source_file=file_a)\
                else None
        counter = 0
        for page in Page.get_pages_from_xml_file(file_a, status_contains=STATUS_MERGED_OK, status_not_contain=status_not_contain):
            # NOTE(review): reset_page rebinds its local 'page' when a merged
            # version exists; the reloaded page is not visible here — confirm.
            reset_page(page)
            no_line_numbers = [ word for word in page.words if word.line_number == -1 ]
            if not include_missing_line_number and len(no_line_numbers) > 0:
                # refuse to process pages with unnumbered words unless '-i' is given
                not UNITTESTING and print(Fore.RED + f'Page {page.title}, {page.number} has words with no line number!')
                for word in no_line_numbers:
                    not UNITTESTING and print(f'Word {word.id}: {word.text}')
            else:
                back_up(page, page.xml_file)
                not UNITTESTING and print(Fore.CYAN + f'Processing {page.title}, {page.number} ...' + Style.RESET_ALL)
                post_merging_processing_and_saving(page=page, manuscript_file=manuscript_file)
                counter += 1
        not UNITTESTING and print(Style.RESET_ALL + f'[{counter} pages processed]')
    else:
        raise FileNotFoundError('File {} does not exist!'.format(file_a))
    return exit_status
# Script entry point: forward the CLI arguments (without the program name)
# to main and use its return value as the process exit code.
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
Index: tests_svgscripts/test_page.py
===================================================================
--- tests_svgscripts/test_page.py (revision 105)
+++ tests_svgscripts/test_page.py (revision 106)
@@ -1,163 +1,164 @@
import unittest
from os import sep, path
from os.path import isdir, isfile, dirname, basename
import lxml.etree as ET
import sys
import sys
sys.path.append('svgscripts')
dir_changed = False
if not isdir('datatypes'):
sys.path.append(dirname(sys.path[0]))
dir_changed = True
from datatypes.lineNumber import LineNumber
from datatypes.mark_foreign_hands import MarkForeignHands
from datatypes.page import Page, STATUS_MERGED_OK, STATUS_POSTMERGED_OK
from datatypes.path import Path
from datatypes.text_connection_mark import TextConnectionMark
from datatypes.transkriptionField import TranskriptionField
from datatypes.style import Style
from datatypes.writing_process import WritingProcess
from datatypes.word import Word
class TestPage(unittest.TestCase):
    """Unit tests for datatypes.page.Page (parsing, words, styles, paths, locking).

    NOTE(review): some lines below still carry raw diff markers ('+'/'-')
    from the revision-106 patch; they must be resolved before this file can run.
    """
    def setUp(self):
        # locate test data relative to this file; fall back one directory up
        DATADIR = dirname(__file__) + sep + 'test_data'
        if not isdir(DATADIR):
            DATADIR = dirname(dirname(__file__)) + sep + 'test_data'
        self.test_file = DATADIR + sep + 'test.xml'
        self.test_svg_file = DATADIR + sep + 'test421.svg'
        self.pdf_xml = DATADIR + sep + 'W_I_8_page125.xml'
        self.xml_file = DATADIR + sep + 'N_VII_1_page005.xml'
        self.xml_fileB = DATADIR + sep + 'N_VII_1_page006.xml'
        self.pdf_xml_source = DATADIR + sep + 'W_I_8_neu_125-01.svg'
        self.test_tcm_xml = DATADIR + sep + 'N_VII_1_page001.xml'
        self.test_manuscript = DATADIR + sep + 'N_VII_1.xml'
        self.test_styles_color = DATADIR + sep + 'N_VII_1_page013.xml'
    def test_Page(self):
        # title/number/special-character styles parsed from the page xml
        page = Page(self.test_file)
        self.assertEqual(page.title, 'Mp XIV 1')
        self.assertEqual(page.number, '421')
        self.assertEqual(len(page.sonderzeichen_list), 2)
        self.assertEqual('st21' in page.sonderzeichen_list, True)
        self.assertEqual('st23' in page.sonderzeichen_list, True)
        self.assertEqual(page.style_dict['st0']['fill'], '#F8F9F8')
        # font sizes must strictly decrease with later writing stages
        stage0 = [ key for key, value in page.fontsizekey2stage_mapping.items() if value == 0 ]
        stage1 = [ key for key, value in page.fontsizekey2stage_mapping.items() if value == 1 ]
        stage2 = [ key for key, value in page.fontsizekey2stage_mapping.items() if value == 2 ]
        fontStage0 = float(page.style_dict.get(stage0[0]).get('font-size').replace('px',''))
        fontStage1 = float(page.style_dict.get(stage1[0]).get('font-size').replace('px',''))
        fontStage2 = float(page.style_dict.get(stage2[0]).get('font-size').replace('px',''))
        self.assertEqual(fontStage0 > fontStage1, True)
        self.assertEqual(fontStage1 > fontStage2, True)
        page = Page.create_cls(self.test_tcm_xml, create_dummy_page=True)
        self.assertEqual(page.number, '1')
    def test_get_biggest_fontSize4styles(self):
        page = Page(self.test_file)
        style_set = { 'st12', 'st2', 'st14', 'st13' }
        self.assertEqual(page.get_biggest_fontSize4styles(style_set=style_set), 10)
    def test_get_words(self):
        page = Page(self.test_file)
        words = page.words
        self.assertEqual(len(words), 440)
        self.assertEqual(words[0].text, '$')
        self.assertEqual(words[439].text, 'mußte!')
    def test_get_word_deletion_path(self):
        # NOTE(review): depends on a file in the local 'xml/' working directory
        page = Page('xml/Mp_XIV_page417.xml')
        dpath = page.get_word_deletion_path(d_attribute='M 273.343,251.451 L 276.479,251.451 L 276.479,251.751 L 273.343,251.751 L 273.343,251.451')
    def test_update_page_type(self):
        page = Page(self.pdf_xml)
        tf = TranskriptionField(self.pdf_xml_source)
        page.update_page_type(transkription_field=tf)
        self.assertEqual(page.page_type, Page.PAGE_VERSO)
        #page = Page(self.xml_fileB)
        #page.update_page_type()
        #self.assertEqual(page.page_type, Page.PAGE_RECTO)
    def test_update_line_number_area(self):
        page = Page(self.xml_file)
        transkription_field = TranskriptionField(page.source)
        page.update_line_number_area(transkription_field)
        self.assertEqual(transkription_field.line_number_area_width > 0, True)
        self.assertEqual(transkription_field.line_number_area_width < 15, True)
        page = Page(self.xml_fileB)
        transkription_field = TranskriptionField(page.source)
        page.update_line_number_area(transkription_field)
        self.assertEqual(transkription_field.line_number_area_width > 0, True)
        self.assertEqual(transkription_field.line_number_area_width < 15, True)
    def test_get_pages_from_xml_file(self):
        # page selection by status filters
        pages = Page.get_pages_from_xml_file(self.test_manuscript)
        self.assertEqual(len(pages), 4)
        self.assertEqual(pages[0].number, '5')
        self.assertEqual(pages[1].number, '6')
        pages = Page.get_pages_from_xml_file(self.test_manuscript, status_contains=STATUS_MERGED_OK)
        self.assertEqual(len(pages), 2)
        self.assertEqual(pages[0].number, '5')
        pages = Page.get_pages_from_xml_file(self.test_manuscript, status_contains=STATUS_MERGED_OK, status_not_contain=STATUS_POSTMERGED_OK)
        self.assertEqual(len(pages), 1)
    def test_get_semantic_dictionary(self):
        dictionary = Page.get_semantic_dictionary()
        #print(dictionary)
    def test_update_styles(self):
+        #:map :w:!python3 -m unittest tests_svgscripts.test_page.TestPage.test_update_styles
        page = Page(self.pdf_xml)
        page.words = [ word for word in page.words if word.text == 'Schopenhauer' ]
        page.update_styles(add_to_parents=True)
        self.assertEqual(len(page.words[0].styles), 1)
        self.assertEqual(page.words[0].styles[0].color.name, 'black')
        self.assertEqual(page.words[0].styles[0].font, Style.NIETSCHES_FONTS['latin'])
        self.assertEqual(page.words[0].styles[0].writing_instrument, Style.WRITING_INSTRUMENTS[('black',False)])
        page = Page(self.test_styles_color)
        page.words = [ word for word in page.words if word.text == 'Versöhnlichkeit' ]
        page.update_styles(add_to_parents=True)
        self.assertEqual(len(page.words[0].styles), 1)
        self.assertEqual(page.words[0].styles[0].color.name, 'green')
        self.assertEqual(page.words[0].styles[0].font, Style.NIETSCHES_FONTS['german'])
        self.assertEqual(page.words[0].styles[0].writing_instrument, Style.WRITING_INSTRUMENTS[('green',False)])
        self.assertEqual(page.words[0].styles[0].writing_process_id, WritingProcess.INSERTION_AND_ADDITION)
        # words merged from two style regions are partitioned into two parts
        page = Page(self.test_styles_color)
        page.words = [ word for word in page.words if word.text == 'Versöhnlichkeit' or word.text == 'gewisse' ]
        self.assertEqual(len(page.words), 2)
        word = page.words[0]
        word.transkription_positions += page.words[1].transkription_positions
        page.words = [ word ]
        page.update_styles(add_to_parents=True, partition_according_to_styles=True)
        self.assertEqual(len(page.words[0].word_parts), 2)
        page = Page(self.test_styles_color)
        page.update_styles(add_to_parents=True, create_css=True)
        for word in page.words:
            self.assertTrue(len(word.styles) > 0)
-            for style in word.styles:
-                self.assertTrue(len(style.css_styles) > 0)
+            self.assertTrue((not word.deleted and len([ style for style in word.styles if 'line-through' in style.css_string]) == 0)\
+                or (word.deleted and len([ style for style in word.styles if 'line-through' in style.css_string]) > 0))
    def test_add_deletion_paths_to_words(self):
        # NOTE(review): depends on files in the local 'xml/' working directory
        page = Page('xml/Mp_XIV_page416.xml')
        word = [ word for word in page.words if word.deleted or True in [ part.deleted for part in word.word_parts ]][0]
        page.add_deletion_paths_to_words()
        self.assertTrue(len(word.deletion_paths) > 0)
        page = Page('xml/Mp_XIV_page417.xml')
        word = [ word for word in page.words if word.text == 'wird.)' ][0]
        page.add_deletion_paths_to_words(add_paths_near_words=True)
        self.assertTrue(len(word.deletion_paths_near_word) > 0)
    def test_lock(self):
        page = Page(self.test_tcm_xml)
        self.assertEqual(page.is_locked(), False)
        page.lock('asdf.txt')
        self.assertEqual(page.is_locked(), True)
        self.assertEqual(page.page_tree.xpath('//lock/reference-file/text()')[0], 'asdf.txt')
        page.unlock()
        self.assertEqual(page.is_locked(), False)
if __name__ == "__main__":
unittest.main()
Index: tests_svgscripts/test_archival_manuscript.py
===================================================================
--- tests_svgscripts/test_archival_manuscript.py (revision 0)
+++ tests_svgscripts/test_archival_manuscript.py (revision 106)
@@ -0,0 +1,56 @@
+import unittest
+from os import sep, path
+from os.path import basename, dirname, isfile
+import lxml.etree as ET
+import sys
+
+sys.path.append('svgscripts')
+from datatypes.archival_manuscript import ArchivalManuscriptUnity
+from datatypes.color import Color
+
+class TestArchivalManuscriptUnity(unittest.TestCase):
+ def setUp(self):
+ ArchivalManuscriptUnity.UNITTESTING = True
+ DATADIR = dirname(__file__) + sep + 'test_data'
+ self.test_manuscript = DATADIR + sep + 'N_VII_1.xml'
+
+ def test_init(self):
+ title = 'Test I 1'
+ manuscript = ArchivalManuscriptUnity(title=title)
+ self.assertEqual(manuscript.title, title)
+
+ def test_get_semanticAndDataDict(self):
+ semantic_dict = ArchivalManuscriptUnity.get_semantic_dictionary()
+ #print(semantic_dict)
+
+ def test_create_cls(self):
+ manuscript = ArchivalManuscriptUnity.create_cls(self.test_manuscript)
+ self.assertTrue(manuscript.description is not None)
+ self.assertEqual(len(manuscript.earlier_descriptions), 2)
+ self.assertEqual(manuscript.title, basename(self.test_manuscript).replace('.xml','').replace('_', ' '))
+ self.assertEqual(manuscript.manuscript_type, 'Notizheft')
+ self.assertEqual(len(manuscript.pages), 4)
+ manuscript = ArchivalManuscriptUnity.create_cls(self.test_manuscript, page_status_list=['faksimile merged'])
+ self.assertEqual(len([ page for page in manuscript.pages if 'xml_file' in page.__dict__.keys()]), 2)
+ manuscript = ArchivalManuscriptUnity.create_cls(self.test_manuscript, page_status_list=['faksimile merged', 'words processed'])
+ self.assertEqual(len([ page for page in manuscript.pages if 'xml_file' in page.__dict__.keys()]), 1)
+ manuscript = ArchivalManuscriptUnity.create_cls(self.test_manuscript, page_xpath='//pages/page/@output')
+ self.assertEqual(len(manuscript.pages), 4)
+
+ def test_get_color(self):
+ color = Color()
+ manuscript = ArchivalManuscriptUnity.create_cls(self.test_manuscript)
+ self.assertEqual(manuscript.get_color(color.hex_color) is not None, True)
+ self.assertEqual(manuscript.get_color("#F7F6F5") is None, True)
+
+ def test_update_colors(self):
+ color = Color()
+ manuscript = ArchivalManuscriptUnity.create_cls(self.test_manuscript)
+ manuscript.update_colors(color)
+ self.assertEqual(len(manuscript.colors), 2)
+ #print(ET.dump(manuscript.manuscript_tree.getroot()))
+
+
+
+if __name__ == "__main__":
+ unittest.main()
Index: tests_svgscripts/test_manuscript.py
===================================================================
--- tests_svgscripts/test_manuscript.py (revision 105)
+++ tests_svgscripts/test_manuscript.py (revision 106)
@@ -1,56 +1,56 @@
import unittest
from os import sep, path
from os.path import basename, dirname, isfile
import lxml.etree as ET
import sys
sys.path.append('svgscripts')
-from datatypes.manuscript import ArchivalManuscriptUnity
+from datatypes.archival_manuscript import ArchivalManuscriptUnity
from datatypes.color import Color
class TestArchivalManuscriptUnity(unittest.TestCase):
def setUp(self):
ArchivalManuscriptUnity.UNITTESTING = True
DATADIR = dirname(__file__) + sep + 'test_data'
self.test_manuscript = DATADIR + sep + 'N_VII_1.xml'
def test_init(self):
title = 'Test I 1'
manuscript = ArchivalManuscriptUnity(title=title)
self.assertEqual(manuscript.title, title)
def test_get_semanticAndDataDict(self):
semantic_dict = ArchivalManuscriptUnity.get_semantic_dictionary()
#print(semantic_dict)
def test_create_cls(self):
manuscript = ArchivalManuscriptUnity.create_cls(self.test_manuscript)
self.assertTrue(manuscript.description is not None)
self.assertEqual(len(manuscript.earlier_descriptions), 2)
self.assertEqual(manuscript.title, basename(self.test_manuscript).replace('.xml','').replace('_', ' '))
self.assertEqual(manuscript.manuscript_type, 'Notizheft')
self.assertEqual(len(manuscript.pages), 4)
manuscript = ArchivalManuscriptUnity.create_cls(self.test_manuscript, page_status_list=['faksimile merged'])
self.assertEqual(len([ page for page in manuscript.pages if 'xml_file' in page.__dict__.keys()]), 2)
manuscript = ArchivalManuscriptUnity.create_cls(self.test_manuscript, page_status_list=['faksimile merged', 'words processed'])
self.assertEqual(len([ page for page in manuscript.pages if 'xml_file' in page.__dict__.keys()]), 1)
manuscript = ArchivalManuscriptUnity.create_cls(self.test_manuscript, page_xpath='//pages/page/@output')
self.assertEqual(len(manuscript.pages), 4)
def test_get_color(self):
color = Color()
manuscript = ArchivalManuscriptUnity.create_cls(self.test_manuscript)
self.assertEqual(manuscript.get_color(color.hex_color) is not None, True)
self.assertEqual(manuscript.get_color("#F7F6F5") is None, True)
def test_update_colors(self):
color = Color()
manuscript = ArchivalManuscriptUnity.create_cls(self.test_manuscript)
manuscript.update_colors(color)
self.assertEqual(len(manuscript.colors), 2)
#print(ET.dump(manuscript.manuscript_tree.getroot()))
if __name__ == "__main__":
unittest.main()
Index: tests_svgscripts/test_create_manuscript.py
===================================================================
--- tests_svgscripts/test_create_manuscript.py (revision 105)
+++ tests_svgscripts/test_create_manuscript.py (revision 106)
@@ -1,51 +1,51 @@
import unittest
from os import sep, path, remove
from os.path import isfile
import lxml.etree as ET
import warnings
import sys
sys.path.append('svgscripts')
import create_manuscript
-from datatypes.manuscript import ArchivalManuscriptUnity
+from datatypes.archival_manuscript import ArchivalManuscriptUnity
class TestCreateManuscript(unittest.TestCase):
def setUp(self):
create_manuscript.UNITTESTING = True
DATADIR = path.dirname(__file__) + sep + 'test_data'
self.content_file = DATADIR + sep + 'content.txt'
def test_create_page_url_mapping(self):
mapping = {}
create_manuscript.create_page_url_mapping(self.content_file, mapping)
self.assertTrue('Mp XV' in mapping.keys())
#print(mapping)
#mapping = {}
#create_manuscript.create_page_url_mapping('content.txt', mapping, default_title='Mp XV')
#print(mapping)
creator = create_manuscript.ManuscriptCreator('')
pages_node = ET.Element('pages')
#creator._create_or_update_pages(pages_node, mapping['Mp XV'])
#print(ET.dump(pages_node))
def test_get_or_create_element(self):
creator = create_manuscript.ManuscriptCreator('')
manuscript_tree = ET.ElementTree(ET.Element(ArchivalManuscriptUnity.XML_TAG))
self.assertEqual(len(manuscript_tree.xpath('test')), 0)
node = creator._get_or_create_element(manuscript_tree.getroot(), 'test', create_id=True)
self.assertEqual(len(manuscript_tree.xpath('test')), 1)
node = creator._get_or_create_element(manuscript_tree.getroot(), 'test[@id="0"]')
self.assertEqual(len(manuscript_tree.xpath('test')), 1)
node = creator._get_or_create_element(manuscript_tree.getroot(), 'page[@number="10"]')
self.assertEqual(node.get('number'), '10')
node = creator._get_or_create_element(manuscript_tree.getroot(), 'page[@number="0"]', create_id=True)
self.assertEqual(node.get('id'), '1')
self.assertEqual(node.get('number'), '0')
@unittest.skip('files missing')
def test_main(self):
create_manuscript.main(['-x', 'xml', '-t', 'Mp XV', self.content_file])
if __name__ == "__main__":
unittest.main()
Index: tests_svgscripts/test_word.py
===================================================================
--- tests_svgscripts/test_word.py (revision 105)
+++ tests_svgscripts/test_word.py (revision 106)
@@ -1,505 +1,505 @@
import unittest
from os import sep, path
import lxml.etree as ET
import sys
sys.path.append('svgscripts')
from process_words_post_merging import reset_page, update_writing_process_ids
from datatypes.box import Box
-from datatypes.manuscript import ArchivalManuscriptUnity
+from datatypes.archival_manuscript import ArchivalManuscriptUnity
from datatypes.matrix import Matrix
import datatypes.page
from datatypes.path import Path
from datatypes.positional_word_part import PositionalWordPart
from datatypes.style import Style
from datatypes.transkriptionField import TranskriptionField
from datatypes.transkription_position import TranskriptionPosition
from datatypes.word import Word, execute_function_on_parts, update_transkription_position_ids, do_paths_intersect_saveMode
from datatypes.word_deletion_path import WordDeletionPath
from datatypes.word_position import WordPosition
sys.path.append('py2ttl')
from class_spec import SemanticClass
sys.path.append('shared_util')
from main_util import extract_paths_on_tf
class Page:
def __init__(self):
self.svg_file = None
def get_line_number(self, input=0):
return -1
def get_biggest_fontSize4styles(self, style_set={}):
return 7
class TestWord(unittest.TestCase):
TESTCASE = None
def setUp(self):
DATADIR = path.dirname(__file__) + sep + 'test_data'
self.test_file = DATADIR + sep + 'N_VII_1_page009.xml'
self.word_deletion_path_file = DATADIR + sep + 'N_VII_1_page138.xml'
self.pdf_xml = DATADIR + sep + 'W_I_8_page125.xml'
self.pdf_xml_source = DATADIR + sep + 'W_I_8_neu_125-01.svg'
self.word_part_objs = [{'text': 'a' }, {'text': 'b' }, {'text': 'c' }]
x = 0
for dict in self.word_part_objs:
dict['class'] = 'st22'
dict['x'] = x
dict['y'] = 11
x += 1
mylist = {'text': 'abc', 'id': '0', 'line-number': '2', 'deleted': 'true' }
word_position = TranskriptionPosition(x=0, y=1, height=10, width=10, matrix=Matrix('matrix(0.94 0.342 -0.342 0.94 0 0)'))
self.transkription_positions = [ word_position ]
self.word_node = ET.Element('word', attrib=mylist)
word_position.attach_object_to_tree(self.word_node)
x = 0
word_path = Path.create_path_from_transkription_position(word_position, include_pwps=False)
word_path.tag = WordDeletionPath.XML_TAG
word_path.attach_object_to_tree(self.word_node)
for char in mylist['text']:
ET.SubElement(self.word_node, 'part', attrib={'text': char, 'x': str(x), 'y': '11', 'class': 'st22' })
x += 1
def test_add_deletion_paths(self):
page = datatypes.page.Page(self.word_deletion_path_file)
word = [ word for word in page.words if word.text == 'AufBau'][0]
#self.assertTrue(word.deleted)
self.assertTrue(len(word.word_parts) > 0)
self.assertTrue(word.word_parts[0].deleted)
word.add_deletion_paths(page.word_deletion_paths, tr_xmin=28.347656, tr_ymin=49.921875)
self.assertTrue(len(word.word_parts[0].deletion_paths) > 0)
#print(word.deletion_paths)
page = datatypes.page.Page('xml/Mp_XIV_page420.xml')
words = [ word for word in page.words if word.deleted or True in [ part.deleted for part in word.word_parts ]]
words[0].add_deletion_paths(extract_paths_on_tf(page))
word_path = Path.create_path_from_transkription_position(words[0].transkription_positions[0], include_pwps=False)
#print( words[0].text, words[0].deletion_paths)
def test_join_words(self):
words = [ Word(id=4, text='asdf-', line_number=1, deleted=True), Word(id=5, text='bsdf', line_number=2, deleted=False) ]
new_word = Word.join_words(words)
self.assertEqual(new_word.id, 4)
self.assertEqual(new_word.text, 'asdf-bsdf')
self.assertEqual(new_word.edited_text, 'asdfbsdf')
self.assertEqual(new_word.deleted, False)
self.assertEqual(new_word.line_number, -1)
words = [ Word(id=1, word_parts=[Word(id=4, text='asdf-', line_number=1, deleted=True), Word(id=5, text='bsdf', line_number=2, deleted=False)]),\
Word(id=4, text='.', line_number=2, deleted=True), Word(id=5, text='.', line_number=2, deleted=False) ]
new_word = Word.join_words(words)
self.assertEqual(new_word.text, 'asdf-bsdf..')
new_word = Word.join_words(words, add_white_space_between_words=True)
self.assertEqual(new_word.text, 'asdf- bsdf . .')
def test_Word_with_word_part_objs(self):
word = Word.CREATE_WORD(word_part_objs=self.word_part_objs, height=10, endX=10)
self.assertEqual(word.id, 0)
self.assertEqual(word.transkription_positions[0].bottom, 13)
self.assertEqual(word.transkription_positions[0].height, 10)
self.assertEqual(word.transkription_positions[0].top, 3)
self.assertEqual(word.transkription_positions[0].left, 0)
self.assertEqual(word.transkription_positions[0].width, 10)
self.assertEqual(word.text, 'abc')
def test_Word_with_word_node(self):
word = Word.create_cls(self.word_node)
self.assertEqual(word.id, 0)
self.assertEqual(word.deleted, True)
self.assertTrue(len(word.deletion_paths) > 0)
self.assertEqual(word.transkription_positions[0].bottom, 11)
self.assertEqual(word.transkription_positions[0].height, 10)
self.assertEqual(word.transkription_positions[0].top, 1)
self.assertEqual(word.transkription_positions[0].left, 0)
self.assertEqual(word.transkription_positions[0].width, 10)
self.assertEqual(word.text, 'abc')
self.assertEqual(word.line_number, 2)
self.assertEqual(word.transkription_positions[0].transform.isRotationMatrix(), True)
def test_attach_word_to_tree(self):
newWord = Word.CREATE_WORD(word_part_objs=self.word_part_objs, height=10, endX=10)
empty_tree = ET.ElementTree(ET.Element('page'))
newWord.attach_word_to_tree(empty_tree)
for word_node in empty_tree.getroot().xpath('//word'):
word = Word.CREATE_WORD(word_node=word_node)
self.assertEqual(word.id, 0)
self.assertEqual(word.deleted, False)
self.assertEqual(word.transkription_positions[0].bottom, 13)
self.assertEqual(word.transkription_positions[0].height, 10)
self.assertEqual(word.transkription_positions[0].top, 3)
self.assertEqual(word.transkription_positions[0].left, 0)
self.assertEqual(word.transkription_positions[0].width, 10)
self.assertEqual(word.text, 'abc')
@unittest.skipUnless(TESTCASE is None or TESTCASE == 0, 'Not testing this case')
def test_create_correction_history_case0(self):
# Case 1: whole word over box
box = Box(earlier_text='XYX')
word = Word(text='ASDF', transkription_positions=[TranskriptionPosition()])
word.word_box = box
word.create_correction_history()
self.assertEqual(word.earlier_version is None, True)
self.assertEqual(word.overwrites_word is not None, True)
@unittest.skipUnless(TESTCASE is None or TESTCASE == 1, 'Not testing this case')
def test_create_correction_history_case1(self):
# Case 2: part of word over box
box = Box(earlier_text='XYX')
partA = Word(text='A', transkription_positions=[TranskriptionPosition()])
partA.word_box = box
partB = Word(text='SDF', transkription_positions=[TranskriptionPosition()])
word = Word(text='ASDF', word_parts=[ partA, partB])
word.create_correction_history()
self.assertEqual(word.earlier_version is None, True)
self.assertEqual(word.word_parts[0].overwrites_word is not None, True)
@unittest.skipUnless(TESTCASE is None or TESTCASE == 2, 'Not testing this case')
def test_create_correction_history_case3(self):
# Case 3: part of word over box, word under box is part of earlier version
box = Box(earlier_text='XYX')
tp0 = TranskriptionPosition()
tp0.style = Style(writing_process_id=0)
tp1 = TranskriptionPosition()
tp1.style = Style(writing_process_id=1)
partA = Word(id=0, text='Test', transkription_positions=[ tp0])
partB = Word(id=1, text='er', transkription_positions=[ tp1])
partB.word_box = box
word = Word(text='Tester', writing_process_id=1, word_parts=[ partA, partB ] )
word.create_correction_history(box_style=tp0.style)
self.assertEqual(word.text, 'Tester')
self.assertEqual(word.earlier_version is not None, True)
self.assertEqual(word.earlier_version.text, 'TestXYX')
self.assertEqual(word.word_parts[1].isTransformationOfWord, word.earlier_version.word_parts[1])
@unittest.skipUnless(TESTCASE is None or TESTCASE == 3, 'Not testing this case')
def test_create_correction_history_case4(self):
# Case 4: part of word is deleted
partA = Word(id=0, text='A', deleted=True, transkription_positions=[TranskriptionPosition()])
partB = Word(id=1, text='SDF', transkription_positions=[TranskriptionPosition()])
word = Word(text='ASDF', word_parts=[ partA, partB])
word.create_correction_history()
self.assertEqual(word.earlier_version is not None, True)
self.assertEqual(word.word_parts[0].isDeletionOfWord is not None, True)
self.assertEqual(word.word_parts[0].isDeletionOfWord, word.earlier_version.word_parts[0])
self.assertEqual(word.edited_text, 'SDF')
@unittest.skipUnless(TESTCASE is None or TESTCASE == 4, 'Not testing this case')
def test_create_correction_history_case5(self):
tp0 = TranskriptionPosition()
tp0.style = Style(writing_process_id=0)
tp1 = TranskriptionPosition()
tp1.style = Style(writing_process_id=1)
partA = Word(id=0, text='Test', transkription_positions=[ tp0])
partB = Word(id=1, text='er', transkription_positions=[ tp1])
word = Word(text='Tester', word_parts=[ partA, partB ] )
word.create_correction_history()
self.assertEqual(word.earlier_version is not None, True)
self.assertEqual(word.word_parts[1].extendsEarlierVersion, True)
self.assertEqual(word.word_parts[1].isExtensionOfWord, word.earlier_version)
#@unittest.skipUnless(TESTCASE is None or TESTCASE == 5, 'Not testing this case')
#@unittest.skip('case tested, relies on a local xml file')
def test_create_correction_history_case_full(self):
page = datatypes.page.Page('xml/N_VII_1_page138.xml')
manuscript = ArchivalManuscriptUnity()
reset_page(page)
update_writing_process_ids(page)
word = [ word for word in page.words if word.text == 'Verschiedenes' and word.line_number == 4 ][0]
wordAufBau = [ word for word in page.words if word.text == 'AufBau' ][0]
#page.words = [ word ]
page.update_styles(manuscript=manuscript, partition_according_to_styles=True)
word.word_parts[0].transkription_positions[0].has_box = Box(earlier_text='v')
self.assertEqual(len(word.word_parts), 2)
word_over_box = word._get_partial_word_over_box()
update_transkription_position_ids(word)
word.create_correction_history(page)
self.assertEqual(word.writing_process_id, 1)
self.assertEqual(word.earlier_version is not None, True)
self.assertEqual(word.earlier_version.text, 'verschiedenes')
#print(word.earlier_version.id, [ (w.id, w.text) for w in word.earlier_version.word_parts ])
empty_tree = ET.ElementTree(ET.Element('page'))
word_node = word.attach_word_to_tree(empty_tree)
#print(ET.dump(word_node))
"""
self.assertEqual(word.word_parts[0].isDeletionOfWord, word.earlier_version.word_parts[0])
self.assertEqual(word.word_parts[1].isTransformationOfWord, word.earlier_version.word_parts[1])
self.assertEqual(word.word_parts[1].overwrites_word is not None, True)
"""
word = wordAufBau
page.words = [ word ]
page.update_styles(manuscript=manuscript, partition_according_to_styles=True)
word.word_parts[0].deleted = True
word.word_parts[1].transkription_positions[0].has_box = Box(earlier_text='b')
self.assertEqual(len(word.word_parts), 3)
word_over_box = word._get_partial_word_over_box()
self.assertEqual(len(word.word_parts), 3)
update_transkription_position_ids(word)
word.create_correction_history(page)
self.assertEqual(word.writing_process_id, 2)
self.assertEqual(word.earlier_version is not None, True)
self.assertEqual(word.text, 'AufBau')
self.assertEqual(word.edited_text, 'Bau')
self.assertEqual(word.earlier_version.text, 'Aufbau')
self.assertEqual(word.word_parts[0].isDeletionOfWord, word.earlier_version.word_parts[0])
self.assertEqual(word.word_parts[1].isTransformationOfWord, word.earlier_version.word_parts[1])
self.assertEqual(word.word_parts[1].overwrites_word is not None, True)
empty_tree = ET.ElementTree(ET.Element('page'))
word_node = word.attach_word_to_tree(empty_tree)
#print(ET.dump(word_node))
newWord = Word.create_cls(word_node)
#@unittest.skip('')
def test_earlier_version(self):
partA = Word(id=0, text='A', deleted=True, transkription_positions=[TranskriptionPosition()])
partB = Word(id=1, text='SDF', transkription_positions=[TranskriptionPosition()])
word = Word(text='ASDF', word_parts=[ partA, partB])
earlier_version = word.create_earlier_version()
self.assertEqual(earlier_version is not None, True)
self.assertEqual(word.word_parts[0].isDeletionOfWord is not None, True)
self.assertEqual(word.word_parts[0].isDeletionOfWord, earlier_version.word_parts[0])
def test_undo_partitioning(self):
tps = []
for i, xy in enumerate([ 3, 4, 5 ]):
tps.append(TranskriptionPosition(id=i, x=xy, y=xy, height=10, width=10))
partA = Word(id=0, text='Auf', writing_process_id=1, deleted=True, transkription_positions=[ tps[0]])
partB = Word(id=1, text='B', writing_process_id=2, transkription_positions=[tps[1]])
partC = Word(id=2, text='au', writing_process_id=1,transkription_positions=[tps[2]])
word = Word(text='Aufbau', writing_process_id=2, word_parts=[ partA, partB, partC ] )
word.undo_partitioning()
self.assertEqual(len(word.transkription_positions), len(tps))
self.assertEqual(len(word.word_parts), 0)
"""
page = datatypes.page.Page('xml/N_VII_1_page138.xml')
word = page.words[77]
word.undo_partitioning()
self.assertEqual(len(word.word_parts), 0)
self.assertEqual(len(word.transkription_positions), 3)
update_transkription_position_ids(word)
empty_tree = ET.ElementTree(ET.Element('page'))
word_node = word.attach_word_to_tree(empty_tree)
print(ET.dump(word_node))
"""
def test_split(self):
page = Page()
pwps = PositionalWordPart.CREATE_SIMPLE_POSITIONAL_WORD_PART_LIST(page, self.word_part_objs)
transkription_positions = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(pwps)
word = Word(text=''.join([pwp.text for pwp in pwps]), transkription_positions=transkription_positions)
previousWord, currentWord, nextWord = word.split('b')
self.assertEqual(previousWord.id, 0)
self.assertEqual(previousWord.text, 'a')
self.assertEqual(currentWord.id, 1)
self.assertEqual(nextWord.id, 2)
word = Word(text=''.join([pwp.text for pwp in pwps]), transkription_positions=transkription_positions)
previousWord, currentWord, nextWord = word.split('bc')
self.assertEqual(previousWord.id, 0)
self.assertEqual(previousWord.text, 'a')
self.assertEqual(currentWord.id, 1)
word = Word(text=''.join([pwp.text for pwp in pwps]), transkription_positions=transkription_positions)
previousWord, currentWord, nextWord = word.split('ab', start_id=10)
self.assertEqual(currentWord.id, 10)
self.assertEqual(currentWord.text, 'ab')
self.assertEqual(currentWord.transkription_positions[0].width, 2.1)
self.assertEqual(nextWord.id, 11)
self.assertEqual(nextWord.transkription_positions[0].width, 5.2)
word_part_objs=[{'text': 'x', 'class':'st22', 'x': 0, 'y': 0},\
{'text': 'Insofern', 'class':'st22', 'x': 1, 'y': 0},\
{'text': 'x', 'class':'st22', 'x': 10, 'y': 0}]
pwps = PositionalWordPart.CREATE_SIMPLE_POSITIONAL_WORD_PART_LIST(page, word_part_objs)
transkription_positions = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(pwps)
word = Word(text=''.join([pwp.text for pwp in pwps]), transkription_positions=transkription_positions)
with self.assertWarns(Warning):
previousWord, currentWord, nextWord = word.split('Insofer')
word_part_objs=[{'text': 'xInsofern', 'class':'st22', 'x': 0, 'y': 0}]
pwps = PositionalWordPart.CREATE_SIMPLE_POSITIONAL_WORD_PART_LIST(page, word_part_objs)
transkription_positions = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(pwps)
word = Word(text=''.join([pwp.text for pwp in pwps]), transkription_positions=transkription_positions)
with self.assertWarns(Warning):
previousWord, currentWord, nextWord = word.split('Insofern')
def test_join(self):
word = Word.CREATE_WORD(word_part_objs=self.word_part_objs, height=10, endX=10)
other_word = Word.CREATE_WORD(word_part_objs=[{'text': '.', 'class':'st22', 'x': 3, 'y': 11}])
word.join(other_word, add_white_space_between_words=True)
self.assertEqual(word.text, 'abc .')
word = Word.CREATE_WORD(word_part_objs=self.word_part_objs, height=10, endX=10)
other_word = Word.CREATE_WORD(word_part_objs=[{'text': '.', 'class':'st22', 'x': 3, 'y': 11}])
word.join(other_word)
self.assertEqual(word.text, 'abc.')
other_word = Word.CREATE_WORD(word_part_objs=[{'text': '.', 'class':'st22', 'x': 3, 'y': 11}])
word.join(other_word, append_at_end_of_new_word=False)
self.assertEqual(word.text, '.abc.')
"""
tree = ET.ElementTree(ET.Element('page'))
word.attach_word_to_tree(tree)
print(ET.dump(tree.getroot()))
"""
def test_get_semanticAndDataDict(self):
dictionary = Word.get_semantic_dictionary()
#print(dictionary)
info_dict = dictionary['properties'].get('isDeletionOfWord')
self.assertEqual(SemanticClass.SUPER_PROPERTY in info_dict.keys(), True)
super_info_dict = info_dict[SemanticClass.SUPER_PROPERTY]
#print(info_dict[SemanticClass.SUPER_PROPERTY].get(SemanticClass.PROPERTY_NAME))
def test_simplify_transkription_positions(self):
node_string = """
"""
nodeA = ET.fromstring(node_string)
node_string = """
"""
nodeB = ET.fromstring(node_string)
word = Word(text="Si", transkription_positions=[ TranskriptionPosition(node=nodeA), TranskriptionPosition(node=nodeB) ])
self.assertEqual(len(word.transkription_positions), 2)
word.simplify_transkription_positions()
self.assertEqual(len(word.transkription_positions), 1)
word = Word(text="Si", transkription_positions=[ TranskriptionPosition(node=nodeA), TranskriptionPosition(node=nodeB) ])
word.transkription_positions[1].writing_process_id = -1
word.simplify_transkription_positions()
self.assertEqual(len(word.transkription_positions), 1)
self.assertEqual(word.transkription_positions[0].writing_process_id, 0)
"""
tree = ET.ElementTree(ET.Element('page'))
word.attach_word_to_tree(tree)
print(ET.dump(tree.getroot()))
"""
def test_partition(self):
page = datatypes.page.Page(self.test_file)
word = page.words[67]
self.assertEqual(word.belongs_to_multiple_writing_processes(), True)
word.partition_according_to_writing_process_id()
self.assertEqual(len(word.word_parts), 3)
self.assertEqual(word.belongs_to_multiple_writing_processes(), False)
self.assertEqual(word.belongs_to_multiple_writing_processes(include_parts=True), True)
empty_tree = ET.ElementTree(ET.Element('page'))
word_node = word.attach_word_to_tree(empty_tree)
newWord = Word.create_cls(word_node)
self.assertEqual(len(newWord.word_parts), 3)
#print(ET.dump(empty_tree.getroot()))
def test_partition_deletion(self):
page = datatypes.page.Page(self.test_file)
word = page.words[67]
for transkription_position in word.transkription_positions:
transkription_position.deleted = transkription_position.writing_process_id == 1
#print([ transkription_position.deleted for transkription_position in word.transkription_positions])
self.assertEqual(word.has_mixed_status('deleted'), True)
word.partition_according_to_deletion()
self.assertEqual(len(word.word_parts), 3)
self.assertEqual(word.has_mixed_status('deleted'), False)
property_key = 'deleted'
#print([ w.transkription_positions[0].deleted for w in word.word_parts])
#print(len(set(pword.transkription_positions[0].__dict__[property_key] for pword in word.word_parts\
# if len(pword.transkription_positions) > 0 and property_key in pword.transkription_positions[0].__dict__.keys())))
self.assertEqual(word.has_mixed_status('deleted', include_parts=True), True)
page = datatypes.page.Page(self.test_file)
word = page.words[67]
word.partition_according_to_writing_process_id()
#print([(word.text, word.deleted) for word in word.word_parts])
word.word_parts[1].transkription_positions[1].deleted = True
word.partition_according_to_deletion()
self.assertEqual(len(word.word_parts), 4)
#print([(word.text, word.deleted) for word in word.word_parts])
partA = Word(text='A', deleted=True)
partB = Word(text='SDF', deleted=False)
word = Word(text='ASDF', word_parts=[ partA, partB])
self.assertEqual(word.has_mixed_status('deleted', include_parts=True), True)
def test_execute_function_on_parts(self):
page = datatypes.page.Page(self.test_file)
word_parts = [ page.words[67], page.words[68] ]
word_parts, none = execute_function_on_parts(word_parts, 'partition_according_to_writing_process_id')
self.assertEqual(len(word_parts) == 4, True)
def test_process_word_boxes(self):
page = datatypes.page.Page(self.pdf_xml)
page.source = self.pdf_xml_source
page.update_styles(partition_according_to_styles=True)
tr = TranskriptionField(page.source)
box_path_d = ['M 598.11,626.565 L 603.557,626.565 L 603.557,632.565 L 598.11,632.565 L 598.11,626.565',\
'M 557.443,683.44 L 574.182,683.44 L 574.182,694.815 L 557.443,694.815 L 557.443,683.44',\
'M 404.193,659.565 L 407.80699999999996,659.565 L 407.80699999999996,668.94 L 404.193,668.94 L 404.193,659.565',\
'M 587.932,634.065 L 598.318,634.065 L 598.318,643.19 L 587.932,643.19 L 587.932,634.065',\
'M 570.443,221.315 L 576.557,221.315 L 576.557,230.065 L 570.443,230.065 L 570.443,221.315']
box_paths = [ Box(d_string=d_string, earlier_text='test') for d_string in box_path_d ]
indices = [30, 277, 288, 297, 321]
for word_id, index in enumerate(indices):
word_over_box = page.words[index].process_boxes(box_paths, tr_xmin=tr.xmin, tr_ymin=tr.ymin)
self.assertEqual(word_over_box is not None, True)
self.assertEqual(word_over_box == page.words[index] or word_over_box in page.words[index].word_parts, True)
#self.assertEqual(word_over_box in page.words[index].word_parts, True)
def test_process_word_several_boxesOn1LIne(self):
page = datatypes.page.Page(self.pdf_xml)
page.source = self.pdf_xml_source
for word in page.words:
word.set_writing_process_id_to_transkription_positions(page)
word.partition_according_to_writing_process_id()
tr = TranskriptionField(page.source)
box_path_d = ['M 598.11,626.565 L 603.557,626.565 L 603.557,632.565 L 598.11,632.565 L 598.11,626.565',\
'M 557.443,683.44 L 574.182,683.44 L 574.182,694.815 L 557.443,694.815 L 557.443,683.44',\
'M 404.193,659.565 L 407.80699999999996,659.565 L 407.80699999999996,668.94 L 404.193,668.94 L 404.193,659.565',\
'M 587.932,634.065 L 598.318,634.065 L 598.318,643.19 L 587.932,643.19 L 587.932,634.065',\
'M 570.443,221.315 L 576.557,221.315 L 576.557,230.065 L 570.443,230.065 L 570.443,221.315']
box_paths = [ Box(d_string=d_string, earlier_text='test') for d_string in box_path_d ]
indices = [30, 277, 288, 297, 321]
empty_tree = ET.ElementTree(ET.Element('page'))
for word_id, index in enumerate(indices):
word_over_box = page.words[index].process_boxes(box_paths, tr_xmin=tr.xmin, tr_ymin=tr.ymin)
self.assertEqual(word_over_box is not None, True)
def test_split_according_to_status(self):
page = datatypes.page.Page(self.test_file)
word = page.words[67]
for transkription_position in word.transkription_positions:
transkription_position.text = 'asdf'\
if transkription_position.writing_process_id == 1\
else word.text
self.assertEqual(word.has_mixed_status('text'), True)
new_words = word.split_according_to_status('text')
#print([word.text for word in new_words ])
self.assertEqual(len(new_words) > 1, True)
self.assertEqual(new_words[0].id, word.id)
self.assertEqual(new_words[0].deleted, word.deleted)
self.assertEqual(new_words[1].id, word.id+1)
manuscript = ArchivalManuscriptUnity()
page = datatypes.page.Page(self.test_file)
word = page.words[67]
page.words = [ word ]
page.update_styles(manuscript=manuscript)
new_words = word.split_according_to_status('style', splits_are_parts=True)
self.assertEqual(len(word.word_parts), 3)
def test__create_new_word(self):
manuscript = ArchivalManuscriptUnity()
page = datatypes.page.Page(self.test_file)
word = page.words[67]
page.words = [ word ]
page.update_styles(manuscript=manuscript)
newWord = word._create_new_word([ word.transkription_positions[0] ], 'style')
for key in Word.COPY_PROPERTY_KEY:
self.assertEqual(newWord.__dict__[key], word.__dict__[key])
self.assertEqual(len(newWord.styles), 1)
def test__get_partial_word_over_box(self):
    """Word._get_partial_word_over_box splits a word at positions that carry a box."""
    # Case 1: plain word, two transkription positions, the first one boxed.
    word = Word(text='test', transkription_positions=[TranskriptionPosition(id=0), TranskriptionPosition(id=1)])
    word.transkription_positions[0].has_box = Box(earlier_text='asdf')
    word._get_partial_word_over_box()
    self.assertEqual(len(word.word_parts), 2)
    # Case 2: word already made of parts; the boxed position sits inside a part.
    first_part = Word(id=0, text='A', transkription_positions=[TranskriptionPosition()])
    second_part = Word(id=1, text='SDF', transkription_positions=[TranskriptionPosition(), TranskriptionPosition(id=1)])
    second_part.transkription_positions[0].has_box = Box(earlier_text='asdf')
    word = Word(text='ASDF', word_parts=[first_part, second_part])
    word._get_partial_word_over_box()
    self.assertEqual(len(word.word_parts), 2)
# Run this module's test cases when executed directly.
if __name__ == "__main__":
    unittest.main()
Index: tests_svgscripts/test_color.py
===================================================================
--- tests_svgscripts/test_color.py (revision 105)
+++ tests_svgscripts/test_color.py (revision 106)
@@ -1,77 +1,77 @@
import unittest
from os import sep, path
from os.path import dirname, basename, isfile, isdir
import lxml.etree as ET
import sys
sys.path.append('svgscripts')
from datatypes.color import Color
-from datatypes.manuscript import ArchivalManuscriptUnity
+from datatypes.archival_manuscript import ArchivalManuscriptUnity
class GColor:  # Gnome supported
    """ANSI escape sequences for 24-bit ("truecolor") terminal output."""

    # Resets all SGR attributes back to the terminal default.
    END = "\x1b[0m"

    def RGB(R, G, B):
        """Return the SGR escape selecting foreground color (R, G, B), each 0-255."""
        # SGR parameter 38 targets the foreground (the original note says 48
        # would affect the background instead); ';2;' selects 24-bit mode.
        foreground = 38
        return f"\x1b[{foreground};2;{R};{G};{B}m"
class TestColor(unittest.TestCase):
    """Unit tests for the Color datatype."""

    def setUp(self):
        # Locate the test-data directory: next to this file, or one level up.
        DATADIR = dirname(__file__) + sep + 'test_data'
        if not isdir(DATADIR):
            DATADIR = dirname(dirname(__file__)) + sep + 'test_data'
        self.test_file = DATADIR + sep + 'test.xml'
        self.test_svg_file = DATADIR + sep + 'test421.svg'
        self.pdf_xml = DATADIR + sep + 'W_I_8_page125.xml'
        self.xml_file = DATADIR + sep + 'N_VII_1_page005.xml'
        self.xml_fileB = DATADIR + sep + 'N_VII_1_page006.xml'
        self.pdf_xml_source = DATADIR + sep + 'W_I_8_neu_125-01.svg'
        self.test_page = DATADIR + sep + 'N_VII_1_page001.xml'
        self.test_manuscript = DATADIR + sep + 'N_VII_1.xml'

    def test_attach_object_to_tree(self):
        """A Color attached to a tree can be recreated from the resulting node."""
        color = Color(color_name='blue', hex_color='#009CDE')
        empty_tree = ET.ElementTree(ET.Element('page'))
        color.attach_object_to_tree(empty_tree)
        color_nodes = empty_tree.xpath('.//' + Color.XML_TAG)
        self.assertEqual(len(color_nodes), 1)
        # Round-trip: rebuild the Color from the node and compare the hex value.
        color = Color.create_cls(node=color_nodes[0])
        self.assertEqual(color.hex_color, '#009CDE')

    def test_create_cls(self):
        """Color.create_cls: default color, manuscript registration, repeated creation."""
        manuscript = ArchivalManuscriptUnity.create_cls(self.test_manuscript)
        manuscript.UNITTESTING = True
        # Without arguments the default color is black.
        color = Color.create_cls()
        self.assertEqual(color.name, 'black')
        # Creating with a manuscript registers the color on the manuscript.
        color = Color.create_cls(hex_color='#009CDE', manuscript=manuscript)
        self.assertEqual(color.name, 'blue')
        self.assertEqual(color in manuscript.colors, True)
        self.assertEqual(manuscript.get_color(color.hex_color), color)
        # Creating the same hex color again still yields the name 'blue'.
        color = Color.create_cls(hex_color='#009CDE', manuscript=manuscript)
        self.assertEqual(color.name, 'blue')
        """
        color_string = "#000000, #009CDE, #1D1D1B, #4CA32F, #93CDF1, #9D9D9C, #ADD8F5, #B2B2B2, #C6C6C6, #CD1719, #CED5CE, #DADADA, #DC0714, #DC0814, #F0977A, #F0F0F0, #F8F9F8, #FF6600, #FFFFFF".replace(',','')
        color_list = color_string.split(' ')
        for hex_color in color_list:
            color = Color.create_cls(hex_color=hex_color)
            print(GColor.RGB(*color.rgb_color), color.name, GColor.END)
        """

    def test_get_semantic_dictionary(self):
        """Smoke test: the semantic dictionary can be built without errors."""
        dictionary = Color.get_semantic_dictionary()
        #print(dictionary)

    def test_eq(self):
        """Colors compare by value: defaults are equal, different hex colors are not."""
        colorA = Color.create_cls()
        colorB = Color()
        self.assertEqual(colorA == colorB, True)
        colorC = Color.create_cls(hex_color="#FFFFFF")
        self.assertEqual(colorC.name, 'white')
        self.assertEqual(colorA != colorC, True)

    def test_create_cls_from_style_object(self):
        """A Color can be built from an SVG style dict (here #F0F0F0 -> 'white')."""
        color = Color.create_cls_from_style_object({'clip-path': 'url(#SVGID_1750_)', 'fill': 'none', 'stroke': '#F0F0F0', 'stroke-miterlimit': '10', 'stroke-width': '0.4'})
        self.assertEqual(color.name, 'white')
# Run this module's test cases when executed directly.
if __name__ == "__main__":
    unittest.main()
Index: tests_svgscripts/test_style.py
===================================================================
--- tests_svgscripts/test_style.py (revision 105)
+++ tests_svgscripts/test_style.py (revision 106)
@@ -1,98 +1,95 @@
import unittest
from os import sep, path
from os.path import dirname, basename, isfile, isdir
import lxml.etree as ET
import sys
sys.path.append('svgscripts')
from datatypes.color import Color
-from datatypes.manuscript import ArchivalManuscriptUnity
+from datatypes.archival_manuscript import ArchivalManuscriptUnity
from datatypes.page import Page
from datatypes.style import Style
class TestStyle(unittest.TestCase):
def setUp(self):
DATADIR = dirname(__file__) + sep + 'test_data'
if not isdir(DATADIR):
DATADIR = dirname(dirname(__file__)) + sep + 'test_data'
self.test_file = DATADIR + sep + 'test.xml'
self.test_svg_file = DATADIR + sep + 'test421.svg'
self.pdf_xml = DATADIR + sep + 'W_I_8_page125.xml'
self.xml_file = DATADIR + sep + 'N_VII_1_page005.xml'
self.xml_fileB = DATADIR + sep + 'N_VII_1_page006.xml'
self.pdf_xml_source = DATADIR + sep + 'W_I_8_neu_125-01.svg'
self.test_page = DATADIR + sep + 'N_VII_1_page001.xml'
self.test_manuscript = DATADIR + sep + 'N_VII_1.xml'
+ self.test_styles_color = DATADIR + sep + 'N_VII_1_page013.xml'
def test_create_cls(self):
page = Page(self.test_page)
style_string = "st11 st10 st5"
style = Style.create_cls(page, style_string)
self.assertEqual(style.font_family, 'Weidemann-Book')
self.assertEqual(style.color.hex_color, "#DADADA")
self.assertEqual(style.writing_instrument, 'schwarze Tinte')
style_string = "st11 st10"
style = Style.create_cls(page, style_string)
self.assertEqual(style.font_family, 'Weidemann-Book')
self.assertEqual(style.color.name, "black")
self.assertEqual(style.writing_instrument, 'schwarze Tinte')
style_string = "st11 st3"
style = Style.create_cls(page, style_string, create_css=True)
#style.writing_process_id = 1
#style.create_css_styles()
self.assertEqual(style.font_family, 'Weidemann-Book')
self.assertEqual(style.font_size, '9px')
- style_string = "st18"
- page = Page(self.test_page)
- style = Style.create_cls(page, style_string)
- self.assertEqual(style.color.name, 'black')
def test_remove_irrelevant_style_keys(self):
page = Page(self.test_page)
style_string = "st11 st10 st9 st5 st0"
self.assertEqual(Style.remove_irrelevant_style_keys(style_string, page), "st11 st5 st9")
def test_process_style_classes(self):
style = Style()
style.color = Color.create_cls(hex_color='#009CDE')
style.process_style_classes()
self.assertEqual(style.writing_instrument, 'violette Tinte')
self.assertEqual(style.font, 'deutsche Schreibschrift')
style.font_family = "NewsGothicBT-Bold"
style.process_style_classes()
self.assertEqual(style.writing_instrument, 'Blaustift')
self.assertEqual(style.font, 'lateinische Schreibschrift')
style = Style()
style.font_family = "NewsGothicBT-Bold"
style.process_style_classes()
#print(style.css_styles)
def test_get_semantic_dictionary(self):
dictionary = Style.get_semantic_dictionary()
#print(dictionary)
def test_copy(self):
manuscript = ArchivalManuscriptUnity.create_cls(self.test_manuscript)
page = Page(self.test_page)
page.words = [ page.words[0] ]
page.update_styles(manuscript=manuscript, add_to_parents=True)
self.assertEqual(len(manuscript.styles), 1)
styleA = page.words[0].transkription_positions[0].style
styleB = styleA.create_a_copy()
self.assertEqual(styleA == styleB, True)
styleB = styleA.create_a_copy(reduce_writing_process_id=True)
self.assertEqual(styleA != styleB, True)
def test_eq(self):
page = Page(self.test_page)
style_string = "st11 st10 st5"
styleA = Style.create_cls(page, style_string)
styleB = Style.create_cls(page, style_string)
self.assertEqual(styleA == styleB, True)
style_string = "st11 st10"
styleC = Style.create_cls(page, style_string)
self.assertEqual(styleA != styleC, True)
# Run this module's test cases when executed directly.
if __name__ == "__main__":
    unittest.main()
Index: tests_svgscripts/test_reconstructed_konvolut.py
===================================================================
--- tests_svgscripts/test_reconstructed_konvolut.py (revision 0)
+++ tests_svgscripts/test_reconstructed_konvolut.py (revision 106)
@@ -0,0 +1,28 @@
+import unittest
+from os import sep, path
+from os.path import basename, dirname, isfile
+import lxml.etree as ET
+import sys
+
+sys.path.append('svgscripts')
+from datatypes.reconstructed_konvolut import ReconstructedKonvolut, NonExistentPage
+
+class TestReconstructedKonvolut(unittest.TestCase):
+ def setUp(self):
+ ReconstructedKonvolut.UNITTESTING = True
+ DATADIR = dirname(__file__) + sep + 'test_data'
+ self.test_manuscript = DATADIR + sep + 'N_VII_1.xml'
+
+ def test_get_semanticAndDataDict(self):
+ semantic_dict = ReconstructedKonvolut.get_semantic_dictionary()
+ #print(semantic_dict)
+
+ def test_create_cls(self):
+ manuscript = ReconstructedKonvolut.create_cls('xml/Rekonstruiertes_Quartheft_von_1886-87.xml')
+ self.assertTrue(manuscript.description is not None)
+ self.assertEqual(manuscript.manuscript_type, 'Arbeitsheft')
+ self.assertEqual(len(manuscript.pages), 13)
+
+
+if __name__ == "__main__":
+ unittest.main()