Index: TODO.md
===================================================================
--- TODO.md	(revision 84)
+++ TODO.md	(revision 85)
@@ -1,98 +1,100 @@
# Wortsuche (word search):
- The word search should be weighted by the topological proximity of the words to one another (a minimal sketch follows below, after this TODO list).
- Word paths, i.e. sequences of words, are to be avoided, since they cannot be generated automatically and are highly error-prone.
- For the same reason, word insertions should not be used to record alternative text sequences.

# TODO
## Faksimile data input
- word boxes on faksimile by drawing rects with inkscape [IN PROGRESS, see "Leitfaden.pdf"]
- naming word boxes by using title of rects [IN PROGRESS, see "Leitfaden_Kontrolle_und_Beschriftung_der_Wortrahmen.pdf"]
- correcting faksimile svg or transkription xml if words do not correspond

## Processing
### faksimile data input, i.e. svg-file resulting from drawing boxes etc. with inkscape
- process faksimile words:
    - join_faksimileAndTranskription.py [DONE]
    - create a data input task for words that do not correspond [DONE]

### transkription, i.e. svg-file resulting from pdf-file -> created with InDesign
- fix:
-    - AufBau xml/N_VII_1_page138.xml
+    - xml/N_VII_1_page138.xml:
+        - AufBau [DONE]
+        - Verschiedenes [DONE]
- process text field:
    - Word [DONE]
    - SpecialWord
        - MarkForeignHands [DONE]
        - TextConnectionMark [DONE]
    - WordInsertionMark [DONE]
    - all paths -> page.categorize_paths [TODO]
        - word-deletion -> Path [DONE]
            - make parts of word if only parts of a word are deleted, also introduce earlier version of word [DONE]
            - correction concerning punctuation in deleted words: the script does not recognize parts of deleted words as deleted if they consist of punctuation marks. [TODO]
        - word-undeletion (e.g. N VII 1, 18,6 -> "mit")
        - underline
        - text-area-deletion
        - text-connection-lines
        - boxes
- process footnotes:
    - Return footnotes with standoff [DONE]
    - TextConnectionMark [DONE]
        - TextConnection with uncertainty [TODO]
            - "Fortsetzung [0-9]+,[0-9]+?"
            - "Fortsetzung von [0-9]+,[0-9]+?"
    - concerning Word:
        - uncertain transcription: "?" / may have bold word parts
        - atypical writing: "¿" and bold word parts
        - clarification corrections ("Verdeutlichungskorrekturen"): "Vk" and bold word parts
        - correction: "word>" and ">?" (with uncertainty)
    - concerning word deletion:
        - atypical writing: "¿" and "Durchstreichung" (see N VII 1, 11,2)
- process margins:
    - MarkForeignHands [DONE]
    - ForeignHandTextAreaDeletion [TODO]
    - boxes: make earlier version of a word [TODO]
    - TextConnection [TODO]
        - from: ([0-9]+,)*[0-9]+ -)
        - to: -) ([0-9]+,)*[0-9]+

## Datatypes
- make datatypes:
    - Page [ok] --> page orientation!!!
    - SimpleWord
        - SpecialWord
            - MarkForeignHands ("Zeichen für Fremde Hand") [DONE]
            - TextConnectionMark ("Anschlußzeichen") [DONE]
                - has a Reference
        - Word [ok]
            --> deal with non-horizontal text [DONE]
            --> hyphenation [TODO]
            --> add style info to word: font { German, Latin } [DONE]
            --> pen color [DONE]
            --> connect style with character glyph-id from svg path file
            --> has parts [DONE]
            --> versions: later version of earlier version [DONE]
    - WritingProcess >>>> use only in connection with earlier versions of word
        - correlates with font size:
            - biggest font to biggest-1 font: stage 0
            - font in between: stage 1
            - smallest font to smallest+1 font: stage 2
    - Style [DONE]
    - WordPosition [ok]
        - TranskriptionPosition [ok]
        - FaksimilePosition [ok]
    - LineNumber [reDo]
        - change to Line
    - Reference [TODO]
        - TextConnection
            - needs change of LineNumber to Line
    - ForeignHandTextAreaDeletion [TODO]
    - Freehand:
        - Deletion [DONE]
            - make parts of word if only parts of a word are deleted, also introduce earlier version of word [DONE]
        - WordInsertionMark [reDO]
        - Underline [TODO]
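A minimal sketch of the proximity weighting mentioned above, assuming each search hit exposes an (x, y) anchor point; the actual Word/WordPosition API may differ (illustrative only, not part of this revision):

    from math import hypot

    def rank_hits_by_proximity(query_xy, hits):
        """Order word-search hits by Euclidean distance to the query anchor."""
        qx, qy = query_xy
        return sorted(hits, key=lambda word: hypot(word.x - qx, word.y - qy))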
Index: svgscripts/datatypes/path.py
===================================================================
--- svgscripts/datatypes/path.py	(revision 84)
+++ svgscripts/datatypes/path.py	(revision 85)
@@ -1,168 +1,170 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This super class can be used to represent all svg path types.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. 1}}}

__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"

from lxml import etree as ET
from os.path import isfile
from svgpathtools.parser import parse_path
import sys

from .attachable_object import AttachableObject

sys.path.append('py2ttl')
from class_spec import SemanticClass

class Path(AttachableObject,SemanticClass):
    """ This super class represents all types of svg paths.

    Args:
        node (lxml.etree.Element) node, containing information
        path (svgpathtools.path.Path) svg path representation.
    """
    XML_TAG = 'path'
    WORD_DELETION_PATH_TAG = 'word-deletion-path'
    BOX_TAG = 'box-path'

-    def __init__(self, id=0, node=None, path=None, d_string=None, style_class='', tag=XML_TAG):
+    def __init__(self, id=0, node=None, path=None, parent_path=None, d_string=None, style_class='', tag=XML_TAG):
        self.intKeys = [ 'id' ]
        self.stringKeys = [ 'style_class' ]
        self.floatKeys = []
+        self.start_line_number = -1
+        self.parent_path = parent_path
        if node is not None:
            self.id = int(node.get('id')) if bool(node.get('id')) else 0
            self.path = parse_path(node.get('d')) if bool(node.get('d')) else None
            self.d_attribute = node.get('d')
            self.style_class = node.get('style-class')
            self.tag = node.tag
        else:
            self.tag = tag
            self.id = id
            self.path = path
            if self.path is None\
            and d_string is not None\
            and d_string != '':
                self.path = parse_path(d_string)
            self.d_attribute = self.path.d() if self.path is not None else ''
            self.style_class = style_class
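A quick illustration of the two fields this revision adds to Path.__init__: parent_path links a sub-path back to the path it was split off from, and start_line_number is filled in later by categorize_paths (import path assumed; illustrative only, not part of the patch):

    from datatypes.path import Path  # assumed import path

    stroke = Path(d_string='M 10,100 L 60,100')
    assert stroke.parent_path is None        # only set when a path is split into segments
    assert stroke.start_line_number == -1    # later set via page.get_line_number(...)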
""" XML_TAG = 'path' WORD_DELETION_PATH_TAG = 'word-deletion-path' BOX_TAG = 'box-path' - def __init__(self, id=0, node=None, path=None, d_string=None, style_class='', tag=XML_TAG): + def __init__(self, id=0, node=None, path=None, parent_path=None, d_string=None, style_class='', tag=XML_TAG): self.intKeys = [ 'id' ] self.stringKeys = [ 'style_class' ] self.floatKeys = [] + self.start_line_number = -1 + self.parent_path = parent_path if node is not None: self.id = int(node.get('id')) if bool(node.get('id')) else 0 self.path = parse_path(node.get('d')) if bool(node.get('d')) else None self.d_attribute = node.get('d') self.style_class = node.get('style-class') self.tag = node.tag else: self.tag = tag self.id = id self.path = path if self.path is None\ and d_string is not None\ and d_string != '': self.path = parse_path(d_string) self.d_attribute = self.path.d() if self.path is not None else '' self.style_class = style_class def attach_object_to_tree(self, target_tree): """Attach object to tree. """ if target_tree.__class__.__name__ == '_ElementTree': target_tree = target_tree.getroot() obj_node = target_tree.xpath('.//' + self.tag + '[@id="%s"]' % self.id)[0] \ if(len(target_tree.xpath('.//' + self.tag + '[@id="%s"]' % self.id)) > 0) \ else ET.SubElement(target_tree, self.tag) for key in self.floatKeys: if self.__dict__[key] is not None: obj_node.set(key.replace('_','-'), str(round(self.__dict__[key], 3))) for key in self.intKeys + self.stringKeys: if self.__dict__[key] is not None: obj_node.set(key.replace('_','-'), str(self.__dict__[key])) if self.path is not None: obj_node.set('d', self.path.d()) def contains_path(self, other_path): """Returns true if other_path is contained in this path. """ this_xmin, this_xmax, this_ymin, this_ymax = self.path.bbox() other_xmin, other_xmax, other_ymin, other_ymax = other_path.path.bbox() return other_xmin >= this_xmin and other_xmax <= this_xmax\ and other_ymin >= this_ymin and other_ymax <= this_ymax def contains_start_of_path(self, other_path): """Returns true if start of other_path is contained in this path. """ this_xmin, this_xmax, this_ymin, this_ymax = self.path.bbox() other_xmin, other_xmax, other_ymin, other_ymax = other_path.path.bbox() return other_xmin >= this_xmin and other_xmin < this_xmax\ and other_ymin >= this_ymin and other_ymax <= this_ymax def contains_end_of_path(self, other_path): """Returns true if end of other_path is contained in this path. """ this_xmin, this_xmax, this_ymin, this_ymax = self.path.bbox() other_xmin, other_xmax, other_ymin, other_ymax = other_path.path.bbox() return other_xmax >= this_xmin and other_xmax < this_xmax\ and other_ymin >= this_ymin and other_ymax <= this_ymax @classmethod def create_path_from_transkription_position(cls, transkription_position, tr_xmin=0.0, tr_ymin=0.0): """Create a .path.Path from a .transkription_position.TranskriptionPosition. 
""" if len(transkription_position.positional_word_parts) > 0: first_pwp = transkription_position.positional_word_parts[0] last_pwp = transkription_position.positional_word_parts[len(transkription_position.positional_word_parts)-1] xmin = tr_xmin + first_pwp.left xmax = tr_xmin + last_pwp.left + last_pwp.width ymin = tr_ymin + sorted(pwp.top for pwp in transkription_position.positional_word_parts)[0] ymax = tr_ymin + sorted([pwp.bottom for pwp in transkription_position.positional_word_parts], reverse=True)[0] else: xmin = tr_xmin + transkription_position.left xmax = xmin + transkription_position.width ymin = tr_ymin + transkription_position.top ymax = ymin + transkription_position.height word_path = parse_path('M {}, {} L {}, {} L {}, {} L {}, {} z'.format(xmin, ymin, xmax, ymin, xmax, ymax, xmin, ymax)) return cls(path=word_path) def do_paths_intersect(self, other_path): """Returns true if paths intersect, false if not or if there was an exception. """ try: return self.path.intersect(other_path.path, justonemode=True) except AssertionError: return False def get_median_y(self, tr_ymin=0.0): """Return the median of ymin + ymax. """ return (self.path.bbox()[2] + self.path.bbox()[3])/2 - tr_ymin def get_x(self, tr_xmin=0.0): """Return xmin. """ return self.path.bbox()[0] - tr_xmin @classmethod def get_semantic_dictionary(cls): """ Creates and returns a semantic dictionary as specified by SemanticClass. """ dictionary = {} class_dict = cls.get_class_dictionary() properties = {'d_attribute': { 'class': str, 'cardinality': 0,\ 'name': 'hasDAttribute', 'label': 'svg path has d attribute',\ 'comment': 'The d attribute defines a path to be drawn.'}} properties.update(cls.create_semantic_property_dictionary('style_class', str)) dictionary.update({cls.CLASS_KEY: class_dict}) dictionary.update({cls.PROPERTIES_KEY: properties}) return cls.return_dictionary_after_updating_super_classes(dictionary) def is_partially_contained_by(self, other_path): """Returns true if other_path containes this path partially. """ return other_path.contains_start_of_path(self) or other_path.contains_end_of_path(self) Index: svgscripts/datatypes/word.py =================================================================== --- svgscripts/datatypes/word.py (revision 84) +++ svgscripts/datatypes/word.py (revision 85) @@ -1,762 +1,773 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ This class can be used to represent a word. """ # Copyright (C) University of Basel 2019 {{{1 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. 
Index: svgscripts/datatypes/word.py
===================================================================
--- svgscripts/datatypes/word.py	(revision 84)
+++ svgscripts/datatypes/word.py	(revision 85)
@@ -1,762 +1,773 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This class can be used to represent a word.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. 1}}}

__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"

import copy
import inspect
from lxml import etree as ET
from operator import attrgetter
import sys
import warnings

from .box import Box
from .matrix import Matrix
from .path import Path
from .simple_word import SimpleWord
from .style import Style
from .word_position import WordPosition
from .transkription_position import TranskriptionPosition
from .writing_process import WritingProcess

def execute_function_on_parts(word_parts, func_name):
    """Execute function on parts and add those parts instead of original word to word_parts.

    :return: new word_parts, output from func
    """
    copy_parts = word_parts[:]
    for word in word_parts:
        output = eval('word.{0}()'.format(func_name))
        if len(word.word_parts) > 0:
            for part_word in word.word_parts:
                copy_parts.insert(copy_parts.index(word), part_word)
            copy_parts.remove(word)
            word.word_parts = []
    return copy_parts, output

def update_transkription_position_ids(word):
    """Update transkription_position' ids according to index.
    """
    word_part_ids = [ wp.id for wp in word.word_parts ]
    if len(word_part_ids) != len(set(word_part_ids)):
        for id, wp in enumerate(word.word_parts):
            wp.id = id
    for index, transkription_position in enumerate(sorted(word.transkription_positions, key=attrgetter('left'))):
        transkription_position.id = index
        transkription_position.has_box = None
        transkription_position.deleted = False

class Word(SimpleWord):
    """ This class represents a word.
    """
    COPY_PROPERTY_KEY = [ 'line_number', 'deleted', 'writing_process_id' ]
    APPEND_PROPERTY2LIST_SOURCE_TARGET_KEYS = { 'style': 'styles' }
    DATA = 'debug-data'
    XML_TAG = 'word'
    XML_EARLIER_VERSION = 'earlier-version'
    XML_OVERWRITES = 'overwrites'
    XML_CORRECTION_DICT = { 'isClarificationOfWord': 'clarifiesWord',\
            'isDeletionOfWord': 'deletesEarlierPart',\
            'isExtensionOfWord': 'extendsEarlierVersion',\
            'isTransformationOfWord': 'transformsEarlierPart' }

    def __init__(self, id=0, text='', line_number=-1, deleted=False, transkription_positions=None, faksimile_positions=None, word_part_objs=None, word_parts=None, writing_process_id=-1, earlier_version=None, box_paths=None, styles=None):
        super(Word,self).__init__(id=id, text=text, line_number=line_number, transkription_positions=transkription_positions,\
                faksimile_positions=faksimile_positions)
        self.corrections = []
        self.deleted = deleted
        self.debug_container = {}
        self.debug_msg = None
        self.earlier_version = earlier_version
        self.edited_text = None
        self.isClarificationOfWord = None
        self.isDeletionOfWord = None
        self.isExtensionOfWord = None
        self.isTransformationOfWord = None
        if len(self.text) == 0 and len(''.join([ tp.get_text() for tp in self.transkription_positions if type(tp) == TranskriptionPosition ])) > 0:
            self.text = ''.join([ tp.get_text() for tp in self.transkription_positions ])
        self.overwrites_word = None
        self.styles = styles\
                if styles is not None\
                else []
        self.writing_process_id = writing_process_id
        self.writing_processes = []
        self.word_insertion_mark = None
        self.word_box = None
        self.word_parts = word_parts if word_parts is not None else []
        self.word_part_objs = word_part_objs if word_part_objs is not None else []
""" word_node = super(Word,self).attach_word_to_tree(target_tree) if self.deleted is not None: word_node.set('deleted', str(self.deleted).lower()) if self.edited_text is not None: word_node.set('edited-text', self.edited_text) if self.writing_process_id > -1: word_node.set('writing-process-id', str(self.writing_process_id)) for index, word_part in enumerate(self.word_parts): word_part.id = index word_part.attach_word_to_tree(word_node) if self.earlier_version is not None: earlier_node = ET.SubElement(word_node, self.XML_EARLIER_VERSION) self.earlier_version.attach_word_to_tree(earlier_node) if self.overwrites_word is not None\ and len(self.overwrites_word.transkription_positions) > 0: overwrite_node = ET.SubElement(word_node, self.XML_OVERWRITES) self.overwrites_word.attach_word_to_tree(overwrite_node) if self.word_box is not None: self.word_box.attach_object_to_tree(word_node) if len(self.corrections) > 0: word_node.set('corrections', ' '.join(set([ str(word.id) for word in self.corrections ]))) for key in self.XML_CORRECTION_DICT.keys(): if self.__dict__[key] is not None: word_node.set(self.XML_CORRECTION_DICT[key], 'true') return word_node def belongs_to_multiple_writing_processes(self, include_parts=False): """Returns true if transkription_positions belong to different WritingProcesses. """ if len(self.word_parts) > 0 and include_parts: return len(set(word.writing_process_id for word in self.word_parts)) > 1 return len(set(tp.writing_process_id for tp in self.transkription_positions )) > 1 def set_parent_word_writing_process_id(self): """Set writing_process_id for parent word. """ ids = set(word.transkription_positions[0].style for word in self.word_parts\ if len(word.transkription_positions) > 0 and word.transkription_positions[0].style is not None) if len(ids) > 1: self.writing_process_id = max([style.writing_process_id for style in ids]) if len(set(word.transkription_positions[0].style.create_a_copy_wo_writing_process_id()\ for word in self.word_parts\ if len(word.transkription_positions) > 0 and word.transkription_positions[0].style is not None))\ > 1: self.writing_process_id += 1 @classmethod def create_cls(cls, word_node): """Creates a word from a (lxml.Element) node. 
    @classmethod
    def create_cls(cls, word_node):
        """Creates a word from a (lxml.Element) node.

        [:return:] Word
        """
        cls = super(Word,cls).create_cls(word_node)
        cls.writing_process_id = int(word_node.get('writing-process-id')) if bool(word_node.get('writing-process-id')) else -1
        cls.split_strings = None
        cls.join_string = word_node.get('join')
        if bool(word_node.get('split')):
            cls.split_strings = word_node.get('split').split(' ')
            if ''.join(cls.split_strings) != cls.text:
                error_msg = 'Error in file {0}: word with id="{1}" has split attributes that do not correspond to its text attribute!\n'.\
                        format(word_node.getroottree().docinfo.URL, str(cls.id))\
                        + 'Split attributes: "{0}".\n'.format(' '.join(cls.split_strings))\
                        + 'Text attribute: "{0}".\n'.format(cls.text)
                raise Exception(error_msg)
        cls.deleted = word_node.get('deleted') == 'true'\
                if bool(word_node.get('deleted')) else None
        cls.edited_text = word_node.get('edited-text')
        cls.word_parts = [ cls.create_cls(node) for node in word_node.xpath('./' + cls.XML_TAG) ]
        if bool(word_node.get('corrections')):
            for index in [ int(i) for i in word_node.get('corrections').split(' ') ]:
                if index < len(cls.word_parts):
                    cls.corrections.append(cls.word_parts[index])
        cls.earlier_version = None
        if len(word_node.xpath('./' + cls.XML_EARLIER_VERSION + '/' + cls.XML_TAG)) > 0:
            cls.earlier_version = [ cls.create_cls(node) for node in word_node.xpath('./' + cls.XML_EARLIER_VERSION + '/' + cls.XML_TAG) ][0]
        for key_value in cls.XML_CORRECTION_DICT.values():
            if word_node.get(key_value) == 'true':
                cls.__dict__[key_value] = True
        if cls.earlier_version is not None:
            for word_part in cls.word_parts:
                for key in [ key for key, value in cls.XML_CORRECTION_DICT.items() if value.endswith('Part') ]:
                    if cls.XML_CORRECTION_DICT[key] in word_part.__dict__.keys() and word_part.__dict__[cls.XML_CORRECTION_DICT[key]]\
                    and len(cls.word_parts) <= len(cls.earlier_version.word_parts):
                        try:
                            word_part.__dict__[key] = cls.earlier_version.word_parts[word_part.id]
                        except Exception:
                            msg = f'{cls.id} {cls.text}: {word_part.id}'
                            raise Exception(msg)
                for key in [ key for key, value in cls.XML_CORRECTION_DICT.items() if value.endswith('EarlierVersion') ]:
                    if cls.XML_CORRECTION_DICT[key] in word_part.__dict__.keys() and word_part.__dict__[cls.XML_CORRECTION_DICT[key]]:
                        word_part.__dict__[key] = cls.earlier_version
                for key in [ key for key, value in cls.XML_CORRECTION_DICT.items() if value.endswith('Word') ]:
                    if cls.XML_CORRECTION_DICT[key] in word_part.__dict__.keys() and word_part.__dict__[cls.XML_CORRECTION_DICT[key]]:
                        word_part.__dict__[key] = cls
        cls.overwrites_word = [ cls.create_cls(node) for node in word_node.xpath('./' + cls.XML_OVERWRITES + '/' + cls.XML_TAG)][0]\
                if len(word_node.xpath('./' + cls.XML_OVERWRITES + '/' + cls.XML_TAG)) > 0\
                else None
        cls.word_box = [ Box(node=node) for node in word_node.xpath('./' + Box.XML_TAG) ][0]\
                if len(word_node.xpath('./' + Box.XML_TAG)) > 0\
                else None
        return cls
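How the split/deleted attributes are expected to look on a word node: the concatenated split tokens must equal the text attribute or create_cls raises (runnable sketch, assuming SimpleWord.create_cls accepts a bare node; illustrative only):

    from lxml import etree as ET
    from datatypes.word import Word  # assumed import path

    word_node = ET.fromstring('<word id="0" text="dies" split="di es" deleted="false"/>')
    word = Word.create_cls(word_node)
    assert word.split_strings == ['di', 'es']   # 'di' + 'es' == 'dies', so no error
    assert word.deleted is False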
""" if root_word is None: root_word = self root_word.set_parent_word_writing_process_id() word_parts = [] for id, word_part in enumerate(self.word_parts): earlierWordPart = word_part.create_earlier_version(root_word=root_word, id=id) if word_part.deleted: word_part.isDeletionOfWord = earlierWordPart word_parts.append(earlierWordPart) if word_part not in self.corrections: self.corrections.append(word_part) elif word_part.overwrites_word is not None\ and (len(word_part.transkription_positions) > 0\ and word_part.overwrites_word.transkription_positions[0].style is not None\ and word_part.transkription_positions[0].style is not None\ and word_part.transkription_positions[0].style\ != word_part.overwrites_word.transkription_positions[0].style): word_part.overwrites_word.id = word_part.id word_parts.append(word_part.overwrites_word) word_part.isTransformationOfWord = word_part.overwrites_word - #print('transform') + #print(f'transform: {self.text}') if word_part not in self.corrections: self.corrections.append(word_part) elif root_word.writing_process_id > -1\ - and (len(word_part.transkription_positions) > 0 + and (len(word_part.transkription_positions) > 0\ + and word_part.transkription_positions[0].style is not None\ and word_part.transkription_positions[0].style.writing_process_id\ == root_word.writing_process_id): word_part.extendsEarlierVersion = True #print('extends') if word_part not in self.corrections: self.corrections.append(word_part) else: #print('default') word_parts.append(earlierWordPart) text = ''.join([ word.text for word in word_parts ])\ if len(word_parts) > 0\ else self.text if len(word_parts) == 1: self.transkription_positions += word_parts[0].transkription_positions self.faksimile_positions += word_parts[0].faksimile_positions word_parts = [] new_transkription_positions = copy.deepcopy(self.transkription_positions) if len(self.transkription_positions) > 0\ and self.transkription_positions[0].style is not None: writing_process_id = self.transkription_positions[0].style.writing_process_id for new_tp in new_transkription_positions: new_tp.style.writing_process_id = writing_process_id return Word(id=id, text=text, transkription_positions=new_transkription_positions,\ faksimile_positions=self.faksimile_positions, line_number=self.line_number,\ word_parts=word_parts) def create_correction_history(self, page=None, box_style=None): """Create correction history. """ if self.word_box is not None: + if self.text == 'erschiedenes': + print(f'{self.text}: here is a BUG! 
    def create_correction_history(self, page=None, box_style=None):
        """Create correction history.
        """
        if self.word_box is not None:
+            if self.text == 'erschiedenes':
+                print(f'{self.text}: here is a BUG! TODO: find self.word_box of self.word_parts[1]!!')
            manuscript = self.transkription_positions[0].style.manuscript\
                    if len(self.transkription_positions) > 0\
                    and self.transkription_positions[0].style is not None\
                    else None
            style = Style()
            if page is not None:
                style = Style.create_cls(page, self.word_box.text_style_class, manuscript=manuscript)
            if box_style is not None:
                style = box_style
            transkription_positions = TranskriptionPosition.copy_list_of_cls(self.transkription_positions)
            for transkription_position in transkription_positions:
                transkription_position.style = style
            self.overwrites_word = Word(text=self.word_box.earlier_text, transkription_positions=transkription_positions,\
                    line_number=self.line_number)
        for word_part in self.word_parts:
            word_part.create_correction_history()
        if len(self.word_parts) > 0:
            earlier_version = self.create_earlier_version()
            extending_words = self._get_parts_with_property_key('extendsEarlierVersion')
            if len(extending_words) > 0:
                for word in extending_words:
                    word.isExtensionOfWord = earlier_version
            if self.has_mixed_status('deleted', include_parts=True):
                self.edited_text = ''.join([ word.text for word in self.word_parts if not word.deleted ])
            if len(self.corrections) > 0:
                self.earlier_version = earlier_version
    @staticmethod
    def CREATE_WORD(word_node=None, page=None, word_part_objs=[], id=0, height=0, endX=0, endSign=None, matrix=None, line_number=-1, debug_msg=None):
        """Creates a word from a (lxml.Element) node or word_part_objs.

        [:return:] Word
        """
        if word_node is not None: # init word from xml node
            id = int(word_node.get('id'))
            line_number = int(word_node.get('line-number')) if bool(word_node.get('line-number')) else line_number
            text = word_node.get('text')
            deleted = bool(word_node.get('deleted')) and word_node.get('deleted') == 'true'
            transkription_positions = [ TranskriptionPosition(node=node) for node in word_node.findall('.//' + WordPosition.TRANSKRIPTION) ]
            faksimile_positions = [ WordPosition(node=node) for node in word_node.findall('.//' + WordPosition.FAKSIMILE) ]
            word_part_objs = [ item.attrib for item in word_node.findall('.//' + Word.DATA + '/part')]\
                    if len(word_node.findall('.//' + Word.DATA)) > 0\
                    else [ item.attrib for item in word_node.findall('.//part')]
            return Word(id=id, text=text, deleted=deleted, line_number=line_number, transkription_positions=transkription_positions,\
                    faksimile_positions=faksimile_positions, word_part_objs=word_part_objs)
        elif len(word_part_objs) > 0: # init word from word_part_obj that has been extracted from svg file
            WIDTH = 5
            TOPCORRECTION = 2.0
            FONTWIDTHFACTOR = 0.7 # factor that multiplies lastCharFontSize
            height = height
            x = round(float(word_part_objs[0]['x']), 3)
            if(page is not None and bool(page.style_dict)):
                HEIGHT_FACTOR = 1.1 # factor that multiplies biggest_font_size -> height
                style_set = set(' '.join(set( dict['class'] for dict in word_part_objs)).split(' '))
                biggest_font_size = page.get_biggest_fontSize4styles(style_set=style_set)
                height = round(biggest_font_size * HEIGHT_FACTOR + HEIGHT_FACTOR / biggest_font_size, 3)
                TOPCORRECTION = 1 + HEIGHT_FACTOR / biggest_font_size
                if endSign is not None and '%' in endSign:
                    lastCharFontSizeList = [ float(page.style_dict[key]['font-size'].replace('px',''))\
                            for key in word_part_objs[len(word_part_objs)-1]['class'].split(' ')\
                            if bool(page.style_dict[key].get('font-size'))]
                    lastCharFontSize = lastCharFontSizeList[0] if len(lastCharFontSizeList) > 0 else 1
                    endX = float(endX) + lastCharFontSize * FONTWIDTHFACTOR
                elif endSign is not None: # fixed fallback width when endSign lacks '%' (original repeated the '%' condition, which could never fire)
                    endX = float(endX) + WIDTH
            bottom = round(float(word_part_objs[0]['y']), 3)
            y = round(bottom - height + TOPCORRECTION, 3)
            width = round(float(endX) - x, 3)
            transkription_positions = [ WordPosition(height=height, width=width, x=x, y=y, matrix=matrix, tag=WordPosition.TRANSKRIPTION) ]
            text = ''.join([ dict['text'] for dict in word_part_objs])
            line_number = page.get_line_number( (y + bottom)/2) if page is not None else line_number
            word = Word(id=id, text=text, line_number=line_number, transkription_positions=transkription_positions, word_part_objs=word_part_objs)
            word.debug_msg = debug_msg
            return word
        else:
            error_msg = 'word_node has not been defined' if (word_node is None) else 'word_part_objs is empty'
            raise Exception('Error: {}'.format(error_msg))
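A minimal use of the word_part_objs branch above; page=None skips the style-based height heuristics, and the dict keys mirror what the svg extraction delivers (illustrative values, assuming WordPosition stores width as given):

    parts = [ {'x': '10.0', 'y': '100.0', 'text': 'so', 'class': 'st5'} ]
    word = Word.CREATE_WORD(word_part_objs=parts, endX=22.0, height=8)
    assert word.text == 'so'
    assert word.transkription_positions[0].width == 12.0   # endX - x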
        dictionary[cls.PROPERTIES_KEY].update(cls.create_semantic_property_dictionary('word_parts', list,\
                name='wordHasWordParts', label='word has word parts', comment='Word consists of a list of words.',\
                subPropertyOf=cls.HAS_HOMOTYPIC_PARTS_URL_STRING))
        super_property_dictionary = cls.create_semantic_property_dictionary(cls.SUPER_PROPERTY, Word,\
                name='isCorrectionOfWord', label='word is a correction of word',\
                comment='The author has used this word in order to correct that word.')
        for key in cls.XML_CORRECTION_DICT.keys():
            correction_dict = dictionary[cls.PROPERTIES_KEY].get(key)
            correction_dict.update(super_property_dictionary)
            dictionary[cls.PROPERTIES_KEY].update({key: correction_dict})
        return cls.return_dictionary_after_updating_super_classes(dictionary)

    def has_mixed_status(self, property_key, include_parts=False, concerns_word=True):
        """Returns true if transkription_positions have mixed status concerning the property_key in their __dict__.
        """
        if False in set(property_key in tp.__dict__.keys() for tp in self.transkription_positions):
            return False
        if len(self.word_parts) > 0 and include_parts:
            if concerns_word:
                if False in set(property_key in word.__dict__.keys() for word in self.word_parts):
                    return False
                return len(set(word.__dict__[property_key] for word in self.word_parts)) > 1
            else:
                return len(set(word.transkription_positions[0].__dict__[property_key] for word in self.word_parts\
                        if len(word.transkription_positions) > 0 and property_key in word.transkription_positions[0].__dict__.keys())) > 1
        return len(set(tp.__dict__[property_key] for tp in self.transkription_positions )) > 1

    def init_word(self, page):
        """Initialize word with objects from page.
        """
        super(Word,self).init_word(page)
        if self.writing_process_id > -1:
            self.writing_processes += [ wp for wp in page.writing_processes if wp.id == self.writing_process_id ]
        writing_processes = self.writing_processes
        for word_part in self.word_parts:
            word_part.init_word(page)
            self.lines += word_part.lines
            self.writing_processes += word_part.writing_processes
        self.lines = [ line for line in set(self.lines) ]
        self.writing_processes = [ wp for wp in set(self.writing_processes)]
        if self.overwrites_word is not None:
            self.overwrites_word.init_word(page)
        if self.earlier_version is not None:
            if self.earlier_version.writing_process_id == -1:
                self.earlier_version.writing_process_id = self.writing_process_id-1
            if self.earlier_version.line_number == -1:
                self.earlier_version.line_number = self.line_number
            self.earlier_version.init_word(page)

    def join(self, other_word, append_at_end_of_new_word=True):
        """Joins other_word with this word by changing the text of current word and adding other_word.transkription_positions.
        """
        if append_at_end_of_new_word:
            self.text = self.text + other_word.text
            for position in other_word.transkription_positions:
                position.id = str(len(self.transkription_positions))
                self.transkription_positions.append(position)
        else:
            self.text = other_word.text + self.text
            index = 0
            for position in other_word.transkription_positions:
                self.transkription_positions.insert(index, position)
                index += 1
            while index < len(self.transkription_positions):
                self.transkription_positions[index].id = str(index)
                index += 1
        self.simplify_transkription_positions()
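The join contract above in a runnable sketch, echoing the 'erschiedenes'/'Verschiedenes' case noted earlier in this diff; SimpleNamespace stands in for TranskriptionPosition, so simplify_transkription_positions is a no-op (assumes SimpleWord stores text and positions as given; illustrative only):

    from types import SimpleNamespace
    from datatypes.word import Word  # assumed import path

    left = Word(id=0, text='V', transkription_positions=[SimpleNamespace(id='0')])
    right = Word(id=1, text='erschiedenes', transkription_positions=[SimpleNamespace(id='0')])
    left.join(right)
    assert left.text == 'Verschiedenes'
    assert len(left.transkription_positions) == 2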
""" if self.has_mixed_status('deleted'): transkription_positions = [] last_status = None for transkription_position in self.transkription_positions: if transkription_position.deleted != last_status\ and len(transkription_positions) > 0: newWord = Word(id=len(self.word_parts), line_number=self.line_number,\ transkription_positions=transkription_positions, deleted=last_status, writing_process_id=self.writing_process_id) self.word_parts.append(newWord) transkription_positions = [] transkription_positions.append(transkription_position) last_status = transkription_position.deleted if len(transkription_positions) > 0: newWord = Word(id=len(self.word_parts), line_number=self.line_number,\ transkription_positions=transkription_positions, deleted=last_status, writing_process_id=self.writing_process_id) self.word_parts.append(newWord) self.transkription_positions = [] self.line_number = -1 self.deleted = False elif len(self.word_parts) > 0: self.word_parts, none = execute_function_on_parts(self.word_parts, 'partition_according_to_deletion') elif not self.deleted\ and len(self.transkription_positions) > 0\ and self.transkription_positions[0].deleted: self.deleted = True def partition_according_to_writing_process_id(self): """Partition a word according to its transkription_positions' writing_process_ids ->split word and add partial words as its parts. """ if self.belongs_to_multiple_writing_processes(): last_writing_process_id = -1 transkription_positions = [] for transkription_position in self.transkription_positions: if transkription_position.writing_process_id != last_writing_process_id\ and len(transkription_positions) > 0: newWord = Word(id=len(self.word_parts), line_number=self.line_number,\ transkription_positions=transkription_positions, writing_process_id=last_writing_process_id) self.word_parts.append(newWord) transkription_positions = [] transkription_positions.append(transkription_position) last_writing_process_id = transkription_position.writing_process_id if len(transkription_positions) > 0: newWord = Word(id=len(self.word_parts), line_number=self.line_number,\ transkription_positions=transkription_positions, writing_process_id=last_writing_process_id) self.word_parts.append(newWord) self.transkription_positions = [] elif len(self.word_parts) > 0: self.word_parts, none = execute_function_on_parts(self.word_parts, 'partition_according_to_writing_process_id') if self.belongs_to_multiple_writing_processes(include_parts=True): self.writing_process_id = sorted(set([ word.writing_process_id for word in self.word_parts ]), reverse=True)[0] elif len(self.transkription_positions) > 0: self.writing_process_id = self.transkription_positions[0].writing_process_id - def process_boxes(self, box_paths, tr_xmin=0.0, tr_ymin=0.0): + def process_boxes(self, box_paths, tr_xmin=0.0, tr_ymin=0.0, previous_word_has_box=False): """Determines whether word is over a word box. 
""" word_over_box = None if len(self.word_parts) > 0: for word in self.word_parts: - current_word = word.process_boxes(box_paths, tr_xmin=tr_xmin, tr_ymin=tr_ymin) + current_word = word.process_boxes(box_paths, tr_xmin=tr_xmin, tr_ymin=tr_ymin, previous_word_has_box=(word_over_box is not None)) if current_word is not None and current_word.word_box is not None: word_over_box = current_word else: new_tp_dict = {} - for transkription_position in self.transkription_positions: + for index, transkription_position in enumerate(self.transkription_positions): + if previous_word_has_box and index == 0: + if len(transkription_position.positional_word_parts) > 0: + transkription_position.positional_word_parts[0].left += transkription_position.positional_word_parts[0].width/2 + #print(f'{self.text}: {transkription_position.positional_word_parts[0].left}') + else: + transkription_position.left += 1 word_path = Path.create_path_from_transkription_position(transkription_position,\ tr_xmin=tr_xmin, tr_ymin=tr_ymin) containing_boxes = [ box_path for box_path in box_paths\ if word_path.is_partially_contained_by(box_path)\ or box_path.do_paths_intersect(word_path) ] if len(containing_boxes) > 0: + if previous_word_has_box: + print(f'{self.text}: {word_path.path.bbox()} {containing_boxes[0].path.bbox()}') self._set_box_to_transkription_position(containing_boxes[0], word_path,\ transkription_position, new_tp_dict, tr_xmin) for replace_tp in new_tp_dict.keys(): for tp in new_tp_dict.get(replace_tp): self.transkription_positions.insert(self.transkription_positions.index(replace_tp), tp) self.transkription_positions.remove(replace_tp) word_over_box = self._get_partial_word_over_box() update_transkription_position_ids(self) return word_over_box def set_word_insertion_mark(self, word_insertion_mark): """Sets word_insertion_mark """ self.word_insertion_mark = word_insertion_mark def set_writing_process_id_to_transkription_positions(self, page): """Determines the writing process id of the transkription_positions. """ for transkription_position in self.transkription_positions: if len(transkription_position.positional_word_parts) > 0: for font_key in transkription_position.positional_word_parts[0].style_class.split(' '): if font_key in page.fontsizekey2stage_mapping.keys(): transkription_position.writing_process_id = page.fontsizekey2stage_mapping.get(font_key) def simplify_transkription_positions(self): """Merge transkription_positions if possible. 
""" index = len(self.transkription_positions)-1 while index > 0\ and False not in [ 'positional_word_parts' in tp.__dict__.keys() for tp in self.transkription_positions ]: current_tp = self.transkription_positions[index] index -= 1 previous_tp = self.transkription_positions[index] if previous_tp.is_mergebale_with(current_tp): positional_word_parts = previous_tp.positional_word_parts positional_word_parts += current_tp.positional_word_parts transkription_positions = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(\ positional_word_parts, debug_msg_string='simplifying transkription positions', transkription_position_id=previous_tp.id) if len(transkription_positions) == 1: transkription_positions[0].writing_process_id = previous_tp.writing_process_id\ if previous_tp.writing_process_id != -1\ else current_tp.writing_process_id self.transkription_positions.pop(index+1) self.transkription_positions[index] = transkription_positions[0] #print(self.text, len(self.transkription_positions)) def split(self, split_string, start_id=0): """Splits the word and returns an 3-tuple of new words. """ previousString, currentString, nextString = self.text.partition(split_string) currentWord = None previousWord = None nextWord = None previousIndex = 0 current_id = start_id all_positional_word_parts = [] for position in self.transkription_positions: all_positional_word_parts += position.positional_word_parts if len(all_positional_word_parts) == 0: warnings.warn('ATTENTION: Word: {} {} with Strings "{}, {}, {}": there are no parts!'.format(self.id, self.text, previousString, currentString, nextString)) if len(previousString) > 0: previous_pwps = [] while previousIndex < len(all_positional_word_parts) and previousString != ''.join([ pwp.text for pwp in previous_pwps ]): previous_pwps.append(all_positional_word_parts[previousIndex]) previousIndex += 1 if previousString != ''.join([ pwp.text for pwp in previous_pwps ]): warnings.warn('ATTENTION: "{}" does not match a word_part_obj!'.format(previousString)) else: previous_transkription_positions = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(previous_pwps, debug_msg_string='word.split') previous_text = ''.join([ pwp.text for pwp in previous_pwps ]) previousWord = Word(text=previous_text, id=current_id, line_number=self.line_number, transkription_positions=previous_transkription_positions) current_id += 1 all_positional_word_parts = all_positional_word_parts[previousIndex:] if len(nextString) > 0: tmp_pwps = [] index = 0 while index < len(all_positional_word_parts) and currentString != ''.join([ pwp.text for pwp in tmp_pwps ]): tmp_pwps.append(all_positional_word_parts[index]) index += 1 if currentString != ''.join([ pwp.text for pwp in tmp_pwps ]): warnings.warn('ATTENTION: "{}" does not match a word_part_obj!'.format(currentString)) else: next_pwps = all_positional_word_parts[index:] next_transkription_positions = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(next_pwps, debug_msg_string='word.split') next_text = ''.join([ pwp.text for pwp in next_pwps ]) nextWord = Word(text=next_text, id=current_id+1, line_number=self.line_number, transkription_positions=next_transkription_positions) all_positional_word_parts = all_positional_word_parts[:index] current_transkription_positions = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(all_positional_word_parts, debug_msg_string='word.split') current_text = ''.join([ pwp.text for pwp in all_positional_word_parts ]) currentWord = 
    def split_according_to_status(self, status, splits_are_parts=False):
        """Split a word according to its transkription_positions' text.

        :return: a list of new word.Word
        """
        new_words = []
        if self.has_mixed_status(status):
            last_status = None
            transkription_positions = []
            for transkription_position in self.transkription_positions:
                if transkription_position.__dict__[status] != last_status\
                and len(transkription_positions) > 0:
                    new_words.append(\
                            self._create_new_word(transkription_positions, status, new_id=self.id+len(new_words)))
                    transkription_positions = []
                transkription_positions.append(transkription_position)
                last_status = transkription_position.__dict__[status]
            if len(transkription_positions) > 0:
                new_words.append(\
                        self._create_new_word(transkription_positions, status, new_id=self.id+len(new_words)))
            if splits_are_parts:
                self.word_parts += new_words
                if len(self.word_parts) > 0:
                    self.transkription_positions = []
        return new_words

    def undo_partitioning(self):
        """Undo partitioning.
        """
        if len(self.word_parts) > 0:
            for word_part in self.word_parts:
                word_part.undo_partitioning()
                if self.text != ''.join([ tp.get_text() for tp in self.transkription_positions ]):
                    self.transkription_positions += word_part.transkription_positions
            self.earlier_version = None
            self.edited_text = None
            self.word_box = None
            self.word_parts = []
            self.corrections = []
            self.earlier_versions = []
            self.box_paths = []

    def _create_new_word(self, transkription_positions, status, new_id=0):
        """Create a new word from self and transkription_positions.
        """
        newWord = Word(id=new_id, transkription_positions=transkription_positions)
        for key in self.COPY_PROPERTY_KEY:
            if key != status and key in self.__dict__.keys():
                newWord.__dict__[key] = self.__dict__[key]
        if status in self.APPEND_PROPERTY2LIST_SOURCE_TARGET_KEYS.keys():
            newWord.__dict__[self.APPEND_PROPERTY2LIST_SOURCE_TARGET_KEYS[status]].append(transkription_positions[0].__dict__[status])
        else:
            newWord.__dict__[status] = transkription_positions[0].__dict__[status]
        return newWord

    def _get_parts_with_property_key(self, property_key):
        """Return a list of word_parts with property == property_key.
        """
        word_parts = []
        for word_part in self.word_parts:
            if property_key in word_part.__dict__.keys():
                word_parts.append(word_part)
            else:
                word_parts += word_part._get_parts_with_property_key(property_key)
        return word_parts
    def _get_partial_word_over_box(self):
        """Partition a word according to its transkription_positions' has_box
        ->split word and add partial words as its parts.

        :return: word over box or self
        """
        word_over_box = None
        if self.has_mixed_status('has_box'):
            transkription_positions = []
            last_word_box = None
            for transkription_position in self.transkription_positions:
                if transkription_position.has_box != last_word_box\
                and len(transkription_positions) > 0:
                    newWord = Word(id=len(self.word_parts), line_number=self.line_number,\
                            transkription_positions=transkription_positions, deleted=self.deleted, writing_process_id=self.writing_process_id)
                    self.word_parts.append(newWord)
                    if last_word_box is not None:
                        word_over_box = newWord
                        word_over_box.word_box = last_word_box
                    transkription_positions = []
                transkription_positions.append(transkription_position)
                last_word_box = transkription_position.has_box
            if len(transkription_positions) > 0:
                newWord = Word(id=len(self.word_parts), line_number=self.line_number,\
                        transkription_positions=transkription_positions, deleted=self.deleted, writing_process_id=self.writing_process_id)
                self.word_parts.append(newWord)
                if last_word_box is not None:
                    word_over_box = newWord
                    word_over_box.word_box = last_word_box
            self.transkription_positions = []
        elif len(self.word_parts) > 0:
            #self.word_parts, word_over_box = execute_function_on_parts(self.word_parts, inspect.currentframe().f_code.co_name) #'get_partial_word_over_box')
            for word_part in self.word_parts:
                if word_over_box is None:
                    word_over_box = word_part._get_partial_word_over_box()
                else:
                    break
        elif len([ tp for tp in self.transkription_positions if tp.has_box is not None]) == 1:
            word_over_box = self
            word_over_box.word_box = [ tp for tp in self.transkription_positions if tp.has_box is not None][0].has_box
        return word_over_box

    def _set_box_to_transkription_position(self, box_path, word_path, transkription_position, new_transkription_positions_dictionary, tr_xmin):
        """Set box_path to transkription_position that is contained by box_path.

        Create new transkription_positions by splitting old ones if necessary and add them to new_transkription_positions_dictionary.
        """
        if box_path.contains_path(word_path):
            transkription_position.has_box = box_path
        elif box_path.contains_start_of_path(word_path):
            split_position = box_path.path.bbox()[1] - tr_xmin
            new_tps = transkription_position.split(split_position)
            if len(new_tps) == 2:
                new_tps[0].has_box = box_path
                new_transkription_positions_dictionary.update({ transkription_position: new_tps })
            else:
                transkription_position.has_box = box_path
        elif box_path.contains_end_of_path(word_path):
            split_position = box_path.path.bbox()[0] - tr_xmin
            new_tps = transkription_position.split(split_position)
            if len(new_tps) == 2:
                new_tps[1].has_box = box_path
                new_transkription_positions_dictionary.update({ transkription_position: new_tps })
            else:
                transkription_position.has_box = box_path
        else: # box_path in the middle of word_path
            split_position1 = box_path.path.bbox()[0] - tr_xmin
            split_position2 = box_path.path.bbox()[1] - tr_xmin
            new_tps = transkription_position.split(split_position1, split_position2)
            if len(new_tps) >= 2:
                new_tps[1].has_box = box_path
                new_transkription_positions_dictionary.update({ transkription_position: new_tps })
            else:
                transkription_position.has_box = box_path
""" # Copyright (C) University of Basel 2019 {{{1 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see 1}}} from colorama import Fore, Style from deprecated import deprecated from functools import cmp_to_key import getopt import inspect import lxml.etree as ET import re import shutil import string from svgpathtools import svg2paths2, svg_to_paths +from svgpathtools.path import Path as SVGPath import sys import tempfile from operator import attrgetter import os from os import listdir, sep, path, setpgrp, devnull from os.path import exists, isfile, isdir, dirname, basename from progress.bar import Bar import warnings if dirname(__file__) not in sys.path: sys.path.append(dirname(__file__)) from datatypes.box import Box from datatypes.manuscript import ArchivalManuscriptUnity from datatypes.mark_foreign_hands import MarkForeignHands from datatypes.page import Page, STATUS_MERGED_OK, STATUS_POSTMERGED_OK from datatypes.path import Path from datatypes.text_connection_mark import TextConnectionMark from datatypes.transkriptionField import TranskriptionField -from datatypes.word import update_transkription_position_ids +from datatypes.word import Word, update_transkription_position_ids from util import back_up from process_files import update_svgposfile_status sys.path.append('shared_util') from myxmlwriter import write_pretty, xml_has_type, FILE_TYPE_SVG_WORD_POSITION, FILE_TYPE_XML_MANUSCRIPT __author__ = "Christian Steiner" __maintainer__ = __author__ __copyright__ = 'University of Basel' __email__ = "christian.steiner@unibas.ch" __status__ = "Development" __license__ = "GPL v3" __version__ = "0.0.1" UNITTESTING = False +DEBUG_WORD = None def categorize_paths(page, transkription_field=None): """Categorize all paths that are part of the transkription field. :return: a dictionary containig a list for each category of path. 
""" if page.source is not None and isfile(page.source): MAX_HEIGHT_LINES = 1 max_line = sorted(\ [line_number.bottom-line_number.top for line_number in page.line_numbers if line_number.id % 2 == 0],\ reverse=True)[0] + 2 if len(page.line_numbers) > 0 else 17 tr_xmin = transkription_field.xmin if transkription_field is not None else 0.0 tr_ymin = transkription_field.ymin if transkription_field is not None else 0.0 paths, attributes = svg_to_paths.svg2paths(page.source) allpaths_on_tf = [] allpaths_outside_tf = [] attributes_outside_tf = [] if transkription_field is None: transkription_field = TranskriptionField(page.source) - for index in range(0, len(paths)): - path = paths[index] + for index, path in enumerate(paths): attribute = attributes[index] if len(path) > 0\ and path != transkription_field.path\ - and path.bbox()[0] > tr_xmin\ - and path.bbox()[1] < transkription_field.xmax: + and path.bbox()[0] >= tr_xmin\ + and path.bbox()[1] <= transkription_field.xmax: allpaths_on_tf.append(Path(id=index, path=path, style_class=attribute.get('class'))) elif len(path) > 0\ and path != transkription_field.path: - allpaths_outside_tf.append(path) + allpaths_outside_tf.append(path) #TODO change to: append(Path(id=index, path=path, style_class=... attributes_outside_tf.append(attribute) path_dict = { 'text_area_deletion_paths': [],\ 'deletion_or_underline_paths': [],\ 'box_paths': [],\ 'dots_paths': [],\ 'word_connector_paths': [],\ 'uncategorized_paths': [] } for mypath in allpaths_on_tf: xmin, xmax, ymin, ymax = mypath.path.bbox() start_line_number = page.get_line_number(mypath.path.start.imag-tr_ymin) if abs(xmax-xmin) < 1 and abs(ymax-ymin) < 1: path_dict.get('dots_paths').append(mypath) elif abs(ymax-ymin) > MAX_HEIGHT_LINES and abs(ymax-ymin) < max_line and mypath.path.iscontinuous() and mypath.path.isclosed(): path_dict.get('box_paths').append(mypath) elif abs(ymax-ymin) > MAX_HEIGHT_LINES and abs(ymax-ymin) > max_line and mypath.path.iscontinuous() and not mypath.path.isclosed(): path_dict.get('word_connector_paths').append(mypath) elif abs(ymax-ymin) < MAX_HEIGHT_LINES: + mypath.start_line_number = start_line_number path_dict.get('deletion_or_underline_paths').append(mypath) elif start_line_number != -1 and start_line_number != page.get_line_number(mypath.path.end.imag-tr_ymin): - path_dict.get('text_area_deletion_paths').append(mypath) + # Check for "ladder", i.e. 
            elif start_line_number != -1 and start_line_number != page.get_line_number(mypath.path.end.imag-tr_ymin):
-                path_dict.get('text_area_deletion_paths').append(mypath)
+                # Check for "ladder", i.e. a path with 3 segments (seg0 is horizontal on line x,
+                # seg1 moves to line x+1, seg2 is horizontal on line x+1)
+                if start_line_number + 1 == page.get_line_number(mypath.path.end.imag-tr_ymin)\
+                and len(mypath.path._segments) == 3\
+                and abs(mypath.path._segments[0].bbox()[3]-mypath.path._segments[0].bbox()[2]) < MAX_HEIGHT_LINES\
+                and abs(mypath.path._segments[2].bbox()[3]-mypath.path._segments[2].bbox()[2]) < MAX_HEIGHT_LINES:
+                    for index in 0, 2:
+                        new_path = Path(parent_path=mypath, path=SVGPath(mypath.path._segments[index]))
+                        new_path.start_line_number = page.get_line_number(new_path.path.start.imag-tr_ymin)
+                        path_dict.get('deletion_or_underline_paths').append(new_path)
+                else:
+                    path_dict.get('text_area_deletion_paths').append(mypath)
            else:
                path_dict.get('uncategorized_paths').append(mypath)
        underline_path = mark_words_intersecting_with_paths_as_deleted(page, path_dict.get('deletion_or_underline_paths'), tr_xmin, tr_ymin)
        path_dict.update({'underline_path': underline_path})
        process_word_boxes(page, path_dict.get('box_paths'), transkription_field,\
                paths=allpaths_outside_tf, attributes=attributes_outside_tf, max_line=max_line)
        return path_dict
    elif not UNITTESTING:
        error_msg = 'Svg source file {} does not exist!'.format(page.source)\
                if page.source is not None else 'Page does not contain a source file!'
        raise FileNotFoundError(error_msg)
    return {}

-def do_paths_intersect_saveMode(path1, path2):
+def do_paths_intersect_saveMode(mypath1, mypath2):
    """Returns true if paths intersect, false if not or if there was an exception.
    """
    try:
-        return path1.intersect(path2, justonemode=True)
+        return mypath1.path.intersect(mypath2.path, justonemode=True)\
+        or mypath1.is_partially_contained_by(mypath2)
    except AssertionError:
        return False
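The "ladder" case handled above, in a runnable sketch built from svgpathtools primitives: one 3-segment zig-zag covering two lines is split into two flat deletion strokes (illustrative coordinates, not part of the patch):

    from svgpathtools.path import Line, Path as SVGPath

    ladder = SVGPath(Line(10+100j, 60+100j),   # seg0: horizontal stroke on line x
                     Line(60+100j, 15+120j),   # seg1: connector down to line x+1
                     Line(15+120j, 70+120j))   # seg2: horizontal stroke on line x+1
    strokes = [ SVGPath(ladder._segments[index]) for index in (0, 2) ]
    # both extracted strokes are flat, i.e. ymax - ymin < MAX_HEIGHT_LINES
    assert all(abs(stroke.bbox()[3] - stroke.bbox()[2]) < 1 for stroke in strokes)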
""" if page.source is None or not isfile(page.source): raise FileNotFoundError('Page does not have a source!') if transkription_field is None: transkription_field = TranskriptionField(page.source) special_char_list = MarkForeignHands.get_special_char_list() special_char_list += TextConnectionMark.get_special_char_list() single_char_words = [ word for word in page.words if len(word.text) == 1 and word.text in special_char_list ] if not UNITTESTING: bar = Bar('find special words', max=len(single_char_words)) for word in single_char_words: not bool(UNITTESTING) and bar.next() if word.text == MarkForeignHands.CLASS_MARK: id = len(page.mark_foreign_hands) page.mark_foreign_hands.append(MarkForeignHands.create_cls_from_word(word, id=id)) page.words.remove(word) elif word.text in TextConnectionMark.SPECIAL_CHAR_LIST[0]\ or (word.text in TextConnectionMark.SPECIAL_CHAR_LIST\ and any(style in page.sonderzeichen_list for style\ in word.transkription_positions[0].positional_word_parts[0].style_class.split(' '))): id = len(page.text_connection_marks) page.text_connection_marks.append(TextConnectionMark.create_cls_from_word(word, id=id)) page.words.remove(word) not bool(UNITTESTING) and bar.finish() svg_tree = ET.parse(page.source) page.update_page_type(transkription_field=transkription_field) page.update_line_number_area(transkription_field, svg_tree=svg_tree) italic_classes = [ key for key in page.style_dict\ if bool(page.style_dict[key].get('font-family')) and page.style_dict[key]['font-family'].endswith('Italic') ] if len(page.mark_foreign_hands) > 0: MarkForeignHands.find_content(page.mark_foreign_hands, transkription_field, svg_tree, italic_classes=italic_classes,\ SonderzeichenList=page.sonderzeichen_list) if len(page.text_connection_marks) > 0: TextConnectionMark.find_content_in_footnotes(page.text_connection_marks, transkription_field, svg_tree,\ title=page.title, page_number=page.number) def mark_words_intersecting_with_paths_as_deleted(page, deletion_paths, tr_xmin=0.0, tr_ymin=0.0): """Marks all words that intersect with deletion paths as deleted and adds these paths to word_deletion_paths. 
    [:return:] list of .path.Path that might be word_underline_paths
    """
    if not UNITTESTING:
        bar = Bar('mark words that intersect with deletion paths', max=len(page.words))
    for word in page.words:
        not bool(UNITTESTING) and bar.next()
-        word.deleted = False
-        for transkription_position in word.transkription_positions:
-            word_path = Path.create_path_from_transkription_position(transkription_position,\
-                    tr_xmin=tr_xmin, tr_ymin=tr_ymin)
-            intersecting_paths = [ deletion_path for deletion_path in deletion_paths\
-                    if do_paths_intersect_saveMode(deletion_path.path, word_path.path) ]
-            if len(intersecting_paths) > 0:
-                transkription_position.deleted = True
-                for deletion_path in intersecting_paths:
-                    if deletion_path not in page.word_deletion_paths:
-                        deletion_path.tag = Path.WORD_DELETION_PATH_TAG
-                        deletion_path.attach_object_to_tree(page.page_tree)
-                        page.word_deletion_paths.append(deletion_path)
+        word = mark_word_if_it_intersects_with_paths_as_deleted(word, page, deletion_paths, tr_xmin=tr_xmin, tr_ymin=tr_ymin)
+        for part_word in word.word_parts:
+            part_word = mark_word_if_it_intersects_with_paths_as_deleted(part_word, page, deletion_paths, tr_xmin=tr_xmin, tr_ymin=tr_ymin)
        word.partition_according_to_deletion()
    not bool(UNITTESTING) and bar.finish()
    # return those paths in deletion_paths that are not in page.word_deletion_paths
    return [ word_underline_path for word_underline_path in set(deletion_paths) - set(page.word_deletion_paths) ]

+def mark_word_if_it_intersects_with_paths_as_deleted(word, page, deletion_paths, tr_xmin=0.0, tr_ymin=0.0):
+    """Marks the word as deleted if it intersects with deletion paths
+    and adds these paths to word_deletion_paths.
+
+    [:return:] word
+    """
+    word.deleted = False
+    for transkription_position in word.transkription_positions:
+        word_path = Path.create_path_from_transkription_position(transkription_position,\
+                tr_xmin=tr_xmin, tr_ymin=tr_ymin)
+        intersecting_paths = [ deletion_path for deletion_path in deletion_paths\
+                if do_paths_intersect_saveMode(deletion_path, word_path) ]
+        if DEBUG_WORD is not None and word.text == DEBUG_WORD.text and word.line_number == DEBUG_WORD.line_number:
+            relevant_paths = [ path for path in deletion_paths if path.start_line_number == DEBUG_WORD.line_number ]
+            print(word.line_number, word_path.path.bbox(), [ path.path.bbox() for path in relevant_paths])
+        if len(intersecting_paths) > 0:
+            #print(f'{word.id}, {word.text}: {intersecting_paths}')
+            transkription_position.deleted = True
+            for deletion_path in intersecting_paths:
+                if deletion_path.parent_path is not None:
+                    deletion_path = deletion_path.parent_path
+                if deletion_path not in page.word_deletion_paths:
+                    deletion_path.tag = Path.WORD_DELETION_PATH_TAG
+                    deletion_path.attach_object_to_tree(page.page_tree)
+                    page.word_deletion_paths.append(deletion_path)
+    return word
+
def post_merging_processing_and_saving(svg_pos_file=None, new_words=None, page=None, manuscript_file=None, target_svg_pos_file=None):
    """Process words after merging with faksimile word positions.
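
    Processing steps: find_special_words, page.update_styles, categorize_paths,
    page.update_and_attach_words2tree; outside of unit tests the status of the
    svg_pos_file is updated afterwards and the page tree is written back.

    A minimal usage sketch (the file names are hypothetical):

        post_merging_processing_and_saving(svg_pos_file='xml/N_VII_1_page005.xml',\
                manuscript_file='xml/N_VII_1.xml')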
""" if page is None and svg_pos_file is None: raise Exception('ERROR: post_merging_processing_and_saving needs either a Page or a svg_pos_file!') if page is None: page = Page(svg_pos_file) if page.source is None or not isfile(page.source): raise FileNotFoundError('Page instantiated from {} does not contain an existing source!'.format(svg_pos_file)) if svg_pos_file is None: svg_pos_file = page.page_tree.docinfo.URL if new_words is not None: page.words = sorted(new_words, key=attrgetter('id')) - for word_node in page.page_tree.xpath('.//word'): - word_node.getparent().remove(word_node) + for word_node in page.page_tree.xpath('.//word'): + word_node.getparent().remove(word_node) manuscript = ArchivalManuscriptUnity.create_cls(manuscript_file)\ if manuscript_file is not None\ else None transkription_field = TranskriptionField(page.source) find_special_words(page, transkription_field=transkription_field) #update_writing_process_ids(page) page.update_styles(manuscript=manuscript, partition_according_to_styles=True) #TODO: find_hyphenated_words(page) categorize_paths(page, transkription_field=transkription_field) page.update_and_attach_words2tree() if not UNITTESTING: if target_svg_pos_file is None: target_svg_pos_file = svg_pos_file status = STATUS_MERGED_OK + ":" + STATUS_POSTMERGED_OK update_svgposfile_status(svg_pos_file, manuscript_file=manuscript_file, status=status) write_pretty(xml_element_tree=page.page_tree, file_name=target_svg_pos_file, script_name=__file__, file_type=FILE_TYPE_SVG_WORD_POSITION) def process_word_boxes(page, box_paths, transkription_field, paths=None, attributes=None, max_line=17): """Process word boxes: partition words according to word boxes. """ MAX_HEIGHT_LINES = 1 if not UNITTESTING: bar = Bar('process word boxes', max=len(page.words)) svg_tree = ET.parse(page.source) namespaces = { k if k is not None else 'ns': v for k, v in svg_tree.getroot().nsmap.items() } allpaths_on_margin_field = [] if paths is None or attributes is None: paths, attributes = svg_to_paths.svg2paths(page.source) for index in range(0, len(paths)): path = paths[index] xmin, xmax, ymin, ymax = path.bbox() attribute = attributes[index] if len(path) > 0\ and path != transkription_field.path\ and ((path.bbox()[1] < transkription_field.xmin and transkription_field.is_page_verso())\ or (path.bbox()[0] > transkription_field.xmax and not transkription_field.is_page_verso()))\ and abs(ymax-ymin) < max_line: allpaths_on_margin_field.append(Path(id=index, path=path, style_class=attribute.get('class'))) box_line_number_dict = {} for box_path in sorted(box_paths, key=lambda path: path.get_median_y()): line_number = page.get_line_number(box_path.get_median_y(tr_ymin=transkription_field.ymin)) if line_number not in box_line_number_dict.keys(): box_line_number_dict.update({ line_number: [ box_path ]}) else: box_line_number_dict.get(line_number).append(box_path) boxes = [] for line_number in box_line_number_dict.keys(): box_paths_on_line = sorted(box_line_number_dict[line_number], key=lambda path: path.get_x()) margin_boxes_on_line = sorted([ margin_box for margin_box in allpaths_on_margin_field\ if page.get_line_number(margin_box.get_median_y(tr_ymin=transkription_field.ymin)) == line_number ],\ key=lambda path: path.get_x()) threshold = 3 if line_number % 2 == 0 else 1.5 for box_path in box_paths_on_line: box = Box.create_box(box_path, margin_boxes_on_line, svg_tree=svg_tree,\ transkription_field=transkription_field, namespaces=namespaces, threshold=threshold) if box is not None: boxes.append(box) if 
len(boxes) > 0: for word in page.words: not bool(UNITTESTING) and bar.next() word.process_boxes(boxes, tr_xmin=transkription_field.xmin, tr_ymin=transkription_field.ymin) word.create_correction_history(page) not bool(UNITTESTING) and bar.finish() def reset_page(page): """Reset all words that have word_parts in order to run the script a second time. """ word_with_wordparts = [ word for word in page.words if len(word.word_parts) > 0 ] word_with_wordparts += [ word for word in page.words if word.earlier_version is not None ] page_changed = False if len(word_with_wordparts) > 0: for word in word_with_wordparts: word.undo_partitioning() update_transkription_position_ids(word) page_changed = True no_line_numbers = [ word for word in page.words if word.line_number == -1 ] if len(no_line_numbers) > 0: for word in no_line_numbers: if len(word.transkription_positions) > 0: word.line_number = page.get_line_number((word.transkription_positions[0].top+word.transkription_positions[0].bottom)/2) else: msg = f'Word {word.id} {word.text} has no transkription_position!' warnings.warn(msg) page_changed = True if page_changed: page.update_and_attach_words2tree() def update_writing_process_ids(page): """Update the writing_process_ids of the words and split accordingly. """ for word in page.words: word.set_writing_process_id_to_transkription_positions(page) word.partition_according_to_writing_process_id() def usage(): """prints information on how to use the script """ print(main.__doc__) def main(argv): """This program can be used to process words after they have been merged with faksimile data. svgscripts/process_words_post_merging.py [OPTIONS] a xml file about a manuscript, containing information about its pages. a xml file about a page, containing information about svg word positions. OPTIONS: -h|--help show help -i|--include-missing-line-number run script on files that contain words without line numbers -r|--rerun rerun script on a svg_pos_file that has already been processed :return: exit code (int) """ status_not_contain = STATUS_POSTMERGED_OK include_missing_line_number = False try: opts, args = getopt.getopt(argv, "hir", ["help", "include-missing-line-number", "rerun" ]) except getopt.GetoptError: usage() return 2 for opt, arg in opts: if opt in ('-h', '--help'): usage() return 0 elif opt in ('-i', '--include-missing-line-number'): include_missing_line_number = True elif opt in ('-r', '--rerun'): status_not_contain = '' if len(args) < 1: usage() return 2 exit_status = 0 file_a = args[0] if isfile(file_a): manuscript_file = file_a\ if xml_has_type(FILE_TYPE_XML_MANUSCRIPT, xml_source_file=file_a)\ else None counter = 0 for page in Page.get_pages_from_xml_file(file_a, status_contains=STATUS_MERGED_OK, status_not_contain=status_not_contain): reset_page(page) no_line_numbers = [ word for word in page.words if word.line_number == -1 ] if not include_missing_line_number and len(no_line_numbers) > 0: not UNITTESTING and print(Fore.RED + f'Page {page.title}, {page.number} has words with no line number!') for word in no_line_numbers: not UNITTESTING and print(f'Word {word.id}: {word.text}') else: back_up(page, page.xml_file) not UNITTESTING and print(Fore.CYAN + f'Processing {page.title}, {page.number} ...' 
+ Style.RESET_ALL) post_merging_processing_and_saving(page=page, manuscript_file=manuscript_file) counter += 1 not UNITTESTING and print(Style.RESET_ALL + f'[{counter} pages processed]') else: raise FileNotFoundError('File {} does not exist!'.format(file_a)) return exit_status if __name__ == "__main__": sys.exit(main(sys.argv[1:])) Index: svgscripts/convert_wordPositions.py =================================================================== --- svgscripts/convert_wordPositions.py (revision 84) +++ svgscripts/convert_wordPositions.py (revision 85) @@ -1,388 +1,390 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ This program can be used to convert the word positions to HTML for testing purposes. """ # Copyright (C) University of Basel 2019 {{{1 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see 1}}} import cairosvg import getopt from lxml.html import builder as E from lxml.html import open_in_browser import lxml from os import sep, listdir, mkdir, path, remove from os.path import exists, isfile, isdir, dirname import re import sys from svgpathtools import svg_to_paths import xml.etree.ElementTree as ET if dirname(__file__) not in sys.path: sys.path.append(dirname(__file__)) from datatypes.matrix import Matrix from datatypes.page import Page from datatypes.page_creator import PageCreator from datatypes.transkriptionField import TranskriptionField from datatypes.writing_process import WritingProcess from datatypes.word import Word __author__ = "Christian Steiner" __maintainer__ = __author__ __copyright__ = 'University of Basel' __email__ = "christian.steiner@unibas.ch" __status__ = "Development" __license__ = "GPL v3" __version__ = "0.0.1" class Converter: """The converter super class. """ def __init__(self, page, non_testing=True, show_word_insertion_mark=False): self.page = page self.non_testing = non_testing self.show_word_insertion_mark = show_word_insertion_mark def _get_transkription_positions(self, transkription_positions, stage_version=''): """Returns the transkription_positions of the indicated stage_version. 
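
        stage_version may be '' (all positions), a single digit such as '1'
        (exactly that writing process id), a digit followed by '+' such as '1+'
        (that writing process id and all later ones) or a range such as '0-2'
        (both bounds inclusive).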
""" convertable_transkription_positions = transkription_positions if stage_version != '': convertable_transkription_positions = [] if re.match(r'^\d$', stage_version): writing_process_id = int(stage_version) for transkription_position in transkription_positions: if transkription_position.writing_process_id == writing_process_id: convertable_transkription_positions.append(transkription_position) elif re.match(r'^\d\+$', stage_version): version_range = [ *range(int(stage_version.replace('+','')), len(WritingProcess.VERSION_DESCRIPTION)) ] for transkription_position in transkription_positions: if transkription_position.writing_process_id in version_range: convertable_transkription_positions.append(transkription_position) elif re.match(r'^\d\-\d$', stage_version): start_stop = [ int(i) for i in re.split(r'-', stage_version) ] version_range = [ *range(start_stop[0], start_stop[1]+1) ] for transkription_position in transkription_positions: if transkription_position.writing_process_id in version_range: convertable_transkription_positions.append(transkription_position) return convertable_transkription_positions def _get_words(self, words, highlighted_words=None): """Return the words that will be hightlighted. """ return highlighted_words if highlighted_words is not None else words def convert(self, output_file=None, stage_version='', highlighted_words=None): """Prints all words. """ first_word_of_line = None out = sys.stdout if output_file is not None: out = open(output_file, 'w') for word in self.page.words: if first_word_of_line is None or first_word_of_line.line_number != word.line_number: out.write('\n') first_word_of_line = word if word.line_number % 2 == 0: out.write(str(word.line_number).zfill(2) + ' ') else: out.write(' ') if stage_version == '' or len(self._get_transkription_positions(word.transkription_positions, stage_version=stage_version)) > 0: if word.text is not None: out.write(word.text + ' ') out.close() @classmethod def CREATE_CONVERTER(cls, page, non_testing=True,converter_type='', show_word_insertion_mark=False): """Returns a converter of type converter_type. [:return:] SVGConverter for 'SVG', HTMLConverter for 'HTML', Converter for None """ cls_dict = { subclass.__name__: subclass for subclass in cls.__subclasses__() } cls_key = converter_type + 'Converter' if bool(cls_dict.get(cls_key)): return cls_dict.get(cls_key)(page, non_testing, show_word_insertion_mark) else: return Converter(page, non_testing, show_word_insertion_mark) class SVGConverter(Converter): """This class can be used to convert a 'svgWordPositions' xml file to a svg file that combines text as path and text-as-text. """ BG_COLOR = 'yellow' OPACITY = '0.2' def __init__(self, page, non_testing=True, show_word_insertion_mark=False, bg_color=BG_COLOR, opacity=OPACITY): Converter.__init__(self, page, non_testing, show_word_insertion_mark) self.bg_color = bg_color self.opacity = opacity def convert(self, output_file=None, stage_version='', highlighted_words=None): """Converts Page to SVG """ title = self.page.title if(self.page.title is not None) else 'Test Page' title = '{}, S. {}'.format(title, self.page.number) if (self.page.number is not None) else title svg_file = self.page.svg_file if svg_file is None and self.page.svg_image is not None: svg_file = self.page.svg_image.file_name elif svg_file is None: msg = f'ERROR: xml_source_file {self.page.docinfo.URL} does neither have a svg_file nor a svg_image!' 
raise Exception(msg) transkription_field = TranskriptionField(svg_file) if bool(transkription_field.get_svg_attributes('xmlns')): ET.register_namespace('', transkription_field.get_svg_attributes('xmlns')) if bool(transkription_field.get_svg_attributes('xmlns:xlink')): ET.register_namespace('xlink', transkription_field.get_svg_attributes('xmlns:xlink')) svg_tree = ET.parse(svg_file) transkription_node = ET.SubElement(svg_tree.getroot(), 'g', attrib={'id': 'Transkription'}) colors = [ 'yellow', 'orange' ] if self.bg_color == self.BG_COLOR else [ self.bg_color ] if highlighted_words is not None: colors = ['yellow'] else: highlighted_words = [] color_index = 0 for word in self.page.words: word_id = 'word_' + str(word.id) for transkription_position in self._get_transkription_positions(word.transkription_positions, stage_version=stage_version): transkription_position_id = word_id + '_' + str(transkription_position.id) color = colors[color_index] if word not in highlighted_words else self.bg_color rect_node = ET.SubElement(transkription_node, 'rect',\ attrib={'id': transkription_position_id, 'x': str(transkription_position.left + transkription_field.xmin),\ 'y': str(transkription_position.top + transkription_field.ymin), 'width': str(transkription_position.width),\ 'height': str(transkription_position.height), 'fill': color, 'opacity': self.opacity}) if transkription_position.transform is not None: matrix = transkription_position.transform.clone_transformation_matrix() matrix.matrix[Matrix.XINDEX] = round(transkription_position.transform.matrix[Matrix.XINDEX] + transkription_field.xmin, 3) matrix.matrix[Matrix.YINDEX] = round(transkription_position.transform.matrix[Matrix.YINDEX] + transkription_field.ymin, 3) rect_node.set('transform', matrix.toString()) rect_node.set('x', str(round(transkription_position.left - transkription_position.transform.matrix[Matrix.XINDEX], 3))) rect_node.set('y', str(round((transkription_position.height-1.5)*-1, 3))) ET.SubElement(rect_node, 'title').text = word.text color_index = (color_index + 1) % len(colors) if output_file is not None: svg_tree.write(output_file) class HTMLConverter(Converter): """This class can be used to convert a 'svgWordPositions' xml file to a test HTML file. """ CSS = """ .highlight0 { background-color: yellow; opacity: 0.2; } .highlight1 { background-color: pink; opacity: 0.2; } .foreign { background-color: blue; opacity: 0.4; } .word-insertion-mark { background-color: orange; opacity: 0.2; } .deleted { background-color: grey; opacity: 0.2; } """ def __init__(self, page, non_testing=True, show_word_insertion_mark=False): Converter.__init__(self, page, non_testing, show_word_insertion_mark) def convert(self, output_file=None, stage_version='', highlighted_words=None): """Converts Page to HTML """ title = self.page.title if(self.page.title is not None) else 'Test Page' title = '{}, S. 
{}'.format(title, self.page.number) if (self.page.number is not None) else title if stage_version != '': title = title + ', Schreibstufe: ' + stage_version if self.page.svg_image is not None: width = self.page.svg_image.width height = self.page.svg_image.height svg_file = self.page.svg_image.file_name elif self.page.svg_file is not None: svg_file = self.page.svg_file transkription_field = TranskriptionField(svg_file) width = transkription_field.getWidth() height = transkription_field.getHeight() style_content = ' position: relative; width: {}px; height: {}px; background-image: url("{}"); background-size: {}px {}px '\ .format(width, height, path.abspath(svg_file), width, height) style = E.STYLE('#transkription {' + style_content + '}', HTMLConverter.CSS) head = E.HEAD(E.TITLE(title),E.META(charset='UTF-8'), style) transkription = E.DIV(id="transkription") counter = 0 for word in self.page.words: highlight_class = 'highlight' + str(counter)\ if not word.deleted else 'deleted' earlier_text = '' if word.earlier_version is None else word.earlier_version.text if earlier_text == '' and len(word.word_parts) > 0: earlier_versions = [ word for word in word.word_parts if word.earlier_version is not None ] earlier_text = earlier_versions[0].text if len(earlier_versions) > 0 else '' if earlier_text != '': word_title = 'id: {}/line: {}\n0: {}\n1: {}'.format(str(word.id), str(word.line_number), earlier_text, word.text) + if word.edited_text is not None: + word_title += f'\n{word.edited_text}' else: word_title = 'id: {}/line: {}\n{}'.format(str(word.id), str(word.line_number), word.text) for transkription_position in self._get_transkription_positions(word.transkription_positions, stage_version=stage_version): self._append2transkription(transkription, highlight_class, word_title, transkription_position) for part_word in word.word_parts: highlight_class = 'highlight' + str(counter)\ if not part_word.deleted else 'deleted' for part_transkription_position in self._get_transkription_positions(part_word.transkription_positions, stage_version=stage_version): self._append2transkription(transkription, highlight_class, word_title, part_transkription_position) counter = (counter + 1) % 2 word_insertion_mark_class = 'word-insertion-mark' counter = 0 for mark_foreign_hands in self.page.mark_foreign_hands: highlight_class = 'foreign' title = 'id: {}/line: {}\n{} {}'.format(str(mark_foreign_hands.id), str(word.line_number),\ mark_foreign_hands.foreign_hands_text, mark_foreign_hands.pen) for transkription_position in mark_foreign_hands.transkription_positions: self._append2transkription(transkription, highlight_class, title, transkription_position) if self.show_word_insertion_mark: for word_insertion_mark in self.page.word_insertion_marks: wim_title = 'id: {}/line: {}\nword insertion mark'.format(str(word_insertion_mark.id), str(word_insertion_mark.line_number)) style_content = 'position:absolute; top:{0}px; left:{1}px; width:{2}px; height:{3}px;'.format(\ word_insertion_mark.top, word_insertion_mark.left, word_insertion_mark.width, word_insertion_mark.height) link = E.A(' ', E.CLASS(word_insertion_mark_class), title=wim_title, style=style_content) transkription.append(link) html = E.HTML(head,E.BODY(transkription)) bool(self.non_testing) and open_in_browser(html) if output_file is not None: with open(output_file, 'wb') as f: f.write(lxml.html.tostring(html, pretty_print=True, include_meta_content_type=True, encoding='utf-8')) f.closed def _append2transkription(self, transkription, highlight_class, title, 
transkription_position): """Append content to transkription-div. """ style_content = 'position:absolute; top:{0}px; left:{1}px; width:{2}px; height:{3}px;'.format(\ transkription_position.top, transkription_position.left, transkription_position.width, transkription_position.height) if transkription_position.transform is not None: style_content = style_content + ' transform: {}; '.format(transkription_position.transform.toCSSTransformString()) transform_origin_x = (transkription_position.left-round(transkription_position.transform.getX(), 1))*-1\ if (transkription_position.left-round(transkription_position.transform.getX(), 1))*-1 < 0 else 0 style_content = style_content + ' transform-origin: {}px {}px; '.format(transform_origin_x, transkription_position.height) link = E.A(' ', E.CLASS(highlight_class), title=title, style=style_content) transkription.append(link) def create_pdf_with_highlighted_words(xml_source_file=None, page=None, highlighted_words=None, pdf_file_name='output.pdf', bg_color=SVGConverter.BG_COLOR): """Creates a pdf file highlighting some words. """ if not pdf_file_name.endswith('pdf'): pdf_file_name = pdf_file_name + '.pdf' tmp_svg_file = pdf_file_name.replace('.pdf', '.svg') create_svg_with_highlighted_words(xml_source_file=xml_source_file, page=page, highlighted_words=highlighted_words,\ svg_file_name=tmp_svg_file, bg_color=bg_color) if isfile(tmp_svg_file): cairosvg.svg2pdf(url=tmp_svg_file, write_to=pdf_file_name) remove(tmp_svg_file) def create_svg_with_highlighted_words(xml_source_file=None, page=None, highlighted_words=None, svg_file_name='output.svg', bg_color=SVGConverter.BG_COLOR): """Creates a svg file highlighting some words. """ if page is None and xml_source_file is not None: page = Page(xml_source_file) converter = SVGConverter(page, bg_color=bg_color) if not svg_file_name.endswith('svg'): svg_file_name = svg_file_name + '.svg' converter.convert(output_file=svg_file_name, highlighted_words=highlighted_words) def usage(): """prints information on how to use the script """ print(main.__doc__) def main(argv): """This program can be used to convert the word positions to HTML, SVG or TEXT for testing purposes. svgscripts/convert_wordPositions.py OPTIONS OPTIONS: -h|--help: show help -H|--HTML [default] convert to HTML test file -o|--output=outputFile save output to file outputFile -P|--PDF convert to PDF test file -S|--SVG convert to SVG test file -s|--svg=svgFile: svg web file -T|--TEXT convert to TEXT output -t|--testing execute in test mode, do not write to file or open browser -w|--word-insertion-mark show word insertion mark on HTML -v|--version=VERSION show words that belong to writing process VERSION: { 0, 1, 2, 0-1, 0+, etc. 
} :return: exit code (int) """ convert_to_type = None svg_file = None output_file = None non_testing = True show_word_insertion_mark = False page = None stage_version = '' try: opts, args = getopt.getopt(argv, "htHPSTws:o:v:", ["help", "testing", "HTML", "PDF", "SVG", "TEXT", "word-insertion-mark", "svg=", "output=", "version="]) except getopt.GetoptError: usage() return 2 for opt, arg in opts: if opt in ('-h', '--help') or not args: usage() return 0 elif opt in ('-v', '--version'): if re.match(r'^(\d|\d\+|\d\-\d)$', arg): stage_version = arg else: raise ValueError('OPTION -v|--version=VERSION does not work with "{}" as value for VERSION!'.format(arg)) elif opt in ('-w', '--word-insertion-mark'): show_word_insertion_mark = True elif opt in ('-P', '--PDF'): convert_to_type = 'PDF' elif opt in ('-S', '--SVG'): convert_to_type = 'SVG' elif opt in ('-T', '--TEXT'): convert_to_type = 'TEXT' elif opt in ('-H', '--HTML'): convert_to_type = 'HTML' elif opt in ('-t', '--testing'): non_testing = False elif opt in ('-s', '--svg'): svg_file = arg elif opt in ('-o', '--output'): output_file = arg if len(args) < 1: usage() return 2 if convert_to_type is None: if output_file is not None and len(re.split(r'\.', output_file)) > 1: output_file_part_list = re.split(r'\.', output_file) convert_to_type = output_file_part_list[len(output_file_part_list)-1].upper() else: convert_to_type = 'HTML' for word_position_file in args: if not isfile(word_position_file): print("'{}' does not exist!".format(word_position_file)) return 2 if convert_to_type == 'PDF': if output_file is None: output_file = 'output.pdf' create_pdf_with_highlighted_words(word_position_file, pdf_file_name=output_file) else: if svg_file is not None: if isfile(svg_file): page = PageCreator(word_position_file, svg_file=svg_file) else: print("'{}' does not exist!".format(word_position_file)) return 2 else: page = Page(word_position_file) if page.svg_file is None: print('Please specify a svg file!') usage() return 2 converter = Converter.CREATE_CONVERTER(page, non_testing=non_testing, converter_type=convert_to_type, show_word_insertion_mark=show_word_insertion_mark) converter.convert(output_file=output_file, stage_version=stage_version) return 0 if __name__ == "__main__": sys.exit(main(sys.argv[1:])) Index: tests_svgscripts/test_word.py =================================================================== --- tests_svgscripts/test_word.py (revision 84) +++ tests_svgscripts/test_word.py (revision 85) @@ -1,434 +1,455 @@ import unittest from os import sep, path import lxml.etree as ET import sys sys.path.append('svgscripts') from process_words_post_merging import reset_page, update_writing_process_ids from datatypes.box import Box from datatypes.manuscript import ArchivalManuscriptUnity from datatypes.matrix import Matrix import datatypes.page from datatypes.path import Path from datatypes.positional_word_part import PositionalWordPart from datatypes.style import Style from datatypes.transkriptionField import TranskriptionField from datatypes.transkription_position import TranskriptionPosition from datatypes.word import Word, execute_function_on_parts, update_transkription_position_ids from datatypes.word_position import WordPosition sys.path.append('py2ttl') from class_spec import SemanticClass class Page: def __init__(self): self.svg_file = None def get_line_number(self, input=0): return -1 def get_biggest_fontSize4styles(self, style_set={}): return 7 class TestWord(unittest.TestCase): TESTCASE = None def setUp(self): DATADIR = path.dirname(__file__) + 
sep + 'test_data' self.test_file = DATADIR + sep + 'N_VII_1_page009.xml' self.pdf_xml = DATADIR + sep + 'W_I_8_page125.xml' self.pdf_xml_source = DATADIR + sep + 'W_I_8_neu_125-01.svg' self.word_part_objs = [{'text': 'a' }, {'text': 'b' }, {'text': 'c' }] x = 0 for dict in self.word_part_objs: dict['class'] = 'st22' dict['x'] = x dict['y'] = 11 x += 1 mylist = {'text': 'abc', 'id': '0', 'line-number': '2', 'deleted': 'true' } word_position = TranskriptionPosition(x=0, y=1, height=10, width=10, matrix=Matrix('matrix(0.94 0.342 -0.342 0.94 0 0)')) self.transkription_positions = [ word_position ] self.word_node = ET.Element('word', attrib=mylist) word_position.attach_object_to_tree(self.word_node) x = 0 for char in mylist['text']: ET.SubElement(self.word_node, 'part', attrib={'text': char, 'x': str(x), 'y': '11', 'class': 'st22' }) x += 1 def test_Word_with_word_part_objs(self): word = Word.CREATE_WORD(word_part_objs=self.word_part_objs, height=10, endX=10) self.assertEqual(word.id, 0) self.assertEqual(word.transkription_positions[0].bottom, 13) self.assertEqual(word.transkription_positions[0].height, 10) self.assertEqual(word.transkription_positions[0].top, 3) self.assertEqual(word.transkription_positions[0].left, 0) self.assertEqual(word.transkription_positions[0].width, 10) self.assertEqual(word.text, 'abc') def test_Word_with_word_node(self): word = Word.create_cls(self.word_node) self.assertEqual(word.id, 0) self.assertEqual(word.deleted, True) self.assertEqual(word.transkription_positions[0].bottom, 11) self.assertEqual(word.transkription_positions[0].height, 10) self.assertEqual(word.transkription_positions[0].top, 1) self.assertEqual(word.transkription_positions[0].left, 0) self.assertEqual(word.transkription_positions[0].width, 10) self.assertEqual(word.text, 'abc') self.assertEqual(word.line_number, 2) self.assertEqual(word.transkription_positions[0].transform.isRotationMatrix(), True) def test_attach_word_to_tree(self): newWord = Word.CREATE_WORD(word_part_objs=self.word_part_objs, height=10, endX=10) empty_tree = ET.ElementTree(ET.Element('page')) newWord.attach_word_to_tree(empty_tree) for word_node in empty_tree.getroot().xpath('//word'): word = Word.CREATE_WORD(word_node=word_node) self.assertEqual(word.id, 0) self.assertEqual(word.deleted, False) self.assertEqual(word.transkription_positions[0].bottom, 13) self.assertEqual(word.transkription_positions[0].height, 10) self.assertEqual(word.transkription_positions[0].top, 3) self.assertEqual(word.transkription_positions[0].left, 0) self.assertEqual(word.transkription_positions[0].width, 10) self.assertEqual(word.text, 'abc') @unittest.skipUnless(TESTCASE is None or TESTCASE == 0, 'Not testing this case') def test_create_correction_history_case0(self): # Case 1: whole word over box box = Box(earlier_text='XYX') word = Word(text='ASDF', transkription_positions=[TranskriptionPosition()]) word.word_box = box word.create_correction_history() self.assertEqual(word.earlier_version is None, True) self.assertEqual(word.overwrites_word is not None, True) @unittest.skipUnless(TESTCASE is None or TESTCASE == 1, 'Not testing this case') def test_create_correction_history_case1(self): # Case 2: part of word over box box = Box(earlier_text='XYX') partA = Word(text='A', transkription_positions=[TranskriptionPosition()]) partA.word_box = box partB = Word(text='SDF', transkription_positions=[TranskriptionPosition()]) word = Word(text='ASDF', word_parts=[ partA, partB]) word.create_correction_history() self.assertEqual(word.earlier_version is None, 
True) self.assertEqual(word.word_parts[0].overwrites_word is not None, True) @unittest.skipUnless(TESTCASE is None or TESTCASE == 2, 'Not testing this case') def test_create_correction_history_case3(self): # Case 3: part of word over box, word under box is part of earlier version box = Box(earlier_text='XYX') tp0 = TranskriptionPosition() tp0.style = Style(writing_process_id=0) tp1 = TranskriptionPosition() tp1.style = Style(writing_process_id=1) partA = Word(id=0, text='Test', transkription_positions=[ tp0]) partB = Word(id=1, text='er', transkription_positions=[ tp1]) partB.word_box = box word = Word(text='Tester', writing_process_id=1, word_parts=[ partA, partB ] ) word.create_correction_history(box_style=tp0.style) self.assertEqual(word.text, 'Tester') self.assertEqual(word.earlier_version is not None, True) self.assertEqual(word.earlier_version.text, 'TestXYX') self.assertEqual(word.word_parts[1].isTransformationOfWord, word.earlier_version.word_parts[1]) @unittest.skipUnless(TESTCASE is None or TESTCASE == 3, 'Not testing this case') def test_create_correction_history_case4(self): # Case 4: part of word is deleted partA = Word(id=0, text='A', deleted=True, transkription_positions=[TranskriptionPosition()]) partB = Word(id=1, text='SDF', transkription_positions=[TranskriptionPosition()]) word = Word(text='ASDF', word_parts=[ partA, partB]) word.create_correction_history() self.assertEqual(word.earlier_version is not None, True) self.assertEqual(word.word_parts[0].isDeletionOfWord is not None, True) self.assertEqual(word.word_parts[0].isDeletionOfWord, word.earlier_version.word_parts[0]) self.assertEqual(word.edited_text, 'SDF') @unittest.skipUnless(TESTCASE is None or TESTCASE == 4, 'Not testing this case') def test_create_correction_history_case5(self): tp0 = TranskriptionPosition() tp0.style = Style(writing_process_id=0) tp1 = TranskriptionPosition() tp1.style = Style(writing_process_id=1) partA = Word(id=0, text='Test', transkription_positions=[ tp0]) partB = Word(id=1, text='er', transkription_positions=[ tp1]) word = Word(text='Tester', word_parts=[ partA, partB ] ) word.create_correction_history() self.assertEqual(word.earlier_version is not None, True) self.assertEqual(word.word_parts[1].extendsEarlierVersion, True) self.assertEqual(word.word_parts[1].isExtensionOfWord, word.earlier_version) #@unittest.skipUnless(TESTCASE is None or TESTCASE == 5, 'Not testing this case') #@unittest.skip('case tested, relies on a local xml file') def test_create_correction_history_case_full(self): page = datatypes.page.Page('xml/N_VII_1_page138.xml') manuscript = ArchivalManuscriptUnity() reset_page(page) update_writing_process_ids(page) - word = page.words[77] + word = page.words[18] + wordAufBau = page.words[77] + #page.words = [ word ] + page.update_styles(manuscript=manuscript, partition_according_to_styles=True) + word.word_parts[0].transkription_positions[0].has_box = Box(earlier_text='v') + self.assertEqual(len(word.word_parts), 2) + word_over_box = word._get_partial_word_over_box() + update_transkription_position_ids(word) + word.create_correction_history(page) + self.assertEqual(word.writing_process_id, 1) + self.assertEqual(word.earlier_version is not None, True) + self.assertEqual(word.earlier_version.text, 'verschiedenes') + #print(word.earlier_version.id, [ (w.id, w.text) for w in word.earlier_version.word_parts ]) + empty_tree = ET.ElementTree(ET.Element('page')) + word_node = word.attach_word_to_tree(empty_tree) + #print(ET.dump(word_node)) + """ + 
self.assertEqual(word.word_parts[0].isDeletionOfWord, word.earlier_version.word_parts[0]) + self.assertEqual(word.word_parts[1].isTransformationOfWord, word.earlier_version.word_parts[1]) + self.assertEqual(word.word_parts[1].overwrites_word is not None, True) + """ + word = wordAufBau page.words = [ word ] page.update_styles(manuscript=manuscript, partition_according_to_styles=True) word.word_parts[0].deleted = True word.word_parts[1].transkription_positions[0].has_box = Box(earlier_text='b') self.assertEqual(len(word.word_parts), 3) word_over_box = word._get_partial_word_over_box() self.assertEqual(len(word.word_parts), 3) update_transkription_position_ids(word) word.create_correction_history(page) self.assertEqual(word.writing_process_id, 2) self.assertEqual(word.earlier_version is not None, True) self.assertEqual(word.text, 'AufBau') self.assertEqual(word.edited_text, 'Bau') self.assertEqual(word.earlier_version.text, 'Aufbau') self.assertEqual(word.word_parts[0].isDeletionOfWord, word.earlier_version.word_parts[0]) self.assertEqual(word.word_parts[1].isTransformationOfWord, word.earlier_version.word_parts[1]) self.assertEqual(word.word_parts[1].overwrites_word is not None, True) empty_tree = ET.ElementTree(ET.Element('page')) word_node = word.attach_word_to_tree(empty_tree) #print(ET.dump(word_node)) newWord = Word.create_cls(word_node) #@unittest.skip('') def test_earlier_version(self): partA = Word(id=0, text='A', deleted=True, transkription_positions=[TranskriptionPosition()]) partB = Word(id=1, text='SDF', transkription_positions=[TranskriptionPosition()]) word = Word(text='ASDF', word_parts=[ partA, partB]) earlier_version = word.create_earlier_version() self.assertEqual(earlier_version is not None, True) self.assertEqual(word.word_parts[0].isDeletionOfWord is not None, True) self.assertEqual(word.word_parts[0].isDeletionOfWord, earlier_version.word_parts[0]) def test_undo_partitioning(self): tps = [] for i, xy in enumerate([ 3, 4, 5 ]): tps.append(TranskriptionPosition(id=i, x=xy, y=xy, height=10, width=10)) partA = Word(id=0, text='Auf', writing_process_id=1, deleted=True, transkription_positions=[ tps[0]]) partB = Word(id=1, text='B', writing_process_id=2, transkription_positions=[tps[1]]) partC = Word(id=2, text='au', writing_process_id=1,transkription_positions=[tps[2]]) word = Word(text='Aufbau', writing_process_id=2, word_parts=[ partA, partB, partC ] ) word.undo_partitioning() self.assertEqual(len(word.transkription_positions), len(tps)) self.assertEqual(len(word.word_parts), 0) """ page = datatypes.page.Page('xml/N_VII_1_page138.xml') word = page.words[77] word.undo_partitioning() self.assertEqual(len(word.word_parts), 0) self.assertEqual(len(word.transkription_positions), 3) update_transkription_position_ids(word) empty_tree = ET.ElementTree(ET.Element('page')) word_node = word.attach_word_to_tree(empty_tree) print(ET.dump(word_node)) """ def test_split(self): page = Page() pwps = PositionalWordPart.CREATE_SIMPLE_POSITIONAL_WORD_PART_LIST(page, self.word_part_objs) transkription_positions = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(pwps) word = Word(text=''.join([pwp.text for pwp in pwps]), transkription_positions=transkription_positions) previousWord, currentWord, nextWord = word.split('b') self.assertEqual(previousWord.id, 0) self.assertEqual(previousWord.text, 'a') self.assertEqual(currentWord.id, 1) self.assertEqual(nextWord.id, 2) word = Word(text=''.join([pwp.text for pwp in pwps]), transkription_positions=transkription_positions) 
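        # splitting on a multi-character substring: 'a' becomes the previous word,
        # the matched part 'bc' becomes the current word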
previousWord, currentWord, nextWord = word.split('bc') self.assertEqual(previousWord.id, 0) self.assertEqual(previousWord.text, 'a') self.assertEqual(currentWord.id, 1) word = Word(text=''.join([pwp.text for pwp in pwps]), transkription_positions=transkription_positions) previousWord, currentWord, nextWord = word.split('ab', start_id=10) self.assertEqual(currentWord.id, 10) self.assertEqual(currentWord.text, 'ab') self.assertEqual(currentWord.transkription_positions[0].width, 2.1) self.assertEqual(nextWord.id, 11) self.assertEqual(nextWord.transkription_positions[0].width, 5.2) word_part_objs=[{'text': 'x', 'class':'st22', 'x': 0, 'y': 0},\ {'text': 'Insofern', 'class':'st22', 'x': 1, 'y': 0},\ {'text': 'x', 'class':'st22', 'x': 10, 'y': 0}] pwps = PositionalWordPart.CREATE_SIMPLE_POSITIONAL_WORD_PART_LIST(page, word_part_objs) transkription_positions = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(pwps) word = Word(text=''.join([pwp.text for pwp in pwps]), transkription_positions=transkription_positions) with self.assertWarns(Warning): previousWord, currentWord, nextWord = word.split('Insofer') word_part_objs=[{'text': 'xInsofern', 'class':'st22', 'x': 0, 'y': 0}] pwps = PositionalWordPart.CREATE_SIMPLE_POSITIONAL_WORD_PART_LIST(page, word_part_objs) transkription_positions = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(pwps) word = Word(text=''.join([pwp.text for pwp in pwps]), transkription_positions=transkription_positions) with self.assertWarns(Warning): previousWord, currentWord, nextWord = word.split('Insofern') def test_join(self): word = Word.CREATE_WORD(word_part_objs=self.word_part_objs, height=10, endX=10) other_word = Word.CREATE_WORD(word_part_objs=[{'text': '.', 'class':'st22', 'x': 3, 'y': 11}]) word.join(other_word) self.assertEqual(word.text, 'abc.') other_word = Word.CREATE_WORD(word_part_objs=[{'text': '.', 'class':'st22', 'x': 3, 'y': 11}]) word.join(other_word, append_at_end_of_new_word=False) self.assertEqual(word.text, '.abc.') """ tree = ET.ElementTree(ET.Element('page')) word.attach_word_to_tree(tree) print(ET.dump(tree.getroot())) """ def test_get_semanticAndDataDict(self): dictionary = Word.get_semantic_dictionary() info_dict = dictionary['properties'].get('isDeletionOfWord') self.assertEqual(SemanticClass.SUPER_PROPERTY in info_dict.keys(), True) super_info_dict = info_dict[SemanticClass.SUPER_PROPERTY] #print(info_dict[SemanticClass.SUPER_PROPERTY].get(SemanticClass.PROPERTY_NAME)) def test_simplify_transkription_positions(self): node_string = """ """ nodeA = ET.fromstring(node_string) node_string = """ """ nodeB = ET.fromstring(node_string) word = Word(text="Si", transkription_positions=[ TranskriptionPosition(node=nodeA), TranskriptionPosition(node=nodeB) ]) self.assertEqual(len(word.transkription_positions), 2) word.simplify_transkription_positions() self.assertEqual(len(word.transkription_positions), 1) word = Word(text="Si", transkription_positions=[ TranskriptionPosition(node=nodeA), TranskriptionPosition(node=nodeB) ]) word.transkription_positions[1].writing_process_id = -1 word.simplify_transkription_positions() self.assertEqual(len(word.transkription_positions), 1) self.assertEqual(word.transkription_positions[0].writing_process_id, 0) """ tree = ET.ElementTree(ET.Element('page')) word.attach_word_to_tree(tree) print(ET.dump(tree.getroot())) """ def test_partition(self): page = datatypes.page.Page(self.test_file) word = page.words[67] self.assertEqual(word.belongs_to_multiple_writing_processes(), True) 
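        # partitioning by writing_process_id should move the mixed transkription_positions
        # into three homogeneous word_parts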
word.partition_according_to_writing_process_id() self.assertEqual(len(word.word_parts), 3) self.assertEqual(word.belongs_to_multiple_writing_processes(), False) self.assertEqual(word.belongs_to_multiple_writing_processes(include_parts=True), True) empty_tree = ET.ElementTree(ET.Element('page')) word_node = word.attach_word_to_tree(empty_tree) newWord = Word.create_cls(word_node) self.assertEqual(len(newWord.word_parts), 3) #print(ET.dump(empty_tree.getroot())) def test_partition_deletion(self): page = datatypes.page.Page(self.test_file) word = page.words[67] for transkription_position in word.transkription_positions: transkription_position.deleted = transkription_position.writing_process_id == 1 self.assertEqual(word.has_mixed_status('deleted'), True) word.partition_according_to_deletion() self.assertEqual(len(word.word_parts), 3) self.assertEqual(word.has_mixed_status('deleted'), False) self.assertEqual(word.has_mixed_status('deleted', include_parts=True), True) page = datatypes.page.Page(self.test_file) word = page.words[67] word.partition_according_to_writing_process_id() #print([(word.text, word.deleted) for word in word.word_parts]) word.word_parts[1].transkription_positions[1].deleted = True word.partition_according_to_deletion() self.assertEqual(len(word.word_parts), 4) #print([(word.text, word.deleted) for word in word.word_parts]) partA = Word(text='A', deleted=True) partB = Word(text='SDF', deleted=False) word = Word(text='ASDF', word_parts=[ partA, partB]) self.assertEqual(word.has_mixed_status('deleted', include_parts=True), True) def test_execute_function_on_parts(self): page = datatypes.page.Page(self.test_file) word_parts = [ page.words[67], page.words[68] ] word_parts, none = execute_function_on_parts(word_parts, 'partition_according_to_writing_process_id') self.assertEqual(len(word_parts) == 4, True) def test_process_word_boxes(self): page = datatypes.page.Page(self.pdf_xml) page.source = self.pdf_xml_source page.update_styles(partition_according_to_styles=True) tr = TranskriptionField(page.source) box_path_d = ['M 598.11,626.565 L 603.557,626.565 L 603.557,632.565 L 598.11,632.565 L 598.11,626.565',\ 'M 557.443,683.44 L 574.182,683.44 L 574.182,694.815 L 557.443,694.815 L 557.443,683.44',\ 'M 404.193,659.565 L 407.80699999999996,659.565 L 407.80699999999996,668.94 L 404.193,668.94 L 404.193,659.565',\ 'M 587.932,634.065 L 598.318,634.065 L 598.318,643.19 L 587.932,643.19 L 587.932,634.065',\ 'M 570.443,221.315 L 576.557,221.315 L 576.557,230.065 L 570.443,230.065 L 570.443,221.315'] box_paths = [ Box(d_string=d_string, earlier_text='test') for d_string in box_path_d ] indices = [30, 277, 288, 297, 321] for word_id, index in enumerate(indices): word_over_box = page.words[index].process_boxes(box_paths, tr_xmin=tr.xmin, tr_ymin=tr.ymin) self.assertEqual(word_over_box is not None, True) self.assertEqual(word_over_box == page.words[index] or word_over_box in page.words[index].word_parts, True) #self.assertEqual(word_over_box in page.words[index].word_parts, True) def test_process_word_several_boxesOn1LIne(self): page = datatypes.page.Page(self.pdf_xml) page.source = self.pdf_xml_source for word in page.words: word.set_writing_process_id_to_transkription_positions(page) word.partition_according_to_writing_process_id() tr = TranskriptionField(page.source) box_path_d = ['M 598.11,626.565 L 603.557,626.565 L 603.557,632.565 L 598.11,632.565 L 598.11,626.565',\ 'M 557.443,683.44 L 574.182,683.44 L 574.182,694.815 L 557.443,694.815 L 557.443,683.44',\ 'M 404.193,659.565 L 
407.80699999999996,659.565 L 407.80699999999996,668.94 L 404.193,668.94 L 404.193,659.565',\ 'M 587.932,634.065 L 598.318,634.065 L 598.318,643.19 L 587.932,643.19 L 587.932,634.065',\ 'M 570.443,221.315 L 576.557,221.315 L 576.557,230.065 L 570.443,230.065 L 570.443,221.315'] box_paths = [ Box(d_string=d_string, earlier_text='test') for d_string in box_path_d ] indices = [30, 277, 288, 297, 321] empty_tree = ET.ElementTree(ET.Element('page')) for word_id, index in enumerate(indices): word_over_box = page.words[index].process_boxes(box_paths, tr_xmin=tr.xmin, tr_ymin=tr.ymin) self.assertEqual(word_over_box is not None, True) def test_split_according_to_status(self): page = datatypes.page.Page(self.test_file) word = page.words[67] for transkription_position in word.transkription_positions: transkription_position.text = 'asdf'\ if transkription_position.writing_process_id == 1\ else word.text self.assertEqual(word.has_mixed_status('text'), True) new_words = word.split_according_to_status('text') #print([word.text for word in new_words ]) self.assertEqual(len(new_words) > 1, True) self.assertEqual(new_words[0].id, word.id) self.assertEqual(new_words[0].deleted, word.deleted) self.assertEqual(new_words[1].id, word.id+1) manuscript = ArchivalManuscriptUnity() page = datatypes.page.Page(self.test_file) word = page.words[67] page.words = [ word ] page.update_styles(manuscript=manuscript) new_words = word.split_according_to_status('style', splits_are_parts=True) self.assertEqual(len(word.word_parts), 3) def test__create_new_word(self): manuscript = ArchivalManuscriptUnity() page = datatypes.page.Page(self.test_file) word = page.words[67] page.words = [ word ] page.update_styles(manuscript=manuscript) newWord = word._create_new_word([ word.transkription_positions[0] ], 'style') for key in Word.COPY_PROPERTY_KEY: self.assertEqual(newWord.__dict__[key], word.__dict__[key]) self.assertEqual(len(newWord.styles), 1) def test__get_partial_word_over_box(self): word = Word(text='test', transkription_positions=[ TranskriptionPosition(id=0), TranskriptionPosition(id=1) ]) word.transkription_positions[0].has_box = Box(earlier_text='asdf') word._get_partial_word_over_box() self.assertEqual(len(word.word_parts), 2) partA = Word(id=0, text='A', transkription_positions=[TranskriptionPosition()]) partB = Word(id=1, text='SDF', transkription_positions=[TranskriptionPosition(), TranskriptionPosition(id=1)]) partB.transkription_positions[0].has_box = Box(earlier_text='asdf') word = Word(text='ASDF', word_parts=[ partA, partB]) word._get_partial_word_over_box() self.assertEqual(len(word.word_parts), 2) if __name__ == "__main__": unittest.main() Index: tests_svgscripts/test_data/N_VII_1_page006.xml =================================================================== --- tests_svgscripts/test_data/N_VII_1_page006.xml (revision 84) +++ tests_svgscripts/test_data/N_VII_1_page006.xml (revision 85) @@ -1,1276 +1,1276 @@ svgWordPosition 2019-08-02 15:17:37 2019-08-02 15:17:37 2019-08-02 15:30:59 2019-08-02 15:30:59 - 2020-01-15 15:22:27 + 2020-01-22 16:09:30 Index: tests_svgscripts/test_process_words_post_merging.py =================================================================== --- tests_svgscripts/test_process_words_post_merging.py (revision 84) +++ tests_svgscripts/test_process_words_post_merging.py (revision 85) @@ -1,153 +1,168 @@ import unittest from os import sep, path, remove from os.path import isdir, isfile, dirname import shutil import sys import lxml.etree as ET import warnings import sys 
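# make the modules under svgscripts importable for the tests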
sys.path.append('svgscripts') import process_words_post_merging from datatypes.faksimile import FaksimilePage from datatypes.mark_foreign_hands import MarkForeignHands from datatypes.page import Page from datatypes.path import Path from datatypes.positional_word_part import PositionalWordPart from datatypes.text_connection_mark import TextConnectionMark from datatypes.transkriptionField import TranskriptionField +from datatypes.word import Word from datatypes.word_position import WordPosition class TestPostMerge(unittest.TestCase): def setUp(self): process_words_post_merging.UNITTESTING = True DATADIR = path.dirname(__file__) + sep + 'test_data' self.faksimile_dir = DATADIR + sep + 'faksimile_svg' self.manuscript = DATADIR + sep + 'N_VII_1.xml' self.manuscript_copy = self.manuscript.replace('.', '_copy.') self.faksimile_file = self.faksimile_dir + sep + 'N-VII-1,5et6.svg' self.xml_file = DATADIR + sep + 'N_VII_1_page005.xml' self.Mp_XIV_1_mytest_421 = DATADIR + sep + 'Mp_XIV_1_mytest_421.xml' self.test_tcm_xml = DATADIR + sep + 'N_VII_1_page001.xml' self.pdf_xml = DATADIR + sep + 'W_I_8_page125.xml' self.pdf_xml_source = DATADIR + sep + 'W_I_8_neu_125-01.svg' @unittest.skip('takes long') def test_main(self): process_words_post_merging.main([self.manuscript]) def test_categorize_paths(self): page = Page(self.pdf_xml) page.source = self.pdf_xml_source tr = TranskriptionField(page.source) page.words = [ word for word in page.words if word.line_number == 33 ] path_dict = process_words_post_merging.categorize_paths(page, tr) self.assertEqual(True in [ word.deleted for word in page.words if word.id == 269 ], False) self.assertEqual(len(path_dict.get('deletion_or_underline_paths')) > 0, True) self.assertEqual(len(path_dict.get('box_paths')), 5) """ words = [ word for word in page.words if len(word.box_paths) > 0 ] self.assertEqual(len(words), 1) self.assertEqual(words[0].word_parts[0].earlier_version is not None, True) self.assertEqual(words[0].word_parts[0].earlier_version.text, ')') """ + #process_words_post_merging.DEBUG_WORD = Word(text="Verschiedenes", line_number=11) + if process_words_post_merging.DEBUG_WORD is not None: + page = Page('xml/N_VII_1_page138.xml') + process_words_post_merging.reset_page(page) + tr = TranskriptionField(page.source) + #page.words = [ word for word in page.words if word.text == 'Werth-' ] + print('starting ...') + process_words_post_merging.find_special_words(page, transkription_field=tr) + page.update_styles(partition_according_to_styles=True) + path_dict = process_words_post_merging.categorize_paths(page, tr) + word = [ word for word in page.words if word.text == process_words_post_merging.DEBUG_WORD.text ][0] + #print(word) + #self.assertEqual(word.deleted, True) + #self.assertEqual(True in [ word.deleted for word in page.words[0].word_parts], True) + #self.assertEqual(page.words[0].word_parts[0].deleted, True) def test_find_special_words(self): page = Page(self.xml_file) process_words_post_merging.find_special_words(page) self.assertEqual(len(page.mark_foreign_hands), 1) self.assertEqual(page.mark_foreign_hands[0].foreign_hands_text, 'x') page.update_and_attach_words2tree() nodes = page.page_tree.xpath('//' + MarkForeignHands.XML_TAG) page = Page(self.test_tcm_xml) process_words_post_merging.find_special_words(page) self.assertEqual(len(page.text_connection_marks), 1) self.assertEqual(page.text_connection_marks[0].text_source.first_line, 2) """ page.update_and_attach_words2tree() nodes = page.page_tree.xpath('//' + TextConnectionMark.XML_TAG) 
print(ET.dump(nodes[0])) """ def test_process_word_boxes(self): page = Page(self.pdf_xml) # W_I_8_page125.xml page.source = self.pdf_xml_source #page.words = [ page.words[30]] page.update_styles(partition_according_to_styles=True) tr = TranskriptionField(page.source) box_path_d = ['M 598.11,626.565 L 603.557,626.565 L 603.557,632.565 L 598.11,632.565 L 598.11,626.565',\ 'M 557.443,683.44 L 574.182,683.44 L 574.182,694.815 L 557.443,694.815 L 557.443,683.44',\ 'M 404.193,659.565 L 407.80699999999996,659.565 L 407.80699999999996,668.94 L 404.193,668.94 L 404.193,659.565',\ 'M 587.932,634.065 L 598.318,634.065 L 598.318,643.19 L 587.932,643.19 L 587.932,634.065',\ 'M 570.443,221.315 L 576.557,221.315 L 576.557,230.065 L 570.443,230.065 L 570.443,221.315'] box_paths = [ Path(d_string=d_string) for d_string in box_path_d ] process_words_post_merging.process_word_boxes(page, box_paths, tr) words_with_boxes = [ word for word in page.words if word.word_box is not None\ or len([ part for part in word.word_parts if part.word_box is not None]) > 0] expected_values = {'Aber': {'text': 'aber'}, 'seiner': {'text': ')'},\ 'mit': { 'text': ','}, '(–': {'text': ':'}, 'Um': {'text': 'Denn'}} self.assertEqual(len(words_with_boxes), len(expected_values.keys())) references = [ words_with_boxes[0].earlier_version,\ words_with_boxes[1].word_parts[0].overwrites_word,\ words_with_boxes[2].word_parts[0].overwrites_word,\ words_with_boxes[3].word_parts[0].overwrites_word,\ words_with_boxes[4].overwrites_word ] for index, key in enumerate(expected_values.keys()): expected_values[key].update({'reference': references[index]}) for word in words_with_boxes: self.assertEqual(expected_values[word.text].get('reference') is not None, True) @unittest.skip('relies on local file') def test_process_word_boxes_multiple_boxes_perLIne(self): page = Page('xml/N_VII_1_page034.xml') page.update_styles(partition_according_to_styles=True) page.words[205].word_parts[0].deleted = True page.words[205].word_parts[3].deleted = True tr = TranskriptionField(page.source) box_path_d = ['M 69.497,460.726 L 81.959,460.726 L 81.959,467.404 L 69.497,467.404 L 69.497,460.726', 'M 65.997,461.974 L 68.084,461.974 L 68.084,467.277 L 65.997,467.277 L 65.997,461.974', 'M 191.939,423.806 L 197.602,423.806 L 197.602,431.817 L 191.939,431.817 L 191.939,423.806', 'M 47.048,245.659 L 63.779,245.659 L 63.779,252.795 L 47.048,252.795 L 47.048,245.659', 'M 180.995,89.054 L 188.23000000000002,89.054 L 188.23000000000002,95.515 L 180.995,95.515 L 180.995,89.054', 'M 142.367,90.315 L 149.72799999999998,90.315 L 149.72799999999998,95.515 L 142.367,95.515 L 142.367,90.315', 'M 133.745,90.143 L 137.48000000000002,90.143 L 137.48000000000002,95.554 L 133.745,95.554 L 133.745,90.143'] box_paths = [ Path(d_string=d_string) for d_string in box_path_d ] process_words_post_merging.process_word_boxes(page, box_paths, tr) words_with_boxes = [ word for word in page.words if word.word_box is not None\ or word.has_mixed_status('word_box', include_parts=True)] expected_values = { 'großen': {'text': 'größtem'}, 'daß': {'text': 'dem'}, 'seine': {'text': 'ihre'},\ 'Rococo-Geschmack': {'text': 'Rococo-geschmack'}, '(:': {'text': '–'}, 'und': {'text': 'es'} } self.assertEqual(len(words_with_boxes), len(expected_values.keys())) references = [ words_with_boxes[0].earlier_version,\ words_with_boxes[1].earlier_version,\ words_with_boxes[2].overwrites_word,\ words_with_boxes[3].earlier_version,\ words_with_boxes[4].word_parts[1].overwrites_word,\ words_with_boxes[5].overwrites_word ] for 
index, key in enumerate(expected_values.keys()): expected_values[key].update({'reference': references[index]}) for word in words_with_boxes: if expected_values[word.text].get('reference') is None: print(word.text, len(word.word_parts)) self.assertEqual(expected_values[word.text].get('reference') is not None, True) self.assertEqual(expected_values[word.text].get('reference').text, expected_values[word.text].get('text')) def test_update_writing_process_ids(self): page = Page(self.pdf_xml) page.words = [ word for word in page.words if word.text == 'Aber' and word.line_number == 2 ] process_words_post_merging.update_writing_process_ids(page) self.assertEqual(len(page.words[0].word_parts), 2) self.assertEqual(page.words[0].word_parts[0].writing_process_id, 1) self.assertEqual(page.words[0].word_parts[1].writing_process_id, 0) @unittest.skip('takes long') #@unittest.skipUnless(__name__ == "__main__", 'test takes too long, we do not run it with unittest discover') def test_reset_page(self): page = Page(self.pdf_xml) page.source = self.pdf_xml_source process_words_post_merging.post_merging_processing_and_saving(page=page) numWordParts = 7 process_words_post_merging.post_merging_processing_and_saving(page=page) self.assertEqual(len([ word for word in page.words if len(word.word_parts) > 0 ]), numWordParts) process_words_post_merging.reset_page(page) self.assertEqual(len([ word for word in page.words if word.earlier_version is not None ]), 0) self.assertEqual(len([ word for word in page.words if len(word.word_parts) > 0 ]), 0) - if __name__ == "__main__": unittest.main()