Index: svgscripts/datatypes/box.py
===================================================================
--- svgscripts/datatypes/box.py (revision 99)
+++ svgscripts/datatypes/box.py (revision 100)
@@ -1,140 +1,141 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This class can be used to represent svg paths of type 'box'.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/> 1}}}
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
from lxml import etree as ET
from os.path import isfile
from svgpathtools.parser import parse_path
import warnings
from .matrix import Matrix
from .path import Path
from .transkriptionField import TranskriptionField
class Box(Path):
"""
This represents box svg paths.
Args:
node (lxml.etree.Element) node, containing information
path (svgpathtools.path.Path) svg path representation.
"""
XML_TAG = 'box-path'
- def __init__(self, id=0, node=None, path=None, d_string=None, style_class='', earlier_text='', text_style_class=''):
+ def __init__(self, id=0, node=None, path=None, d_string=None, style_class='', earlier_text='', text_style_class='', earlier_version=False):
super(Box,self).__init__(id=id, node=node, path=path, d_string=d_string, style_class=style_class, tag=Box.XML_TAG)
self.stringKeys += [ 'earlier_text', 'text_style_class' ]
self.earlier_text = earlier_text
self.text_style_class = text_style_class
+ self.earlier_version = earlier_version
if node is not None:
if bool(node.get('earlier-text')):
self.earlier_text = node.get('earlier-text')
if bool(node.get('text-style-class')):
self.text_style_class = node.get('text-style-class')
@classmethod
def create_box(cls, path, margin_boxes_on_line, svg_source=None, svg_tree=None, transkription_field=None, namespaces={}, threshold=1.5):
    """Create a Box from a path and find its corresponding earlier_text outside of transkription_field.

    A margin box matches path when the vertical distance between their
    bounding-box midpoints is below threshold; the matched box's text and
    tspan nodes supply earlier_text and text_style_class.

    NOTE(review): namespaces={} is a mutable default argument; it is only
    rebound (never mutated) here, so it is currently harmless, but a None
    default would be safer.

    :return: box.Box, or None when no margin box matches path
    """
    if svg_source is not None:
        svg_tree = ET.parse(svg_source)
    if len(namespaces) == 0:
        # map the default (None) namespace prefix to 'ns' so xpath queries work
        namespaces = { k if k is not None else 'ns': v for k, v in svg_tree.getroot().nsmap.items() }
    if transkription_field is None:
        transkription_field = TranskriptionField(svg_source) if svg_source is not None\
                else TranskriptionField(svg_tree.docinfo.URL)
    # a margin box matches when its vertical center is close to the path's
    matching_boxes = [ margin_box for margin_box in margin_boxes_on_line\
            if abs(margin_box.get_median_y()-path.get_median_y()) < threshold ]
    box = None
    if len(matching_boxes) > 0:
        matching_box = matching_boxes[0]
        # consume the matched box so it cannot be matched by another path
        margin_boxes_on_line.remove(matching_box)
        xmin, xmax, ymin, ymax = matching_box.path.bbox()
        if ymin == ymax:
            # degenerate (flat) box: fall back to the path's own vertical extent
            ymin = path.path.bbox()[2]
            ymax = path.path.bbox()[3]
        text_nodes = [ text_node for text_node in svg_tree.xpath('//ns:text', namespaces=namespaces)\
                if text_node_is_inside_match_box(text_node, xmin, xmax, ymin, ymax) ]
        tspan_nodes = [ tspan_node for tspan_node in svg_tree.xpath('//ns:text/ns:tspan', namespaces=namespaces)\
                if tspan_node_is_inside_match_box(tspan_node, xmin, xmax, ymin, ymax) ]
        box_text = ''
        text_styles = []
        if len(text_nodes) > 0:
            # process text nodes left to right
            text_nodes = sorted(text_nodes, key=lambda node: Matrix(transform_matrix_string=node.get('transform')).getX())
            for text_node in text_nodes:
                if len(text_node.xpath('./ns:tspan', namespaces=namespaces)) == 0:
                    text_styles += [ text_node.get('class') ]
                    box_text += text_node.text
                else:
                    # only tspans that start left of the box's right edge belong to it
                    matrix = Matrix(transform_matrix_string=text_node.get('transform'))
                    for tspan_node in text_node.xpath('./ns:tspan', namespaces=namespaces):
                        if matrix.add2X(add_to_x=tspan_node.get('x')) < xmax:
                            text_styles.append(tspan_node.get('class'))
                            box_text += tspan_node.text
        elif len(tspan_nodes) > 0:
            for tspan_node in tspan_nodes:
                text_styles.append(tspan_node.get('class'))
                box_text += tspan_node.text
        else:
            warnings.warn('No text_node found for xmin, xmax, ymin, ymax: {0} {1} {2} {3}'.format(xmin, xmax, ymin, ymax))
        # deduplicate individual style-class tokens before joining
        text_style_class = ' '.join(list(set([ item for style in text_styles for item in style.split(' ') ])))
        box = Box(id=path.id, path=path.path, style_class=path.style_class,\
                earlier_text=box_text.replace(' ',''), text_style_class=text_style_class)
    else:
        #print([ margin_box.path.bbox() for margin_box in margin_boxes_on_line ], len(margin_boxes_on_line))
        warnings.warn(f'No margin box found for box with bbox: {path.path.bbox()}, {margin_boxes_on_line} {threshold}')
    return box
@classmethod
def get_semantic_dictionary(cls):
    """Create and return the semantic dictionary as specified by SemanticClass."""
    semantic_dict = super(Box, cls).get_semantic_dictionary()
    earlier_text_property = cls.create_semantic_property_dictionary('earlier_text', str)
    semantic_dict[cls.PROPERTIES_KEY].update(earlier_text_property)
    return cls.return_dictionary_after_updating_super_classes(semantic_dict)
def text_node_is_inside_match_box(text_node, xmin, xmax, ymin, ymax):
    """Return True if text_node's transform position lies strictly inside the given bounding box.

    Nodes without a transform attribute are never inside.
    """
    transform = text_node.get('transform')
    if not bool(transform):
        return False
    matrix = Matrix(transform_matrix_string=transform)
    return ymin < matrix.getY() < ymax and xmin < matrix.getX() < xmax
def tspan_node_is_inside_match_box(tspan_node, xmin, xmax, ymin, ymax):
    """Return True if tspan_node lies strictly inside the given bounding box.

    The position is derived from the parent text node's transform plus the
    tspan's own x offset; tspans whose parent has no transform are never inside.
    """
    parent_transform = tspan_node.getparent().get('transform')
    if not bool(parent_transform):
        return False
    matrix = Matrix(transform_matrix_string=parent_transform)
    tspan_x = matrix.add2X(add_to_x=tspan_node.get('x'))
    return ymin < matrix.getY() < ymax and xmin < tspan_x < xmax
Index: svgscripts/datatypes/transkription_position.py
===================================================================
--- svgscripts/datatypes/transkription_position.py (revision 99)
+++ svgscripts/datatypes/transkription_position.py (revision 100)
@@ -1,200 +1,200 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This class can be used to represent a transkription word position.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/> 1}}}
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
from lxml import etree as ET
from os.path import isfile
import sys
from .debug_message import DebugMessage
from .image import SVGImage
from .positional_word_part import PositionalWordPart
from .word_position import WordPosition
from .matrix import Matrix
sys.path.append('py2ttl')
from class_spec import SemanticClass
class TranskriptionPosition(WordPosition):
"""
This class represents the position of a word on the transkription as it is displayed by a svg image.
@label position of a word on the topological transkription
Args:
id (int): word id
matrix (datatypes.Matrix): matrix containing information about transformation.
height (float): height of word
width (float): width of word
x (float): x position of word
y (float): y position of word
positional_word_parts a list of (datatypes.positional_word_part) PositionalWordPart
debug_message a (datatypes.debug_message) DebugMessage
"""
ADD2X = 0.15
ADD2TOP = 1.0
ADD2BOTTOM = 0.2
HEIGHT_FACTOR = 1.1 # factor that multiplies biggest_font_size -> height
XML_TAG = WordPosition.TRANSKRIPTION
def __init__(self, id=0, node=None, height=0.0, width=0.0, x=0.0, y=0.0, matrix=None, positional_word_parts=None, debug_message=None):
    """Initialize a TranskriptionPosition, either from keyword values or from an XML node.

    When node is given, positional word parts and an optional debug message
    are read from the node and override the corresponding arguments.
    """
    super(TranskriptionPosition, self).__init__(id=id, node=node, height=height, width=width, x=x, y=y, matrix=matrix, tag=WordPosition.TRANSKRIPTION)
    self.positional_word_parts = [] if positional_word_parts is None else positional_word_parts
    self.debug_message = debug_message
    self.deleted = False
    self.has_box = None
    self.style = None
    self.svg_image = None
    if node is not None:
        debug_nodes = node.xpath('.//' + DebugMessage.XML_TAG)
        # node content wins over the constructor arguments
        self.debug_message = DebugMessage(node=debug_nodes[0]) if len(debug_nodes) > 0 else None
        self.positional_word_parts = [ PositionalWordPart(node=pwp_node) for pwp_node in node.xpath('.//' + PositionalWordPart.XML_TAG) ]
    self.attachable_objects += self.positional_word_parts
    if self.debug_message is not None:
        self.attachable_objects.append(self.debug_message)
@classmethod
def get_semantic_dictionary(cls):
    """Create and return the semantic dictionary as specified by SemanticClass."""
    semantic_dict = super(TranskriptionPosition, cls).get_semantic_dictionary()
    svg_image_property = cls.create_semantic_property_dictionary('svg_image', SVGImage, cardinality=1,\
            name='isOnSvgImage', label='transkription position is on svg image')
    semantic_dict[cls.PROPERTIES_KEY].update(svg_image_property)
    return cls.return_dictionary_after_updating_super_classes(semantic_dict)
def get_text(self):
    """Return the concatenated text of all positional word parts of this position."""
    part_texts = (part.text for part in self.positional_word_parts)
    return ''.join(part_texts)
def is_mergebale_with(self, other) -> bool:
    """Return whether self and other have same writing_process_id or style.

    Two positions are mergeable when they share a writing_process_id, or when
    at least one of them has no writing process assigned (-1) and the style
    classes of their first positional word parts agree.
    """
    if self.writing_process_id == other.writing_process_id:
        return True
    # Parentheses matter here: without them 'and' binds tighter than 'or',
    # so a position with writing_process_id == -1 could reach the indexing
    # below with empty positional_word_parts and raise an IndexError.
    if (self.writing_process_id == -1 or other.writing_process_id == -1)\
            and len(self.positional_word_parts) > 0 and len(other.positional_word_parts) > 0:
        return self.positional_word_parts[0].style_class == other.positional_word_parts[0].style_class
    return False
- def split(self, split_position, second_split=-1):
+ def split(self, split_position, second_split=-1) ->list:
"""Split a transkription_position in two at split_position.
:return: a list of the new transkription_positions
"""
transkription_positions = []
left_pwp = [ pwp for pwp in self.positional_word_parts if pwp.left + pwp.width < split_position ]
transkription_positions += TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(left_pwp, transkription_position_id=self.id)
if second_split == -1:
right_pwp = [ pwp for pwp in self.positional_word_parts if pwp not in left_pwp ]
next_id = int(self.id) + 1
transkription_positions += TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(right_pwp, transkription_position_id=str(next_id))
else:
middle_pwp = [ pwp for pwp in self.positional_word_parts if pwp not in left_pwp and pwp.left + pwp.width < second_split ]
next_id = int(self.id) + 1
transkription_positions += TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(middle_pwp, transkription_position_id=str(next_id))
right_pwp = [ pwp for pwp in self.positional_word_parts if pwp not in left_pwp and pwp not in middle_pwp ]
next_id = int(self.id) + 1
transkription_positions += TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(right_pwp, transkription_position_id=str(next_id))
return transkription_positions
def update_positional_word_parts(self, positional_word_parts):
    """Replace this position's positional word parts with positional_word_parts.

    The previous parts are detached from attachable_objects element by
    element. (The original code tested the *list object* for membership
    among the individual attached elements, which was always false, so the
    stale parts were never removed.)
    """
    for old_part in self.positional_word_parts:
        if old_part in self.attachable_objects:
            self.attachable_objects.remove(old_part)
    self.positional_word_parts = positional_word_parts
    self.attachable_objects += self.positional_word_parts
@staticmethod
def CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(positional_word_parts, debug_message=None, debug_msg_string=None, transkription_position_id=0):
    """Creates a list of TranskriptionPosition from a list of (datatypes.positional_word_part) PositionalWordPart.

    Consecutive parts that share the same transform conversion factors and
    the same style class are merged into one TranskriptionPosition whose
    bounding box encloses them; when a difference is found, the remaining
    tail is handled recursively and yields further positions.

    [:return:] a list of (datatypes.transkription_position) TranskriptionPosition
    """
    TOPCORRECTION = 1
    debug_message = DebugMessage(message=debug_msg_string)\
            if debug_msg_string is not None else debug_message
    transkription_positions = []
    if len(positional_word_parts) < 1:
        return []
    matrix = positional_word_parts[0].transform
    index = 0
    matrices_differ = False
    style_class = positional_word_parts[0].style_class
    styles_differ = False
    # advance index over the prefix of parts that agree in matrix and style
    while index < len(positional_word_parts) and not matrices_differ and not styles_differ:
        if Matrix.DO_CONVERSION_FACTORS_DIFFER(matrix, positional_word_parts[index].transform):
            matrices_differ = True
        elif style_class != positional_word_parts[index].style_class:
            styles_differ = True
        else:
            index += 1
    if (matrices_differ or styles_differ) and index < len(positional_word_parts):
        # recurse on the differing tail; the homogeneous prefix is prepended below
        debug_msg_string = 'matrices differ' if matrices_differ else 'styles differ'
        transkription_positions += TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(\
                positional_word_parts[index:], debug_msg_string=debug_msg_string, transkription_position_id=int(transkription_position_id)+1)
        positional_word_parts = positional_word_parts[:index]
    # bounding box of the homogeneous prefix, padded by TOPCORRECTION / ADD2X
    height = [ pwp.height for pwp in sorted(positional_word_parts, key=lambda pwp: pwp.height, reverse=True)][0] + 2*TOPCORRECTION
    x = positional_word_parts[0].left - TranskriptionPosition.ADD2X
    y = [ pwp.top for pwp in sorted(positional_word_parts, key=lambda pwp: pwp.top)][0] - TOPCORRECTION
    width = positional_word_parts[len(positional_word_parts)-1].left - x\
            + positional_word_parts[len(positional_word_parts)-1].width + TranskriptionPosition.ADD2X
    # renumber the parts so their ids match their position in this list
    for pwp_index, pwp in enumerate(positional_word_parts):
        pwp.id = pwp_index
    transkription_positions.insert(0, TranskriptionPosition(id=transkription_position_id, height=height, width=width, x=x, y=y, matrix=matrix,\
            positional_word_parts=positional_word_parts, debug_message=debug_message))
    return transkription_positions
@staticmethod
def CREATE_TRANSKRIPTION_POSITION_LIST(page, word_part_objs, matrix=None, debug_msg_string=None, transkription_field=None):
    """Creates a list of TranskriptionPosition from word_part_objs (i.e. a list of dictionaries
    with the keys: text, x, y, matrix, class).

    When page.svg_file exists, the positional word parts are extracted from
    the svg file (offset by the transkription field's origin, if given);
    otherwise a simple list is derived from word_part_objs alone.

    [:return:] a list of (datatypes.transkription_position) TranskriptionPosition
    """
    positional_word_parts = []
    debug_message = DebugMessage(message=debug_msg_string)\
            if debug_msg_string is not None else None
    if page.svg_file is not None and isfile(page.svg_file):
        svg_path_tree = ET.parse(page.svg_file)
        # map the default (None) namespace prefix to 'ns' so xpath queries work
        namespaces = { k if k is not None else 'ns': v for k, v in svg_path_tree.getroot().nsmap.items() }
        xmin = 0.0
        ymin = 0.0
        if transkription_field is not None:
            xmin = transkription_field.xmin
            ymin = transkription_field.ymin
        for part_obj in word_part_objs:
            positional_word_parts += PositionalWordPart.CREATE_POSITIONAL_WORD_PART_LIST(\
                    part_obj, svg_path_tree, namespaces, page, start_id=len(positional_word_parts),\
                    xmin=xmin, ymin=ymin)
    else:
        positional_word_parts = PositionalWordPart.CREATE_SIMPLE_POSITIONAL_WORD_PART_LIST(page, word_part_objs)
    if len(positional_word_parts) > 0:
        return TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(positional_word_parts, debug_message=debug_message)
    else:
        # fall back to a single empty position carrying the matrix / debug info
        return [ TranskriptionPosition(matrix=matrix, debug_message=debug_message) ]
Index: svgscripts/datatypes/path.py
===================================================================
--- svgscripts/datatypes/path.py (revision 99)
+++ svgscripts/datatypes/path.py (revision 100)
@@ -1,197 +1,200 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This super class can be used to represent all svg path types.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/> 1}}}
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
from lxml import etree as ET
from os.path import isfile
from svgpathtools.parser import parse_path
from svgpathtools.path import Line
from svgpathtools.path import Path as SVGPath
import sys
from .attachable_object import AttachableObject
sys.path.append('py2ttl')
from class_spec import SemanticClass
class Path(AttachableObject,SemanticClass):
"""
This super class represents all types of svg paths.
Args:
node (lxml.etree.Element) node, containing information
path (svgpathtools.path.Path) svg path representation.
"""
XML_TAG = 'path'
WORD_DELETION_PATH_TAG = 'word-deletion-path'
BOX_TAG = 'box-path'
def __init__(self, id=0, node=None, path=None, parent_path=None, d_string=None, style_class='', tag=XML_TAG):
    """Initialize a Path, either from an XML node or from keyword values.

    When node is given, id, d attribute, style class and tag are read from
    its attributes; otherwise the keyword arguments are used and a path may
    be parsed from d_string.
    """
    self.intKeys = [ 'id' ]
    self.stringKeys = [ 'style_class' ]
    self.floatKeys = []
    self.start_line_number = -1
    self.parent_path = parent_path
    if node is not None:
        # initialize from the XML node's attributes
        self.id = int(node.get('id')) if bool(node.get('id')) else 0
        self.path = parse_path(node.get('d')) if bool(node.get('d')) else None
        self.d_attribute = node.get('d')
        self.style_class = node.get('style-class')
        self.tag = node.tag
    else:
        self.tag = tag
        self.id = id
        self.path = path
        if self.path is None and d_string is not None and d_string != '':
            self.path = parse_path(d_string)
        self.d_attribute = self.path.d() if self.path is not None else ''
        self.style_class = style_class
def attach_object_to_tree(self, target_tree):
    """Attach this path to target_tree, reusing an existing node with the same id or creating a new one."""
    if target_tree.__class__.__name__ == '_ElementTree':
        target_tree = target_tree.getroot()
    matching_nodes = target_tree.xpath('.//' + self.tag + '[@id="%s"]' % self.id)
    obj_node = matching_nodes[0] if len(matching_nodes) > 0 else ET.SubElement(target_tree, self.tag)
    # float attributes are rounded to three decimals on serialization
    for key in self.floatKeys:
        if self.__dict__[key] is not None:
            obj_node.set(key.replace('_','-'), str(round(self.__dict__[key], 3)))
    for key in self.intKeys + self.stringKeys:
        if self.__dict__[key] is not None:
            obj_node.set(key.replace('_','-'), str(self.__dict__[key]))
    if self.path is not None:
        obj_node.set('d', self.path.d())
@classmethod
- def create_cls(cls, id=0, path=None, style_class='', page=None, tag=XML_TAG):
+ def create_cls(cls, id=0, path=None, style_class='', page=None, tag=XML_TAG, stroke_width=0.0):
"""Create and return a cls.
"""
if path is not None\
and path.start.imag <= path.end.imag\
and page is not None\
and style_class != ''\
and len(path._segments) == 1\
and type(path._segments[0]) == Line\
- and style_class in page.style_dict.keys()\
- and 'stroke-width' in page.style_dict[style_class].keys():
+ and ((style_class in page.style_dict.keys()\
+ and 'stroke-width' in page.style_dict[style_class].keys())\
+ or stroke_width > 0.0):
# If path is a Line and its style_class specifies a stroke-width, correct path
- stroke_width_correction = float(page.style_dict[style_class]['stroke-width'])/2
+ stroke_width_correction = float(page.style_dict[style_class]['stroke-width'])/2\
+ if stroke_width == 0.0\
+ else stroke_width
xmin = path.start.real
xmax = path.end.real
ymin = path.start.imag-stroke_width_correction
ymax = path.end.imag+stroke_width_correction
#path = parse_path(f'M {xmin}, {ymin} L {xmax}, {ymin} L {xmax}, {ymax} L {xmin}, {ymax} z')
path = SVGPath(Line(start=(complex(f'{xmin}+{ymin}j')), end=(complex(f'{xmax}+{ymin}j'))),\
Line(start=(complex(f'{xmax}+{ymin}j')), end=(complex(f'{xmax}+{ymax}j'))),\
Line(start=(complex(f'{xmax}+{ymax}j')), end=(complex(f'{xmin}+{ymax}j'))),\
Line(start=(complex(f'{xmin}+{ymax}j')), end=(complex(f'{xmin}+{ymin}j'))))
return cls(id=id, path=path, style_class=style_class, tag=tag)
def contains_path(self, other_path):
    """Return True if other_path's bounding box lies entirely within this path's bounding box."""
    xmin, xmax, ymin, ymax = self.path.bbox()
    o_xmin, o_xmax, o_ymin, o_ymax = other_path.path.bbox()
    return xmin <= o_xmin and o_xmax <= xmax\
            and ymin <= o_ymin and o_ymax <= ymax
def contains_start_of_path(self, other_path):
    """Return True if the start (left edge) of other_path lies within this path's bounding box."""
    xmin, xmax, ymin, ymax = self.path.bbox()
    o_xmin, o_xmax, o_ymin, o_ymax = other_path.path.bbox()
    return xmin <= o_xmin < xmax\
            and ymin <= o_ymin and o_ymax <= ymax
def contains_end_of_path(self, other_path):
    """Return True if the end (right edge) of other_path lies within this path's bounding box."""
    xmin, xmax, ymin, ymax = self.path.bbox()
    o_xmin, o_xmax, o_ymin, o_ymax = other_path.path.bbox()
    return xmin <= o_xmax < xmax\
            and ymin <= o_ymin and o_ymax <= ymax
@classmethod
def create_path_from_transkription_position(cls, transkription_position, tr_xmin=0.0, tr_ymin=0.0):
    """Create a .path.Path covering the rectangle of a TranskriptionPosition.

    The extent is taken from the positional word parts when present,
    otherwise from the position's own left/top/width/height; tr_xmin and
    tr_ymin shift the rectangle by the transkription field's origin.
    """
    pwps = transkription_position.positional_word_parts
    if len(pwps) > 0:
        first_pwp = pwps[0]
        last_pwp = pwps[-1]
        xmin = tr_xmin + first_pwp.left
        xmax = tr_xmin + last_pwp.left + last_pwp.width
        ymin = tr_ymin + min(pwp.top for pwp in pwps)
        ymax = tr_ymin + max(pwp.bottom for pwp in pwps)
    else:
        xmin = tr_xmin + transkription_position.left
        xmax = xmin + transkription_position.width
        ymin = tr_ymin + transkription_position.top
        ymax = ymin + transkription_position.height
    d_string = f'M {xmin}, {ymin} L {xmax}, {ymin} L {xmax}, {ymax} L {xmin}, {ymax} z'
    return cls(path=parse_path(d_string))
def do_paths_intersect(self, other_path):
    """Return True if the two svg paths intersect; False otherwise or when svgpathtools raises an AssertionError."""
    try:
        intersects = self.path.intersect(other_path.path, justonemode=True)
    except AssertionError:
        return False
    return intersects
def get_median_y(self, tr_ymin=0.0):
    """Return the vertical midpoint of this path's bounding box, relative to tr_ymin."""
    _, _, ymin, ymax = self.path.bbox()
    return (ymin + ymax)/2 - tr_ymin
def get_x(self, tr_xmin=0.0):
    """Return the left edge (xmin) of this path's bounding box, relative to tr_xmin."""
    xmin = self.path.bbox()[0]
    return xmin - tr_xmin
@classmethod
def get_semantic_dictionary(cls):
    """Create and return a semantic dictionary as specified by SemanticClass."""
    d_attribute_property = {'d_attribute': { 'class': str, 'cardinality': 0,\
            'name': 'hasDAttribute', 'label': 'svg path has d attribute',\
            'comment': 'The d attribute defines a path to be drawn.'}}
    dictionary = {}
    dictionary.update({cls.CLASS_KEY: cls.get_class_dictionary()})
    dictionary.update({cls.PROPERTIES_KEY: d_attribute_property})
    return cls.return_dictionary_after_updating_super_classes(dictionary)
def is_partially_contained_by(self, other_path):
    """Return True if other_path contains either the start or the end of this path."""
    if other_path.contains_start_of_path(self):
        return True
    return other_path.contains_end_of_path(self)
Index: svgscripts/datatypes/word.py
===================================================================
--- svgscripts/datatypes/word.py (revision 99)
+++ svgscripts/datatypes/word.py (revision 100)
@@ -1,862 +1,863 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This class can be used to represent a word.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/> 1}}}
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
import copy
import inspect
from lxml import etree as ET
from operator import attrgetter
import re
import string
import sys
import warnings
from .box import Box
from .editor_comment import EditorComment
from .matrix import Matrix
from .path import Path
from .simple_word import SimpleWord
from .style import Style
from .word_deletion_path import WordDeletionPath
from .word_position import WordPosition
from .transkription_position import TranskriptionPosition
from .writing_process import WritingProcess
SINGLE_PUNCTUATION_PATTERN = r"^[{}–]$".format(string.punctuation)
def execute_function_on_parts(word_parts, func_name):
    """Execute the method named func_name on each word and flatten any resulting word parts into the list.

    Words that end up with word_parts after the call are replaced, in place
    of their position, by those parts (and their word_parts list is cleared).

    :param word_parts: list of words to process
    :param func_name: name of the zero-argument method to call on each word
    :return: (new word_parts list, output of the last call or None for empty input)
    """
    copy_parts = word_parts[:]
    # initialize so the empty-input case returns (copy, None) instead of
    # raising NameError on the unbound name
    output = None
    for word in word_parts:
        # getattr instead of eval: identical effect for a plain method name,
        # without evaluating func_name as arbitrary code
        output = getattr(word, func_name)()
        if len(word.word_parts) > 0:
            for part_word in word.word_parts:
                copy_parts.insert(copy_parts.index(word), part_word)
            copy_parts.remove(word)
            word.word_parts = []
    return copy_parts, output
def update_transkription_position_ids(word):
    """Renumber duplicate word part ids and reset transkription position ids by horizontal order.

    All transkription positions are additionally marked as not deleted and
    without a box.
    """
    part_ids = [ part.id for part in word.word_parts ]
    if len(part_ids) != len(set(part_ids)):
        # duplicate ids -> renumber the parts by their list position
        for new_id, part in enumerate(word.word_parts):
            part.id = new_id
    ordered_positions = sorted(word.transkription_positions, key=attrgetter('left'))
    for index, position in enumerate(ordered_positions):
        position.id = index
        position.has_box = None
        position.deleted = False
class Word(SimpleWord):
"""
This class represents a word.
"""
COPY_PROPERTY_KEY = [ 'line_number', 'deleted', 'writing_process_id' ]
APPEND_PROPERTY2LIST_SOURCE_TARGET_KEYS = { 'style': 'styles' }
DATA = 'debug-data'
RDFS_SUBCLASSOF_LIST = ['http://www.e-editiones.ch/ontology/text#HandwrittenText']
XML_TAG = 'word'
XML_EARLIER_VERSION = 'earlier-version'
XML_OVERWRITES = 'overwrites'
XML_CORRECTION_DICT = { 'isClarificationOfWord': 'clarifiesWord',\
'isDeletionOfWord': 'deletesEarlierPart',\
'isExtensionOfWord': 'extendsEarlierVersion',\
'isTransformationOfWord': 'transformsEarlierPart' }
def __init__(self, id=0, text='', line_number=-1, deleted=False, transkription_positions=None, faksimile_positions=None, word_part_objs=None, word_parts=None, writing_process_id=-1, earlier_version=None, box_paths=None, styles=None):
    """Initialize a Word with its positions, parts, correction state and styles.

    NOTE(review): box_paths is accepted but never stored on the instance --
    confirm whether any caller still relies on it.
    """
    super(Word,self).__init__(id=id, text=text, line_number=line_number, transkription_positions=transkription_positions,\
            faksimile_positions=faksimile_positions)
    self.corrections = []
    self.deleted = deleted
    self.deletion_paths = []
    self.debug_container = {}
    self.debug_msg = None
    self.earlier_version = earlier_version
    self.edited_text = None
    self.editor_comment = None
    # correction flags, serialized via XML_CORRECTION_DICT
    self.isClarificationOfWord = None
    self.isDeletionOfWord = None
    self.isExtensionOfWord = None
    self.isTransformationOfWord = None
    # derive text from the transkription positions when no text was given
    if len(self.text) == 0 and len(''.join([ tp.get_text() for tp in self.transkription_positions if type(tp) == TranskriptionPosition ])) > 0:
        # NOTE(review): this join lacks the TranskriptionPosition type filter
        # used in the condition above -- confirm whether that is intended.
        self.text = ''.join([ tp.get_text() for tp in self.transkription_positions ])
    self.overwrites_word = None
    self.styles = styles\
            if styles is not None\
            else []
    self.verified = None
    self.writing_process_id = writing_process_id
    self.writing_processes = []
    self.word_insertion_mark = None
    self.word_box = None
    self.word_parts = word_parts if word_parts is not None else []
    self.word_part_objs = word_part_objs if word_part_objs is not None else []
def add_deletion_paths(self, deletion_paths, tr_xmin=0.0, tr_ymin=0.0):
    """Attach the deletion paths that intersect this word (or delegate to its word parts).

    For a deleted word without parts, the intersection is tested against a
    rectangle built from the first transkription position, shifted by the
    transkription field's origin.
    """
    if len(self.word_parts) > 0:
        for part in self.word_parts:
            part.add_deletion_paths(deletion_paths, tr_xmin=tr_xmin, tr_ymin=tr_ymin)
    elif self.deleted and len(self.transkription_positions) > 0:
        word_path = Path.create_path_from_transkription_position(self.transkription_positions[0],\
                tr_xmin=tr_xmin, tr_ymin=tr_ymin)
        self.deletion_paths = [ candidate for candidate in deletion_paths\
                if do_paths_intersect_saveMode(candidate, word_path) ]
def attach_word_to_tree(self, target_tree):
    """Attaches word to tree target_tree.

    Serializes the optional flags (deleted, verified, edited-text, editor
    comment, writing-process-id), the word parts (renumbered by their list
    position), an optional earlier version, an overwritten word, the word
    box, the corrections and the correction flags.

    [:return:] the created word node (lxml.etree.Element)
    """
    word_node = super(Word,self).attach_word_to_tree(target_tree)
    if self.deleted is not None:
        word_node.set('deleted', str(self.deleted).lower())
    if self.verified is not None:
        word_node.set('verified', str(self.verified).lower())
    if self.edited_text is not None:
        word_node.set('edited-text', self.edited_text)
    if self.editor_comment is not None:
        self.editor_comment.attach_object_to_tree(word_node)
    if self.writing_process_id > -1:
        word_node.set('writing-process-id', str(self.writing_process_id))
    for index, word_part in enumerate(self.word_parts):
        # renumber parts so their ids match the serialization order
        word_part.id = index
        word_part.attach_word_to_tree(word_node)
    if self.earlier_version is not None:
        earlier_node = ET.SubElement(word_node, self.XML_EARLIER_VERSION)
        self.earlier_version.attach_word_to_tree(earlier_node)
    if self.overwrites_word is not None\
            and len(self.overwrites_word.transkription_positions) > 0:
        overwrite_node = ET.SubElement(word_node, self.XML_OVERWRITES)
        self.overwrites_word.attach_word_to_tree(overwrite_node)
    if self.word_box is not None:
        self.word_box.attach_object_to_tree(word_node)
    if len(self.corrections) > 0:
        word_node.set('corrections', ' '.join(set([ str(word.id) for word in self.corrections ])))
    for key in self.XML_CORRECTION_DICT.keys():
        # correction flags serialize as e.g. isDeletionOfWord -> deletesEarlierPart="true"
        if self.__dict__[key] is not None:
            word_node.set(self.XML_CORRECTION_DICT[key], 'true')
    return word_node
def belongs_to_multiple_writing_processes(self, include_parts=False):
    """Return True if the transkription positions (or, with include_parts, the word parts) span more than one writing process."""
    if include_parts and len(self.word_parts) > 0:
        distinct_ids = { part.writing_process_id for part in self.word_parts }
        return len(distinct_ids) > 1
    distinct_ids = { tp.writing_process_id for tp in self.transkription_positions }
    return len(distinct_ids) > 1
def set_parent_word_writing_process_id(self):
    """Set writing_process_id for parent word.

    Collects the styles of the word parts' first transkription positions:
    when they differ, the parent id becomes the maximum writing_process_id
    among those styles, bumped by one if the styles still differ after
    ignoring their writing process id.
    """
    ids = set(word.transkription_positions[0].style for word in self.word_parts\
            if len(word.transkription_positions) > 0 and word.transkription_positions[0].style is not None)
    if len(ids) > 1:
        self.writing_process_id = max([style.writing_process_id for style in ids])
        # styles differing beyond the writing process id indicate a later process
        if len(set(word.transkription_positions[0].style.create_a_copy_wo_writing_process_id()\
                for word in self.word_parts\
                if len(word.transkription_positions) > 0 and word.transkription_positions[0].style is not None))\
                > 1:
            self.writing_process_id += 1
    @classmethod
    def create_cls(cls, word_node):
        """Creates a word from a (lxml.Element) node.

        Reads the word's attributes (writing-process-id, split, join, verified,
        deleted, edited-text, corrections and the correction flags), its child
        word parts, editor comment, earlier version, overwritten word and word
        box from the node.
        [:return:] Word
        :raise: Exception if the 'split' attribute does not match the text.
        """
        # NOTE: "cls" is rebound here to the instance returned by the
        # superclass factory; all following assignments are on that instance.
        cls = super(Word,cls).create_cls(word_node)
        cls.writing_process_id = int(word_node.get('writing-process-id')) if bool(word_node.get('writing-process-id')) else -1
        cls.split_strings = None
        cls.join_string = word_node.get('join')
        if bool(word_node.get('split')):
            cls.split_strings = word_node.get('split').split(' ')
            if ''.join(cls.split_strings) != cls.text:
                error_msg = 'Error in file {0}: word with id="{1}" has split attributes that do not correspond to its text attribute!\n'.\
                        format(word_node.getroottree().docinfo.URL, str(cls.id))\
                        + 'Split attributes: "{0}".\n'.format(' '.join(cls.split_strings))\
                        + 'Text attribute: "{0}".\n'.format(cls.text)
                raise Exception(error_msg)
        # Tri-state flags: True/False when the attribute is present, else None.
        cls.verified = word_node.get('verified') == 'true'\
                if bool(word_node.get('verified')) else None
        cls.deleted = word_node.get('deleted') == 'true'\
                if bool(word_node.get('deleted')) else None
        cls.edited_text = word_node.get('edited-text')
        cls.editor_comment = [ EditorComment.create_cls_from_node(node) for node in word_node.xpath('./' + EditorComment.XML_TAG) ][0]\
                if len([ node for node in word_node.xpath('./' + EditorComment.XML_TAG) ]) > 0 else None
        cls.word_parts = [ cls.create_cls(node) for node in word_node.xpath('./' + cls.XML_TAG) ]
        # 'corrections' holds indices into word_parts.
        if bool(word_node.get('corrections')):
            for index in [ int(i) for i in word_node.get('corrections').split(' ') ]:
                if index < len(cls.word_parts):
                    cls.corrections.append(cls.word_parts[index])
        cls.earlier_version = None
        if len(word_node.xpath('./' + cls.XML_EARLIER_VERSION + '/' + cls.XML_TAG)) > 0:
            cls.earlier_version = [ cls.create_cls(node) for node in word_node.xpath('./' + cls.XML_EARLIER_VERSION + '/' + cls.XML_TAG) ][0]
        # Correction flags are stored under the XML attribute names (the dict values).
        for key_value in cls.XML_CORRECTION_DICT.values():
            if word_node.get(key_value) == 'true':
                cls.__dict__[key_value] = True
        if cls.earlier_version is not None:
            # Re-link each flagged word part to its counterpart: '...Part' flags
            # point to the matching earlier word part, '...EarlierVersion' flags
            # to the earlier version itself, '...Word' flags to this word.
            for word_part in cls.word_parts:
                for key in [ key for key, value in cls.XML_CORRECTION_DICT.items() if value.endswith('Part') ]:
                    if cls.XML_CORRECTION_DICT[key] in word_part.__dict__.keys() and word_part.__dict__[cls.XML_CORRECTION_DICT[key]]\
                       and len(cls.word_parts) <= len(cls.earlier_version.word_parts):
                        try:
                            word_part.__dict__[key] = cls.earlier_version.word_parts[word_part.id]
                        except Exception:
                            msg = f'{cls.id} {cls.text}: {word_part.id}'
                            raise Exception(msg)
                for key in [ key for key, value in cls.XML_CORRECTION_DICT.items() if value.endswith('EarlierVersion') ]:
                    if cls.XML_CORRECTION_DICT[key] in word_part.__dict__.keys() and word_part.__dict__[cls.XML_CORRECTION_DICT[key]]:
                        word_part.__dict__[key] = cls.earlier_version
                for key in [ key for key, value in cls.XML_CORRECTION_DICT.items() if value.endswith('Word') ]:
                    if cls.XML_CORRECTION_DICT[key] in word_part.__dict__.keys() and word_part.__dict__[cls.XML_CORRECTION_DICT[key]]:
                        word_part.__dict__[key] = cls
        cls.overwrites_word = [ cls.create_cls(node) for node in word_node.xpath('./' + cls.XML_OVERWRITES + '/' + cls.XML_TAG)][0]\
                if len(word_node.xpath('./' + cls.XML_OVERWRITES + '/' + cls.XML_TAG)) > 0\
                else None
        cls.word_box = [ Box(node=node) for node in word_node.xpath('./' + Box.XML_TAG) ][0]\
                if len(word_node.xpath('./' + Box.XML_TAG)) > 0\
                else None
        return cls
@classmethod
def join_words(cls, list_of_words):
"""Creates a word from a list of words.
[:return:] Word
"""
if len(list_of_words) > 1:
deleted = True in [ word.deleted for word in list_of_words ]\
and len(set([ word.deleted for word in list_of_words ])) == 1
line_number = list_of_words[0].line_number\
if len(set([ word.line_number for word in list_of_words ])) == 1\
else -1
for word in list_of_words:
if len(word.word_parts) > 0:
index = list_of_words.index(word)
list_of_words.remove(word)
for part_word in reversed(word.word_parts):
list_of_words.insert(index, part_word)
new_word = cls(id=list_of_words[0].id, text=''.join([word.text for word in list_of_words]),\
line_number=line_number, deleted=deleted, word_parts=list_of_words)
if True in [ word.text.endswith('-') or word.text.endswith('=') for word in new_word.word_parts[:-1]]:
change_text = [ word.text for word in new_word.word_parts[:-1] if word.text.endswith('-') or word.text.endswith('=') ][0]
new_word.edited_text = new_word.text.replace(change_text, change_text[:-1])
for id, word in enumerate(new_word.word_parts): word.id = id
return new_word
if len(list_of_words) > 0:
return list_of_words[0]
else:
return None
def create_earlier_version(self, root_word=None, id=0):
"""Create an earlier version of word.
"""
if root_word is None:
root_word = self
root_word.set_parent_word_writing_process_id()
word_parts = []
non_single_punctuation_word_parts = [ word_part for word_part in self.word_parts\
if not re.match(SINGLE_PUNCTUATION_PATTERN, word_part.text) ]
non_single_punctuation_word_parts_length = len(non_single_punctuation_word_parts)
if non_single_punctuation_word_parts_length > 0\
and len([ word_part for word_part in non_single_punctuation_word_parts\
if word_part.deleted ])\
== non_single_punctuation_word_parts_length:
self.deleted = True
for word_part in non_single_punctuation_word_parts: word_part.deleted = False
for id, word_part in enumerate(self.word_parts):
earlierWordPart = word_part.create_earlier_version(root_word=root_word, id=id)
if word_part.deleted:
word_part.isDeletionOfWord = earlierWordPart
word_parts.append(earlierWordPart)
if word_part not in self.corrections:
self.corrections.append(word_part)
elif word_part.overwrites_word is not None\
- and (len(word_part.transkription_positions) > 0\
+ and ((len(word_part.transkription_positions) > 0\
and word_part.overwrites_word.transkription_positions[0].style is not None\
and word_part.transkription_positions[0].style is not None\
and word_part.transkription_positions[0].style\
- != word_part.overwrites_word.transkription_positions[0].style):
+ != word_part.overwrites_word.transkription_positions[0].style)
+ or word_part.word_box.earlier_version):
word_part.overwrites_word.id = word_part.id
word_parts.append(word_part.overwrites_word)
word_part.isTransformationOfWord = word_part.overwrites_word
#print(f'transform: {self.text}')
if word_part not in self.corrections:
self.corrections.append(word_part)
elif root_word.writing_process_id > -1\
and (len(word_part.transkription_positions) > 0\
and word_part.transkription_positions[0].style is not None\
and word_part.transkription_positions[0].style.writing_process_id\
== root_word.writing_process_id):
word_part.extendsEarlierVersion = True
#print('extends')
if word_part not in self.corrections:
self.corrections.append(word_part)
else:
if word_part.deleted:
word_part.isDeletionOfWord = earlierWordPart
word_parts.append(earlierWordPart)
if word_part not in self.corrections:
self.corrections.append(word_part)
else:
#print(f'default: {self.text}')
word_parts.append(earlierWordPart)
text = ''.join([ word.text for word in word_parts ])\
if len(word_parts) > 0\
else self.text
if len(word_parts) == 1:
self.transkription_positions += word_parts[0].transkription_positions
self.faksimile_positions += word_parts[0].faksimile_positions
word_parts = []
new_transkription_positions = copy.deepcopy(self.transkription_positions)
if len(self.transkription_positions) > 0\
and self.transkription_positions[0].style is not None:
writing_process_id = self.transkription_positions[0].style.writing_process_id
for new_tp in new_transkription_positions:
new_tp.style.writing_process_id = writing_process_id
return Word(id=id, text=text, transkription_positions=new_transkription_positions,\
faksimile_positions=self.faksimile_positions, line_number=self.line_number,\
word_parts=word_parts)
    def create_correction_history(self, page=None, box_style=None):
        """Create correction history.

        If this word sits over a word box, reconstructs the overwritten word
        from the box's earlier_text and a style derived (in order of
        precedence) from page, box_style, or a default Style.  Then recurses
        into the word parts and, if there are any, creates an earlier version
        and wires up extension/deletion relations.
        """
        if self.word_box is not None:
            manuscript = self.transkription_positions[0].style.manuscript\
                    if len(self.transkription_positions) > 0\
                    and self.transkription_positions[0].style is not None\
                    else None
            style = Style()
            if box_style is not None:
                style = box_style
            # NOTE: page takes precedence over box_style when both are given.
            if page is not None:
                style = Style.create_cls(page, self.word_box.text_style_class, manuscript=manuscript)
                for font_key in [ font_key for font_key in self.word_box.text_style_class.split(' ') if font_key in page.fontsizekey2stage_mapping.keys() ]:
                    style.writing_process_id = page.fontsizekey2stage_mapping.get(font_key)
            transkription_positions = TranskriptionPosition.copy_list_of_cls(self.transkription_positions)
            for transkription_position in transkription_positions:
                transkription_position.style = style
            # The overwritten word reuses this word's positions with the box style.
            self.overwrites_word = Word(text=self.word_box.earlier_text, transkription_positions=transkription_positions,\
                    line_number=self.line_number)
        for word_part in self.word_parts:
            word_part.create_correction_history(page=page, box_style=box_style)
        if len(self.word_parts) > 0:
            earlier_version = self.create_earlier_version()
            extending_words = self._get_parts_with_property_key('extendsEarlierVersion')
            if len(extending_words) > 0:
                for word in extending_words:
                    word.isExtensionOfWord = earlier_version
            if self.has_mixed_status('deleted', include_parts=True):
                # edited_text = the text without the deleted parts.
                self.edited_text = ''.join([ word.text for word in self.word_parts if not word.deleted ])
            if len(self.corrections) > 0:
                self.earlier_version = earlier_version
@staticmethod
def CREATE_WORD(word_node=None, page=None, word_part_objs=[], id=0, height=0, endX=0, endSign=None, matrix=None, line_number=-1, debug_msg=None):
"""Creates a word from a (lxml.Element) node or word_part_objs.
[:return:] Word
"""
if word_node is not None: # init word from xml node
id = int(word_node.get('id'))
line_number = int(word_node.get('line-number')) if bool(word_node.get('line-number')) else line_number
text = word_node.get('text')
deleted = bool(word_node.get('deleted')) and word_node.get('deleted') == 'true'
transkription_positions = [ TranskriptionPosition(node=node) for node in word_node.findall('.//' + WordPosition.TRANSKRIPTION) ]
faksimile_positions = [ WordPosition(node=node) for node in word_node.findall('.//' + WordPosition.FAKSIMILE) ]
word_part_objs = [ item.attrib for item in word_node.findall('.//' + Word.DATA + '/part')]\
if len(word_node.findall('.//' + Word.DATA)) > 0\
else [ item.attrib for item in word_node.findall('.//part')]
return Word(id=id, text=text, deleted=deleted, line_number=line_number, transkription_positions=transkription_positions,\
faksimile_positions=faksimile_positions, word_part_objs=word_part_objs)
elif len(word_part_objs) > 0: # init word from word_part_obj that has been extracted from svg file
WIDTH = 5
TOPCORRECTION = 2.0
FONTWIDTHFACTOR = 0.7 # factor that multiplies lastCharFontSize
height = height
x = round(float(word_part_objs[0]['x']), 3)
if(page is not None and bool(page.style_dict)):
HEIGHT_FACTOR = 1.1 # factor that multiplies biggest_font_size -> height
style_set = set(' '.join(set( dict['class'] for dict in word_part_objs)).split(' '))
biggest_font_size = page.get_biggest_fontSize4styles(style_set=style_set)
height = round(biggest_font_size * HEIGHT_FACTOR + HEIGHT_FACTOR / biggest_font_size, 3)
TOPCORRECTION = 1 + HEIGHT_FACTOR / biggest_font_size
if endSign is not None and '%' in endSign:
lastCharFontSizeList = [ float(page.style_dict[key]['font-size'].replace('px',''))\
for key in word_part_objs[len(word_part_objs)-1]['class'].split(' ')\
if bool(page.style_dict[key].get('font-size'))]
lastCharFontSize = lastCharFontSizeList[0] if len(lastCharFontSizeList) > 0 else 1
endX = float(endX) + lastCharFontSize * FONTWIDTHFACTOR
elif endSign is not None and '%' in endSign:
endX = float(endX) + WIDTH
bottom = round(float(word_part_objs[0]['y']), 3)
y = round(bottom - height + TOPCORRECTION, 3)
width = round(float(endX) - x, 3)
transkription_positions = [ WordPosition(height=height, width=width, x=x, y=y, matrix=matrix, tag=WordPosition.TRANSKRIPTION) ]
text = ''.join([ dict['text'] for dict in word_part_objs])
line_number = page.get_line_number( (y + bottom)/2) if page is not None else line_number
word = Word(id=id, text=text, line_number=line_number, transkription_positions=transkription_positions, word_part_objs=word_part_objs)
word.debug_msg = debug_msg
return word
else:
error_msg = 'word_node has not been defined' if (word_node is None) else 'word_part_objs is empty'
raise Exception('Error: {}'.format(error_msg))
    @classmethod
    def get_semantic_dictionary(cls):
        """ Creates and returns a semantic dictionary as specified by SemanticClass.

        Registers the word's semantic properties (styles, corrections,
        deletion paths, editor comment, earlier version, edited text, the
        is*OfWord relations, overwrites_word and word_parts) and makes every
        key of cls.XML_CORRECTION_DICT a subproperty of 'isCorrectionOfWord'.
        """
        dictionary = super(Word,cls).get_semantic_dictionary()
        dictionary[cls.PROPERTIES_KEY].update(cls.create_semantic_property_dictionary('styles', Style,\
                cardinality=1, cardinality_restriction='minCardinality',\
                name='wordHasStyle', label='word has style', comment='Word has an appearance that is characterized by this style.'))
        dictionary[cls.PROPERTIES_KEY].update(cls.create_semantic_property_dictionary('corrections', Word,\
                name='wordHasCorrection', label='word has corrections', comment='Word has a correction made by the author.'))
        dictionary[cls.PROPERTIES_KEY].update(cls.create_semantic_property_dictionary('deletion_paths', WordDeletionPath,\
                name='wordIsDeletedByPath', label='word has been deleted with a deletion path',\
                comment='Word has been deleted by the author using a deletion path.'))
        dictionary[cls.PROPERTIES_KEY].update(cls.create_semantic_property_dictionary('editor_comment', EditorComment,\
                name='wordHasEditorComment', label='word has a comment by the editors', comment='Word has been commented by the editors.'))
        dictionary[cls.PROPERTIES_KEY].update(cls.create_semantic_property_dictionary('earlier_version', Word,\
                name='wordHasEarlierVersion', label='word has an earlier version', comment='There is a earlier version of this word.'))
        dictionary[cls.PROPERTIES_KEY].update(cls.create_semantic_property_dictionary('edited_text', str,\
                name='hasEditedText', label='word has an edited text', comment='Word has a text that is edited automatically by removing deleted parts or hyphens.'))
        dictionary[cls.PROPERTIES_KEY].update(cls.create_semantic_property_dictionary('isClarificationOfWord', Word,\
                name='isClarificationOfWord', label='word is a clarification of word',\
                comment='The author has used this part of the word in order to clarify the appearance of that word.'))
        dictionary[cls.PROPERTIES_KEY].update(cls.create_semantic_property_dictionary('isDeletionOfWord', Word,\
                name='isDeletionOfWord', label='word is a deletion of word',\
                comment='The author has used this part of a word in order to delete the corresponding part of an earlier version of this word.'))
        dictionary[cls.PROPERTIES_KEY].update(cls.create_semantic_property_dictionary('isExtensionOfWord', Word,\
                name='isExtensionOfWord', label='word is a extension of word',\
                comment='The author has used this part of a word in order to extend an earlier version of this word.'))
        dictionary[cls.PROPERTIES_KEY].update(cls.create_semantic_property_dictionary('isTransformationOfWord', Word,\
                name='isTransformationOfWord', label='word is a transformation of word',\
                comment='The author has used this part of a word in order to transform the corresponding part of an earlier version of this word.'))
        dictionary[cls.PROPERTIES_KEY].update(cls.create_semantic_property_dictionary('overwrites_word', Word,\
                name='overwritesWord', label='word overwrites word',\
                comment='The author has used this word in order to overwrite that word.'))
        # This makes wordHasWordParts a subproperty of cls.HAS_HOMOTYPIC_PARTS_URL_STRING,
        # cls.return_dictionary_after_updating_super_classes will subclass Word under the corresponding super class.
        dictionary[cls.PROPERTIES_KEY].update(cls.create_semantic_property_dictionary('word_parts', list,\
                name='wordHasWordParts', label='word has word parts', comment='Word consists of a list of words.',\
                subPropertyOf=cls.HAS_HOMOTYPIC_PARTS_URL_STRING))
        # Every correction-flag property shares 'isCorrectionOfWord' as super property.
        super_property_dictionary = cls.create_semantic_property_dictionary(cls.SUPER_PROPERTY, Word,\
                name='isCorrectionOfWord', label='word is a correction of word',\
                comment='The author has used this word in order to correct that word.')
        for key in cls.XML_CORRECTION_DICT.keys():
            correction_dict = dictionary[cls.PROPERTIES_KEY].get(key)
            correction_dict.update(super_property_dictionary)
            dictionary[cls.PROPERTIES_KEY].update({key: correction_dict})
        return cls.return_dictionary_after_updating_super_classes(dictionary)
def has_mixed_status(self, property_key, include_parts=False, concerns_word=True):
"""Returns true if transkription_positions have mixed status concerning the property_key in their __dict__.
"""
if False in set(property_key in tp.__dict__.keys() for tp in self.transkription_positions):
return False
if len(self.word_parts) > 0 and include_parts:
if concerns_word:
if False in set(property_key in word.__dict__.keys() for word in self.word_parts):
return False
return len(set(word.__dict__[property_key] for word in self.word_parts)) > 1
else:
return len(set(word.transkription_positions[0].__dict__[property_key] for word in self.word_parts\
if len(word.transkription_positions) > 0 and property_key in word.transkription_positions[0].__dict__.keys())) > 1
return len(set(tp.__dict__[property_key] for tp in self.transkription_positions )) > 1
    def init_word(self, page):
        """Initialize word with objects from page.

        Resolves writing processes by id, recursively initializes the word
        parts (merging their lines and writing processes into this word,
        deduplicated), and initializes overwrites_word and earlier_version.
        The earlier version inherits a plausible writing_process_id and
        line_number from this word when it has none.
        """
        super(Word,self).init_word(page)
        if self.writing_process_id > -1:
            self.writing_processes += [ wp for wp in page.writing_processes if wp.id == self.writing_process_id ]
        # NOTE(review): this local is unused — presumably a leftover.
        writing_processes = self.writing_processes
        for word_part in self.word_parts:
            word_part.init_word(page)
            self.lines += word_part.lines
            self.writing_processes += word_part.writing_processes
        # Deduplicate lines and writing processes collected from the parts.
        self.lines = [ line for line in set(self.lines) ]
        self.writing_processes = [ wp for wp in set(self.writing_processes)]
        if self.overwrites_word is not None:
            self.overwrites_word.init_word(page)
        if self.earlier_version is not None:
            if self.earlier_version.writing_process_id == -1:
                self.earlier_version.writing_process_id = self.writing_process_id-1
            if self.earlier_version.line_number == -1:
                self.earlier_version.line_number = self.line_number
            self.earlier_version.init_word(page)
def join(self, other_word, append_at_end_of_new_word=True):
"""Joins other_word with this word by changing the text of current word and adding other_word.transkription_positions.
"""
if append_at_end_of_new_word:
self.text = self.text + other_word.text
for position in other_word.transkription_positions:
position.id = str(len(self.transkription_positions))
self.transkription_positions.append(position)
else:
self.text = other_word.text + self.text
index = 0
for position in other_word.transkription_positions:
self.transkription_positions.insert(index, position)
index += 1
while index < len(self.transkription_positions):
self.transkription_positions[index].id = str(index)
index += 1
self.simplify_transkription_positions()
    def partition_according_to_deletion(self):
        """Partition a word according to its transkription_positions' deletion status
        ->split word and add partial words as its parts.

        Consecutive transkription positions sharing the same deletion status
        are grouped into new part words; afterwards this word keeps no own
        transkription positions.  If parts already exist, the partitioning is
        delegated to them.  A word whose (single-status) positions are deleted
        is marked as deleted itself.
        """
        if self.has_mixed_status('deleted'):
            transkription_positions = []
            last_status = None
            for transkription_position in self.transkription_positions:
                # A status change closes the current group as a new part word.
                if transkription_position.deleted != last_status\
                   and len(transkription_positions) > 0:
                    newWord = Word(id=len(self.word_parts), line_number=self.line_number,\
                            transkription_positions=transkription_positions, deleted=last_status, writing_process_id=self.writing_process_id)
                    self.word_parts.append(newWord)
                    transkription_positions = []
                transkription_positions.append(transkription_position)
                last_status = transkription_position.deleted
            if len(transkription_positions) > 0:
                newWord = Word(id=len(self.word_parts), line_number=self.line_number,\
                        transkription_positions=transkription_positions, deleted=last_status, writing_process_id=self.writing_process_id)
                self.word_parts.append(newWord)
            self.transkription_positions = []
            self.line_number = -1
            self.deleted = False
        elif len(self.word_parts) > 0:
            # 'none' swallows the unused second return value.
            self.word_parts, none = execute_function_on_parts(self.word_parts, 'partition_according_to_deletion')
        elif not self.deleted\
             and len(self.transkription_positions) > 0\
             and self.transkription_positions[0].deleted:
            self.deleted = True
    def partition_according_to_writing_process_id(self):
        """Partition a word according to its transkription_positions' writing_process_ids
        ->split word and add partial words as its parts.

        Consecutive transkription positions sharing the same
        writing_process_id are grouped into new part words; afterwards this
        word keeps no own transkription positions.  If parts already exist,
        the partitioning is delegated to them.  Finally the word's own
        writing_process_id is derived from its parts or positions.
        """
        if self.belongs_to_multiple_writing_processes():
            last_writing_process_id = -1
            transkription_positions = []
            for transkription_position in self.transkription_positions:
                # An id change closes the current group as a new part word.
                if transkription_position.writing_process_id != last_writing_process_id\
                   and len(transkription_positions) > 0:
                    newWord = Word(id=len(self.word_parts), line_number=self.line_number,\
                            transkription_positions=transkription_positions, writing_process_id=last_writing_process_id)
                    self.word_parts.append(newWord)
                    transkription_positions = []
                transkription_positions.append(transkription_position)
                last_writing_process_id = transkription_position.writing_process_id
            if len(transkription_positions) > 0:
                newWord = Word(id=len(self.word_parts), line_number=self.line_number,\
                        transkription_positions=transkription_positions, writing_process_id=last_writing_process_id)
                self.word_parts.append(newWord)
            self.transkription_positions = []
        elif len(self.word_parts) > 0:
            self.word_parts, none = execute_function_on_parts(self.word_parts, 'partition_according_to_writing_process_id')
            if self.belongs_to_multiple_writing_processes(include_parts=True):
                # Mixed parts: take the highest writing process id.
                self.writing_process_id = sorted(set([ word.writing_process_id for word in self.word_parts ]), reverse=True)[0]
        elif len(self.transkription_positions) > 0:
            self.writing_process_id = self.transkription_positions[0].writing_process_id
    def process_boxes(self, box_paths, tr_xmin=0.0, tr_ymin=0.0, previous_word_has_box=False):
        """Determines whether word is over a word box.

        Delegates to the word parts if there are any; otherwise each
        transkription position's path is matched against box_paths, matched
        boxes are attached to the positions (splitting positions where
        necessary) and consumed from box_paths.
        [:return:] the (partial) word over a box, or None
        """
        word_over_box = None
        if len(self.word_parts) > 0:
            for word in self.word_parts:
                current_word = word.process_boxes(box_paths, tr_xmin=tr_xmin, tr_ymin=tr_ymin, previous_word_has_box=(word_over_box is not None))
                if current_word is not None and current_word.word_box is not None:
                    word_over_box = current_word
        else:
            new_tp_dict = {}
            for index, transkription_position in enumerate(self.transkription_positions):
                # Nudge the first position slightly right when the previous
                # word already claimed a box, to avoid re-matching that box.
                if previous_word_has_box and index == 0:
                    if len(transkription_position.positional_word_parts) > 0:
                        transkription_position.positional_word_parts[0].left += transkription_position.positional_word_parts[0].width/2
                        #print(f'{self.text}: {transkription_position.positional_word_parts[0].left}')
                    else:
                        transkription_position.left += 1
                word_path = Path.create_path_from_transkription_position(transkription_position,\
                        tr_xmin=tr_xmin, tr_ymin=tr_ymin)
                containing_boxes = [ box_path for box_path in box_paths\
                        if word_path.is_partially_contained_by(box_path)\
                        or box_path.do_paths_intersect(word_path) ]
                if len(containing_boxes) > 0:
                    if previous_word_has_box:
                        # debug output
                        print(f'{self.text}: {word_path.path.bbox()} {containing_boxes[0].path.bbox()}')
                    self._set_box_to_transkription_position(containing_boxes[0], word_path,\
                            transkription_position, new_tp_dict, tr_xmin)
                    # Each box is consumed by at most one position.
                    box_paths.remove(containing_boxes[0])
            # Replace split positions by their replacement lists, in place.
            for replace_tp in new_tp_dict.keys():
                for tp in new_tp_dict.get(replace_tp):
                    self.transkription_positions.insert(self.transkription_positions.index(replace_tp), tp)
                self.transkription_positions.remove(replace_tp)
            word_over_box = self._get_partial_word_over_box()
            update_transkription_position_ids(self)
        return word_over_box
def set_word_insertion_mark(self, word_insertion_mark):
"""Sets word_insertion_mark
"""
self.word_insertion_mark = word_insertion_mark
def set_writing_process_id_to_transkription_positions(self, page):
"""Determines the writing process id of the transkription_positions.
"""
for transkription_position in self.transkription_positions:
if len(transkription_position.positional_word_parts) > 0:
for font_key in transkription_position.positional_word_parts[0].style_class.split(' '):
if font_key in page.fontsizekey2stage_mapping.keys():
transkription_position.writing_process_id = page.fontsizekey2stage_mapping.get(font_key)
    def simplify_transkription_positions(self):
        """Merge transkription_positions if possible.

        Walks the positions from right to left and merges each mergeable
        neighbouring pair into one position built from their combined
        positional word parts.  Requires every position to have
        positional_word_parts; otherwise nothing is done.
        """
        index = len(self.transkription_positions)-1
        while index > 0\
              and False not in [ 'positional_word_parts' in tp.__dict__.keys() for tp in self.transkription_positions ]:
            current_tp = self.transkription_positions[index]
            index -= 1
            previous_tp = self.transkription_positions[index]
            # NOTE: 'is_mergebale_with' is the (misspelled) name of the
            # transkription position API method.
            if previous_tp.is_mergebale_with(current_tp):
                positional_word_parts = previous_tp.positional_word_parts
                positional_word_parts += current_tp.positional_word_parts
                transkription_positions = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(\
                        positional_word_parts, debug_msg_string='simplifying transkription positions', transkription_position_id=previous_tp.id)
                # Only merge when the pwps collapse into exactly one position.
                if len(transkription_positions) == 1:
                    transkription_positions[0].writing_process_id = previous_tp.writing_process_id\
                            if previous_tp.writing_process_id != -1\
                            else current_tp.writing_process_id
                    self.transkription_positions.pop(index+1)
                    self.transkription_positions[index] = transkription_positions[0]
        #print(self.text, len(self.transkription_positions))
    def split(self, split_string, start_id=0):
        """Splits the word and returns a 3-tuple of new words.

        Partitions self.text at split_string and rebuilds a word for each of
        the (optional) prefix, the match, and the (optional) suffix from the
        positional word parts of all transkription positions.  Emits a
        warning (and leaves the corresponding word None) when the positional
        word parts do not line up with the partitioned strings.
        [:return:] (previousWord or None, currentWord, nextWord or None)
        """
        previousString, currentString, nextString = self.text.partition(split_string)
        currentWord = None
        previousWord = None
        nextWord = None
        previousIndex = 0
        current_id = start_id
        all_positional_word_parts = []
        for position in self.transkription_positions:
            all_positional_word_parts += position.positional_word_parts
        if len(all_positional_word_parts) == 0:
            warnings.warn('ATTENTION: Word: {} {} with Strings "{}, {}, {}": there are no parts!'.format(self.id, self.text, previousString, currentString, nextString))
        if len(previousString) > 0:
            # Consume pwps until their concatenated text equals the prefix.
            previous_pwps = []
            while previousIndex < len(all_positional_word_parts) and previousString != ''.join([ pwp.text for pwp in previous_pwps ]):
                previous_pwps.append(all_positional_word_parts[previousIndex])
                previousIndex += 1
            if previousString != ''.join([ pwp.text for pwp in previous_pwps ]):
                warnings.warn('ATTENTION: "{}" does not match a word_part_obj!'.format(previousString))
            else:
                previous_transkription_positions = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(previous_pwps, debug_msg_string='word.split')
                previous_text = ''.join([ pwp.text for pwp in previous_pwps ])
                previousWord = Word(text=previous_text, id=current_id, line_number=self.line_number, transkription_positions=previous_transkription_positions)
                current_id += 1
                all_positional_word_parts = all_positional_word_parts[previousIndex:]
        if len(nextString) > 0:
            # Consume pwps for the match; the remainder becomes the next word.
            tmp_pwps = []
            index = 0
            while index < len(all_positional_word_parts) and currentString != ''.join([ pwp.text for pwp in tmp_pwps ]):
                tmp_pwps.append(all_positional_word_parts[index])
                index += 1
            if currentString != ''.join([ pwp.text for pwp in tmp_pwps ]):
                warnings.warn('ATTENTION: "{}" does not match a word_part_obj!'.format(currentString))
            else:
                next_pwps = all_positional_word_parts[index:]
                next_transkription_positions = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(next_pwps, debug_msg_string='word.split')
                next_text = ''.join([ pwp.text for pwp in next_pwps ])
                nextWord = Word(text=next_text, id=current_id+1, line_number=self.line_number, transkription_positions=next_transkription_positions)
                all_positional_word_parts = all_positional_word_parts[:index]
        current_transkription_positions = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(all_positional_word_parts, debug_msg_string='word.split')
        current_text = ''.join([ pwp.text for pwp in all_positional_word_parts ])
        currentWord = Word(text=current_text, id=current_id, line_number=self.line_number, transkription_positions=current_transkription_positions)
        return previousWord, currentWord, nextWord
    def split_according_to_status(self, status, splits_are_parts=False):
        """Split a word according to its transkription_positions' text.

        Groups consecutive transkription positions sharing the same value of
        the given status attribute into new words.  With
        splits_are_parts=True the new words also become this word's parts and
        this word keeps no own transkription positions.
        :return: a list of new word.Word
        """
        new_words = []
        if self.has_mixed_status(status):
            last_status = None
            transkription_positions = []
            for transkription_position in self.transkription_positions:
                # A status change closes the current group as a new word.
                if transkription_position.__dict__[status] != last_status\
                   and len(transkription_positions) > 0:
                    new_words.append(\
                            self._create_new_word(transkription_positions, status, new_id=self.id+len(new_words)))
                    transkription_positions = []
                transkription_positions.append(transkription_position)
                last_status = transkription_position.__dict__[status]
            if len(transkription_positions) > 0:
                new_words.append(\
                        self._create_new_word(transkription_positions, status, new_id=self.id+len(new_words)))
        if splits_are_parts:
            self.word_parts += new_words
        if len(self.word_parts) > 0:
            self.transkription_positions = []
        return new_words
def undo_partitioning(self):
"""Undo partitioning.
"""
if len(self.word_parts) > 0:
for word_part in self.word_parts:
word_part.undo_partitioning()
if self.text != ''.join([ tp.get_text() for tp in self.transkription_positions ]):
self.transkription_positions += word_part.transkription_positions
self.earlier_version = None
self.edited_text = None
self.word_box = None
self.word_parts = []
self.corrections = []
self.earlier_versions = []
self.box_paths = []
    def _create_new_word(self, transkription_positions, status, new_id=0):
        """Create a new word from self and transkription_positions.

        Copies every COPY_PROPERTY_KEY attribute (except the status itself)
        from this word to the new word, then transfers the status value of
        the first transkription position — either appended to the mapped list
        attribute (APPEND_PROPERTY2LIST_SOURCE_TARGET_KEYS) or set directly.
        [:return:] Word
        """
        newWord = Word(id=new_id, transkription_positions=transkription_positions)
        for key in self.COPY_PROPERTY_KEY:
            if key != status and key in self.__dict__.keys():
                newWord.__dict__[key] = self.__dict__[key]
        if status in self.APPEND_PROPERTY2LIST_SOURCE_TARGET_KEYS.keys():
            newWord.__dict__[self.APPEND_PROPERTY2LIST_SOURCE_TARGET_KEYS[status]].append(transkription_positions[0].__dict__[status])
        else:
            newWord.__dict__[status] = transkription_positions[0].__dict__[status]
        return newWord
def _get_parts_with_property_key(self, property_key):
"""Return a list of word_parts with property == property_key.
"""
word_parts = []
for word_part in self.word_parts:
if property_key in word_part.__dict__.keys():
word_parts.append(word_part)
else:
word_parts += word_part._get_parts_with_property_key(property_key)
return word_parts
    def _get_partial_word_over_box(self):
        """Partition a word according to its transkription_positions' has_box
        ->split word and add partial words as its parts.

        Consecutive transkription positions sharing the same has_box value
        become new part words; a part whose positions carry a box becomes the
        word over the box.  If parts exist already, they are searched for the
        first word over a box.  A word whose single boxed position carries a
        box is itself the word over the box.
        :return: word over box or None
        """
        word_over_box = None
        if self.has_mixed_status('has_box'):
            transkription_positions = []
            last_word_box = None
            for transkription_position in self.transkription_positions:
                # A box change closes the current group as a new part word.
                if transkription_position.has_box != last_word_box\
                   and len(transkription_positions) > 0:
                    newWord = Word(id=len(self.word_parts), line_number=self.line_number,\
                            transkription_positions=transkription_positions, deleted=self.deleted, writing_process_id=self.writing_process_id)
                    self.word_parts.append(newWord)
                    if last_word_box is not None:
                        word_over_box = newWord
                        word_over_box.word_box = last_word_box
                    transkription_positions = []
                transkription_positions.append(transkription_position)
                last_word_box = transkription_position.has_box
            if len(transkription_positions) > 0:
                newWord = Word(id=len(self.word_parts), line_number=self.line_number,\
                        transkription_positions=transkription_positions, deleted=self.deleted, writing_process_id=self.writing_process_id)
                self.word_parts.append(newWord)
                if last_word_box is not None:
                    word_over_box = newWord
                    word_over_box.word_box = last_word_box
            self.transkription_positions = []
        elif len(self.word_parts) > 0:
            #self.word_parts, word_over_box = execute_function_on_parts(self.word_parts, inspect.currentframe().f_code.co_name) #'get_partial_word_over_box'
            # Take the first word over a box found among the parts.
            for word_part in self.word_parts:
                if word_over_box is None:
                    word_over_box = word_part._get_partial_word_over_box()
                else:
                    break
        elif len([ tp for tp in self.transkription_positions if tp.has_box is not None]) == 1:
            word_over_box = self
            word_over_box.word_box = [ tp for tp in self.transkription_positions if tp.has_box is not None][0].has_box
        return word_over_box
def _set_box_to_transkription_position(self, box_path, word_path, transkription_position, new_transkription_positions_dictionary, tr_xmin):
    """Set box_path to transkription_position that is contained by box_path.
    Create new transkription_positions by splitting old ones if necessary and add them to new_transkription_positions_dictionary.

    bbox() returns (xmin, xmax, ymin, ymax); subtracting tr_xmin moves
    the box edges into the transkription position's coordinate system.
    """
    if box_path.contains_path(word_path):
        # Whole word inside the box: no split needed.
        transkription_position.has_box = box_path
    elif box_path.contains_start_of_path(word_path):
        # Box covers the start of the word -> split at the box's right edge.
        split_position = box_path.path.bbox()[1] - tr_xmin
        new_tps = transkription_position.split(split_position)
        if len(new_tps) == 2:
            new_tps[0].has_box = box_path
            new_transkription_positions_dictionary.update({ transkription_position: new_tps })
        else:
            # Split did not produce two parts -> mark the whole position.
            transkription_position.has_box = box_path
    elif box_path.contains_end_of_path(word_path):
        # Box covers the end of the word -> split at the box's left edge.
        split_position = box_path.path.bbox()[0] - tr_xmin
        new_tps = transkription_position.split(split_position)
        if len(new_tps) == 2:
            new_tps[1].has_box = box_path
            new_transkription_positions_dictionary.update({ transkription_position: new_tps })
        else:
            transkription_position.has_box = box_path
    else: # box_path in the middle of word_path
        split_position1 = box_path.path.bbox()[0] - tr_xmin
        split_position2 = box_path.path.bbox()[1] - tr_xmin
        new_tps = transkription_position.split(split_position1, split_position2)
        if len(new_tps) >= 2:
            # Middle segment (index 1) is the part inside the box.
            new_tps[1].has_box = box_path
            new_transkription_positions_dictionary.update({ transkription_position: new_tps })
        else:
            transkription_position.has_box = box_path
def do_paths_intersect_saveMode(mypath1, mypath2):
    """Return a truthy value if the two paths intersect or partially contain
    each other; return False if they do not, or if the underlying geometry
    test raises an AssertionError.
    """
    try:
        found_intersection = mypath1.path.intersect(mypath2.path, justonemode=True)
        return found_intersection or mypath1.is_partially_contained_by(mypath2)
    except AssertionError:
        return False
Index: svgscripts/extract_line_continuation.py
===================================================================
--- svgscripts/extract_line_continuation.py (revision 99)
+++ svgscripts/extract_line_continuation.py (revision 100)
@@ -1,219 +1,222 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This program can be used to extract line continuations.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see 1}}}
from colorama import Fore, Style
import getopt
import lxml.etree as ET
import re
import sys
from os import listdir, sep, path
from os.path import isfile, isdir, dirname
import warnings
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
from datatypes.box import text_node_is_inside_match_box, tspan_node_is_inside_match_box
from datatypes.line import Line
from datatypes.line_continuation import LineContinuation
from datatypes.matrix import Matrix
from datatypes.page import Page, STATUS_MERGED_OK, STATUS_POSTMERGED_OK
from datatypes.reference import Reference
from datatypes.transkriptionField import TranskriptionField
from util import back_up
sys.path.append('shared_util')
from myxmlwriter import write_pretty, xml_has_type, FILE_TYPE_SVG_WORD_POSITION, FILE_TYPE_XML_MANUSCRIPT
# Module-wide flags: UNITTESTING suppresses console output, backups and
# file writes; DEBUG is reserved for extra diagnostics.
UNITTESTING = False
DEBUG = False
def extract_line_continuations(page: Page, svg_file=None, warning_message='WARNING'):
    """Extract line continuations.

    Scans the svg file for arrow glyphs (font 'Frutiger-Europeen') on the
    marginalia, attaches a LineContinuation to the line each arrow belongs
    to and (outside unit tests) writes the updated page tree back.

    :raises Exception: if neither svg_file nor a valid page.source is available
    """
    if svg_file is None:
        if page.source is None or not isfile(page.source):
            raise Exception('Function "extract_line_continuations" needs a page with a valid source or a svg_file!')
        svg_file = page.source
    if not UNITTESTING:
        print(Fore.CYAN + f'Extracting line continuations on {page.title}, {page.number} ...' + Style.RESET_ALL)
    svg_tree = ET.parse(svg_file)
    transkription_field = TranskriptionField(svg_file)
    page.update_line_number_area(transkription_field, svg_tree=svg_tree)
    # Reset editor comments; they are rebuilt below.
    for line in page.lines: line.editor_comments = []
    # lxml xpath does not accept None as namespace prefix -> map it to 'ns'.
    namespaces = { k if k is not None else 'ns': v for k, v in svg_tree.getroot().nsmap.items() }
    # Arrows are rendered with the 'Frutiger-Europeen' font; find its style key.
    arrow_style_key = [ key for key, value in page.style_dict.items() if value.get('font-family') == 'Frutiger-Europeen'][0]\
            if len([ key for key, value in page.style_dict.items() if value.get('font-family') == 'Frutiger-Europeen']) > 0\
            else None
    if arrow_style_key is not None:
        for arrow in _extract_arrow_nodes(svg_tree, arrow_style_key, transkription_field, namespaces):
            # tspan nodes inherit their transform from the parent text node.
            matrix = Matrix(transform_matrix_string=arrow.get('transform'))\
                    if not arrow.tag.endswith('tspan')\
                    else Matrix(transform_matrix_string=arrow.getparent().get('transform'))
            line = _get_line_of_arrow(arrow, page, transkription_field)
            if line is not None:
                # First try the "from" reference, then the "to" reference.
                reference_counter = 0
                reference = None
                while reference is None and reference_counter < 2:
                    reference = _get_reference(svg_tree, arrow, matrix, transkription_field, namespaces, is_from_reference=(reference_counter==0))
                    reference_counter += 1
                if reference is not None:
                    line.editor_comments.append(LineContinuation(reference=reference, to_reference=(reference_counter>1)))
                else:
                    # No reference text found: record an empty reference whose
                    # direction depends on which side of the field the arrow is on.
                    to_reference = (matrix.getX() > transkription_field.xmax)
                    line.editor_comments.append(LineContinuation(reference=Reference(), to_reference=to_reference))
            else:
                y = round(matrix.getY() - transkription_field.ymin, 2)
                warnings.warn(f'{warning_message}: There is no line for {y}')
    for line in page.lines: line.attach_object_to_tree(page.page_tree)
    if not UNITTESTING:
        write_pretty(xml_element_tree=page.page_tree, file_name=page.page_tree.docinfo.URL,\
                script_name=__file__, file_type=FILE_TYPE_SVG_WORD_POSITION)
def _extract_arrow_nodes(svg_tree: ET.ElementTree, arrow_style_key: str, transkription_field=None, namespaces=None) ->list:
    """Return all arrow nodes from svg_tree.

    An arrow is a text or tspan node with the arrow style class whose
    text is ')' and which is located on the marginalia.
    """
    if transkription_field is None:
        transkription_field = TranskriptionField(svg_tree.docinfo.URL)
    if namespaces is None:
        namespaces = { key if key is not None else 'ns': value for key, value in svg_tree.getroot().nsmap.items() }
    xpath_query = f'//ns:text[contains(@class, "{arrow_style_key}")]'\
                  + f'|//ns:tspan[contains(@class, "{arrow_style_key}")]'
    candidates = svg_tree.xpath(xpath_query, namespaces=namespaces)
    return [ node for node in candidates\
             if node.text == ')' and node_is_on_marginalia(node, transkription_field) ]
def _get_arrow_y(arrow: ET.Element, matrix=None) ->float:
"""Return y of arrow node.
"""
if matrix is None:
matrix = Matrix(transform_matrix_string=arrow.get('transform'))\
if not arrow.tag.endswith('tspan')\
else Matrix(transform_matrix_string=arrow.getparent().get('transform'))
if arrow.tag.endswith('tspan'):
return matrix.add2Y(add_to_y=arrow.get('y'))
else:
return matrix.getY()
def _get_line_of_arrow(arrow: ET.Element, page: Page, transkription_field: TranskriptionField, matrix=None) ->Line:
    """Return the Line next to the arrow, or None if no line matches.

    The arrow's y coordinate, shifted into the transkription field and
    nudged up by half a point, selects the line number.
    """
    arrow_y = _get_arrow_y(arrow, matrix=matrix)
    line_number = page.get_line_number(round(arrow_y - transkription_field.ymin, 2) -.5)
    return next((line for line in page.lines if line.id == line_number), None)
def _get_reference(svg_tree: ET.ElementTree, arrow: ET.Element, arrow_matrix: Matrix, transkription_field: TranskriptionField, namespaces: dict, is_from_reference=True) ->Reference:
    """Return reference.

    Collects the text nodes lying in a match box on the arrow's line —
    either between page border and arrow (is_from_reference=True) or on
    the far side of the arrow (is_from_reference=False) — and builds a
    Reference from their concatenated text.
    """
    reference = None
    # tspan nodes carry their own x offset; text nodes use the transform only.
    arrow_left = arrow_matrix.add2X(add_to_x=arrow.get('x'))\
            if arrow.tag.endswith('tspan')\
            else arrow_matrix.getX()
    arrow_y = _get_arrow_y(arrow, matrix=arrow_matrix)
    # Match box runs from the page border (left marginalia) or from the
    # right edge of the field (right marginalia) up to the arrow ...
    xmin = 0\
            if arrow_left < transkription_field.xmin\
            else transkription_field.xmax + transkription_field.line_number_area_width
    xmax = arrow_left
    # ... within a 10 unit band vertically centered on the arrow.
    ymin = arrow_y -5
    ymax = arrow_y +5
    if not is_from_reference:
        # "to" references lie on the other side of the arrow.
        xmin = xmax
        xmax = transkription_field.xmin - transkription_field.line_number_area_width\
                if arrow_left < transkription_field.xmin\
                else transkription_field.documentWidth + transkription_field.line_number_area_width
    # Sort candidate nodes left to right by their transform x.
    text_nodes_on_arrow_line = sorted([ text_node for text_node in svg_tree.xpath('//ns:text', namespaces=namespaces)\
            if text_node != arrow and text_node_is_inside_match_box(text_node, xmin, xmax, ymin, ymax) ],\
            key=lambda node: Matrix(transform_matrix_string=node.get('transform')).getX())
    reference_string = ''
    for text_node in text_nodes_on_arrow_line:
        reference_string += ''.join([ child.text for child in text_node.getchildren()])\
                if len(text_node.getchildren()) > 0\
                else text_node.text
    # NOTE(review): the revision below swallows any error raised by
    # Reference.create_cls and only prints the offending string, leaving
    # reference as None — confirm this best-effort behavior is intended.
    if reference_string != '':
-            reference = Reference.create_cls(reference_string=reference_string)
+            try:
+                reference = Reference.create_cls(reference_string=reference_string)
+            except Exception:
+                print(reference_string)
    return reference
def node_is_on_marginalia(node: ET.Element, transkription_field: TranskriptionField) ->bool:
    """Return true if node is on marginalia, i.e. in the area left of the
    transkription field or between its right edge and the document border.
    """
    inside_match_box = tspan_node_is_inside_match_box\
            if node.tag.endswith('tspan')\
            else text_node_is_inside_match_box
    ymin = transkription_field.ymin
    ymax = transkription_field.ymax
    return inside_match_box(node, 0, transkription_field.xmin, ymin, ymax)\
            or inside_match_box(node, transkription_field.xmax, transkription_field.documentWidth, ymin, ymax)
def usage():
    """prints information on how to use the script
    """
    # main.__doc__ doubles as the command line help text.
    print(main.__doc__)
def main(argv):
    """This program can be used to extract the line continuations.
    svgscripts/extract_line_continuation.py [OPTIONS]
    a xml file about a manuscript, containing information about its pages.
    a xml file about a page, containing information about svg word positions.
    OPTIONS:
    -h|--help show help
    :return: exit code (int)
    """
    try:
        opts, args = getopt.getopt(argv, "h", ["help" ])
    except getopt.GetoptError:
        usage()
        return 2
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage()
            return 0
    if len(args) < 1:
        usage()
        return 2
    exit_status = 0
    file_a = args[0]
    # Guard clause instead of a trailing else branch.
    if not isfile(file_a):
        raise FileNotFoundError('File {} does not exist!'.format(file_a))
    # Removed dead code: the previous version computed a 'manuscript_file'
    # local via xml_has_type() but never used it.
    counter = 0
    for page in Page.get_pages_from_xml_file(file_a, status_contains=STATUS_MERGED_OK):
        if not UNITTESTING:
            # Back up the page file before modifying it in place.
            back_up(page, page.xml_file)
        extract_line_continuations(page)
        counter += 1
    if not UNITTESTING:
        print(Style.RESET_ALL + f'[{counter} pages processed]')
    return exit_status
# Script entry point: forward the command line arguments (without the
# program name) to main() and use its return value as exit status.
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
Index: svgscripts/convert_wordPositions.py
===================================================================
--- svgscripts/convert_wordPositions.py (revision 99)
+++ svgscripts/convert_wordPositions.py (revision 100)
@@ -1,557 +1,557 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This program can be used to convert the word positions to HTML for testing purposes.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see 1}}}
import cairosvg
import getopt
import json
from lxml.html import builder as E
from lxml.html import open_in_browser
import lxml
from pathlib import Path as PathLibPath
from os import sep, listdir, mkdir, path, remove
from os.path import exists, isfile, isdir, dirname
import re
import sys
from svgpathtools import svg_to_paths
import xml.etree.ElementTree as ET
if dirname(__file__) not in sys.path:
sys.path.append(dirname(__file__))
from datatypes.matrix import Matrix
from datatypes.page import Page
from datatypes.page_creator import PageCreator
from datatypes.transkriptionField import TranskriptionField
from datatypes.writing_process import WritingProcess
from datatypes.word import Word
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
class Converter:
    """The converter super class.

    Subclasses convert a page of svg word positions to a specific output
    format (SVG, HTML, JSON, ...); this base class implements a plain
    text conversion.
    """
    def __init__(self, page, non_testing=True, show_word_insertion_mark=False):
        self.page = page
        self.non_testing = non_testing
        self.show_word_insertion_mark = show_word_insertion_mark

    def _get_transkription_positions(self, transkription_positions, stage_version=''):
        """Returns the transkription_positions of the indicated stage_version.

        stage_version may be '' (all positions), 'N' (single writing
        process), 'N+' (process N and all later ones) or 'N-M'
        (inclusive range of processes).
        """
        if stage_version == '':
            return transkription_positions
        convertable_transkription_positions = []
        if re.match(r'^\d$', stage_version):
            writing_process_id = int(stage_version)
            convertable_transkription_positions = [ transkription_position for transkription_position in transkription_positions\
                    if transkription_position.writing_process_id == writing_process_id ]
        elif re.match(r'^\d\+$', stage_version):
            version_range = range(int(stage_version.replace('+','')), len(WritingProcess.VERSION_DESCRIPTION))
            convertable_transkription_positions = [ transkription_position for transkription_position in transkription_positions\
                    if transkription_position.writing_process_id in version_range ]
        elif re.match(r'^\d\-\d$', stage_version):
            start_stop = [ int(i) for i in re.split(r'-', stage_version) ]
            version_range = range(start_stop[0], start_stop[1]+1)
            convertable_transkription_positions = [ transkription_position for transkription_position in transkription_positions\
                    if transkription_position.writing_process_id in version_range ]
        return convertable_transkription_positions

    def _get_words(self, words, highlighted_words=None):
        """Return the words that will be highlighted.
        """
        return highlighted_words if highlighted_words is not None else words

    def convert(self, output_file=None, stage_version='', highlighted_words=None):
        """Prints all words, one manuscript line per output line.

        Bug fix: the stream is only closed if this method opened it;
        the previous version unconditionally closed sys.stdout when no
        output_file was given.

        :return: exit code (int)
        """
        first_word_of_line = None
        out = sys.stdout if output_file is None else open(output_file, 'w')
        try:
            for word in self.page.words:
                if first_word_of_line is None or first_word_of_line.line_number != word.line_number:
                    out.write('\n')
                    first_word_of_line = word
                    # Only even line numbers are printed as labels.
                    if word.line_number % 2 == 0:
                        out.write(str(word.line_number).zfill(2) + ' ')
                    else:
                        out.write(' ')
                if stage_version == '' or len(self._get_transkription_positions(word.transkription_positions, stage_version=stage_version)) > 0:
                    if word.text is not None:
                        out.write(word.text + ' ')
        finally:
            if output_file is not None:
                out.close()
        return 0

    @classmethod
    def CREATE_CONVERTER(cls, page, non_testing=True, converter_type='', show_word_insertion_mark=False, key=''):
        """Returns a converter of type converter_type.
        [:return:] SVGConverter for 'SVG', HTMLConverter for 'HTML', Converter for None
        """
        cls_dict = { subclass.__name__: subclass for subclass in cls.__subclasses__() }
        converter_cls = cls_dict.get(converter_type + 'Converter')
        if converter_cls is None:
            return Converter(page, non_testing, show_word_insertion_mark)
        if converter_cls == JSONConverter:
            # JSONConverter has a different constructor signature.
            return converter_cls(page, non_testing, key=key)
        return converter_cls(page, non_testing, show_word_insertion_mark)
class JSONConverter(Converter):
    """This class can be used to convert a 'svgWordPositions' xml file to a json file.

    Besides the json data it generates typescript interface files (one per
    converted class) in self.interface_output_dir plus a ts_imports.ts file.
    """
    # Mapping from python builtin types to typescript types.
    PY2TS_DICT = { float: 'number', int: 'number', bool: 'boolean', str: 'string' }

    def __init__(self, page, non_testing=True, key=''):
        Converter.__init__(self, page, non_testing, False)
        self.key = key
        self.interface_output_dir = PathLibPath('ts_interfaces')
        if not self.interface_output_dir.is_dir():
            self.interface_output_dir.mkdir()
        elif len(list(self.interface_output_dir.glob('*.ts'))) > 0:
            # Remove stale interface files left over from a previous run.
            for ts_file in self.interface_output_dir.glob('*.ts'):
                remove(ts_file)

    def convert(self, output_file=None, stage_version='', highlighted_words=None):
        """Converts Page (or page.__dict__[self.key] if a key is set) to JSON.

        :return: exit code (int); 2 if self.key does not exist on the page
        """
        if output_file is None:
            output_file = 'output.json'
        class_dict = {}
        if self.key != '':
            object_instance = self.page.__dict__.get(self.key)
            if object_instance is not None:
                json_dict = self.add_object2dict(object_instance, class_dict)
                if isinstance(json_dict, list):
                    json_dict = { self.key : json_dict }
            else:
                print(f'Page initialized from {self.page.page_tree.docinfo.URL} does not have an object at "{self.key}"!')
                return 2
        else:
            json_dict = self.add_object2dict(self.page, class_dict)
        # "with" guarantees the file is closed even if json.dump fails
        # (the previous version leaked the handle on error).
        with open(output_file, "w+") as json_file:
            try:
                json.dump(json_dict, json_file)
            except Exception as err:
                # Chain the original error so its cause is not lost.
                raise Exception('Error in json.dump') from err
        self.create_imports(class_dict)
        return 0

    def add_object2dict(self, object_instance, class_dict):
        """Add an object to json_dict and generate json data and interfaces.
        [:return:] json dict or object_instance
        """
        json_dict = {}
        interface_list = []
        object_type = type(object_instance)
        if object_type.__module__ == 'builtins':
            # Builtin scalars are returned as-is; lists are converted item
            # by item and wrapped under self.key.
            if object_type != list:
                return object_instance
            items = [ self.add_object2dict(item, class_dict) for item in object_instance ]
            if len(items) > 0:
                return { self.key: items }
            return { self.key: 'null' }
        # Project classes describe their serializable properties via
        # get_semantic_dictionary().
        semantic_dictionary = object_type.get_semantic_dictionary()
        for key, content_type in [ (key, content.get('class')) for key, content in semantic_dictionary['properties'].items()]:
            content = object_instance.__dict__.get(key)
            if content_type == list\
               and content is not None\
               and len(content) > 0\
               and type(content[0]).__module__ != 'builtins':
                # List of project objects: recurse per item.
                content_list = [ self.add_object2dict(content_item, class_dict) for content_item in content ]
                json_dict.update({key: content_list})
                interface_list.append(f'{key}: {type(content[0]).__name__}[];')
            elif content_type.__module__ == 'builtins':
                if content_type != list:
                    # Builtin scalar: map to the matching typescript type.
                    ts_type = self.PY2TS_DICT.get(content_type, 'string')
                    interface_list.append(f'{key}: {ts_type};')
                    json_dict.update({key: content})
            else:
                if content is not None and isinstance(content, list):
                    interface_list.append(f'{key}: {content_type.__name__}[];')
                    content_list = [ self.add_object2dict(content_item, class_dict) for content_item in content ]
                    json_dict.update({key: content_list})
                else:
                    interface_list.append(f'{key}: {content_type.__name__};')
                    if content is not None:
                        json_dict.update({key: self.add_object2dict(content, class_dict)})
        if object_type not in class_dict.keys():
            class_dict.update({object_type: self.create_interface(object_type.__name__, interface_list)})
        return json_dict

    def create_imports(self, class_dict):
        """Create a ts file that imports all generated interface files.
        [:return:] file_name of the import file
        """
        ts_file = PathLibPath('ts_imports.ts')
        with open(ts_file, "w+") as file:
            file.write(f'//import all interfaces from {self.interface_output_dir} ' + '\n')
            for interface_name, path_name in class_dict.items():
                file.write('import {' + interface_name.__name__ + '} from \'./' + str(self.interface_output_dir.joinpath(path_name.stem)) + '\';\n')
        return ts_file

    def create_interface(self, class_name, interface_list) -> PathLibPath:
        """Create a ts interface from a list of 'key: content_type;' entries.
        [:return:] file_name of interface
        """
        ts_file = self.interface_output_dir.joinpath(PathLibPath(f'{class_name.lower()}.ts'))
        # Entries referencing non-builtin typescript types need imports.
        import_list = [ import_class_name for import_class_name in\
                [ entry.split(': ')[1].replace(';','').replace('[]','') for entry in interface_list ]\
                if import_class_name not in set(self.PY2TS_DICT.values()) ]
        with open(ts_file, "w") as file:
            for import_class_name in set(import_list):
                file.write('import {' + import_class_name + '} from \'./' + import_class_name.lower() + '\';\n')
            file.write(f'export interface {class_name} ' + '{\n')
            for interface_string in interface_list:
                file.write('\t' + interface_string + '\n')
            file.write('}')
        return ts_file
class SVGConverter(Converter):
    """This class can be used to convert a 'svgWordPositions' xml file to a svg file that combines text as path and text-as-text.
    """
    BG_COLOR = 'yellow'  # default highlight color
    OPACITY = '0.2'  # default rect opacity
    def __init__(self, page, non_testing=True, show_word_insertion_mark=False, bg_color=BG_COLOR, opacity=OPACITY):
        Converter.__init__(self, page, non_testing, show_word_insertion_mark)
        self.bg_color = bg_color
        self.opacity = opacity
    def convert(self, output_file=None, stage_version='', highlighted_words=None):
        """Converts Page to SVG

        Overlays a semi-transparent rect (with the word text as tooltip)
        for every transkription position on the page's svg image.

        :raises Exception: if the page has neither a svg_file nor a svg_image
        """
        title = self.page.title if(self.page.title is not None) else 'Test Page'
        title = '{}, S. {}'.format(title, self.page.number) if (self.page.number is not None) else title
        svg_file = self.page.svg_file
        if svg_file is None and self.page.svg_image is not None:
            svg_file = self.page.svg_image.file_name
        elif svg_file is None:
            msg = f'ERROR: xml_source_file {self.page.docinfo.URL} does neither have a svg_file nor a svg_image!'
            raise Exception(msg)
        transkription_field = TranskriptionField(svg_file)
        # Re-register the source namespaces so the output keeps them.
        if bool(transkription_field.get_svg_attributes('xmlns')):
            ET.register_namespace('', transkription_field.get_svg_attributes('xmlns'))
        if bool(transkription_field.get_svg_attributes('xmlns:xlink')):
            ET.register_namespace('xlink', transkription_field.get_svg_attributes('xmlns:xlink'))
        svg_tree = ET.parse(svg_file)
        transkription_node = ET.SubElement(svg_tree.getroot(), 'g', attrib={'id': 'Transkription'})
        # Alternate yellow/orange per word unless a custom color or an
        # explicit highlight list is given.
        colors = [ 'yellow', 'orange' ] if self.bg_color == self.BG_COLOR else [ self.bg_color ]
        if highlighted_words is not None:
            colors = ['yellow']
        else:
            highlighted_words = []
        color_index = 0
        for word in self.page.words:
            word_id = 'word_' + str(word.id)
            for transkription_position in self._get_transkription_positions(word.transkription_positions, stage_version=stage_version):
                transkription_position_id = word_id + '_' + str(transkription_position.id)
                color = colors[color_index] if word not in highlighted_words else self.bg_color
                rect_node = ET.SubElement(transkription_node, 'rect',\
                        attrib={'id': transkription_position_id, 'x': str(transkription_position.left + transkription_field.xmin),\
                        'y': str(transkription_position.top + transkription_field.ymin), 'width': str(transkription_position.width),\
                        'height': str(transkription_position.height), 'fill': color, 'opacity': self.opacity})
                if transkription_position.transform is not None:
                    # Shift the transform by the field offset and express the
                    # rect's x/y relative to the transformed origin.
                    matrix = transkription_position.transform.clone_transformation_matrix()
                    matrix.matrix[Matrix.XINDEX] = round(transkription_position.transform.matrix[Matrix.XINDEX] + transkription_field.xmin, 3)
                    matrix.matrix[Matrix.YINDEX] = round(transkription_position.transform.matrix[Matrix.YINDEX] + transkription_field.ymin, 3)
                    rect_node.set('transform', matrix.toString())
                    rect_node.set('x', str(round(transkription_position.left - transkription_position.transform.matrix[Matrix.XINDEX], 3)))
                    rect_node.set('y', str(round((transkription_position.height-1.5)*-1, 3)))
                # The word text becomes the rect's hover tooltip.
                ET.SubElement(rect_node, 'title').text = word.text
            color_index = (color_index + 1) % len(colors)
        if output_file is not None:
            svg_tree.write(output_file)
        return 0
class HTMLConverter(Converter):
    """This class can be used to convert a 'svgWordPositions' xml file to a test HTML file.
    """
    CSS = """ .highlight0 { background-color: yellow; opacity: 0.2; }
    .highlight1 { background-color: pink; opacity: 0.2; }
    .highlight2 { background-color: red; opacity: 0.2; }
    .foreign { background-color: blue; opacity: 0.4; }
    .overwritten { background-color: green; opacity: 0.4; }
    .word-insertion-mark { background-color: orange; opacity: 0.2; }
    .deleted { background-color: grey; opacity: 0.2; }
    """
    def __init__(self, page, non_testing=True, show_word_insertion_mark=False):
        Converter.__init__(self, page, non_testing, show_word_insertion_mark)
    def convert(self, output_file=None, stage_version='', highlighted_words=None):
        """Converts Page to HTML

        Renders each transkription position as an absolutely positioned,
        semi-transparent link over the page's background image; tooltips
        carry word id, line number and (earlier) text versions.
        """
        title = self.page.title if(self.page.title is not None) else 'Test Page'
        title = '{}, S. {}'.format(title, self.page.number) if (self.page.number is not None) else title
        if stage_version != '':
            title = title + ', Schreibstufe: ' + stage_version
        if self.page.svg_image is not None:
            width = self.page.svg_image.width
            height = self.page.svg_image.height
            svg_file = self.page.svg_image.file_name
        elif self.page.svg_file is not None:
            svg_file = self.page.svg_file
            transkription_field = TranskriptionField(svg_file)
            width = transkription_field.getWidth()
            height = transkription_field.getHeight()
        style_content = ' position: relative; width: {}px; height: {}px; background-image: url("{}"); background-size: {}px {}px '\
                .format(width, height, path.abspath(svg_file), width, height)
        style = E.STYLE('#transkription {' + style_content + '}', HTMLConverter.CSS)
        head = E.HEAD(E.TITLE(title),E.META(charset='UTF-8'), style)
        transkription = E.DIV(id="transkription")
        counter = 0
        for word in self.page.words:
            # Alternate highlight0/highlight1 per word; deleted words grey.
            highlight_class = 'highlight' + str(counter)\
                    if not word.deleted else 'deleted'
            if highlighted_words is not None\
               and word in highlighted_words:
                highlight_class = 'highlight2'
            # Tooltip shows the earlier version (from the word itself or
            # its first part that has one) next to the current text.
            earlier_text = '' if word.earlier_version is None else word.earlier_version.text
            if earlier_text == '' and len(word.word_parts) > 0:
                earlier_versions = [ word for word in word.word_parts if word.earlier_version is not None ]
                earlier_text = earlier_versions[0].text if len(earlier_versions) > 0 else ''
            if earlier_text != '':
                word_title = 'id: {}/line: {}\n0: {}\n1: {}'.format(str(word.id), str(word.line_number), earlier_text, word.text)
            else:
                word_title = 'id: {}/line: {}\n{}'.format(str(word.id), str(word.line_number), word.text)
            if word.edited_text is not None:
                word_title += f'\n>{word.edited_text}'
            for transkription_position in self._get_transkription_positions(word.transkription_positions, stage_version=stage_version):
                self._append2transkription(transkription, highlight_class, word_title, transkription_position)
-            if word.overwrites_word is not None and word.word_box is None:
+            if word.overwrites_word is not None:
                overwritten_title = f'{word.text} overwrites {word.overwrites_word.text}'
                for overwritten_transkription_position in word.overwrites_word.transkription_positions:
                    self._append2transkription(transkription, 'overwritten', overwritten_title, overwritten_transkription_position)
            for part_word in word.word_parts:
                highlight_class = 'highlight' + str(counter)\
                        if not part_word.deleted else 'deleted'
                for part_transkription_position in self._get_transkription_positions(part_word.transkription_positions, stage_version=stage_version):
                    self._append2transkription(transkription, highlight_class, word_title, part_transkription_position)
                if part_word.overwrites_word is not None:
                    overwritten_title = f'{word.text} overwrites {part_word.overwrites_word.text}'
                    for overwritten_transkription_position in part_word.overwrites_word.transkription_positions:
                        self._append2transkription(transkription, 'overwritten', overwritten_title, overwritten_transkription_position)
            counter = (counter + 1) % 2
        word_insertion_mark_class = 'word-insertion-mark'
        counter = 0
        for mark_foreign_hands in self.page.mark_foreign_hands:
            highlight_class = 'foreign'
            # NOTE(review): 'word.line_number' here reuses the loop variable
            # left over from the words loop above — probably should be
            # mark_foreign_hands.line_number; confirm before changing.
            title = 'id: {}/line: {}\n{} {}'.format(str(mark_foreign_hands.id), str(word.line_number),\
                    mark_foreign_hands.foreign_hands_text, mark_foreign_hands.pen)
            for transkription_position in mark_foreign_hands.transkription_positions:
                self._append2transkription(transkription, highlight_class, title, transkription_position)
        if self.show_word_insertion_mark:
            for word_insertion_mark in self.page.word_insertion_marks:
                wim_title = 'id: {}/line: {}\nword insertion mark'.format(str(word_insertion_mark.id), str(word_insertion_mark.line_number))
                style_content = 'position:absolute; top:{0}px; left:{1}px; width:{2}px; height:{3}px;'.format(\
                        word_insertion_mark.top, word_insertion_mark.left, word_insertion_mark.width, word_insertion_mark.height)
                link = E.A(' ', E.CLASS(word_insertion_mark_class), title=wim_title, style=style_content)
                transkription.append(link)
        html = E.HTML(head,E.BODY(transkription))
        # Only open a browser outside test mode.
        bool(self.non_testing) and open_in_browser(html)
        if output_file is not None:
            with open(output_file, 'wb') as f:
                f.write(lxml.html.tostring(html, pretty_print=True, include_meta_content_type=True, encoding='utf-8'))
            # NOTE(review): 'f.closed' is a no-op attribute access — the
            # "with" block already closes the file.
            f.closed
        return 0
    def _append2transkription(self, transkription, highlight_class, title, transkription_position):
        """Append content to transkription-div.
        """
        style_content = 'position:absolute; top:{0}px; left:{1}px; width:{2}px; height:{3}px;'.format(\
                transkription_position.top, transkription_position.left, transkription_position.width, transkription_position.height)
        if transkription_position.transform is not None:
            style_content = style_content + ' transform: {}; '.format(transkription_position.transform.toCSSTransformString())
            # Anchor the rotation so the box does not drift left of its position.
            transform_origin_x = (transkription_position.left-round(transkription_position.transform.getX(), 1))*-1\
                    if (transkription_position.left-round(transkription_position.transform.getX(), 1))*-1 < 0 else 0
            style_content = style_content + ' transform-origin: {}px {}px; '.format(transform_origin_x, transkription_position.height)
        link = E.A(' ', E.CLASS(highlight_class), title=title, style=style_content)
        transkription.append(link)
def create_pdf_with_highlighted_words(xml_source_file=None, page=None, highlighted_words=None, pdf_file_name='output.pdf', bg_color=SVGConverter.BG_COLOR):
    """Creates a pdf file highlighting some words.

    Renders a temporary svg first, converts it to pdf via cairosvg and
    removes the temporary file afterwards.
    """
    if not pdf_file_name.endswith('pdf'):
        pdf_file_name += '.pdf'
    tmp_svg_file = pdf_file_name.replace('.pdf', '.svg')
    create_svg_with_highlighted_words(xml_source_file=xml_source_file, page=page,\
            highlighted_words=highlighted_words, svg_file_name=tmp_svg_file, bg_color=bg_color)
    if not isfile(tmp_svg_file):
        return
    cairosvg.svg2pdf(url=tmp_svg_file, write_to=pdf_file_name)
    remove(tmp_svg_file)
def create_svg_with_highlighted_words(xml_source_file=None, page=None, highlighted_words=None, svg_file_name='output.svg', bg_color=SVGConverter.BG_COLOR):
    """Creates a svg file highlighting some words.

    If no page is given, it is loaded from xml_source_file.
    """
    if page is None and xml_source_file is not None:
        page = Page(xml_source_file)
    if not svg_file_name.endswith('svg'):
        svg_file_name += '.svg'
    converter = SVGConverter(page, bg_color=bg_color)
    converter.convert(output_file=svg_file_name, highlighted_words=highlighted_words)
def usage():
    """prints information on how to use the script
    """
    # main.__doc__ doubles as the command line help text.
    print(main.__doc__)
def main(argv):
"""This program can be used to convert the word positions to HTML, SVG or TEXT for testing purposes.
svgscripts/convert_wordPositions.py OPTIONS
OPTIONS:
-h|--help: show help
-H|--HTML [default] convert to HTML test file
-k|--key=key option for json converter:
only convert object == page.__dict__[key]
-o|--output=outputFile save output to file outputFile
-P|--PDF convert to PDF test file
-S|--SVG convert to SVG test file
-s|--svg=svgFile: svg web file
-T|--TEXT convert to TEXT output
-t|--text=text highlight word
-w|--word-insertion-mark show word insertion mark on HTML
-v|--version=VERSION show words that belong to writing process VERSION: { 0, 1, 2, 0-1, 0+, etc. }
-x|--testing execute in test mode, do not write to file or open browser
:return: exit code (int)
"""
convert_to_type = None
key = ''
non_testing = True
output_file = None
page = None
show_word_insertion_mark = False
stage_version = ''
svg_file = None
text = None
try:
opts, args = getopt.getopt(argv, "hk:t:HPSTws:o:v:x", ["help", "key=", "text=", "HTML", "PDF", "SVG", "TEXT", "word-insertion-mark", "svg=", "output=", "version=", "testing"])
except getopt.GetoptError:
usage()
return 2
for opt, arg in opts:
if opt in ('-h', '--help') or not args:
usage()
return 0
elif opt in ('-v', '--version'):
if re.match(r'^(\d|\d\+|\d\-\d)$', arg):
stage_version = arg
else:
raise ValueError('OPTION -v|--version=VERSION does not work with "{}" as value for VERSION!'.format(arg))
elif opt in ('-w', '--word-insertion-mark'):
show_word_insertion_mark = True
elif opt in ('-P', '--PDF'):
convert_to_type = 'PDF'
elif opt in ('-S', '--SVG'):
convert_to_type = 'SVG'
elif opt in ('-T', '--TEXT'):
convert_to_type = 'TEXT'
elif opt in ('-H', '--HTML'):
convert_to_type = 'HTML'
elif opt in ('-x', '--testing'):
non_testing = False
elif opt in ('-s', '--svg'):
svg_file = arg
elif opt in ('-o', '--output'):
output_file = arg
elif opt in ('-k', '--key'):
key = arg
elif opt in ('-t', '--text'):
text = arg
print(arg)
if len(args) < 1:
usage()
return 2
if convert_to_type is None:
if output_file is not None and len(re.split(r'\.', output_file)) > 1:
output_file_part_list = re.split(r'\.', output_file)
convert_to_type = output_file_part_list[len(output_file_part_list)-1].upper()
else:
convert_to_type = 'HTML'
exit_code = 0
for word_position_file in args:
if not isfile(word_position_file):
print("'{}' does not exist!".format(word_position_file))
return 2
if convert_to_type == 'PDF':
if output_file is None:
output_file = 'output.pdf'
highlighted_words = None
if text is not None:
page = Page(word_position_file)
highlighted_words = [ word for word in page.words if word.text == text ]
create_pdf_with_highlighted_words(word_position_file, pdf_file_name=output_file, highlighted_words=highlighted_words)
else:
if svg_file is not None:
if isfile(svg_file):
page = PageCreator(word_position_file, svg_file=svg_file)
else:
print("'{}' does not exist!".format(word_position_file))
return 2
else:
page = Page(word_position_file)
if page.svg_file is None:
print('Please specify a svg file!')
usage()
return 2
highlighted_words = None
if text is not None:
highlighted_words = [ word for word in page.words if word.text == text ]
print([ (word.id, word.text) for word in highlighted_words ])
converter = Converter.CREATE_CONVERTER(page, non_testing=non_testing, converter_type=convert_to_type, show_word_insertion_mark=show_word_insertion_mark, key=key)
exit_code = converter.convert(output_file=output_file, stage_version=stage_version, highlighted_words=highlighted_words)
return exit_code
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
Index: tests_svgscripts/test_process_words_post_merging.py
===================================================================
--- tests_svgscripts/test_process_words_post_merging.py (revision 99)
+++ tests_svgscripts/test_process_words_post_merging.py (revision 100)
@@ -1,171 +1,172 @@
import unittest
from os import sep, path, remove
from os.path import isdir, isfile, dirname
import shutil
import sys
import lxml.etree as ET
import warnings
import sys
sys.path.append('svgscripts')
import process_words_post_merging
from datatypes.faksimile import FaksimilePage
from datatypes.mark_foreign_hands import MarkForeignHands
from datatypes.page import Page
from datatypes.path import Path
from datatypes.positional_word_part import PositionalWordPart
from datatypes.text_connection_mark import TextConnectionMark
from datatypes.transkriptionField import TranskriptionField
from datatypes.word import Word
from datatypes.word_position import WordPosition
class TestPostMerge(unittest.TestCase):
def setUp(self):
process_words_post_merging.UNITTESTING = True
DATADIR = path.dirname(__file__) + sep + 'test_data'
self.faksimile_dir = DATADIR + sep + 'faksimile_svg'
self.manuscript = DATADIR + sep + 'N_VII_1.xml'
self.manuscript_copy = self.manuscript.replace('.', '_copy.')
self.faksimile_file = self.faksimile_dir + sep + 'N-VII-1,5et6.svg'
self.xml_file = DATADIR + sep + 'N_VII_1_page005.xml'
self.Mp_XIV_1_mytest_421 = DATADIR + sep + 'Mp_XIV_1_mytest_421.xml'
self.test_tcm_xml = DATADIR + sep + 'N_VII_1_page001.xml'
self.pdf_xml = DATADIR + sep + 'W_I_8_page125.xml'
self.pdf_xml_source = DATADIR + sep + 'W_I_8_neu_125-01.svg'
self.xml_merged = DATADIR + sep + 'N_VII_1_page005_faksimile_merged.xml'
@unittest.skip('takes long')
def test_main(self):
process_words_post_merging.main([self.manuscript])
#@unittest.skip('produces error')
def test_categorize_paths(self):
## :map :w:!python3 -m unittest tests_svgscripts.test_process_words_post_merging.TestPostMerge.test_categorize_paths
page = Page(self.pdf_xml)
page.source = self.pdf_xml_source
tr = TranskriptionField(page.source)
page.words = [ word for word in page.words if word.line_number == 33 ]
path_dict = process_words_post_merging.categorize_paths(page, tr)
self.assertEqual(True in [ word.deleted for word in page.words if word.id == 269 ], False)
self.assertEqual(len(path_dict.get('deletion_or_underline_paths')) > 0, True)
self.assertEqual(len(path_dict.get('box_paths')), 5)
words = [ word for word in page.words if word.text == 'seiner' ]
self.assertEqual(len(words), 1)
self.assertTrue(words[0].word_parts[0].overwrites_word is not None)
self.assertEqual(words[0].word_parts[0].overwrites_word.text, ')')
"""
print('starting ...')
page = Page('xml/W_II_1_page131.xml')
transkription_field = TranskriptionField(page.source)
process_words_post_merging.reset_page(page)
process_words_post_merging.find_special_words(page, transkription_field=transkription_field)
page.update_styles(partition_according_to_styles=True)
path_dict = process_words_post_merging.categorize_paths(page, transkription_field)
print('...ending')
"""
def test_find_special_words(self):
page = Page(self.xml_file)
process_words_post_merging.find_special_words(page)
self.assertEqual(len(page.mark_foreign_hands), 1)
self.assertEqual(page.mark_foreign_hands[0].foreign_hands_text, 'x')
page.update_and_attach_words2tree()
nodes = page.page_tree.xpath('//' + MarkForeignHands.XML_TAG)
page = Page(self.test_tcm_xml)
process_words_post_merging.find_special_words(page)
self.assertEqual(len(page.text_connection_marks), 1)
self.assertEqual(page.text_connection_marks[0].text_source.first_line, 2)
"""
page.update_and_attach_words2tree()
nodes = page.page_tree.xpath('//' + TextConnectionMark.XML_TAG)
print(ET.dump(nodes[0]))
"""
def test_process_word_boxes(self):
page = Page(self.pdf_xml) # W_I_8_page125.xml
page.source = self.pdf_xml_source
#page.words = [ page.words[30]]
page.update_styles(partition_according_to_styles=True)
tr = TranskriptionField(page.source)
box_path_d = ['M 598.11,626.565 L 603.557,626.565 L 603.557,632.565 L 598.11,632.565 L 598.11,626.565',\
'M 557.443,683.44 L 574.182,683.44 L 574.182,694.815 L 557.443,694.815 L 557.443,683.44',\
'M 404.193,659.565 L 407.80699999999996,659.565 L 407.80699999999996,668.94 L 404.193,668.94 L 404.193,659.565',\
'M 587.932,634.065 L 598.318,634.065 L 598.318,643.19 L 587.932,643.19 L 587.932,634.065',\
'M 570.443,221.315 L 576.557,221.315 L 576.557,230.065 L 570.443,230.065 L 570.443,221.315']
box_paths = [ Path(d_string=d_string) for d_string in box_path_d ]
process_words_post_merging.process_word_boxes(page, box_paths, tr)
words_with_boxes = [ word for word in page.words if word.word_box is not None\
or len([ part for part in word.word_parts if part.word_box is not None]) > 0]
expected_values = {'Aber': {'text': 'aber'}, 'seiner': {'text': ')'},\
'mit': { 'text': ','}, '(–': {'text': ':'}, 'Um': {'text': 'Denn'}}
self.assertEqual(len(words_with_boxes), len(expected_values.keys()))
references = [ words_with_boxes[0].earlier_version,\
words_with_boxes[1].word_parts[0].overwrites_word,\
words_with_boxes[2].word_parts[0].overwrites_word,\
words_with_boxes[3].word_parts[0].overwrites_word,\
words_with_boxes[4].overwrites_word ]
for index, key in enumerate(expected_values.keys()):
expected_values[key].update({'reference': references[index]})
for word in words_with_boxes:
self.assertEqual(expected_values[word.text].get('reference') is not None, True)
+ page = Page('xml/Mp_XIV_page416.xml')
@unittest.skip('relies on local file')
def test_process_word_boxes_multiple_boxes_perLIne(self):
page = Page('xml/N_VII_1_page034.xml')
page.update_styles(partition_according_to_styles=True)
page.words[205].word_parts[0].deleted = True
page.words[205].word_parts[3].deleted = True
tr = TranskriptionField(page.source)
box_path_d = ['M 69.497,460.726 L 81.959,460.726 L 81.959,467.404 L 69.497,467.404 L 69.497,460.726', 'M 65.997,461.974 L 68.084,461.974 L 68.084,467.277 L 65.997,467.277 L 65.997,461.974', 'M 191.939,423.806 L 197.602,423.806 L 197.602,431.817 L 191.939,431.817 L 191.939,423.806', 'M 47.048,245.659 L 63.779,245.659 L 63.779,252.795 L 47.048,252.795 L 47.048,245.659', 'M 180.995,89.054 L 188.23000000000002,89.054 L 188.23000000000002,95.515 L 180.995,95.515 L 180.995,89.054', 'M 142.367,90.315 L 149.72799999999998,90.315 L 149.72799999999998,95.515 L 142.367,95.515 L 142.367,90.315', 'M 133.745,90.143 L 137.48000000000002,90.143 L 137.48000000000002,95.554 L 133.745,95.554 L 133.745,90.143']
box_paths = [ Path(d_string=d_string) for d_string in box_path_d ]
process_words_post_merging.process_word_boxes(page, box_paths, tr)
words_with_boxes = [ word for word in page.words if word.word_box is not None\
or word.has_mixed_status('word_box', include_parts=True)]
expected_values = { 'großen': {'text': 'größtem'}, 'daß': {'text': 'dem'}, 'seine': {'text': 'ihre'},\
'Rococo-Geschmack': {'text': 'Rococo-geschmack'}, '(:': {'text': '–'}, 'und': {'text': 'es'} }
self.assertEqual(len(words_with_boxes), len(expected_values.keys()))
references = [ words_with_boxes[0].earlier_version,\
words_with_boxes[1].earlier_version,\
words_with_boxes[2].overwrites_word,\
words_with_boxes[3].earlier_version,\
words_with_boxes[4].word_parts[1].overwrites_word,\
words_with_boxes[5].overwrites_word ]
for index, key in enumerate(expected_values.keys()):
expected_values[key].update({'reference': references[index]})
for word in words_with_boxes:
if expected_values[word.text].get('reference') is None:
print(word.text, len(word.word_parts))
self.assertEqual(expected_values[word.text].get('reference') is not None, True)
self.assertEqual(expected_values[word.text].get('reference').text, expected_values[word.text].get('text'))
def test_update_faksimile_line_positions(self):
page = Page(self.xml_merged)
process_words_post_merging.update_faksimile_line_positions(page)
#for line_number in page.line_numbers: print(f'{line_number.id}: {line_number.faksimile_inner_top} {line_number.faksimile_inner_bottom}')
def test_update_writing_process_ids(self):
page = Page(self.pdf_xml)
page.words = [ word for word in page.words if word.text == 'Aber' and word.line_number == 2 ]
process_words_post_merging.update_writing_process_ids(page)
self.assertEqual(len(page.words[0].word_parts), 2)
self.assertEqual(page.words[0].word_parts[0].writing_process_id, 1)
self.assertEqual(page.words[0].word_parts[1].writing_process_id, 0)
@unittest.skip('takes long')
#@unittest.skipUnless(__name__ == "__main__", 'test takes too long, we do not run it with unittest discover')
def test_reset_page(self):
page = Page(self.pdf_xml)
page.source = self.pdf_xml_source
process_words_post_merging.post_merging_processing_and_saving(page=page)
numWordParts = 7
process_words_post_merging.post_merging_processing_and_saving(page=page)
self.assertEqual(len([ word for word in page.words if len(word.word_parts) > 0 ]), numWordParts)
process_words_post_merging.reset_page(page)
self.assertEqual(len([ word for word in page.words if word.earlier_version is not None ]), 0)
self.assertEqual(len([ word for word in page.words if len(word.word_parts) > 0 ]), 0)
if __name__ == "__main__":
unittest.main()
Index: fixes/test_fix_boxes.py
===================================================================
--- fixes/test_fix_boxes.py (revision 0)
+++ fixes/test_fix_boxes.py (revision 100)
@@ -0,0 +1,68 @@
+import lxml.etree as ET
+from os import sep, path, remove
+from os.path import isdir, isfile, dirname, basename
+import shutil
+import sys
+import tempfile
+import unittest
+import warnings
+
+import fix_boxes
+
+sys.path.append('svgscripts')
+from datatypes.faksimile import FaksimilePage
+from datatypes.mark_foreign_hands import MarkForeignHands
+from datatypes.box import Box
+from datatypes.page import Page
+from datatypes.path import Path
+from datatypes.positional_word_part import PositionalWordPart
+from datatypes.text_connection_mark import TextConnectionMark
+from datatypes.transkriptionField import TranskriptionField
+from datatypes.word import Word
+from datatypes.word_position import WordPosition
+from process_words_post_merging import MERGED_DIR
+
+
+class TestFixBoxes(unittest.TestCase):
+ def setUp(self):
+ fix_boxes.UNITTESTING = True
+ DATADIR = path.dirname(__file__) + sep + 'test_data'
+ self.fix_boxes = DATADIR + sep + 'Mp_XIV_page416.xml'
+ self.fix_boxes_src = DATADIR + sep + '03.svg'
+
+ def test_fix_boxes(self):
+ page = Page(self.fix_boxes)
+ page.source = self.fix_boxes_src
+ fixed_word_ids = sorted([ int(id) for id in set([ node.getparent().get('id') for node in page.page_tree.xpath('//' + Word.XML_TAG + f'/debug[@msg="{fix_boxes.DEBUG_MSG}"]')])])
+ self.assertEqual(len(fixed_word_ids), 2)
+ self.assertEqual(fix_boxes.fix_boxes(page), 0)
+ self.assertEqual(page.words[fixed_word_ids[0]].earlier_version.text, 'Wink.')
+ self.assertEqual(page.words[fixed_word_ids[1]].overwrites_word.text, 'ist')
+
+ def test_init_word_with_box(self):
+ page = Page(self.fix_boxes)
+ word_node = [ node.getparent() for node in page.page_tree.xpath('//' + Word.XML_TAG + f'/debug[@msg="{fix_boxes.DEBUG_MSG}"]')][0]
+ word = fix_boxes.WordWithBoxes.create_cls(word_node)
+ self.assertEqual(word.word_parts[1].text, 'endung')
+ self.assertTrue(word.word_parts[1].word_box is not None)
+ self.assertEqual(word.word_parts[2].text, ',')
+ self.assertTrue(word.word_parts[2].word_box is not None)
+ word_node = [ node.getparent() for node in page.page_tree.xpath('//' + Word.XML_TAG + f'/debug[@msg="{fix_boxes.DEBUG_MSG}"]')][2]
+ word = fix_boxes.WordWithBoxes.create_cls(word_node)
+ self.assertEqual(len(word.word_parts), 0)
+ """
+ tree = ET.Element('page')
+ word.attach_word_to_tree(tree)
+ print(ET.dump(tree))
+ """
+
+ def test_split_and_attach(self):
+ page = Page(self.fix_boxes)
+ word = [ word for word in page.words if word.text == 'Wendung,' ][0]
+ fix_boxes._split_into_parts_and_attach_box(word, 0, 'ink', True, 'endung')
+ self.assertEqual(word.word_parts[1].text, 'endung')
+ self.assertTrue(word.word_parts[1].word_box is not None)
+
+
+if __name__ == "__main__":
+ unittest.main()
Index: fixes/fix_boxes.py
===================================================================
--- fixes/fix_boxes.py (revision 0)
+++ fixes/fix_boxes.py (revision 100)
@@ -0,0 +1,215 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+""" This program can be used to fix the boxes of words on a page after merging with faksimile data.
+"""
+# Copyright (C) University of Basel 2019 {{{1
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see 1}}}
+
+from colorama import Fore, Style
+from deprecated import deprecated
+from functools import cmp_to_key
+import getopt
+import inspect
+import lxml.etree as ET
+import re
+import shutil
+import string
+from svgpathtools import svg2paths2, svg_to_paths
+from svgpathtools.path import Path as SVGPath
+from svgpathtools.path import Line
+import sys
+import tempfile
+from operator import attrgetter
+import os
+from os import listdir, sep, path, setpgrp, devnull
+from os.path import exists, isfile, isdir, dirname, basename
+from progress.bar import Bar
+import warnings
+
+from fix_old_data import save_page
+
+sys.path.append('svgscripts')
+from convert_wordPositions import HTMLConverter
+from datatypes.box import Box
+from datatypes.faksimile import FaksimilePage
+from datatypes.manuscript import ArchivalManuscriptUnity
+from datatypes.mark_foreign_hands import MarkForeignHands
+from datatypes.page import Page, STATUS_MERGED_OK, STATUS_POSTMERGED_OK
+from datatypes.path import Path
+from datatypes.text_connection_mark import TextConnectionMark
+from datatypes.transkriptionField import TranskriptionField
+from datatypes.transkription_position import TranskriptionPosition
+from datatypes.word import Word, update_transkription_position_ids
+from join_faksimileAndTranskription import sort_words
+from util import back_up, back_up_svg_file, copy_faksimile_svg_file
+from process_files import update_svgposfile_status
+from process_words_post_merging import update_faksimile_line_positions, MERGED_DIR
+
+sys.path.append('shared_util')
+from myxmlwriter import write_pretty, xml_has_type, FILE_TYPE_SVG_WORD_POSITION, FILE_TYPE_XML_MANUSCRIPT
+from main_util import create_function_dictionary
+
+
+__author__ = "Christian Steiner"
+__maintainer__ = __author__
+__copyright__ = 'University of Basel'
+__email__ = "christian.steiner@unibas.ch"
+__status__ = "Development"
+__license__ = "GPL v3"
+__version__ = "0.0.1"
+
+UNITTESTING = False
+MAX_SVG_XY_THRESHOLD = 10
+BOX_ERROR_STATUS = 'box error'
+DEBUG_MSG = 'TODO: should have a box'
+
+class WordWithBoxes(Word):
+
+ @classmethod
+ def create_cls(cls, word_node):
+ """Creates a word from a (lxml.Element) node.
+
+ [:return:] WordWithBoxes
+ """
+ word = super(WordWithBoxes,cls).create_cls(word_node)
+ word.missing_boxes = []
+ for index, debug_node in enumerate(word_node.xpath('./debug')):
+ missing_text = debug_node.get('text')
+ is_earlier_version = bool(debug_node.get('earlier-version'))\
+ and debug_node.get('earlier-version') == 'true'
+ overwritten_by = debug_node.get('overwritten-by')
+ if overwritten_by is not None:
+ _split_into_parts_and_attach_box(word, index, missing_text, is_earlier_version, overwritten_by)
+ else:
+ _attach_box(word, 0, missing_text, False)
+ word.create_correction_history()
+ if len(word.corrections):
+ for wp in word.word_parts:
+ wp.overwrites_word = None
+ return word
+
+def _attach_box(target_word, box_index, earlier_text, is_earlier_version):
+ """Attach box to word.
+ """
+ transkription_position = target_word.transkription_positions[0]
+ if len(target_word.transkription_positions) > 1:
+ positional_word_parts = []
+ for tp in target_word.transkription_positions:
+ positional_word_parts += tp.positional_word_parts
+ transkription_position = TranskriptionPosition(positional_word_parts=positional_word_parts)
+ target_word.word_box = Box(id=box_index, path=Path.create_path_from_transkription_position(transkription_position).path,\
+ earlier_text=earlier_text, earlier_version=is_earlier_version)
+
+def _split_into_parts_and_attach_box(target_word, box_index, missing_text, is_earlier_version, overwritten_by, child_process=False)->list:
+ """Split word into word parts and attach a box to the part with text == overwritten_by.
+ """
+ if len(target_word.word_parts) > 0:
+ index = 0
+ if True in [ wp.word_box is not None for wp in target_word.word_parts ]:
+ latest_word_with_box = [ wp for wp in target_word.word_parts if wp.word_box is not None ][-1]
+ index = target_word.word_parts.index(latest_word_with_box)+1
+ child_word_parts = []
+ for wp in target_word.word_parts[index:]:
+ word_parts = _split_into_parts_and_attach_box(wp, box_index, missing_text, is_earlier_version, overwritten_by, child_process=True)
+ if child_process:
+ child_word_parts += word_parts
+ elif len(word_parts) > 0:
+ old_index = target_word.word_parts.index(wp)
+ target_word.word_parts[old_index] = word_parts[0]
+ for new_wp in word_parts[1:]:
+ target_word.word_parts.insert(old_index+1, new_wp)
+ if overwritten_by in [ new_wp.text for new_wp in word_parts ]:
+ break
+ if child_process:
+ return child_word_parts
+ return target_word.word_parts
+ elif overwritten_by in target_word.text:
+ new_words_triple = target_word.split(overwritten_by)
+ word_with_box = [ wp for wp in new_words_triple if wp is not None and wp.text == overwritten_by ][0]
+ _attach_box(word_with_box, box_index, missing_text, is_earlier_version)
+ if not child_process:
+ if len(new_words_triple) > 1:
+ target_word.word_parts = [ i for i in new_words_triple if i is not None ]
+ target_word.transkription_positions = []
+ else:
+ target_word.word_box = word_with_box.word_box
+ return [ i for i in new_words_triple if i is not None ]
+ return []
+
+def fix_boxes(page)->int:
+ """Fix boxes and return exit code
+ """
+ exit_status = 0
+ for word_node in set([ node.getparent() for node in page.page_tree.xpath('//' + Word.XML_TAG + f'/debug[@msg="{DEBUG_MSG}"]')]):
+ word = WordWithBoxes.create_cls(word_node)
+ try:
+ replace_word = [ w for w in page.words if w.id == word.id and w.text == word.text ][0]
+ page.words[page.words.index(replace_word)] = word
+ except IndexError:
+ return 2
+ if not UNITTESTING:
+ save_page(page, attach_first=True)
+ return exit_status
+
+def usage():
+ """prints information on how to use the script
+ """
+ print(main.__doc__)
+
+def main(argv):
+ """This program can be used to fix boxes.
+
+    fixes/fix_boxes.py [OPTIONS] <xmlFile>
+
+    an xml file about a manuscript, containing information about its pages,
+    or an xml file about a page, containing information about svg word positions.
+
+ OPTIONS:
+ -h|--help show help
+
+ :return: exit code (int)
+ """
+ try:
+ opts, args = getopt.getopt(argv, "h", ["help"])
+ except getopt.GetoptError:
+ usage()
+ return 2
+ for opt, arg in opts:
+ if opt in ('-h', '--help'):
+ usage()
+ return 0
+ if len(args) < 1:
+ usage()
+ return 2
+ exit_status = 0
+ xml_file = args[0]
+ if isfile(xml_file):
+ counter = 0
+ for page in Page.get_pages_from_xml_file(xml_file, status_contains=BOX_ERROR_STATUS):
+ counter = 0
+ if not UNITTESTING:
+ print(Fore.CYAN + f'Fixing boxes of {page.title}, {page.number} ...' + Style.RESET_ALL)
+ back_up(page, page.xml_file)
+ if fix_boxes(page) == 0:
+ counter += 1
+ if not UNITTESTING:
+ print(Style.RESET_ALL + f'[{counter} pages changed]')
+ else:
+ raise FileNotFoundError('File {} does not exist!'.format(xml_file))
+ return exit_status
+
+if __name__ == "__main__":
+ sys.exit(main(sys.argv[1:]))