Index: tests_svgscripts/test_positional_word_part.py
===================================================================
--- tests_svgscripts/test_positional_word_part.py (revision 72)
+++ tests_svgscripts/test_positional_word_part.py (revision 73)
@@ -1,102 +1,102 @@
import unittest
from os import sep, path
from os.path import isdir, dirname, basename
import lxml.etree as ET
import sys
sys.path.append('svgscripts')
from datatypes.matrix import Matrix
from datatypes.positional_word_part import PositionalWordPart
from datatypes.page import Page
from datatypes.positional_object import PositionalObject
class TestPositionalWordPart(unittest.TestCase):
def setUp(self):
DATADIR = dirname(__file__) + sep + 'test_data'
if not isdir(DATADIR):
DATADIR = dirname(dirname(__file__)) + sep + 'test_data'
self.test_svg_file = DATADIR + sep + 'path_svg.svg'
self.test_xml = DATADIR + sep + 'W_I_8_page125.xml'
self.word_part_objs = [{'text': 'a' }, {'text': 'b' }, {'text': 'c' }]
x = 0
for dict in self.word_part_objs:
dict['class'] = 'st15'
dict['x'] = x
dict['y'] = 11
x += 1
def test_init(self):
pwp = PositionalWordPart(text='test')
self.assertEqual(pwp.text, 'test')
def test_attach_object_to_tree(self):
pwp = PositionalWordPart(text='test', symbol_id='glyph-32-1', style_class='st1 st2 st3')
empty_tree = ET.ElementTree(ET.Element('page'))
pwp.attach_object_to_tree(empty_tree)
for node in empty_tree.getroot().xpath('//' + pwp.tag):
self.assertEqual(node.get('id'), '0')
self.assertEqual(node.get('symbol-id'), 'glyph-32-1')
def test_init_node(self):
pwp = PositionalWordPart(text='test', symbol_id='glyph-32-1', style_class='st1 st2 st3')
empty_tree = ET.ElementTree(ET.Element('page'))
pwp.attach_object_to_tree(empty_tree)
pwp2 = PositionalWordPart(node=empty_tree.getroot().find('./' + pwp.tag))
self.assertEqual(pwp2.id, pwp.id)
self.assertEqual(pwp2.text, pwp.text)
def test_CREATE_POSITIONAL_WORD_PART(self):
svg_tree = ET.parse(self.test_svg_file)
namespaces = { k if k is not None else 'ns': v for k, v in svg_tree.getroot().nsmap.items() }
xmin = 311.8125
ymin = 158.0117
text = 'es'
style_class = 'st5 st6'
x = 258.148
y = 8.5
svg_x = x + xmin
svg_y = y + ymin
use_nodes = svg_tree.xpath('//ns:use[@x>="{0}" and @x<="{1}" and @y>="{2}" and @y<="{3}"]'\
.format(svg_x-.1, svg_x+.1,svg_y-0.1, svg_y+.1), namespaces=namespaces)
self.assertEqual(len(use_nodes), 1)
pwp = PositionalWordPart.CREATE_POSITIONAL_WORD_PART(text[0], use_nodes[0], namespaces, xmin=xmin, ymin=ymin, style_class=style_class)
self.assertEqual(pwp.height, 3.672)
self.assertEqual(pwp.width, 2.594)
def test_CREATE_POSITIONAL_WORD_PART_LIST(self):
- page = Page(xml_source_file=self.test_xml)
+ page = Page(self.test_xml)
svg_tree = ET.parse(self.test_svg_file)
namespaces = { k if k is not None else 'ns': v for k, v in svg_tree.getroot().nsmap.items() }
xmin = 311.8125
ymin = 158.0117
text = 'es'
style_class = 'st5 st6'
x = 258.148
y = 8.5
word_part_obj = { 'text': text, 'x': x, 'y': y, 'matrix': None, 'class': style_class }
pwp_list = PositionalWordPart.CREATE_POSITIONAL_WORD_PART_LIST(word_part_obj, svg_tree, namespaces, page, xmin=xmin, ymin=ymin)
self.assertEqual(len(pwp_list), len(text))
self.assertEqual(pwp_list[0].height, 3.672)
self.assertEqual(pwp_list[0].width, 2.594)
text = 'ergleicher'
word_part_obj = { 'text': text, 'x': 174.619, 'y': 189.6, 'matrix': None, 'class': style_class }
pwp_list = PositionalWordPart.CREATE_POSITIONAL_WORD_PART_LIST(word_part_obj, svg_tree, namespaces, page, xmin=xmin, ymin=ymin)
self.assertEqual(len(pwp_list), len(text))
def test_CREATE_SIMPLE_POSITIONAL_WORD_PART_LIST(self):
- page = Page(xml_source_file=self.test_xml)
+ page = Page(self.test_xml)
pwps = PositionalWordPart.CREATE_SIMPLE_POSITIONAL_WORD_PART_LIST(page, self.word_part_objs)
self.assertEqual(len(pwps), 3)
self.assertEqual(pwps[0].text, 'a')
self.assertEqual(pwps[0].style_class, 'st15')
self.assertEqual(pwps[0].width, 0.8)
self.assertEqual(pwps[2].width, 3.85)
def test_get_semanticAndDataDict(self):
dictionary = PositionalWordPart.get_semantic_dictionary()
#print(dictionary)
if __name__ == "__main__":
unittest.main()
Index: tests_svgscripts/test_text_connection_mark.py
===================================================================
--- tests_svgscripts/test_text_connection_mark.py (revision 72)
+++ tests_svgscripts/test_text_connection_mark.py (revision 73)
@@ -1,76 +1,76 @@
import unittest
from os import sep, path
from os.path import dirname, isdir
import lxml.etree as ET
import sys
sys.path.append('svgscripts')
from datatypes.matrix import Matrix
from datatypes.page import Page
from datatypes.reference import Reference
from datatypes.transkriptionField import TranskriptionField
from datatypes.transkription_position import TranskriptionPosition
from datatypes.text_connection_mark import TextConnectionMark
from datatypes.word import Word
class TestTextConnectionMark(unittest.TestCase):
def setUp(self):
DATADIR = dirname(__file__) + sep + 'test_data'
self.xml_file = DATADIR + sep + 'N_VII_1_page008.xml'
mylist = {'text': '*', 'id': '0', 'line-number': '2' }
self.node = ET.Element(TextConnectionMark.XML_TAG, attrib=mylist)
word_position = TranskriptionPosition(x=0, y=1, height=10, width=10, matrix=Matrix('matrix(0.94 0.342 -0.342 0.94 0 0)'))
self.transkription_positions = [ word_position ]
word_position.attach_object_to_tree(self.node)
self.test_tcm_xml = DATADIR + sep + 'N_VII_1_page013.xml'
def test_create_cls(self):
text_connection_mark = TextConnectionMark.create_cls(self.node)
self.assertEqual(text_connection_mark.id, 0)
self.assertEqual(text_connection_mark.transkription_positions[0].bottom, 11)
self.assertEqual(text_connection_mark.transkription_positions[0].height, 10)
self.assertEqual(text_connection_mark.transkription_positions[0].top, 1)
self.assertEqual(text_connection_mark.transkription_positions[0].left, 0)
self.assertEqual(text_connection_mark.transkription_positions[0].width, 10)
self.assertEqual(text_connection_mark.text, '*')
self.assertEqual(text_connection_mark.line_number, 2)
self.assertEqual(text_connection_mark.transkription_positions[0].transform.isRotationMatrix(), True)
def test_attach_word_to_tree(self):
text_connection_mark = TextConnectionMark.create_cls(self.node)
text_connection_mark.text_source = Reference(first_line=1, title='ASDF', page_number='5c')
empty_tree = ET.ElementTree(ET.Element('page'))
text_connection_mark.attach_word_to_tree(empty_tree)
#print(ET.dump(empty_tree.getroot()))
for node in empty_tree.xpath('//' + TextConnectionMark.XML_TAG):
mark = TextConnectionMark.create_cls(node)
self.assertEqual(mark.id, 0)
self.assertEqual(mark.transkription_positions[0].bottom, 11)
self.assertEqual(mark.transkription_positions[0].height, 10)
self.assertEqual(mark.transkription_positions[0].top, 1)
self.assertEqual(mark.transkription_positions[0].left, 0)
self.assertEqual(mark.transkription_positions[0].width, 10)
self.assertEqual(mark.text, '*')
self.assertEqual(mark.line_number, 2)
self.assertEqual(mark.transkription_positions[0].transform.isRotationMatrix(), True)
self.assertEqual(mark.text_source.first_line, text_connection_mark.text_source.first_line)
self.assertEqual(mark.text_source.page_number, text_connection_mark.text_source.page_number)
def test_get_semanticAndDataDict(self):
dictionary = TextConnectionMark.get_semantic_dictionary()
#print(dictionary)
def test_find_content(self):
- page = Page(xml_source_file=self.test_tcm_xml)
+ page = Page(self.test_tcm_xml)
transkription_field = TranskriptionField(page.source)
svg_tree = ET.parse(page.source)
text_connection_marks = [ TextConnectionMark.create_cls_from_word(word) for word in page.words if word.text == TextConnectionMark.SPECIAL_CHAR_LIST[1]]
TextConnectionMark.find_content_in_footnotes(text_connection_marks, transkription_field, svg_tree, title=page.title, page_number=page.number)
self.assertEqual(len(text_connection_marks), 4)
for tcm in text_connection_marks:
self.assertEqual(tcm.text_source is not None, True)
self.assertEqual(tcm.text_source.first_line > -1, True)
self.assertEqual(tcm.text_source.page_number, '14')
if __name__ == "__main__":
unittest.main()
Index: tests_svgscripts/test_util.py
===================================================================
--- tests_svgscripts/test_util.py (revision 72)
+++ tests_svgscripts/test_util.py (revision 73)
@@ -1,217 +1,217 @@
import unittest
from os import sep, path, remove, listdir
from os.path import isdir, isfile, dirname, basename
import shutil
import sys
import lxml.etree as ET
import sys
import tempfile
import warnings
sys.path.append('svgscripts')
import util
from local_config import FAKSIMILE_LOCATION, PDF_READER, SVG_EDITOR, USER_ROOT_LOCATION_DICT
from datatypes.faksimile import FaksimilePage
from datatypes.page import Page
from datatypes.positional_word_part import PositionalWordPart
from datatypes.transkriptionField import TranskriptionField
from datatypes.word_position import WordPosition
sys.path.append('shared_util')
from myxmlwriter import write_pretty, FILE_TYPE_SVG_WORD_POSITION, FILE_TYPE_XML_MANUSCRIPT
class TestCopy(unittest.TestCase):
def setUp(self):
util.UNITTESTING = True
DATADIR = path.dirname(__file__) + sep + 'test_data'
self.test_dir = DATADIR
self.faksimile_dir = DATADIR + sep + 'faksimile_svg'
self.faksimile_file = self.faksimile_dir + sep + 'N-VII-1,5et6.svg'
self.image = DATADIR + sep + 'image.jpg'
self.svg_testrecord = DATADIR + sep + 'TESTRECORD.svg'
self.xml_file = DATADIR + sep + 'N_VII_1_page005.xml'
self.tmp_dir = tempfile.mkdtemp()
def test_copy(self):
tmp_image = self.tmp_dir + sep + basename(self.image)
target_file = 'asdf.svg'
shutil.copy(self.image, self.tmp_dir)
util.copy_faksimile_svg_file(target_file, faksimile_source_file=self.faksimile_file,\
target_directory=self.tmp_dir, local_image_path=tmp_image)
self.assertEqual(isfile(self.tmp_dir + sep + target_file), True)
util.copy_faksimile_svg_file(faksimile_source_file=self.faksimile_file,\
target_directory=self.tmp_dir, local_image_path=tmp_image)
self.assertEqual(isfile(self.tmp_dir + sep + basename(self.faksimile_file)), True)
with self.assertRaises(Exception):
util.copy_faksimile_svg_file()
with self.assertRaises(Exception):
util.copy_faksimile_svg_file(faksimile_source_file=self.faksimile_source_file)
def test_copy_xml(self):
- old_page = Page(xml_source_file=self.xml_file)
+ old_page = Page(self.xml_file)
xml_file = util.copy_xml_file_word_pos_only(self.xml_file, self.tmp_dir)
self.assertEqual(isfile(xml_file), True)
- page = Page(xml_source_file=xml_file)
+ page = Page(xml_file)
self.assertEqual(len(page.words), len(old_page.words))
self.assertEqual(len(page.line_numbers), 0)
def test_create_highlighted_svg_file(self):
target_file = self.tmp_dir + sep + basename(self.faksimile_file)
tmp_image = self.tmp_dir + sep + basename(self.image)
faksimile_tree = ET.parse(self.faksimile_file)
namespaces = { k if k is not None else 'ns': v for k, v in faksimile_tree.getroot().nsmap.items() }
node_ids = ['rect947', 'rect951', 'rect953', 'rect955', 'rect959', 'rect961', 'rect963']
highlight_color = 'blue'
util.create_highlighted_svg_file(faksimile_tree, node_ids, target_directory=self.tmp_dir, highlight_color=highlight_color, namespaces=namespaces)
self.assertEqual(isfile(target_file), True)
new_tree = ET.parse(target_file)
for node in new_tree.xpath('//ns:rect[@fill="{0}"]|//ns:path[@fill="{0}"]'.format(highlight_color), namespaces=namespaces):
node_ids.remove(node.get('id'))
self.assertEqual(len(node_ids), 0)
def test_get_empty_node_ids(self):
faksimile_tree = ET.parse(self.faksimile_file)
faksimile_page = FaksimilePage.GET_FAKSIMILEPAGES(faksimile_tree)[0]
empty_node_ids = util.get_empty_node_ids(faksimile_tree, faksimile_page=faksimile_page)
self.assertEqual('rect1085' in empty_node_ids, True)
def test_record_changes(self):
new_tree = ET.parse(self.faksimile_file)
old_tree = ET.parse(self.faksimile_file)
empty_node_id = 'rect1085'
title_node_id = 'test001'
namespaces = { k if k is not None else 'ns': v for k, v in new_tree.getroot().nsmap.items() }
node = new_tree.xpath('//ns:rect[@id="{0}"]'.format(empty_node_id), namespaces=namespaces)[0]
title = ET.SubElement(node, 'title', attrib={ 'id': title_node_id })
title.text = 'test'
new_file = self.tmp_dir + sep + 'new.svg'
old_file = self.tmp_dir + sep + 'old.svg'
util.copy_faksimile_svg_file(target_file=new_file, faksimile_tree=new_tree)
util.copy_faksimile_svg_file(target_file=old_file, faksimile_tree=old_tree)
util.record_changes(old_file, new_file, [ empty_node_id ], namespaces=namespaces)
test_tree = ET.parse(old_file)
self.assertEqual(len(test_tree.xpath('//ns:rect[@id="{0}"]/ns:title[@id="{1}"]'.format(empty_node_id, title_node_id), namespaces=namespaces)), 1)
def test_replace_chars(self):
- page = Page(xml_source_file=self.xml_file)
+ page = Page(self.xml_file)
faksimile_tree = ET.parse(self.faksimile_file)
namespaces = { k if k is not None else 'ns': v for k, v in faksimile_tree.getroot().nsmap.items() }
word_position = WordPosition(id='rect1159', text='„Gedächtniß"')
wps, texts = util.replace_chars(page.words, [ word_position ])
self.assertEqual(texts[0].endswith('“'), True)
self.assertEqual(wps[0].text.endswith('“'), True)
word_position = WordPosition(id='rect1173', text='-')
wps, texts = util.replace_chars(page.words, [ word_position ])
self.assertEqual(wps[0].text.endswith('–'), True)
def test_mismatch_words(self):
- page = Page(xml_source_file=self.xml_file)
+ page = Page(self.xml_file)
faksimile_tree = ET.parse(self.faksimile_file)
faksimile_page = FaksimilePage.GET_FAKSIMILEPAGES(faksimile_tree)[0]
- page = Page(xml_source_file='xml/N_VII_1_page174.xml')
+ page = Page('xml/N_VII_1_page174.xml')
faksimile_tree = ET.parse('faksimile_svg/N-VII-1,173et174.svg')
faksimile_page = FaksimilePage.GET_FAKSIMILEPAGES(faksimile_tree)[0]
self.assertEqual('-' in [ tp.text for tp in faksimile_page.word_positions], True)
wps, texts = util.replace_chars(page.words,faksimile_page.word_positions)
self.assertEqual('–' in texts, True)
self.assertEqual(len([ faksimile_position for faksimile_position in wps\
if faksimile_position.text == '–' ]), 4)
mismatching_words, mismatching_faksimile_positions = util.get_mismatching_ids(page.words, faksimile_page.word_positions)
self.assertEqual(len([word for word in mismatching_words if word.text.endswith('“') ]), 0)
self.assertEqual(len([word for word in mismatching_words if word.text.endswith('–') ]), 0)
@unittest.skip('test uses external program, has been tested')
def test_show_files(self):
list_of_files = [ self.test_dir + sep + file for file in listdir(self.test_dir) if file.endswith('pdf') ][0:2]
util.ExternalViewer.show_files(single_file=self.faksimile_file, list_of_files=list_of_files)
def test_record_changes_to_page(self):
page = util.record_changes_on_svg_file_to_page(self.xml_file, self.svg_testrecord, [ 1 ])
old_length = len(page.words)
self.assertEqual(page.words[1].text, 'asdf')
self.assertEqual(page.words[1].transkription_positions[0].width, 353)
page = util.record_changes_on_svg_file_to_page(self.xml_file, self.svg_testrecord, [ 13 ])
self.assertEqual(page.words[13].text, 'er')
self.assertEqual(page.words[14].text, '=')
self.assertEqual(len(page.words), old_length+1)
page = util.record_changes_on_svg_file_to_page(self.xml_file, self.svg_testrecord, [ 64 ])
self.assertEqual(page.words[64].text, 'Simplifications-apparat')
self.assertEqual(len(page.words[64].transkription_positions), 3)
self.assertEqual(len(page.words), old_length-1)
@unittest.skipUnless(__name__ == "__main__", 'tests all words')
def test_extended__record_changes_to_page(self):
- page = Page(xml_source_file=self.xml_file)
+ page = Page(self.xml_file)
old_length = len(page.words)
page = util.record_changes_on_svg_file_to_page(self.xml_file, self.svg_testrecord)
self.assertEqual(page.words[1].text, 'asdf')
self.assertEqual(page.words[13].text, 'er')
self.assertEqual(page.words[14].text, '=')
self.assertEqual(page.words[65].text, 'Simplifications-apparat')
self.assertEqual(len(page.words), old_length)
def test_copy_faksimile_update_image_location(self):
test_dir = self.tmp_dir #FAKSIMILE_LOCATION + '/Myriam/Fertig/'
util.copy_faksimile_update_image_location(self.faksimile_file, target_directory=test_dir)
with self.assertWarns(UserWarning):
util.copy_faksimile_update_image_location(self.faksimile_file, target_directory=test_dir)
def test_record_changes_on_xml(self):
- old_page = Page(xml_source_file=self.xml_file)
+ old_page = Page(self.xml_file)
xml_file = util.copy_xml_file_word_pos_only(self.xml_file, self.tmp_dir)
tree = ET.parse(xml_file)
node = tree.xpath('//word[@id="135"]')[0]
counter =0
while node.get('text') != 'gar' or counter > 5:
counter += 1
nextnode = node.getnext()
node.set('text', node.get('text') + nextnode.get('text'))
for element in nextnode.getchildren():
node.append(element)
nextnode.getparent().remove(nextnode)
write_pretty(xml_element_tree=tree, file_name=xml_file,\
script_name=__file__, file_type=FILE_TYPE_SVG_WORD_POSITION)
new_page = util.record_changes_on_xml_file_to_page(self.xml_file, xml_file)
self.assertEqual(len(new_page.words), len(old_page.words)-2)
self.assertEqual(len([ word for word in new_page.words if word.text == 'gar']), 1)
- old_page = Page(xml_source_file=self.xml_file)
+ old_page = Page(self.xml_file)
xml_file = util.copy_xml_file_word_pos_only(self.xml_file, self.tmp_dir)
tree = ET.parse(xml_file)
node = tree.xpath('//word[@id="138"]')[0]
counter =0
while node.get('text') != 'nichtvorkommt.' or counter > 5:
counter += 1
nextnode = node.getnext()
node.set('text', node.get('text') + nextnode.get('text'))
for element in nextnode.getchildren():
node.append(element)
nextnode.getparent().remove(nextnode)
node.set('split', 'nicht vorkommt.')
write_pretty(xml_element_tree=tree, file_name=xml_file,\
script_name=__file__, file_type=FILE_TYPE_SVG_WORD_POSITION)
- joined_page = Page(xml_source_file=xml_file)
+ joined_page = Page(xml_file)
self.assertEqual(len([word for word in joined_page.words if word.text == 'nichtvorkommt.']), 1)
self.assertEqual(len([word for word in joined_page.words if word.text == 'nichtvorkommt.'][0].split_strings), 2)
self.assertEqual(len(joined_page.words), len(old_page.words)-1)
new_page = util.record_changes_on_xml_file_to_page(self.xml_file, xml_file)
self.assertEqual(len(new_page.words), len(old_page.words))
self.assertEqual(len([word for word in new_page.words if word.text == 'vorkommt.']), 1)
self.assertEqual(len([word for word in old_page.words if word.text == 'nicht']),\
len([word for word in new_page.words if word.text == 'nicht']))
#print(ET.dump(node))
#for node in [ node for node in new_page.page_tree.xpath('//word') if node.get('text') == 'gar' ]:
# print(ET.dump(node))
def test_back_up(self):
test_dir = self.tmp_dir
- page = Page(xml_source_file=self.xml_file)
+ page = Page(self.xml_file)
target_file_name = util.back_up(page, self.xml_file, bak_dir=test_dir)
self.assertEqual(isfile(target_file_name), True)
def tearDown(self):
shutil.rmtree(self.tmp_dir, ignore_errors=True)
pass
if __name__ == "__main__":
unittest.main()
Index: tests_svgscripts/test_image.py
===================================================================
--- tests_svgscripts/test_image.py (revision 72)
+++ tests_svgscripts/test_image.py (revision 73)
@@ -1,50 +1,53 @@
import unittest
from os import sep, path
from os.path import isdir, dirname, basename
import lxml.etree as ET
import sys
import sys
sys.path.append('svgscripts')
+from datatypes.faksimile_image import FaksimileImage
from datatypes.image import Image, SVGImage
from datatypes.text_field import TextField
class TestImage(unittest.TestCase):
def test_init(self):
tf = TextField()
image = Image(file_name='test.jpg', height=10, width=10, text_field=tf)
self.assertEqual(image.tag, 'image')
self.assertEqual(image.width, 10)
self.assertEqual(image.text_field.width, 0)
node = ET.Element('svg', attrib={'file': 'test.svg', 'height': '10', 'width': '10'})
image = SVGImage(node=node)
self.assertEqual(image.tag, 'svg-image')
self.assertEqual(image.width, 10)
self.assertEqual(image.file_name, 'test.svg')
def test_attach_object_to_tree(self):
tag = 'faksimile-image'
tf = TextField()
image = Image(file_name='test.jpg', URL='https://www.google.com', height=10, width=10, text_field=tf, tag=tag)
empty_tree = ET.ElementTree(ET.Element('faksimile'))
image.attach_object_to_tree(empty_tree)
self.assertEqual(image.tag, tag)
for node in empty_tree.getroot().xpath('//' + image.tag):
self.assertEqual(node.get('file-name'), 'test.jpg')
self.assertEqual(node.get('height'), '10')
self.assertEqual(node.get('width'), '10')
self.assertEqual(len(node.findall(TextField.XML_TAG)), 1)
+ image = FaksimileImage(file_name='test.jpg', URL='https://www.google.com', height=10, width=10, text_field=tf)
+ image.attach_object_to_tree(empty_tree)
def test_get_semantic_dict(self):
#tf = TextField()
#image = Image(file_name='test.jpg', height=10, width=10, text_field=tf)
pass
#print(SVGImage.get_semantic_dictionary())
#self.assertEqual(image.get_data_dictionary()['body'].get('height'), 10)
#self.assertEqual(image.get_data_dictionary()['body'].get('width'), 10)
if __name__ == "__main__":
unittest.main()
Index: tests_svgscripts/test_convert_wordPositions.py
===================================================================
--- tests_svgscripts/test_convert_wordPositions.py (revision 72)
+++ tests_svgscripts/test_convert_wordPositions.py (revision 73)
@@ -1,54 +1,55 @@
import unittest
from os import sep, path, remove
import lxml.etree as ET
import lxml.html
import sys
sys.path.append('svgscripts')
import convert_wordPositions
from convert_wordPositions import Converter, SVGConverter, HTMLConverter
from datatypes.page import Page
+from datatypes.page_creator import PageCreator
class TestConverter(unittest.TestCase):
def setUp(self):
DATADIR = path.dirname(__file__) + sep + 'test_data'
self.test_file = DATADIR + sep + 'test.xml'
self.test_svg_file = DATADIR + sep + 'test421.svg'
self.outputfile_txt = 'test.txt'
self.outputfile_html = 'test.html'
self.outputfile_svg = 'test.svg'
def test_main(self):
argv = ['-t', '-s', self.test_svg_file, self.test_file]
self.assertEqual(convert_wordPositions.main(argv), 0)
argv = ['-t', '-s', self.test_svg_file, '-o', self.outputfile_txt, self.test_file]
self.assertEqual(convert_wordPositions.main(argv), 0)
self.assertEqual(path.isfile(self.outputfile_txt), True)
argv = ['-t', '-s', self.test_svg_file, '-o', self.outputfile_html, self.test_file]
self.assertEqual(convert_wordPositions.main(argv), 0)
self.assertEqual(path.isfile(self.outputfile_html), True)
html_tree = lxml.html.parse(self.outputfile_html)
self.assertEqual(html_tree.getroot().tag, 'html')
argv = ['-t', '-s', self.test_svg_file, '-o', self.outputfile_svg, self.test_file]
self.assertEqual(convert_wordPositions.main(argv), 0)
self.assertEqual(path.isfile(self.outputfile_svg), True)
svg_tree = ET.parse(self.outputfile_svg)
self.assertEqual(svg_tree.getroot().tag, '{http://www.w3.org/2000/svg}svg')
def test_create_converter(self):
- page = Page(xml_source_file=self.test_file, svg_file=self.test_svg_file)
+ page = PageCreator(self.test_file, svg_file=self.test_svg_file)
converter = Converter.CREATE_CONVERTER(page, False, 'SVG')
self.assertEqual(isinstance(converter, SVGConverter), True)
converter = Converter.CREATE_CONVERTER(page, False, 'HTML')
self.assertEqual(isinstance(converter, HTMLConverter), True)
converter = Converter.CREATE_CONVERTER(page, False)
self.assertEqual(isinstance(converter, Converter), True)
def tearDown(self):
bool(path.isfile(self.outputfile_txt)) and remove(self.outputfile_txt)
bool(path.isfile(self.outputfile_html)) and remove(self.outputfile_html)
bool(path.isfile(self.outputfile_svg)) and remove(self.outputfile_svg)
if __name__ == "__main__":
unittest.main()
Index: tests_svgscripts/test_create_task.py
===================================================================
--- tests_svgscripts/test_create_task.py (revision 72)
+++ tests_svgscripts/test_create_task.py (revision 73)
@@ -1,100 +1,100 @@
import unittest
from os import sep, path, remove, listdir
from os.path import isdir, isfile, dirname, basename
import shutil
import sys
import lxml.etree as ET
import sys
import tempfile
import warnings
sys.path.append('svgscripts')
from create_task import Task, CorrectWords
from datatypes.faksimile import FaksimilePage
from datatypes.page import Page
from datatypes.positional_word_part import PositionalWordPart
from datatypes.transkriptionField import TranskriptionField
from datatypes.word_position import WordPosition
class TestTask(Task):
"""This is a test."""
def get_node_ids(self):
return [ word.faksimile_positions[0].id for word in self.words if len(word.faksimile_positions) > 0 ]
def select_words(self, words):
self.words = words
return words
class TestCreateTask(unittest.TestCase):
def setUp(self):
DATADIR = path.dirname(__file__) + sep + 'test_data'
self.xml_source = DATADIR + sep + 'N_VII_1_page005_faksimile_merged.xml'
self.xml_unmerged = DATADIR + sep + 'N_VII_1_page005.xml'
self.faksimile_svgFile = DATADIR + sep + 'faksimile_svg' + sep + 'N-VII-1,5et6.svg'
def test_task(self):
tmp_dir = tempfile.mkdtemp()
task = TestTask(self.xml_source, tmp_dir)
task.create()
dir_elements = listdir(tmp_dir)
self.assertEqual(task.description, TestTask.__doc__)
self.assertEqual(len(dir_elements), 3)
self.assertEqual(basename(self.xml_source).replace('.xml', '.pdf') in dir_elements, True)
#print(tmp_dir)
shutil.rmtree(tmp_dir)
def test_correction_task(self):
tmp_dir = tempfile.mkdtemp()
unmatched_strings = [ 'Das', 'Muster', 'einer' ]
unmatched_node_ids = []
svg_tree = ET.parse(self.faksimile_svgFile)
namespaces = { k if k is not None else 'ns': v for k, v in svg_tree.getroot().nsmap.items() }
for string in unmatched_strings:
unmatched_node_ids += [ node.getparent().get('id') for node in svg_tree.xpath('//ns:rect/ns:title[text() = "{0}"]'.format(string), namespaces=namespaces) ]
- page = Page(xml_source_file=self.xml_unmerged)
+ page = Page(self.xml_unmerged)
unmatched_word_ids = [ word.id for word in page.words if word.text in unmatched_strings ]
task = CorrectWords(self.xml_unmerged, self.faksimile_svgFile, tmp_dir, unmatched_node_ids=unmatched_node_ids)
note = task.create_note_about_missing_words()
self.assertEqual('orgänge.' in note, True)
faksimile_svg_file = task.create_file_name(page)
self.assertEqual(faksimile_svg_file, 'N-VII-1,5.svg')
transkription_svg = task.create_file_name(page, is_faksimile_svg=False)
self.assertEqual(transkription_svg, basename(self.xml_unmerged.replace('.xml', '.svg')))
xml_file = task.create_file_name(page, is_faksimile_svg=False, suffix='.xml')
self.assertEqual(xml_file, basename(self.xml_unmerged))
self.assertEqual(task.has_been_created(page), False)
task.create()
self.assertEqual(task.has_been_created(page), True)
self.assertEqual(task.contains_file(faksimile_svg_file), True)
self.assertEqual(task.contains_file(transkription_svg), True)
self.assertEqual(task.contains_file(xml_file), True)
task2 = CorrectWords(self.xml_unmerged, self.faksimile_svgFile, tmp_dir, unmatched_node_ids=unmatched_node_ids)
self.assertEqual(task2.has_been_created(page), True)
self.assertEqual(task2.contains_file(faksimile_svg_file), True)
self.assertEqual(task2.contains_file(transkription_svg), True)
self.assertEqual(task2.contains_file(xml_file), True)
shutil.rmtree(tmp_dir)
def test_ids(self):
tmp_dir = tempfile.mkdtemp()
- page = Page(xml_source_file=self.xml_unmerged)
+ page = Page(self.xml_unmerged)
unmatched_strings = [ 'Das', 'Muster', 'einer' ]
unmatched_node_ids = []
svg_tree = ET.parse(self.faksimile_svgFile)
namespaces = { k if k is not None else 'ns': v for k, v in svg_tree.getroot().nsmap.items() }
for string in unmatched_strings:
unmatched_node_ids += [ node.getparent().get('id') for node in svg_tree.xpath('//ns:rect/ns:title[text() = "{0}"]'.format(string), namespaces=namespaces) ]
task = CorrectWords(self.xml_unmerged, self.faksimile_svgFile, tmp_dir, unmatched_node_ids=unmatched_node_ids)
task.create()
xml_file = task.get_target_filepath(page, is_faksimile_svg=False, suffix='.xml')
self.assertEqual(isfile(xml_file), True)
- new_page = Page(xml_source_file=xml_file)
+ new_page = Page(xml_file)
for word in task.unmatched_words:
new_words = [ new_word for new_word in new_page.words if new_word.id == word.id ]
self.assertEqual(len(new_words), 1)
self.assertEqual(new_words[0].text, word.text)
if __name__ == "__main__":
unittest.main()
Index: tests_svgscripts/test_super_page.py
===================================================================
--- tests_svgscripts/test_super_page.py (revision 72)
+++ tests_svgscripts/test_super_page.py (revision 73)
@@ -1,31 +1,97 @@
import unittest
from os import sep, path
-from os.path import isdir, dirname, basename
+from os.path import isdir, isfile, dirname, basename
import lxml.etree as ET
+import shutil
import sys
-import sys
+import tempfile
sys.path.append('svgscripts')
from datatypes.super_page import SuperPage
+from datatypes.faksimile_image import FaksimileImage
+from datatypes.text_field import TextField
+
+sys.path.append('shared_util')
+from myxmlwriter import write_pretty
class TestSuperPage(unittest.TestCase):
- def test_init(self):
- sp = SuperPage(title='test', page_number=1)
- self.assertEqual(sp.page_tree.getroot().get('title'), 'test')
- self.assertEqual(sp.page_tree.getroot().get('page-number'), '1')
- self.assertEqual(sp.tag, 'super-page')
-
- def test_remove_tags(self):
- sp = SuperPage(title='test', page_number=1)
- tag = 'test'
- numTags = 3
- for i in range(numTags):
- ET.SubElement(sp.page_tree.getroot(), tag)
- self.assertEqual(len(sp.page_tree.xpath('//' + tag)), numTags)
- sp.remove_tags_from_page_tree([tag])
- self.assertEqual(len(sp.page_tree.xpath('//' + tag)), 0)
+ def setUp(self):
+ DATADIR = dirname(__file__) + sep + 'test_data'
+ if not isdir(DATADIR):
+ DATADIR = dirname(dirname(__file__)) + sep + 'test_data'
+ self.test_file = DATADIR + sep + 'test.xml'
+ self.test_svg_file = DATADIR + sep + 'test421.svg'
+ self.pdf_xml = DATADIR + sep + 'W_I_8_page125.xml'
+ self.xml_file = DATADIR + sep + 'N_VII_1_page005.xml'
+ self.xml_fileB = DATADIR + sep + 'N_VII_1_page006.xml'
+ self.pdf_xml_source = DATADIR + sep + 'W_I_8_neu_125-01.svg'
+ self.test_tcm_xml = DATADIR + sep + 'N_VII_1_page001.xml'
+ self.test_manuscript = DATADIR + sep + 'N_VII_1.xml'
+ self.tmp_dir = tempfile.mkdtemp()
+
+ def test_init_property(self):
+ xml_file = self.tmp_dir + sep + 'asdf.xml'
+ sp = SuperPage(xml_file, title='test', page_number=1)
+ sp.init_property('title')
+ sp.init_property('number')
+ self.assertEqual(sp.title, 'test')
+ self.assertEqual(sp.number, '1')
+ self.assertEqual(len(sp.page_tree.xpath('//' +sp.simple_properties_dictionary.get('title')[0])), 1)
+ text_field = TextField()
+ image = FaksimileImage(file_name='test.jpg', height=10, width=20, text_field=text_field)
+ sp.init_property('faksimile_image', value=image)
+ write_pretty(xml_element_tree=sp.page_tree, file_name=xml_file, script_name='test', file_type=SuperPage.FILE_TYPE_SVG_WORD_POSITION)
+ sp = SuperPage(xml_file)
+ sp.init_tree()
+ #print(ET.dump(sp.page_tree.getroot()))
+ sp.init_property('title')
+ sp.init_property('number')
+ self.assertEqual(sp.title, 'test')
+ self.assertEqual(sp.number, '1')
+ sp.init_property('faksimile_image')
+ self.assertEqual(sp.faksimile_image.file_name, 'test.jpg')
+ self.assertEqual(sp.faksimile_image.text_field.id, '0')
+ sp.init_property('text_field')
+ self.assertEqual(sp.text_field.id, '0')
+ image = FaksimileImage(file_name='asdf.jpg')
+ sp.init_property('title', value='asdf', overwrite=True)
+ sp.init_property('number', value=2, overwrite=True)
+ sp.init_property('faksimile_image', value=image, overwrite=True)
+ self.assertEqual(sp.title, 'asdf')
+ self.assertEqual(sp.number, '2')
+ self.assertEqual(sp.faksimile_image.file_name, 'asdf.jpg')
+
+    def test_init_all_properties(self):
+ xml_file = self.tmp_dir + sep + 'asdf.xml'
+ sp = SuperPage(xml_file, title='test', page_number=1)
+ sp.init_all_properties()
+ self.assertEqual(sp.title, 'test')
+ self.assertEqual(sp.number, '1')
+ self.assertEqual(sp.faksimile_image, None)
+ self.assertEqual(sp.svg_image, None)
+
+ def test_init_tree(self):
+ sp = SuperPage('asdf.xml', title='test', page_number=1)
+ self.assertEqual(sp.xml_file, 'asdf.xml')
+ with self.assertRaises(Exception):
+ sp = SuperPage(self.test_manuscript, title='test', page_number=1)
+ sp = SuperPage('asdf.xml', title='test', page_number=1)
+ with self.assertRaises(Exception):
+ sp.init_tree(should_exist=True)
+
+ def test_update_property_dictionary(self):
+ xml_file = self.tmp_dir + sep + 'asdf.xml'
+ sp = SuperPage(xml_file, title='test', page_number=1)
+ sp.update_property_dictionary('title', 'Hello World')
+ sp.init_property('title')
+ self.assertEqual(sp.title, 'Hello World')
+
+
+ def tearDown(self):
+ shutil.rmtree(self.tmp_dir, ignore_errors=True)
+
if __name__ == "__main__":
unittest.main()
Index: tests_svgscripts/test_extractWordPosition.py
===================================================================
--- tests_svgscripts/test_extractWordPosition.py (revision 72)
+++ tests_svgscripts/test_extractWordPosition.py (revision 73)
@@ -1,198 +1,195 @@
import unittest
import os
from os import sep, path
from os.path import isfile, isdir, dirname
import re
import shutil
import tempfile
import lxml.etree as ET
from lxml.etree import XMLSyntaxError
import sys
sys.path.append('svgscripts')
import extractWordPosition
from myxmlwriter import write_pretty
from datatypes.transkriptionField import TranskriptionField
from datatypes.matrix import Matrix
-from datatypes.page import Page
+from datatypes.page_creator import PageCreator, FILE_TYPE_SVG_WORD_POSITION
from datatypes.pdf import PDFText
from datatypes.word import Word
from datatypes.lineNumber import LineNumber
from datatypes.word_insertion_mark import WordInsertionMark
+def test_write(xml_element_tree=None, file_name=None):
+ write_pretty(xml_element_tree=xml_element_tree, file_name=file_name, script_name='test', file_type=FILE_TYPE_SVG_WORD_POSITION)
+
+
class TestExtractor(unittest.TestCase):
def setUp(self):
DATADIR = dirname(__file__) + sep + 'test_data'
self.test_file_find_word = DATADIR + sep + 'test_find_word.xml'
self.test_dir = tempfile.mkdtemp()
self.title = 'ABC 111'
self.matrix_string = 'matrix(1 0 0 1 183.6558 197.9131)'
self.test_file = DATADIR + sep + 'Mp_XIV_1_mytest_421.svg'
self.test_empty_file = DATADIR + sep + 'my_empty_test.svg'
self.test_source = DATADIR + sep + 'Mp_XIV_1_mytest_421.xml'
self.xml420 = DATADIR + sep + 'Mp_XIV_1_page420.xml'
self.pdf420 = DATADIR + sep + 'Mp_XIV_1_online_420.pdf'
self.pdf_file = DATADIR + sep + 'W_I_8_page125.pdf'
self.faulty_xml = DATADIR + sep + 'W_I_8_faulty_page125.xml'
self.pdf_xml = DATADIR + sep + 'W_I_8_page125.xml'
self.pdf_xml_source = DATADIR + sep + 'W_I_8_neu_125-01.svg'
-
- def test_main(self):
- extractWordPosition.Extractor.UNITTESTING = True
- Page.UNITTESTING = True
- argv = ['-d', self.test_dir, '-o', '--title=My Hero', '--page=1', self.test_empty_file]
- with self.assertRaises(XMLSyntaxError):
- extractor = extractWordPosition.Extractor()
- extractor.extract_information(self.test_empty_file)
+ self.testA = DATADIR + sep + 'testA.xml'
def test_update_title(self):
extractor = extractWordPosition.Extractor(xml_dir=self.test_dir)
extractor.update_title_and_manuscript('test')
self.assertEqual(extractor.title, 'test')
self.assertEqual(extractor.manuscript_file, '{}/test.xml'.format(self.test_dir))
self.assertEqual(isfile('{}/test.xml'.format(self.test_dir)), True)
def test_get_page_number(self):
extractor = extractWordPosition.Extractor()
self.assertEqual(extractor.get_page_number(self.test_file, page_number='1'), '001')
self.assertEqual(extractor.get_page_number(self.test_file), '421')
def test_get_file_name(self):
extractor = extractWordPosition.Extractor()
self.assertEqual(extractor.get_file_name(self.test_file), 'xml/Mp_XIV_1_mytest_421.xml')
extractor = extractWordPosition.Extractor(title=self.title)
self.assertEqual(extractor.get_file_name(self.test_file), 'xml/{}_page421.xml'.format(self.title.replace(' ', '_')))
extractorA = extractWordPosition.Extractor(title=self.title)
extractorB = extractWordPosition.Extractor(manuscript_file=extractorA.manuscript_file)
self.assertEqual(extractorB.get_file_name(self.test_file), 'xml/{}_page421.xml'.format(self.title.replace(' ', '_')))
def test_get_style(self):
extractor = extractWordPosition.Extractor()
svg_tree = ET.parse(self.test_file)
sonderzeichen_list, letterspacing_list, style_dict = extractor.get_style(svg_tree.getroot())
self.assertEqual(sonderzeichen_list, [ 'st21', 'st23'])
self.assertEqual(style_dict.get('st11').get('font-family'), 'Frutiger-Europeen')
self.assertEqual(style_dict.get('st5').get('stroke'), '#CED5CE')
def test_get_word_from_part_obj(self):
extractor = extractWordPosition.Extractor()
mylist = [{'text': 'a', 'class': 'asdf' }, {'text': 'b', 'endX': 0 }, {'text': 'c'}]
self.assertEqual(extractor.get_word_from_part_obj(mylist), 'abc')
def test_get_bottoms(self):
svg_tree = ET.parse(self.test_file)
extractor = extractWordPosition.Extractor()
mybottoms = extractor.get_bottoms(svg_tree.getroot())
self.assertEqual(mybottoms[0], '57.1914')
self.assertEqual(len(mybottoms), 106)
self.assertEqual(mybottoms[len(mybottoms)-1], '1155.6899')
mybottoms = extractor.get_bottoms(svg_tree.getroot(), from_position=100.0, to_position=800.0)
self.assertEqual(mybottoms[0], '100.5132')
self.assertEqual(len(mybottoms), 84)
self.assertEqual(mybottoms[len(mybottoms)-1], '792.8218')
tf = TranskriptionField(self.test_file)
mybottoms = extractor.get_bottoms(svg_tree.getroot(), transkription_field=tf)
self.assertEqual(mybottoms[0], '91.7134')
self.assertEqual(len(mybottoms), 75)
self.assertEqual(mybottoms[len(mybottoms)-1], '681.7134')
def test_get_text_items(self):
svg_tree = ET.parse(self.test_file)
extractor = extractWordPosition.Extractor()
mytest_items = [ x for x in extractor.get_text_items(svg_tree.getroot()) ]
self.assertEqual(len(mytest_items), 300)
self.assertEqual(mytest_items[0].get('transform'), 'matrix(1 0 0 1 386.8218 57.1914)')
tf = TranskriptionField(self.test_file)
mytest_itemsTF = [ x for x in extractor.get_text_items(svg_tree.getroot(), transkription_field=tf) ]
self.assertEqual(mytest_itemsTF[0].get('transform'), 'matrix(1 0 0 1 204.8618 91.7134)')
def test_init_tree_and_target_file(self):
- target_file = 'xml/testA.xml'
- page = Page(xml_target_file=target_file, title=self.title)
+ target_file = self.testA
+ page = PageCreator(target_file, title=self.title)
tree = page.page_tree
self.assertEqual(tree.getroot().get('title'), self.title)
self.assertEqual(tree.getroot().findall('./style'), [])
- write_pretty(xml_element_tree=tree, file_name=target_file)
- page = Page(xml_target_file=target_file)
+ test_write(xml_element_tree=tree, file_name=target_file)
+ page = PageCreator(target_file)
tree = page.page_tree
self.assertEqual(tree.getroot().get('title'), self.title)
self.assertEqual(tree.getroot().findall('./style'), [])
isfile(target_file) and os.remove(target_file)
def test_add_style(self):
extractor = extractWordPosition.Extractor()
svg_tree = ET.parse(self.test_file)
sonderzeichen_list, letterspacing_list, style_dict = extractor.get_style(svg_tree.getroot())
- target_file = 'xml/testA.xml'
- page = Page(xml_target_file=target_file,title=self.title)
+ target_file = self.testA
+ page = PageCreator(target_file,title=self.title)
page.add_style(sonderzeichen_list=sonderzeichen_list, style_dict=style_dict)
- write_pretty(xml_element_tree=page.page_tree, file_name=target_file)
+ test_write(xml_element_tree=page.page_tree, file_name=target_file)
fromTarget_xml_tree = ET.parse(target_file)
self.assertEqual(fromTarget_xml_tree.getroot().get('title'), self.title)
self.assertEqual(fromTarget_xml_tree.getroot().find("style").get('Sonderzeichen'), "st21 st23")
self.assertEqual(fromTarget_xml_tree.getroot().find("style").find("class[@name='st5']").get('stroke'), '#CED5CE')
self.assertEqual(fromTarget_xml_tree.getroot().find("style").find("class[@name='st11']").get('font-family'), 'Frutiger-Europeen')
- page = Page(xml_target_file=target_file)
+ page = PageCreator(target_file)
page.add_style(sonderzeichen_list=sonderzeichen_list, style_dict=style_dict)
- write_pretty(xml_element_tree=page.page_tree, file_name=target_file)
+ test_write(xml_element_tree=page.page_tree, file_name=target_file)
fromTarget_xml_tree = ET.parse(target_file)
self.assertEqual(fromTarget_xml_tree.getroot().get('title'), self.title)
self.assertEqual(fromTarget_xml_tree.getroot().find("style").get('Sonderzeichen'), "st21 st23")
self.assertEqual(fromTarget_xml_tree.getroot().find("style").find("class[@name='st5']").get('stroke'), '#CED5CE')
self.assertEqual(fromTarget_xml_tree.getroot().find("style").find("class[@name='st11']").get('font-family'), 'Frutiger-Europeen')
isfile(target_file) and os.remove(target_file)
def test_add_word(self):
extractor = extractWordPosition.Extractor()
svg_tree = ET.parse(self.test_file)
mylist = [{'text': 'a' }, {'text': 'b' }, {'text': 'c' }]
matrix = Matrix(self.matrix_string)
for dict in mylist:
dict['class'] = 'st22'
dict['x'] = matrix.add2X(0)
dict['y'] = matrix.getY()
target_file = self.test_dir + sep + 'asdfasdf.xml'
- page = Page(xml_target_file=target_file)
+ page = PageCreator(target_file)
sonderzeichen_list, letterspacing_list, style_dict = extractor.get_style(svg_tree.getroot())
page.add_style(sonderzeichen_list=sonderzeichen_list, letterspacing_list=letterspacing_list, style_dict=style_dict)
self.assertEqual(extractor.add_word(page, 0, mylist, '%', 0), 1)
mylist[1]['text'] = 'A'
mylist[1]['class'] = 'st21'
mylist[1]['x'] = matrix.add2X(1)
self.assertEqual(extractor.add_word(page, 0, mylist, '%', 0), 2)
page.update_and_attach_words2tree()
self.assertEqual(page.page_tree.getroot().xpath('//word[@id="1"]')[0].get('text'), 'a')
self.assertEqual(page.page_tree.getroot().xpath('//word[@id="2"]')[0].get('text'), 'c')
self.assertEqual(page.page_tree.getroot().xpath('//word[@id="2"]/transkription-position')[0].get('left'), '183.506')
self.assertEqual(page.page_tree.getroot().xpath('//word[@id="2"]/transkription-position')[0].get('height'), '8.25')
def test_extractor(self):
extractor = extractWordPosition.Extractor()
self.assertEqual(extractor.title, None)
self.assertEqual(extractor.manuscript_file, None)
self.assertEqual(extractor.xml_dir, 'xml/')
self.assertEqual(extractor.manuscript_tree, None)
def test_write_title_to_manuscript_file(self):
extractor = extractWordPosition.Extractor(xml_dir=self.test_dir, title=self.title)
self.assertEqual(isfile(extractor.manuscript_file), True)
extractor = extractWordPosition.Extractor(manuscript_file=extractor.manuscript_file)
self.assertEqual(extractor.title, self.title)
def test_extract_line_numbers(self):
svg_tree = ET.parse(self.test_file)
tf = TranskriptionField(self.test_file)
extractor = extractWordPosition.Extractor()
line_numbers = extractor.extract_line_numbers(svg_tree, tf)
self.assertEqual(line_numbers[0].id, 2)
self.assertEqual(len(line_numbers), 24)
self.assertEqual(line_numbers[0].top, 45.163)
def tearDown(self):
isdir(self.test_dir) and shutil.rmtree(self.test_dir)
isfile('{}/{}.xml'.format('xml', self.title.replace(' ', '_'))) and os.remove('{}/{}.xml'.format('xml', self.title.replace(' ', '_')))
if __name__ == "__main__":
unittest.main()
Index: tests_svgscripts/test_transkription_position.py
===================================================================
--- tests_svgscripts/test_transkription_position.py (revision 72)
+++ tests_svgscripts/test_transkription_position.py (revision 73)
@@ -1,101 +1,102 @@
import unittest
from os import sep, path
from os.path import dirname, isdir, isfile
import lxml.etree as ET
import sys
sys.path.append('svgscripts')
from datatypes.debug_message import DebugMessage
from datatypes.matrix import Matrix
from datatypes.page import Page
+from datatypes.page_creator import PageCreator
from datatypes.positional_word_part import PositionalWordPart
from datatypes.transkription_position import TranskriptionPosition
from datatypes.transkriptionField import TranskriptionField
from datatypes.word_position import WordPosition
class TestTranskriptionPosition(unittest.TestCase):
def setUp(self):
DATADIR = dirname(__file__) + sep + 'test_data'
if not isdir(DATADIR):
DATADIR = dirname(dirname(__file__)) + sep + 'test_data'
self.test_svg_file = DATADIR + sep + 'W_I_8_page125_web.svg'
self.test_xml = DATADIR + sep + 'W_I_8_page125.xml'
self.dir = DATADIR
def test_init(self):
dmsg = DebugMessage(message='test')
word_position = TranskriptionPosition(id=1, height=10, width=10, x=0, y=10, debug_message=dmsg)
self.assertEqual(word_position.tag, WordPosition.TRANSKRIPTION)
self.assertEqual(word_position.id, '1')
self.assertEqual(word_position.debug_message.message, 'test')
self.assertEqual(word_position.height, 10)
self.assertEqual(word_position.top, 10)
self.assertEqual(word_position.bottom, 20)
self.assertEqual(word_position.left, 0)
self.assertEqual(word_position.isOnTranskription(), True)
self.assertEqual(word_position.isOnFaksimile(), False)
def test_attach_object_to_tree(self):
matrix = Matrix('matrix(0 0 0 0 0 0)')
dmsg = DebugMessage(message='test')
pwps = [ PositionalWordPart(text='test') ]
word_position = TranskriptionPosition(id=1, height=10, width=10, x=0, y=10, matrix=matrix, debug_message=dmsg, positional_word_parts=pwps)
empty_tree = ET.ElementTree(ET.Element('page'))
word_position.attach_object_to_tree(empty_tree)
#print(ET.dump(empty_tree.getroot()))
for node in empty_tree.getroot().xpath('//' + word_position.tag):
self.assertEqual(node.get('id'), '1')
self.assertEqual(node.get('bottom'), '20')
self.assertEqual(node.get('transform'), matrix.toString())
self.assertEqual(node.get('writing-process-id'), '-1')
word_position = TranskriptionPosition(node=empty_tree.getroot().find('.//' + word_position.tag))
self.assertEqual(word_position.height, 10)
self.assertEqual(word_position.debug_message is not None, True)
self.assertEqual(word_position.debug_message.message, 'test')
self.assertEqual(len(word_position.positional_word_parts), 1)
def test_CREATE_TRANSKRIPTION_POSITION_LIST(self):
- page = Page(xml_source_file=self.test_xml, svg_file=self.test_svg_file)
+ page = PageCreator(self.test_xml, svg_file=self.test_svg_file)
tf = TranskriptionField(page.svg_file)
word_part_objs = [{'text': 'es', 'class': 'st5 st6', 'x': 258.148, 'y': '8.5' }]
transkription_positions = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST(page, word_part_objs, transkription_field=tf)
self.assertEqual(transkription_positions[0].top, 3.829)
self.assertEqual(transkription_positions[0].height, 5.672)
word_part_objs = [{'text': 'Meine', 'class': 'st5 st8', 'x': 8.504, 'y': 70.5 }]
transkription_positions = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST(page, word_part_objs, transkription_field=tf)
self.assertEqual(transkription_positions[0].height, 9.125)
self.assertEqual(transkription_positions[0].top, 62.376)
self.assertEqual(transkription_positions[0].bottom, 71.501)
def test_CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(self):
- page = Page(xml_source_file=self.test_xml, svg_file=self.test_svg_file)
+ page = PageCreator(self.test_xml, svg_file=self.test_svg_file)
tf = TranskriptionField(page.svg_file)
word_part_objs = [{'text': 'Meine', 'class': 'st5 st8', 'x': 8.504, 'y': 70.5 }]
transkription_positions = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST(page, word_part_objs, transkription_field=tf)
transkription_positions[0].positional_word_parts[2].transform = Matrix('rotate(20)')
transkription_positions = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(transkription_positions[0].positional_word_parts)
self.assertEqual(len(transkription_positions), 3)
transkription_positions = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST(page, word_part_objs, transkription_field=tf)
transkription_positions[0].positional_word_parts[0].style_class = 'st5 st10'
transkription_positions = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(transkription_positions[0].positional_word_parts)
self.assertEqual(len(transkription_positions), 2)
def test_get_semantic_dictionary(self):
dictionary = TranskriptionPosition.get_semantic_dictionary()
#print(dictionary)
#self.assertEqual(TranskriptionPosition.XML_TAG in dictionary['properties'].get('writing_process_id').get('xpath'), True)
def test_split(self):
- page = Page(xml_source_file=self.test_xml, svg_file=self.test_svg_file)
+ page = PageCreator(self.test_xml, svg_file=self.test_svg_file)
tf = TranskriptionField(page.svg_file)
word_part_objs = [{'text': 'Meine', 'class': 'st5 st8', 'x': 8.504, 'y': 70.5 }]
transkription_position = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST(page, word_part_objs, transkription_field=tf)[0]
tr_positions = transkription_position.split(transkription_position.left + transkription_position.width/2)
self.assertEqual(tr_positions[0] is not None, True)
self.assertEqual(tr_positions[1] is not None, True)
transkription_position = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST(page, word_part_objs, transkription_field=tf)[0]
tr_positions = transkription_position.split(transkription_position.left + transkription_position.width/2, transkription_position.left + transkription_position.width - 4)
self.assertEqual(len(tr_positions), 3)
if __name__ == "__main__":
unittest.main()
Index: tests_svgscripts/test_join_faksimileAndTranskription.py
===================================================================
--- tests_svgscripts/test_join_faksimileAndTranskription.py (revision 72)
+++ tests_svgscripts/test_join_faksimileAndTranskription.py (revision 73)
@@ -1,115 +1,115 @@
import unittest
from os import sep, path, remove
from os.path import isdir, isfile, dirname
import shutil
import sys
import lxml.etree as ET
import warnings
import sys
sys.path.append('svgscripts')
import join_faksimileAndTranskription
from datatypes.faksimile import FaksimilePage
from datatypes.page import Page
from datatypes.positional_word_part import PositionalWordPart
from datatypes.transkriptionField import TranskriptionField
from datatypes.word_position import WordPosition
class TestJoin(unittest.TestCase):
def setUp(self):
join_faksimileAndTranskription.UNITTESTING = True
DATADIR = path.dirname(__file__) + sep + 'test_data'
self.faksimile_dir = DATADIR + sep + 'faksimile_svg'
self.manuscript = DATADIR + sep + 'N_VII_1.xml'
self.manuscript_copy = self.manuscript.replace('.', '_copy.')
self.faksimile_file = self.faksimile_dir + sep + 'N-VII-1,5et6.svg'
self.xml_file = DATADIR + sep + 'N_VII_1_page005.xml'
self.Mp_XIV_1_mytest_421 = DATADIR + sep + 'Mp_XIV_1_mytest_421.xml'
def test_sort_words(self):
- page = Page(xml_source_file=self.Mp_XIV_1_mytest_421)
+ page = Page(self.Mp_XIV_1_mytest_421)
words_line7 = [ word for word in page.words if word.line_number == 7 ]
page.words = words_line7
sorted_words = join_faksimileAndTranskription.sort_words(page)
self.assertEqual(len(sorted_words), len(words_line7))
for index, word in enumerate(words_line7):
self.assertEqual(sorted_words[index], word)
def test_sort_faksimile_positions(self):
faksimile_tree = ET.parse(self.faksimile_file)
namespaces = { k if k is not None else 'ns': v for k, v in faksimile_tree.getroot().nsmap.items() }
faksimile_pages = FaksimilePage.GET_FAKSIMILEPAGES(faksimile_tree, namespaces=namespaces)
self.assertEqual(len(faksimile_pages), 2)
svg_pos_file, manuscript_file = join_faksimileAndTranskription.get_svgPosFile_and_manuscriptFile(faksimile_pages[0], manuscript_file=self.manuscript, redo_ok=True)
sorted_positions = join_faksimileAndTranskription.sort_faksimile_positions(faksimile_pages[0].word_positions)
- page = Page(xml_source_file=svg_pos_file)
+ page = Page(svg_pos_file)
for index in range(0, 10):
id = sorted_positions[index].id
if len(faksimile_tree.getroot().xpath('//ns:rect[@id="{0}"]/ns:title/text()|//ns:path[@id="{0}"]/ns:title/text()'\
.format(id), namespaces=namespaces)) > 0:
word_text = faksimile_tree.getroot().xpath('//ns:rect[@id="{0}"]/ns:title/text()|//ns:path[@id="{0}"]/ns:title/text()'\
.format(id), namespaces=namespaces)[0]
#print(sorted_positions[index].left, sorted_positions[index].top, word_text, page.words[index].text)
self.assertEqual(word_text, page.words[index].text)
def test_get_filelist_and_manuscript_file(self):
file_list, manuscript_file = join_faksimileAndTranskription.get_filelist_and_manuscript_file(self.faksimile_dir, self.manuscript)
self.assertEqual(len(file_list), 1)
self.assertEqual(file_list[0], self.faksimile_file)
self.assertEqual(manuscript_file, self.manuscript)
file_list, manuscript_file = join_faksimileAndTranskription.get_filelist_and_manuscript_file(self.manuscript, self.faksimile_file)
self.assertEqual(len(file_list), 1)
self.assertEqual(file_list[0], self.faksimile_file)
self.assertEqual(manuscript_file, self.manuscript)
@unittest.skipUnless(__name__ == "__main__", 'test uses path from within dir')
def test_get_svgPosFile_and_manuscriptFile(self):
faksimile_tree = ET.parse(self.faksimile_file)
faksimile_pages = FaksimilePage.GET_FAKSIMILEPAGES(faksimile_tree)
self.assertEqual(len(faksimile_pages), 2)
svg_pos_file, manuscript_file = join_faksimileAndTranskription.get_svgPosFile_and_manuscriptFile(faksimile_pages[0], manuscript_file=self.manuscript, redo_ok=True)
self.assertEqual(svg_pos_file, self.manuscript.replace('.', '_page00{}.'.format(faksimile_pages[0].page_number)))
self.assertEqual(manuscript_file, self.manuscript)
def test_join_faksimileAndTranskription(self):
self.assertEqual(join_faksimileAndTranskription.join_faksimileAndTranskription(self.faksimile_file, self.manuscript), 0)
#self.assertEqual(join_faksimileAndTranskription.join_faksimileAndTranskription(self.faksimile_file, self.manuscript, test_word_text='gar'), 0)
@unittest.skip('function update_writing_process is deprecated')
def testupdate_writing_process(self):
- page = Page(xml_source_file=self.xml_file)
+ page = Page(self.xml_file)
word = page.words[12]
self.assertEqual(len(word.faksimile_positions), 1)
self.assertEqual(word.faksimile_positions[0].writing_process_id, -1)
join_faksimileAndTranskription.update_writing_process(word)
self.assertEqual(word.faksimile_positions[0].writing_process_id, 0)
#@unittest.skipUnless(__name__ == "__main__", 'test takes too long, we do not run it with unittest discover')
@unittest.skip('test takes too long, has been tested')
def test_fix_errors(self):
- page = Page(xml_source_file=self.xml_file)
+ page = Page(self.xml_file)
word_position = WordPosition(id='rect945', text='Lenken')
exit_status = join_faksimileAndTranskription.fix_errors(self.faksimile_file, [ word_position], [page.words[12]], xml_source_file=self.xml_file, manuscript_file=self.manuscript )
self.assertEqual(exit_status, 0)
@unittest.skip('tested with local file')
def test_join_single_chars(self):
- page = Page(xml_source_file='xml/N_VII_1_page016.xml')
+ page = Page('xml/N_VII_1_page016.xml')
words = join_faksimileAndTranskription.sort_words(page)
join_faksimileAndTranskription.join_single_char_words(words)
new_words = [ word for word in words if word.text == 'selber' ]
self.assertEqual(len(new_words), 1)
new_words = [ word for word in words if word.text == 's' ]
self.assertEqual(len(new_words), 0)
def test_get_mismatching_ids(self):
- page = Page(xml_source_file=self.xml_file)
+ page = Page(self.xml_file)
word_position = WordPosition(id='rect945', text='Lenken')
mwords, mfps = join_faksimileAndTranskription.get_mismatching_ids([ page.words[12]], [ word_position ])
self.assertEqual(mwords[0].text, 'Denken')
self.assertEqual(mfps[0].text, 'Lenken')
if __name__ == "__main__":
unittest.main()
Index: tests_svgscripts/test_faksimile.py
===================================================================
--- tests_svgscripts/test_faksimile.py (revision 72)
+++ tests_svgscripts/test_faksimile.py (revision 73)
@@ -1,69 +1,68 @@
import unittest
from os import sep, path
from os.path import isdir, dirname, basename
import lxml.etree as ET
import sys
import sys
sys.path.append('svgscripts')
from datatypes.faksimile import FaksimilePage, get_paths_inside_rect
from datatypes.faksimile_image import FaksimileImage
from datatypes.text_field import TextField
class TestFaksimilePage(unittest.TestCase):
def setUp(self):
DATADIR = dirname(__file__) + sep + 'test_data'
if not isdir(DATADIR):
DATADIR = dirname(dirname(__file__)) + sep + 'test_data'
self.svg_file = DATADIR + sep + 'W-II-1,49et50.svg'
self.svg_testmatrix = DATADIR + sep + 'TESTMATRIX_1.svg'
self.faksimile_dir = DATADIR + sep + 'faksimile_svg'
self.faksimile_file = self.faksimile_dir + sep + 'N-VII-1,5et6.svg'
def test_init(self):
image = FaksimileImage(file_name='test.jpg', height=10, width=10)
text_field = TextField(width=10, height=10, x=10, y=10)
faksimile = FaksimilePage(title='test', page_number=1, faksimile_image=image, text_field=text_field)
self.assertEqual(faksimile.page_tree.getroot().get('title'), 'test')
self.assertEqual(faksimile.page_tree.getroot().get('page-number'), '1')
- self.assertEqual(faksimile.tag, 'faksimile-page')
self.assertEqual(faksimile.faksimile_image.width, 10)
self.assertEqual(faksimile.text_field.width, 10)
def test_GET_TEXTFIELDS(self):
svg_tree = ET.parse(self.svg_file)
pages = FaksimilePage.GET_FAKSIMILEPAGES(svg_tree)
self.assertEqual(len(pages), 2)
text_field = pages[0].text_field
self.assertEqual(text_field.width, 663.333)
result_dir = '.{}xml{}'.format(sep, sep) if isdir('xml') else ''
self.assertEqual(pages[0].xml_file, result_dir + 'W-II-1_49.xml')
self.assertEqual(pages[0].title, 'W II 1')
self.assertEqual(pages[0].page_number, '49')
pages = FaksimilePage.GET_FAKSIMILEPAGES(svg_tree, page_number='49')
self.assertEqual(len(pages), 1)
svg_tree = ET.parse(self.svg_testmatrix)
pages = FaksimilePage.GET_FAKSIMILEPAGES(svg_tree)
self.assertEqual(len(pages), 1)
self.assertEqual(len(pages[0].word_positions), 1)
self.assertEqual(pages[0].word_positions[0].transform.toCSSTransformString(), 'rotate(45deg)')
svg_tree = ET.parse(self.faksimile_file)
pages = FaksimilePage.GET_FAKSIMILEPAGES(svg_tree)
self.assertEqual(len(pages), 2)
textfield_id = pages[1].title.replace(' ', '-') + '_' + pages[1].page_number
#print([ position.id for position in pages[0].word_positions])
self.assertEqual(textfield_id not in [ position.id for position in pages[0].word_positions ], True)
self.assertEqual('path1237' in [ position.id for position in pages[0].word_positions ], True)
self.assertEqual('Vorgangs' in [ position.text for position in pages[0].word_positions ], False)
def test_get_paths_inside_rect(self):
svg_tree = ET.parse(self.faksimile_file)
paths = get_paths_inside_rect(svg_tree, '//ns:path', 360, 786, 92, 765, 'N-VII-1_5')
self.assertEqual(len(paths), 1)
if __name__ == "__main__":
unittest.main()
Index: tests_svgscripts/test_fix_missing_glyphs.py
===================================================================
--- tests_svgscripts/test_fix_missing_glyphs.py (revision 72)
+++ tests_svgscripts/test_fix_missing_glyphs.py (revision 73)
@@ -1,79 +1,79 @@
import unittest
from os import sep, path, remove
from os.path import isdir, isfile, dirname
import shutil
import sys
import lxml.etree as ET
import warnings
import sys
sys.path.append('svgscripts')
import fix_missing_glyphs
from datatypes.page import Page
from datatypes.positional_word_part import PositionalWordPart
from datatypes.transkriptionField import TranskriptionField
class TestMissingGlyphs(unittest.TestCase):
def setUp(self):
fix_missing_glyphs.UNITTESTING = True
DATADIR = path.dirname(__file__) + sep + 'test_data'
self.manuscript = DATADIR + sep + 'pdfsvg' + sep + 'W_II_1.xml'
self.manuscript_copy = self.manuscript.replace('.', '_copy.')
self.svgposfile = DATADIR + sep + 'pdfsvg' + sep + 'W_II_1_page015.xml'
self.svgposfile_copy = DATADIR + sep + 'pdfsvg' + sep + 'W_II_1_page015_copy.xml'
def test_main(self):
argv_fileNotFound = [ 'asdf' ]
with self.assertRaises(FileNotFoundError):
fix_missing_glyphs.main(argv_fileNotFound)
#shutil.copy(self.manuscript, self.manuscript_copy)
#shutil.copy(self.svgposfile, self.svgposfile_copy)
#self.assertEqual(fix_missing_glyphs.main([self.manuscript_copy]), 0)
#shutil.copy(self.svgposfile_copy, self.svgposfile)
#remove(self.manuscript_copy)
shutil.copy(self.svgposfile, self.svgposfile_copy)
self.assertEqual(fix_missing_glyphs.main([self.svgposfile_copy]), 0)
remove(self.svgposfile_copy)
def test_update_word(self):
- page = Page(xml_source_file=self.svgposfile)
+ page = Page(self.svgposfile)
pwps = page.words[5].transkription_positions[0].positional_word_parts
new_left = 10
old_left = pwps[0].left
new_width = pwps[0].width + old_left - new_left
pwps[0].left = new_left
pwps[0].width = new_width
pwps[0].text = 'X' + pwps[0].text
original_text = page.words[5].text
pwp_node = page.page_tree.xpath('//word[@id="5"]/transkription-position[@id="0"]/' + PositionalWordPart.XML_TAG + '[@id="0"]')[0]
#print(ET.dump(pwp_node))
fix_missing_glyphs.update_word(page, pwp_node, [ pwps[0] ])
pwp_node = page.page_tree.xpath('//word[@id="5"]/transkription-position[@id="0"]/' + PositionalWordPart.XML_TAG + '[@id="0"]')[0]
#print(ET.dump(pwp_node.getparent().getparent()))
self.assertEqual(float(pwp_node.get('width')), new_width)
self.assertEqual(pwp_node.getparent().getparent().get('text'), 'X' + original_text)
def test_find_missing_glyph_for_pwp(self):
- page = Page(xml_source_file=self.svgposfile)
+ page = Page(self.svgposfile)
transkription_field = TranskriptionField(page.svg_file)
svg_path_tree = ET.parse(page.svg_file)
namespaces = { k if k is not None else 'ns': v for k, v in svg_path_tree.getroot().nsmap.items() }
positional_word_part_node = page.page_tree.xpath('//' + PositionalWordPart.XML_TAG + '[not(@symbol-id)]')[0]\
if len(page.page_tree.xpath('//' + PositionalWordPart.XML_TAG + '[not(@symbol-id)]')) > 0 else None
pwps = fix_missing_glyphs.find_missing_glyph_for_pwp(positional_word_part_node, svg_path_tree, namespaces, xmin=transkription_field.xmin, ymin=transkription_field.ymin)
self.assertEqual(len(pwps), 2)
def test_get_filelist_and_manuscript_file(self):
file_list, manuscript_file = fix_missing_glyphs.get_filelist_and_manuscript_file(self.manuscript, self.svgposfile)
self.assertEqual(len(file_list), 1)
self.assertEqual(file_list[0], self.svgposfile)
self.assertEqual(manuscript_file, self.manuscript)
file_list, manuscript_file = fix_missing_glyphs.get_filelist_and_manuscript_file(self.svgposfile, self.manuscript)
self.assertEqual(len(file_list), 1)
self.assertEqual(file_list[0], self.svgposfile)
self.assertEqual(manuscript_file, self.manuscript)
if __name__ == "__main__":
unittest.main()
Index: tests_svgscripts/test_process_words_post_merging.py
===================================================================
--- tests_svgscripts/test_process_words_post_merging.py (revision 72)
+++ tests_svgscripts/test_process_words_post_merging.py (revision 73)
@@ -1,95 +1,95 @@
import unittest
from os import sep, path, remove
from os.path import isdir, isfile, dirname
import shutil
import sys
import lxml.etree as ET
import warnings
import sys
sys.path.append('svgscripts')
import process_words_post_merging
from datatypes.faksimile import FaksimilePage
from datatypes.mark_foreign_hands import MarkForeignHands
from datatypes.page import Page
from datatypes.path import Path
from datatypes.positional_word_part import PositionalWordPart
from datatypes.text_connection_mark import TextConnectionMark
from datatypes.transkriptionField import TranskriptionField
from datatypes.word_position import WordPosition
class TestPostMerge(unittest.TestCase):
def setUp(self):
process_words_post_merging.UNITTESTING = True
DATADIR = path.dirname(__file__) + sep + 'test_data'
self.faksimile_dir = DATADIR + sep + 'faksimile_svg'
self.manuscript = DATADIR + sep + 'N_VII_1.xml'
self.manuscript_copy = self.manuscript.replace('.', '_copy.')
self.faksimile_file = self.faksimile_dir + sep + 'N-VII-1,5et6.svg'
self.xml_file = DATADIR + sep + 'N_VII_1_page005.xml'
self.Mp_XIV_1_mytest_421 = DATADIR + sep + 'Mp_XIV_1_mytest_421.xml'
self.test_tcm_xml = DATADIR + sep + 'N_VII_1_page001.xml'
self.pdf_xml = DATADIR + sep + 'W_I_8_page125.xml'
self.pdf_xml_source = DATADIR + sep + 'W_I_8_neu_125-01.svg'
def test_main(self):
process_words_post_merging.main([self.manuscript])
def test_categorize_paths(self):
- page = Page(xml_source_file=self.pdf_xml)
+ page = Page(self.pdf_xml)
page.source = self.pdf_xml_source
tr = TranskriptionField(page.source)
page.words = [ word for word in page.words if word.line_number == 33 ]
path_dict = process_words_post_merging.categorize_paths(page, tr)
self.assertEqual(True in [ word.deleted for word in page.words if word.id == 269 ], False)
self.assertEqual(len(path_dict.get('deletion_or_underline_paths')) > 0, True)
self.assertEqual(len(path_dict.get('box_paths')), 5)
words = [ word for word in page.words if len(word.box_paths) > 0 ]
self.assertEqual(len(words), 1)
self.assertEqual(words[0].word_parts[0].earlier_version is not None, True)
self.assertEqual(words[0].word_parts[0].earlier_version.text, ')')
def test_find_special_words(self):
- page = Page(xml_source_file=self.xml_file)
+ page = Page(self.xml_file)
process_words_post_merging.find_special_words(page)
self.assertEqual(len(page.mark_foreign_hands), 1)
self.assertEqual(page.mark_foreign_hands[0].foreign_hands_text, 'x')
page.update_and_attach_words2tree()
nodes = page.page_tree.xpath('//' + MarkForeignHands.XML_TAG)
- page = Page(xml_source_file=self.test_tcm_xml)
+ page = Page(self.test_tcm_xml)
process_words_post_merging.find_special_words(page)
self.assertEqual(len(page.text_connection_marks), 1)
self.assertEqual(page.text_connection_marks[0].text_source.first_line, 2)
"""
page.update_and_attach_words2tree()
nodes = page.page_tree.xpath('//' + TextConnectionMark.XML_TAG)
print(ET.dump(nodes[0]))
"""
def test_process_word_boxes(self):
- page = Page(xml_source_file=self.pdf_xml)
+ page = Page(self.pdf_xml)
page.source = self.pdf_xml_source
for word in page.words:
word.partition_according_to_writing_process_id()
tr = TranskriptionField(page.source)
box_path_d = ['M 598.11,626.565 L 603.557,626.565 L 603.557,632.565 L 598.11,632.565 L 598.11,626.565',\
'M 557.443,683.44 L 574.182,683.44 L 574.182,694.815 L 557.443,694.815 L 557.443,683.44',\
'M 404.193,659.565 L 407.80699999999996,659.565 L 407.80699999999996,668.94 L 404.193,668.94 L 404.193,659.565',\
'M 587.932,634.065 L 598.318,634.065 L 598.318,643.19 L 587.932,643.19 L 587.932,634.065',\
'M 570.443,221.315 L 576.557,221.315 L 576.557,230.065 L 570.443,230.065 L 570.443,221.315']
box_paths = [ Path(d_string=d_string) for d_string in box_path_d ]
process_words_post_merging.process_word_boxes(page, box_paths, tr)
words_with_boxes = [ word for word in page.words if len(word.box_paths) > 0 ]
self.assertEqual(len(words_with_boxes), 5)
def test_update_writing_process_ids(self):
- page = Page(xml_source_file=self.pdf_xml)
+ page = Page(self.pdf_xml)
page.words = [ word for word in page.words if word.text == 'Aber' and word.line_number == 2 ]
process_words_post_merging.update_writing_process_ids(page)
self.assertEqual(len(page.words[0].word_parts), 2)
self.assertEqual(page.words[0].word_parts[0].writing_process_id, 1)
self.assertEqual(page.words[0].word_parts[1].writing_process_id, 0)
if __name__ == "__main__":
unittest.main()
Index: tests_svgscripts/test_pdf.py
===================================================================
--- tests_svgscripts/test_pdf.py (revision 72)
+++ tests_svgscripts/test_pdf.py (revision 73)
@@ -1,132 +1,133 @@
import unittest
from os import sep, path
from os.path import isdir, dirname, basename
import lxml.etree as ET
import sys
import re
import sys
sys.path.append('svgscripts')
from datatypes.pdf import PDFText
from datatypes.page import Page
+from datatypes.page_creator import PageCreator
from datatypes.transkriptionField import TranskriptionField
from datatypes.word import Word
from extractWordPosition import Extractor
class TestPDFText(unittest.TestCase):
def setUp(self):
DATADIR = dirname(__file__) + sep + 'test_data'
if not isdir(DATADIR):
DATADIR = dirname(dirname(__file__)) + sep + 'test_data'
self.pdf_file = DATADIR + sep + 'Mp_XIV_1_online_420.pdf'
self.pdf_fileB = DATADIR + sep + 'W_I_8_page125.pdf'
self.xml420 = DATADIR + sep + 'Mp_XIV_1_page420.xml'
self.xml420_source = DATADIR + sep + 'Mp_XIV_1_online_420.svg'
self.pdf420 = DATADIR + sep + 'Mp_XIV_1_online_420.pdf'
self.faulty_xml = DATADIR + sep + 'W_I_8_faulty_page125.xml'
self.pdf_xml = DATADIR + sep + 'W_I_8_page125.xml'
self.pdf_source = DATADIR + sep + "W_I_8_neu_125-01.svg"
self.dir = DATADIR
def test_init(self):
pdftext = PDFText(self.pdf_file)
self.assertEqual(len(pdftext.text_tree.xpath('.//text')), 102)
self.assertEqual(len(pdftext.text_tree.xpath('.//text[@id="{0}"]'.format(101))), 1)
with self.assertRaises(Exception):
PDFText(self.pdf_file, current_page_number=1)
def test_tree_contains_text_at(self):
x = 146.1
y = 81
pdftext = PDFText(self.pdf_file)
self.assertEqual(pdftext.tree_contains_text_at('nicht', x, y), True)
def test_tree_contains_text(self):
pdftext = PDFText(self.pdf_fileB)
self.assertEqual(pdftext.tree_contains_text('richtiger(richtiger'), False)
self.assertEqual(pdftext.tree_contains_text('2ter'), True)
self.assertEqual(pdftext.tree_contains_text_at('$', 320, 183), True)
def test_split_str_according_to_pdf_tree(self):
pdftext = PDFText(self.pdf_fileB)
self.assertEqual(pdftext.split_str_according_to_pdf_tree('.Insofern'), 'Insofern')
self.assertEqual(pdftext.split_str_according_to_pdf_tree('sticht('), 'sticht')
self.assertEqual(pdftext.split_str_according_to_pdf_tree('.sticht('), 'sticht')
def test_split_wrongly_concatenated_words(self):
- page = Page(xml_source_file=self.faulty_xml)
+ page = Page(self.faulty_xml)
self.assertEqual('wünschtheißt.' in [ item.text for item in page.words ], True)
self.assertEqual(len(page.words), 1)
pdftext = PDFText(self.pdf_fileB, sonderzeichen=Extractor.SONDERZEICHEN_LIST)
page.words = pdftext.split_wrongly_concatenated_words(page)
self.assertEqual('wünschtheißt.' in [ item.text for item in page.words ], False)
self.assertEqual(len(page.words), 2)
@unittest.skip("have to fix PDFText.add_punctuation2words")
def test_add_punctuation2words(self):
- page = Page(xml_source_file=self.pdf_xml)
+ page = Page(self.pdf_xml)
tr = TranskriptionField(self.pdf_source)
pat = r'^[-.=,:;?]$'
punctuations = [ word for word in page.words if re.match(pat, word.text) ]
self.assertEqual(len(punctuations), 5)
self.assertEqual(len(page.words), 430)
pdftext = PDFText(self.pdf_fileB, sonderzeichen=Extractor.SONDERZEICHEN_LIST)
pdftext.add_punctuation2words(page, transkription_field=tr)
punctuations = [ word for word in page.words if re.match(pat, word.text) ]
self.assertEqual(len(punctuations), 1)
self.assertEqual(len(page.words), 426)
@unittest.skip("have to fix PDFText.join_composita")
def test_add_composita(self):
- page = Page(xml_source_file=self.pdf_xml)
+ page = Page(self.pdf_xml)
tr = TranskriptionField(self.pdf_source)
pat = r'^[=-]\s*[A-Z]'
composita_part = [ word for word in page.words if re.match(pat, word.text) ]
self.assertEqual(len(composita_part), 1)
pdftext = PDFText(self.pdf_fileB, sonderzeichen=Extractor.SONDERZEICHEN_LIST)
pdftext.join_composita(page, transkription_field=tr)
composita_part = [ word for word in page.words if re.match(pat, word.text) ]
self.assertEqual(len(composita_part), 0)
self.assertEqual(len(page.words), 429)
@unittest.skip("have to fix PDFText.join_single_char_words")
def test_join_single_char_words(self):
pat = r'^\w$'
"""
- page = Page(xml_source_file=self.xml420, pdfFile=self.pdf420)
+ page = PageCreator(self.xml420, pdfFile=self.pdf420)
tr = TranskriptionField(page.source) if page.source is not None else None
page.words[:] = [ word for word in page.words if word.line_number == 13 ]
singles = [ word for word in page.words if re.match(pat, word.text) ]
#print(['{}/{}: {}'.format(word.line_number, word.id, word.text) for word in singles])
self.assertEqual(len(singles), 8)
pdftext = PDFText(page.pdfFile, sonderzeichen=Extractor.SONDERZEICHEN_LIST)
pdftext.join_single_char_words(page, transkription_field=tr)
singles = [ word for word in page.words if re.match(pat, word.text) ]
#print(['----->{}/{}: {}'.format(word.line_number, word.id, word.text) for word in singles])
self.assertEqual(len(singles), 0)
"""
- page = Page(xml_source_file=self.pdf_xml, pdfFile=self.pdf_fileB)
+ page = PageCreator(self.pdf_xml, pdfFile=self.pdf_fileB)
page.words[:] = [ word for word in page.words if word.line_number == 19 ]
tr = TranskriptionField(self.dir + sep + page.source) if page.source is not None else None
singles = [ word for word in page.words if re.match(pat, word.text) ]
self.assertEqual(len(singles), 26)
pdftext = PDFText(self.pdf_fileB, sonderzeichen=Extractor.SONDERZEICHEN_LIST)
pdftext.join_single_char_words(page, transkription_field=tr)
singles = [ word for word in page.words if re.match(pat, word.text) ]
self.assertEqual(len(singles), 0)
self.assertEqual(':' in [word.text for word in page.words], True)
@unittest.skip("have to fix PDFText.find_word_path")
def test_find_word_path(self):
- page = Page(xml_source_file=self.pdf_xml, pdfFile=self.pdf_fileB)
+ page = PageCreator(self.pdf_xml, pdfFile=self.pdf_fileB)
full_line19 = [ word for word in page.words if word.line_number == 19 ]
pdftext = PDFText(self.pdf_fileB, sonderzeichen=Extractor.SONDERZEICHEN_LIST)
words_on_path = pdftext.find_word_path(full_line19)
self.assertEqual(len(words_on_path), len([':', 'aber', 'schon', 'in', 'der', 'Gebur', 't', 'd', 'e', 'r', 'T', 'r', 'a', 'g', 'ö', 'd', 'i', 'e', 'u', '.', 'i', 'h', 'r', 'e', 'r', 'L', 'e', 'h', 'r', 'e', 'v', 'o', 'm', 'Dionys.', 'ist', 'der', 'Schop.', 'Pessimismus', 'überwunden.']))
if __name__ == "__main__":
unittest.main()
Index: tests_svgscripts/test_word.py
===================================================================
--- tests_svgscripts/test_word.py (revision 72)
+++ tests_svgscripts/test_word.py (revision 73)
@@ -1,244 +1,244 @@
import unittest
from os import sep, path
import lxml.etree as ET
import sys
sys.path.append('svgscripts')
from datatypes.box import Box
from datatypes.matrix import Matrix
import datatypes.page
from datatypes.path import Path
from datatypes.positional_word_part import PositionalWordPart
from datatypes.transkriptionField import TranskriptionField
from datatypes.transkription_position import TranskriptionPosition
from datatypes.word import Word, execute_function_on_parts
from datatypes.word_position import WordPosition
class Page:
def __init__(self):
self.svg_file = None
def get_line_number(self, input=0):
return -1
def get_biggest_fontSize4styles(self, style_set={}):
return 7
class TestWord(unittest.TestCase):
def setUp(self):
DATADIR = path.dirname(__file__) + sep + 'test_data'
self.test_file = DATADIR + sep + 'N_VII_1_page009.xml'
self.pdf_xml = DATADIR + sep + 'W_I_8_page125.xml'
self.pdf_xml_source = DATADIR + sep + 'W_I_8_neu_125-01.svg'
self.word_part_objs = [{'text': 'a' }, {'text': 'b' }, {'text': 'c' }]
x = 0
for dict in self.word_part_objs:
dict['class'] = 'st22'
dict['x'] = x
dict['y'] = 11
x += 1
mylist = {'text': 'abc', 'id': '0', 'line-number': '2', 'deleted': 'true' }
word_position = TranskriptionPosition(x=0, y=1, height=10, width=10, matrix=Matrix('matrix(0.94 0.342 -0.342 0.94 0 0)'))
self.transkription_positions = [ word_position ]
self.word_node = ET.Element('word', attrib=mylist)
word_position.attach_object_to_tree(self.word_node)
x = 0
for char in mylist['text']:
ET.SubElement(self.word_node, 'part', attrib={'text': char, 'x': str(x), 'y': '11', 'class': 'st22' })
x += 1
def test_Word_with_word_part_objs(self):
word = Word.CREATE_WORD(word_part_objs=self.word_part_objs, height=10, endX=10)
self.assertEqual(word.id, 0)
self.assertEqual(word.transkription_positions[0].bottom, 13)
self.assertEqual(word.transkription_positions[0].height, 10)
self.assertEqual(word.transkription_positions[0].top, 3)
self.assertEqual(word.transkription_positions[0].left, 0)
self.assertEqual(word.transkription_positions[0].width, 10)
self.assertEqual(word.text, 'abc')
def test_Word_with_word_node(self):
word = Word.create_cls(self.word_node)
self.assertEqual(word.id, 0)
self.assertEqual(word.deleted, True)
self.assertEqual(word.transkription_positions[0].bottom, 11)
self.assertEqual(word.transkription_positions[0].height, 10)
self.assertEqual(word.transkription_positions[0].top, 1)
self.assertEqual(word.transkription_positions[0].left, 0)
self.assertEqual(word.transkription_positions[0].width, 10)
self.assertEqual(word.text, 'abc')
self.assertEqual(word.line_number, 2)
self.assertEqual(word.transkription_positions[0].transform.isRotationMatrix(), True)
def test_attach_word_to_tree(self):
newWord = Word.CREATE_WORD(word_part_objs=self.word_part_objs, height=10, endX=10)
empty_tree = ET.ElementTree(ET.Element('page'))
newWord.attach_word_to_tree(empty_tree)
for word_node in empty_tree.getroot().xpath('//word'):
word = Word.CREATE_WORD(word_node=word_node)
self.assertEqual(word.id, 0)
self.assertEqual(word.deleted, False)
self.assertEqual(word.transkription_positions[0].bottom, 13)
self.assertEqual(word.transkription_positions[0].height, 10)
self.assertEqual(word.transkription_positions[0].top, 3)
self.assertEqual(word.transkription_positions[0].left, 0)
self.assertEqual(word.transkription_positions[0].width, 10)
self.assertEqual(word.text, 'abc')
def test_split(self):
page = Page()
pwps = PositionalWordPart.CREATE_SIMPLE_POSITIONAL_WORD_PART_LIST(page, self.word_part_objs)
transkription_positions = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(pwps)
word = Word(text=''.join([pwp.text for pwp in pwps]), transkription_positions=transkription_positions)
previousWord, currentWord, nextWord = word.split('b')
self.assertEqual(previousWord.id, 0)
self.assertEqual(previousWord.text, 'a')
self.assertEqual(currentWord.id, 1)
self.assertEqual(nextWord.id, 2)
word = Word(text=''.join([pwp.text for pwp in pwps]), transkription_positions=transkription_positions)
previousWord, currentWord, nextWord = word.split('bc')
self.assertEqual(previousWord.id, 0)
self.assertEqual(previousWord.text, 'a')
self.assertEqual(currentWord.id, 1)
word = Word(text=''.join([pwp.text for pwp in pwps]), transkription_positions=transkription_positions)
previousWord, currentWord, nextWord = word.split('ab', start_id=10)
self.assertEqual(currentWord.id, 10)
self.assertEqual(currentWord.text, 'ab')
self.assertEqual(currentWord.transkription_positions[0].width, 2.1)
self.assertEqual(nextWord.id, 11)
self.assertEqual(nextWord.transkription_positions[0].width, 5.2)
word_part_objs=[{'text': 'x', 'class':'st22', 'x': 0, 'y': 0},\
{'text': 'Insofern', 'class':'st22', 'x': 1, 'y': 0},\
{'text': 'x', 'class':'st22', 'x': 10, 'y': 0}]
pwps = PositionalWordPart.CREATE_SIMPLE_POSITIONAL_WORD_PART_LIST(page, word_part_objs)
transkription_positions = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(pwps)
word = Word(text=''.join([pwp.text for pwp in pwps]), transkription_positions=transkription_positions)
with self.assertWarns(Warning):
previousWord, currentWord, nextWord = word.split('Insofer')
word_part_objs=[{'text': 'xInsofern', 'class':'st22', 'x': 0, 'y': 0}]
pwps = PositionalWordPart.CREATE_SIMPLE_POSITIONAL_WORD_PART_LIST(page, word_part_objs)
transkription_positions = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(pwps)
word = Word(text=''.join([pwp.text for pwp in pwps]), transkription_positions=transkription_positions)
with self.assertWarns(Warning):
previousWord, currentWord, nextWord = word.split('Insofern')
def test_join(self):
word = Word.CREATE_WORD(word_part_objs=self.word_part_objs, height=10, endX=10)
other_word = Word.CREATE_WORD(word_part_objs=[{'text': '.', 'class':'st22', 'x': 3, 'y': 11}])
word.join(other_word)
self.assertEqual(word.text, 'abc.')
other_word = Word.CREATE_WORD(word_part_objs=[{'text': '.', 'class':'st22', 'x': 3, 'y': 11}])
word.join(other_word, append_at_end_of_new_word=False)
self.assertEqual(word.text, '.abc.')
"""
tree = ET.ElementTree(ET.Element('page'))
word.attach_word_to_tree(tree)
print(ET.dump(tree.getroot()))
"""
def test_get_semanticAndDataDict(self):
dictionary = Word.get_semantic_dictionary()
#print(dictionary)
def test_simplify_transkription_positions(self):
node_string = """ """
nodeA = ET.fromstring(node_string)
node_string = """
"""
nodeB = ET.fromstring(node_string)
word = Word(text="Si", transkription_positions=[ TranskriptionPosition(node=nodeA), TranskriptionPosition(node=nodeB) ])
self.assertEqual(len(word.transkription_positions), 2)
word.simplify_transkription_positions()
self.assertEqual(len(word.transkription_positions), 1)
"""
tree = ET.ElementTree(ET.Element('page'))
word.attach_word_to_tree(tree)
print(ET.dump(tree.getroot()))
"""
def test_partition(self):
- page = datatypes.page.Page(xml_source_file=self.test_file)
+ page = datatypes.page.Page(self.test_file)
word = page.words[67]
self.assertEqual(word.belongs_to_multiple_writing_processes(), True)
word.partition_according_to_writing_process_id()
self.assertEqual(len(word.word_parts), 3)
self.assertEqual(word.belongs_to_multiple_writing_processes(), False)
self.assertEqual(word.belongs_to_multiple_writing_processes(include_parts=True), True)
empty_tree = ET.ElementTree(ET.Element('page'))
word_node = word.attach_word_to_tree(empty_tree)
newWord = Word.create_cls(word_node)
self.assertEqual(len(newWord.word_parts), 3)
#print(ET.dump(empty_tree.getroot()))
def test_partition_deletion(self):
- page = datatypes.page.Page(xml_source_file=self.test_file)
+ page = datatypes.page.Page(self.test_file)
word = page.words[67]
for transkription_position in word.transkription_positions:
transkription_position.deleted = transkription_position.writing_process_id == 1
self.assertEqual(word.has_mixed_status('deleted'), True)
word.partition_according_to_deletion()
self.assertEqual(len(word.word_parts), 3)
self.assertEqual(word.has_mixed_status('deleted'), False)
self.assertEqual(word.has_mixed_status('deleted', include_parts=True), True)
- page = datatypes.page.Page(xml_source_file=self.test_file)
+ page = datatypes.page.Page(self.test_file)
word = page.words[67]
word.partition_according_to_writing_process_id()
#print([(word.text, word.deleted) for word in word.word_parts])
word.word_parts[1].transkription_positions[1].deleted = True
word.partition_according_to_deletion()
self.assertEqual(len(word.word_parts), 4)
#print([(word.text, word.deleted) for word in word.word_parts])
def test_execute_function_on_parts(self):
- page = datatypes.page.Page(xml_source_file=self.test_file)
+ page = datatypes.page.Page(self.test_file)
word_parts = [ page.words[67], page.words[68] ]
word_parts, none = execute_function_on_parts(word_parts, 'partition_according_to_writing_process_id')
self.assertEqual(len(word_parts) == 4, True)
def test_process_word_boxes(self):
- page = datatypes.page.Page(xml_source_file=self.pdf_xml)
+ page = datatypes.page.Page(self.pdf_xml)
page.source = self.pdf_xml_source
for word in page.words:
word.set_writing_process_id_to_transkription_positions(page)
word.partition_according_to_writing_process_id()
tr = TranskriptionField(page.source)
box_path_d = ['M 598.11,626.565 L 603.557,626.565 L 603.557,632.565 L 598.11,632.565 L 598.11,626.565',\
'M 557.443,683.44 L 574.182,683.44 L 574.182,694.815 L 557.443,694.815 L 557.443,683.44',\
'M 404.193,659.565 L 407.80699999999996,659.565 L 407.80699999999996,668.94 L 404.193,668.94 L 404.193,659.565',\
'M 587.932,634.065 L 598.318,634.065 L 598.318,643.19 L 587.932,643.19 L 587.932,634.065',\
'M 570.443,221.315 L 576.557,221.315 L 576.557,230.065 L 570.443,230.065 L 570.443,221.315']
box_paths = [ Box(d_string=d_string, earlier_text='test') for d_string in box_path_d ]
indices = [ 30, 276, 287, 295, 319 ]
empty_tree = ET.ElementTree(ET.Element('page'))
for word_id, index in enumerate(indices):
later_word = page.words[index].process_boxes(box_paths, tr_xmin=tr.xmin, tr_ymin=tr.ymin)
#print(later_word.text)
self.assertEqual(later_word.earlier_version is not None, True)
later_word.id = word_id
later_word.attach_word_to_tree(empty_tree)
#print(ET.dump(empty_tree.getroot()))
for word_node in empty_tree.getroot().xpath('./word'):
word = Word.create_cls(word_node)
#print(word.text)
self.assertEqual(word.earlier_version is not None, True)
def test_split_according_to_status(self):
- page = datatypes.page.Page(xml_source_file=self.test_file)
+ page = datatypes.page.Page(self.test_file)
word = page.words[67]
for transkription_position in word.transkription_positions:
transkription_position.text = 'asdf'\
if transkription_position.writing_process_id == 1\
else word.text
self.assertEqual(word.has_mixed_status('text'), True)
new_words = word.split_according_to_status('text')
self.assertEqual(len(new_words) > 1, True)
self.assertEqual(new_words[0].id, word.id)
self.assertEqual(new_words[0].deleted, word.deleted)
self.assertEqual(new_words[1].id, word.id+1)
#print([ word.text for word in new_words ])
if __name__ == "__main__":
unittest.main()
Index: tests_svgscripts/test_page.py
===================================================================
--- tests_svgscripts/test_page.py (revision 72)
+++ tests_svgscripts/test_page.py (revision 73)
@@ -1,140 +1,120 @@
import unittest
from os import sep, path
from os.path import isdir, isfile, dirname, basename
import lxml.etree as ET
import sys
import sys
sys.path.append('svgscripts')
dir_changed = False
if not isdir('datatypes'):
sys.path.append(dirname(sys.path[0]))
dir_changed = True
from datatypes.lineNumber import LineNumber
from datatypes.mark_foreign_hands import MarkForeignHands
from datatypes.page import Page, STATUS_MERGED_OK, STATUS_POSTMERGED_OK
from datatypes.path import Path
from datatypes.text_connection_mark import TextConnectionMark
from datatypes.transkriptionField import TranskriptionField
from datatypes.writing_process import WritingProcess
from datatypes.word import Word
class TestPage(unittest.TestCase):
def setUp(self):
DATADIR = dirname(__file__) + sep + 'test_data'
if not isdir(DATADIR):
DATADIR = dirname(dirname(__file__)) + sep + 'test_data'
self.test_file = DATADIR + sep + 'test.xml'
self.test_svg_file = DATADIR + sep + 'test421.svg'
self.pdf_xml = DATADIR + sep + 'W_I_8_page125.xml'
self.xml_file = DATADIR + sep + 'N_VII_1_page005.xml'
self.xml_fileB = DATADIR + sep + 'N_VII_1_page006.xml'
self.pdf_xml_source = DATADIR + sep + 'W_I_8_neu_125-01.svg'
self.test_tcm_xml = DATADIR + sep + 'N_VII_1_page001.xml'
self.test_manuscript = DATADIR + sep + 'N_VII_1.xml'
def test_Page(self):
- page = Page(xml_source_file=self.test_file, svg_file=self.test_svg_file)
+ page = Page(self.test_file)
self.assertEqual(page.title, 'Mp XIV 1')
self.assertEqual(page.number, '421')
self.assertEqual(len(page.sonderzeichen_list), 2)
self.assertEqual('st21' in page.sonderzeichen_list, True)
self.assertEqual('st23' in page.sonderzeichen_list, True)
self.assertEqual(page.style_dict['st0']['fill'], '#F8F9F8')
- self.assertEqual(page.width, 493.23)
stage0 = [ key for key, value in page.fontsizekey2stage_mapping.items() if value == 0 ]
stage1 = [ key for key, value in page.fontsizekey2stage_mapping.items() if value == 1 ]
stage2 = [ key for key, value in page.fontsizekey2stage_mapping.items() if value == 2 ]
fontStage0 = float(page.style_dict.get(stage0[0]).get('font-size').replace('px',''))
fontStage1 = float(page.style_dict.get(stage1[0]).get('font-size').replace('px',''))
fontStage2 = float(page.style_dict.get(stage2[0]).get('font-size').replace('px',''))
self.assertEqual(fontStage0 > fontStage1, True)
self.assertEqual(fontStage1 > fontStage2, True)
def test_get_biggest_fontSize4styles(self):
- page = Page(xml_source_file=self.test_file)
+ page = Page(self.test_file)
style_set = { 'st12', 'st2', 'st14', 'st13' }
self.assertEqual(page.get_biggest_fontSize4styles(style_set=style_set), 10)
def test_get_words(self):
- page = Page(xml_source_file=self.test_file)
+ page = Page(self.test_file)
words = page.words
self.assertEqual(len(words), 440)
self.assertEqual(words[0].text, '$')
self.assertEqual(words[439].text, 'mußte!')
- def test_create_writing_process(self):
- page = Page(xml_source_file=self.test_file)
- page.create_writing_processes_and_attach2tree()
- #self.assertEqual(page.words[97].transkription_positions[0].writing_process_id, WritingProcess.LATER_INSERTION_AND_ADDITION)
- #self.assertEqual(page.words[129].transkription_positions[0].writing_process_id, WritingProcess.LATER_INSERTION_AND_ADDITION)
-
- def test_init_line_numbers(self):
- page = Page(xml_source_file=self.test_file)
- line_numbers = [ LineNumber(id=2, top=20, bottom=40), LineNumber(id=4, top=50, bottom=60), LineNumber(id=6, top=70, bottom=90) ]
- page.init_line_numbers(line_numbers, 122.345)
- self.assertEqual(len(page.line_numbers), 7)
- self.assertEqual(page.line_numbers[0].id, 1)
- self.assertEqual(page.line_numbers[6].id, 7)
- self.assertEqual(page.line_numbers[6].top, 91)
- self.assertEqual(page.line_numbers[6].bottom, 122.345)
- self.assertEqual(page.get_line_number(122), 7)
- self.assertEqual(page.get_line_number(92), 7)
- self.assertEqual(page.get_line_number(22), 2)
-
def test_get_line_number(self):
- page = Page(xml_source_file=self.test_file)
+ page = Page(self.test_file)
self.assertEqual(page.get_line_number( (page.words[0].transkription_positions[0].bottom+page.words[0].transkription_positions[0].top)/2), 1)
self.assertEqual(page.get_line_number( (page.words[27].transkription_positions[0].bottom+page.words[27].transkription_positions[0].top)/2), 2)
self.assertEqual(page.get_line_number( (page.words[105].transkription_positions[0].bottom+page.words[105].transkription_positions[0].top)/2), 7)
def test_update_page_type(self):
- page = Page(xml_source_file=self.pdf_xml)
+ page = Page(self.pdf_xml)
tf = TranskriptionField(self.pdf_xml_source)
page.update_page_type(transkription_field=tf)
self.assertEqual(page.page_type, Page.PAGE_VERSO)
- #page = Page(xml_source_file=self.xml_fileB)
+ #page = Page(self.xml_fileB)
#page.update_page_type()
#self.assertEqual(page.page_type, Page.PAGE_RECTO)
def test_update_line_number_area(self):
- page = Page(xml_source_file=self.xml_file)
+ page = Page(self.xml_file)
transkription_field = TranskriptionField(page.source)
page.update_line_number_area(transkription_field)
self.assertEqual(transkription_field.line_number_area_width > 0, True)
self.assertEqual(transkription_field.line_number_area_width < 15, True)
- page = Page(xml_source_file=self.xml_fileB)
+ page = Page(self.xml_fileB)
transkription_field = TranskriptionField(page.source)
page.update_line_number_area(transkription_field)
self.assertEqual(transkription_field.line_number_area_width > 0, True)
self.assertEqual(transkription_field.line_number_area_width < 15, True)
def test_get_pages_from_xml_file(self):
pages = Page.get_pages_from_xml_file(self.test_manuscript)
self.assertEqual(len(pages), 3)
self.assertEqual(pages[0].number, '5')
self.assertEqual(pages[1].number, '6')
pages = Page.get_pages_from_xml_file(self.test_manuscript, status_contains=STATUS_MERGED_OK)
self.assertEqual(len(pages), 2)
self.assertEqual(pages[0].number, '5')
pages = Page.get_pages_from_xml_file(self.test_manuscript, status_contains=STATUS_MERGED_OK, status_not_contain=STATUS_POSTMERGED_OK)
self.assertEqual(len(pages), 1)
def test_get_semantic_dictionary(self):
dictionary = Page.get_semantic_dictionary()
#print(dictionary)
def test_lock(self):
- page = Page(xml_source_file=self.test_tcm_xml)
+ page = Page(self.test_tcm_xml)
self.assertEqual(page.is_locked(), False)
page.lock('asdf.txt')
self.assertEqual(page.is_locked(), True)
self.assertEqual(page.page_tree.xpath('//lock/reference-file/text()')[0], 'asdf.txt')
page.unlock()
self.assertEqual(page.is_locked(), False)
if __name__ == "__main__":
unittest.main()
Index: tests_svgscripts/test_mark_foreign_hands.py
===================================================================
--- tests_svgscripts/test_mark_foreign_hands.py (revision 72)
+++ tests_svgscripts/test_mark_foreign_hands.py (revision 73)
@@ -1,81 +1,81 @@
import unittest
from os import sep, path
from os.path import dirname, isdir
import lxml.etree as ET
import sys
sys.path.append('svgscripts')
from datatypes.matrix import Matrix
from datatypes.transkriptionField import TranskriptionField
from datatypes.transkription_position import TranskriptionPosition
from datatypes.mark_foreign_hands import MarkForeignHands
from datatypes.page import Page
from datatypes.word import Word
class TestMarkForeignHands(unittest.TestCase):
def setUp(self):
DATADIR = dirname(__file__) + sep + 'test_data'
self.xml_file = DATADIR + sep + 'N_VII_1_page008.xml'
self.test_content_svg = DATADIR + sep + 'N_VII_1_xp5_4_page5.svg'
self.test_content_xml = DATADIR + sep + 'N_VII_1_page005.xml'
self.test_contentB_svg = DATADIR + sep + 'N_VII_1_xp5_4_page6.svg'
self.test_contentB_xml = DATADIR + sep + 'N_VII_1_page006.xml'
mylist = {'text': '*', 'id': '0', 'line-number': '2' }
self.node = ET.Element(MarkForeignHands.XML_TAG, attrib=mylist)
word_position = TranskriptionPosition(x=0, y=1, height=10, width=10, matrix=Matrix('matrix(0.94 0.342 -0.342 0.94 0 0)'))
self.transkription_positions = [ word_position ]
word_position.attach_object_to_tree(self.node)
def test_create_cls(self):
mark_foreign_hands = MarkForeignHands.create_cls(self.node)
self.assertEqual(mark_foreign_hands.id, 0)
self.assertEqual(mark_foreign_hands.transkription_positions[0].bottom, 11)
self.assertEqual(mark_foreign_hands.transkription_positions[0].height, 10)
self.assertEqual(mark_foreign_hands.transkription_positions[0].top, 1)
self.assertEqual(mark_foreign_hands.transkription_positions[0].left, 0)
self.assertEqual(mark_foreign_hands.transkription_positions[0].width, 10)
self.assertEqual(mark_foreign_hands.text, '*')
self.assertEqual(mark_foreign_hands.line_number, 2)
self.assertEqual(mark_foreign_hands.transkription_positions[0].transform.isRotationMatrix(), True)
def test_attach_word_to_tree(self):
mark_foreign_hands = MarkForeignHands.create_cls(self.node)
mark_foreign_hands.foreign_hands_text = 'test'
mark_foreign_hands.pen= 'Rotstift'
empty_tree = ET.ElementTree(ET.Element('page'))
mark_foreign_hands.attach_word_to_tree(empty_tree)
#print(ET.dump(empty_tree.getroot()))
for node in empty_tree.xpath('//' + MarkForeignHands.XML_TAG):
mark = MarkForeignHands.create_cls(node)
self.assertEqual(mark.pen, 'Rotstift')
self.assertEqual(mark.foreign_hands_text, 'test')
self.assertEqual(mark.id, 0)
self.assertEqual(mark.transkription_positions[0].bottom, 11)
self.assertEqual(mark.transkription_positions[0].height, 10)
self.assertEqual(mark.transkription_positions[0].top, 1)
self.assertEqual(mark.transkription_positions[0].left, 0)
self.assertEqual(mark.transkription_positions[0].width, 10)
self.assertEqual(mark.text, '*')
self.assertEqual(mark.line_number, 2)
self.assertEqual(mark.transkription_positions[0].transform.isRotationMatrix(), True)
#print(empty_tree.xpath('//mark-foreign-hands/content/text()'))
#print(empty_tree.xpath('//mark-foreign-hands/content/@pen'))
def test_get_semanticAndDataDict(self):
dictionary = MarkForeignHands.get_semantic_dictionary()
#print(dictionary)
def test_find_content(self):
- page = Page(xml_source_file=self.test_contentB_xml)
+ page = Page(self.test_contentB_xml)
transkription_field = TranskriptionField(page.source)
svg_tree = ET.parse(page.source)
page.update_line_number_area(transkription_field, svg_tree=svg_tree)
mark_foreign_hands_word = [ word for word in page.words if word.text == MarkForeignHands.CLASS_MARK ][0]
mark_foreign_hands = MarkForeignHands.create_cls_from_word(mark_foreign_hands_word)
MarkForeignHands.find_content([ mark_foreign_hands ] , transkription_field, svg_tree, style_dict=page.style_dict)
self.assertEqual(mark_foreign_hands.foreign_hands_text, 'W III, 104. (MXXIX, 3)')
self.assertEqual(mark_foreign_hands.pen, 'Bleistift')
if __name__ == "__main__":
unittest.main()
Index: tests_svgscripts/test_data/N_VII_1_page006.xml
===================================================================
--- tests_svgscripts/test_data/N_VII_1_page006.xml (revision 72)
+++ tests_svgscripts/test_data/N_VII_1_page006.xml (revision 73)
@@ -1,1276 +1,1276 @@
-
+svgWordPosition2019-08-02 15:17:372019-08-02 15:17:372019-08-02 15:30:592019-08-02 15:30:59
- 2019-11-14 09:38:45
+ 2019-11-15 12:11:36
Index: tests_svgscripts/test_page_creator.py
===================================================================
--- tests_svgscripts/test_page_creator.py (revision 0)
+++ tests_svgscripts/test_page_creator.py (revision 73)
@@ -0,0 +1,69 @@
+import unittest
+from os import sep, path
+from os.path import isdir, isfile, dirname, basename
+import lxml.etree as ET
+import sys
+import tempfile
+
+sys.path.append('svgscripts')
+dir_changed = False
+if not isdir('datatypes'):
+ sys.path.append(dirname(sys.path[0]))
+ dir_changed = True
+
+from datatypes.lineNumber import LineNumber
+from datatypes.mark_foreign_hands import MarkForeignHands
+from datatypes.page_creator import PageCreator
+from datatypes.page import STATUS_MERGED_OK, STATUS_POSTMERGED_OK
+from datatypes.path import Path
+from datatypes.text_connection_mark import TextConnectionMark
+from datatypes.transkriptionField import TranskriptionField
+from datatypes.writing_process import WritingProcess
+from datatypes.word import Word
+
+
+class TestPage(unittest.TestCase):
+ def setUp(self):
+ DATADIR = dirname(__file__) + sep + 'test_data'
+ if not isdir(DATADIR):
+ DATADIR = dirname(dirname(__file__)) + sep + 'test_data'
+ self.test_file = DATADIR + sep + 'test.xml'
+ self.test_svg_file = DATADIR + sep + 'test421.svg'
+ self.xml_file = DATADIR + sep + 'N_VII_1_page005.xml'
+ self.xml_fileB = DATADIR + sep + 'N_VII_1_page006.xml'
+ self.pdf_xml_source = DATADIR + sep + 'W_I_8_neu_125-01.svg'
+ self.test_tcm_xml = DATADIR + sep + 'N_VII_1_page001.xml'
+ self.test_manuscript = DATADIR + sep + 'N_VII_1.xml'
+ self.svg_file125 = DATADIR + sep + 'W_I_8_page125_web.svg'
+ self.xml_file125 = DATADIR + sep + 'W_I_8_page125.xml'
+ self.tmp_dir = tempfile.mkdtemp()
+
+ def test_init(self):
+ xml_target_file = self.tmp_dir + sep + 'asdf.xml'
+ page = PageCreator(xml_target_file, svg_file=self.svg_file125)
+ self.assertEqual(page.svg_image.file_name, self.svg_file125)
+ page = PageCreator(self.xml_file125, svg_file=self.svg_file125)
+ self.assertEqual(page.svg_image.file_name, self.svg_file125)
+ self.assertEqual(page.title, 'W I 8')
+ self.assertEqual(page.number, '125')
+
+ def test_init_line_numbers(self):
+ page = PageCreator(self.test_file)
+ line_numbers = [ LineNumber(id=2, top=20, bottom=40), LineNumber(id=4, top=50, bottom=60), LineNumber(id=6, top=70, bottom=90) ]
+ page.init_line_numbers(line_numbers, 122.345)
+ self.assertEqual(len(page.line_numbers), 7)
+ self.assertEqual(page.line_numbers[0].id, 1)
+ self.assertEqual(page.line_numbers[6].id, 7)
+ self.assertEqual(page.line_numbers[6].top, 91)
+ self.assertEqual(page.line_numbers[6].bottom, 122.345)
+ self.assertEqual(page.get_line_number(122), 7)
+ self.assertEqual(page.get_line_number(92), 7)
+ self.assertEqual(page.get_line_number(22), 2)
+
+ def test_create_writing_process(self):
+ page = PageCreator(self.test_file)
+ page.create_writing_processes_and_attach2tree()
+ self.assertEqual(len(page.writing_processes), 3)
+
+if __name__ == "__main__":
+ unittest.main()
Index: tests_py2ttl/test_data_handler.py
===================================================================
--- tests_py2ttl/test_data_handler.py (revision 72)
+++ tests_py2ttl/test_data_handler.py (revision 73)
@@ -1,51 +1,51 @@
import unittest
from os import sep, path
from os.path import dirname, isfile
import inspect
from rdflib import Graph, URIRef, Literal, BNode, OWL, RDF, RDFS, XSD
import rdflib
import sys
sys.path.append('shared_util')
from myxmlwriter import xml2dict
sys.path.append('svgscripts')
from datatypes.image import Image
from datatypes.page import Page
from datatypes.text_field import TextField
sys.path.append('py2ttl')
from data_handler import RDFDataHandler
class TestRDFDataHandler(unittest.TestCase):
def setUp(self):
RDFDataHandler.UNITTESTING = True
DATADIR = dirname(__file__) + sep + 'test_data'
self.mapping_dictionary = xml2dict(DATADIR + sep + 'mapping_dict.xml')
self.xml_file = DATADIR + sep + 'N_VII_1_page001.xml'
def test_add_data(self):
data_handler = RDFDataHandler('test.ttl', self.mapping_dictionary)
- page = Page(xml_source_file=self.xml_file)
+ page = Page(self.xml_file)
data_handler.add_data(page, page.title.replace(' ', '_'))
#print(data_handler.data_graph.serialize(format="turtle"))
#data_handler.write()
def test_init(self):
with self.assertRaises(Exception):
RDFDataHandler(None, {})
mapping_dictionary = { 'ontology': { 'project_name': 'test', 'project_uri': 'test' }}
data_handler = RDFDataHandler('test.ttl', mapping_dictionary)
self.assertEqual(data_handler.project_name, 'test')
def test_create_identifier_uri(self):
tf = TextField()
mapping_dictionary = { 'ontology': { 'project_name': 'test', 'project_uri': 'test' }}
data_handler = RDFDataHandler('test.ttl', mapping_dictionary)
identifier_uri = data_handler.create_identifier_uri(tf, 'asdf')
data_handler.data_graph.add((identifier_uri, RDF.type, OWL.Class))
next_identifier_uri = data_handler.create_identifier_uri(tf, 'asdf')
self.assertEqual(identifier_uri != next_identifier_uri, True)
if __name__ == "__main__":
unittest.main()
Index: tests_shared_util/test_myxmlwriter.py
===================================================================
--- tests_shared_util/test_myxmlwriter.py (revision 72)
+++ tests_shared_util/test_myxmlwriter.py (revision 73)
@@ -1,103 +1,103 @@
import unittest
import os
from os.path import isfile, isdir, dirname, sep, realpath
from datetime import datetime
import shutil
import tempfile
import xml.etree.ElementTree as ET
import lxml.etree as LET
from rdflib import Graph, URIRef, Literal, BNode, OWL, RDF, RDFS, XSD
from xmldiff import main
import sys
sys.path.append('svgscripts')
from datatypes.page import Page
sys.path.append('shared_util')
try:
from myxmlwriter import attach_dict_to_xml_node, dict2xml, lock_xml_tree, update_metadata, write_pretty, test_lock, xml_has_type,\
FILE_TYPE_SVG_WORD_POSITION, FILE_TYPE_XML_DICT, get_dictionary_from_node, xml2dict, parse_xml_of_type
except ImportError:
sys.path.append(dirname(dirname(realpath(__file__))))
from shared_util.myxmlwriter import attach_dict_to_xml_node, dict2xml, lock_xml_tree, update_metadata, write_pretty, test_lock, xml_has_type,\
FILE_TYPE_SVG_WORD_POSITION, FILE_TYPE_XML_DICT, get_dictionary_from_node, xml2dict, parse_xml_of_type
class TestPrettyWriter(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
self.title = 'ASDF'
DATADIR = dirname(__file__) + sep + 'test_data'
self.page = DATADIR + sep + 'N_VII_1_page001.xml'
self.mydict = { 'asdf': { 'b': { 'a': 1, 'b': 'c' , 'c': URIRef('adf')}},\
'str': 'test' }
def test_attach_dict_to_xml_node(self):
xml_tree = LET.Element('root')
attach_dict_to_xml_node(self.mydict, LET.SubElement(xml_tree, 'dict'))
#print(LET.dump(xml_tree))
self.assertEqual(xml_tree.xpath('//asdf/b/a/@type')[0], 'int')
self.assertEqual(xml_tree.xpath('//asdf/b/b/@type')[0], 'str')
self.assertEqual(xml_tree.xpath('//asdf/b/c/@type')[0], URIRef.__name__)
def test_dict2xml(self):
test_file = self.test_dir + sep + 'new_test.xml'
dict2xml(self.mydict, test_file)
self.assertEqual(isfile(test_file), True)
def test_get_dictionary_from_node(self):
test_file = self.test_dir + sep + 'source.xml'
dict2xml(self.mydict, test_file)
xml_tree = LET.parse(test_file)
self.assertEqual(len(xml_tree.xpath('/root/dict')[0].getchildren()), len(self.mydict.keys()))
for index, key in enumerate(self.mydict.keys()):
mydict = get_dictionary_from_node(xml_tree.xpath('/root/dict')[0].getchildren()[index])
self.assertEqual(key in mydict.keys(), True)
if type(self.mydict[key]) == dict:
self.assertEqual(mydict[key].keys(), self.mydict[key].keys())
def test_update_metadata(self):
test_tree = LET.ElementTree(LET.Element('page', attrib={"title": self.title}))
update_metadata(test_tree, __file__)
self.assertEqual(test_tree.find('./metadata').find('./createdBy').find('./script').text, __file__)
update_metadata(test_tree, __file__)
self.assertEqual(len(test_tree.find('./metadata').findall('./modifiedBy[@script="{}"]'.format(__file__))), 1)
update_metadata(test_tree, __file__)
self.assertEqual(len(test_tree.find('./metadata').findall('./modifiedBy[@script="{}"]'.format(__file__))), 1)
def test_write_pretty(self):
et_file = self.test_dir + os.sep + 'et_file.xml'
pretty_file = self.test_dir + os.sep + 'pretty_file.xml'
manuscript_tree = ET.ElementTree(ET.Element('page', attrib={"title": self.title}))
metadata = ET.SubElement(manuscript_tree.getroot(), 'metadata')
ET.SubElement(metadata, 'type').text = 'xmlManuscriptFile'
createdBy = ET.SubElement(metadata, 'createdBy')
manuscript_tree.write(et_file, xml_declaration=True, encoding='utf-8')
write_pretty(xml_string=ET.tostring(manuscript_tree.getroot()), file_name=pretty_file)
self.assertEqual(main.diff_files(et_file, pretty_file), [])
write_pretty(xml_element_tree=manuscript_tree, file_name=pretty_file)
self.assertEqual(main.diff_files(et_file, pretty_file), [])
def test_lock(self):
- page = Page(xml_source_file=self.page)
+ page = Page(self.page)
locker_dict = { 'reference_file': 'asdf.txt', 'message': 'locked on this file'}
lock_xml_tree(page.page_tree, **locker_dict)
self.assertEqual(page.is_locked(), True)
#test_lock(page.page_tree)
def test_xml2dict(self):
test_file = self.test_dir + sep + 'source.xml'
dict2xml(self.mydict, test_file)
mydict = xml2dict(test_file)
self.assertEqual(mydict, self.mydict)
def test_xml_has_type(self):
self.assertEqual(xml_has_type(FILE_TYPE_SVG_WORD_POSITION, xml_source_file=self.page), True)
self.assertEqual(xml_has_type(FILE_TYPE_XML_DICT, xml_source_file=self.page), False)
with self.assertRaises(Exception):
parse_xml_of_type(self.page, FILE_TYPE_XML_DICT)
def tearDown(self):
isdir(self.test_dir) and shutil.rmtree(self.test_dir)
if __name__ == "__main__":
unittest.main()
Index: svgscripts/process_files.py
===================================================================
--- svgscripts/process_files.py (revision 72)
+++ svgscripts/process_files.py (revision 73)
@@ -1,358 +1,358 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This program can be used to extract information from all text svg files in directory.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see 1}}}
from colorama import Fore, Style
import getopt
import re
import sys
from os import listdir, sep, path
from os.path import isfile, isdir, dirname
import lxml.etree as ET
if dirname(__file__) not in sys.path:
sys.path.append(dirname(__file__))
from convertPDF2SVG4Web import Converter
-from datatypes.page import Page
+from datatypes.page_creator import PageCreator
from datatypes.transkriptionField import TranskriptionField
from extractWordPosition import Extractor
sys.path.append('shared_util')
from myxmlwriter import write_pretty, FILE_TYPE_SVG_WORD_POSITION, FILE_TYPE_XML_MANUSCRIPT
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
UNITTESTING = False
class MyErrorHandler:
"""This class can be used to handle errors executing extractWordPosition.Extractor.extractAndWriteInformation.
"""
ERROR_LOG = 'error_log.xml'
def __init__(self):
self.tree = ET.ElementTree(ET.Element('error-log'))
if isfile(MyErrorHandler.ERROR_LOG):
parser = ET.XMLParser(remove_blank_text=True)
self.tree = ET.parse(MyErrorHandler.ERROR_LOG, parser)
def record_error(self, svgfile, pdffile, title, page_number, error=None):
"""Records an error.
"""
if len(self.tree.xpath('//error[@title="{0}" and @number="{1}"]'.format(title, page_number))) > 0:
error_node = self.tree.xpath('//error[@title="{0}" and @number="{1}"]'.format(title, page_number))[0]
else:
error_node = ET.SubElement(self.tree.getroot(), 'error', attrib={'title': title, 'number': page_number})
ET.SubElement(error_node, 'svgfile').text = svgfile
ET.SubElement(error_node, 'pdffile').text = pdffile
if error is not None:
error_node.set('type', str(type(error).__name__))
if str(error) != '':
error_msg = ET.SubElement(error_node, 'error-msg')
error_msg.text = str(error)
if str(type(error).__name__) == 'ExpatError':
error_msg.text += '->svgfile is empty!'
def run(self, title=None, page_number=None, error_type=None):
"""Run all or some errors
[:return:] exit status (int)
"""
xpath = '//error'
if title is not None and page_number is not None:
xpath = '//error[@title="{0}" and @number="{1}"]'.format(title, page_number)
elif title is not None:
xpath = '//error[@title="{0}"]'.format(title)
elif page_number is not None:
xpath = '//error[@number="{0}"]'.format(page_number)
if error_type is not None:
xpath = xpath + '[@type="{0}"]'.format(error_type)\
if title is None and page_number is None\
else xpath.replace(']', ' ') + 'and @type="{0}"]'.format(error_type)
exit_status = 0
for error in self.tree.xpath(xpath):
title = error.get('title')
page_number = error.get('number')
svgfile = error.xpath('./svgfile/text()')[0]\
if len(error.xpath('./svgfile/text()')) > 0 else None
pdffile = error.xpath('./pdffile/text()')[0]\
if len(error.xpath('./pdffile/text()')) > 0 else None
if svgfile is not None:
converter = Converter(title=title)
- extractor = Extractor(title=title, extract_transkription_field_only=True, compare2pdf=True)
+ extractor = Extractor(title=title, compare2pdf=True)
status = process_file(converter, extractor, svgfile, pdffile, page_number)
if status > 0:
exit_status = status
if status < 2:
error.getparent().remove(error)
self.write()
return exit_status
def write(self):
"""Writes error log.
"""
write_pretty(xml_element_tree=self.tree, file_name=MyErrorHandler.ERROR_LOG, script_name=__file__, file_type='xmlErrorLog')
def is_page_ok(manuscript_file=None, page_number=None):
"""Returns true if page status is 'OK'.
"""
if manuscript_file is not None and isfile(manuscript_file):
manuscript_tree = ET.parse(manuscript_file)
if page_number is not None\
and len(manuscript_tree.getroot().xpath('//page[@number="%s"]' % page_number)) > 0:
return manuscript_tree.getroot().xpath('//page[@number="%s"]' % page_number)[0].get('status') == 'OK'\
and isfile(manuscript_tree.getroot().xpath('//page[@number="%s"]' % page_number)[0].get('output'))
return False
def is_svg_ok(manuscript_file=None, page_number=None):
"""Returns true if svgfile contains a valid svg graphic location.
"""
if manuscript_file is not None and isfile(manuscript_file):
manuscript_tree = ET.parse(manuscript_file)
if page_number is not None\
and len(manuscript_tree.getroot().xpath('//page[@number="%s"]' % page_number)) > 0\
and isfile(manuscript_tree.getroot().xpath('//page[@number="%s"]' % page_number)[0].get('output')):
xml_source_tree = ET.parse(manuscript_tree.getroot().xpath('//page[@number="%s"]' % page_number)[0].get('output'))
return len(xml_source_tree.xpath('//svg/@file')) > 0 and isfile(xml_source_tree.xpath('//svg/@file')[0])
return False
def process_file(converter, extractor, svgfile, pdffile, page_number):
"""Processes file.
[:return:] exit status (int)
"""
exit_status = 0
path_svg_file = converter.get_file_name(pdffile, page_number=page_number)
if not UNITTESTING:
print(Fore.LIGHTBLUE_EX + 'Processing file {} ...'.format(svgfile))
print(Style.RESET_ALL)
if converter.pdf2svg(pdffile, page_number=page_number, svg_file_name=path_svg_file) == 0:
transkriptionField = TranskriptionField(path_svg_file)
transkriptionField.shrink_svg_to_transkription_field()
xml_target_file = extractor.get_file_name(svgfile, page_number)
extraction_status = extractor.extractAndWriteInformation(svgfile, xml_target_file=xml_target_file,\
page_number=page_number, pdfFile=pdffile, svg_file=path_svg_file, record_warnings=True)
if extraction_status < 2 and extractor.manuscript_file is not None:
status = 'OK'
if extraction_status == 1:
status = extractor.latest_status
exit_status = 1
#update_manuscript_file(extractor.manuscript_file, page_number, xml_target_file, status=status)
update_svgposfile_status(xml_target_file, manuscript_file=extractor.manuscript_file, status=status)
return exit_status
-def update_graphical_svg(converter, svgfile, pdffile, page_number, xml_source_file):
+def update_graphical_svg(converter, svgfile, pdffile, page_number, xml_target_file):
"""Create a new graphical svg file and update xml output file.
[:return:] exit status (int)
"""
exit_status = 0
- if isfile(xml_source_file):
+ if isfile(xml_target_file):
path_svg_file = converter.get_file_name(pdffile, page_number=page_number)
if not UNITTESTING:
print(Fore.LIGHTBLUE_EX + 'Creating file {} ...'.format(svgfile))
print(Style.RESET_ALL)
if converter.pdf2svg(pdffile, page_number=page_number, svg_file_name=path_svg_file) == 0:
transkriptionField = TranskriptionField(path_svg_file)
transkriptionField.shrink_svg_to_transkription_field()
- page = Page(xml_source_file=xml_source_file, svg_file=path_svg_file)
- write_pretty(xml_element_tree=page.page_tree, file_name=xml_source_file, script_name=__file__, file_type=FILE_TYPE_SVG_WORD_POSITION)
+ page = PageCreator(xml_target_file, svg_file=path_svg_file)
+ write_pretty(xml_element_tree=page.page_tree, file_name=xml_target_file, script_name=__file__, file_type=FILE_TYPE_SVG_WORD_POSITION)
else:
exit_status = 2
return exit_status
def update_manuscript_file(manuscript_file, page_number, file_name, status='changed', append=True):
"""Updates manuscript file: adds status information about page.
"""
if isfile(manuscript_file):
parser = ET.XMLParser(remove_blank_text=True)
manuscript_tree = ET.parse(manuscript_file, parser)
if len(manuscript_tree.getroot().xpath('//page[@number="%s"]' % page_number)) > 0:
node = manuscript_tree.getroot().xpath('//page[@number="%s"]' % page_number)[0]
old_status = node.get('status')
if old_status is None or 'OK' not in old_status.split(':'):
node.set('status', status)
elif append:
if status not in old_status.split(':'):
new_status = old_status + ':' + status
node.set('status', new_status)
else:
node.set('status', new_status)
if not bool(node.get('output')):
node.set('output', file_name)
else:
pages_node = manuscript_tree.getroot().find('pages')\
if manuscript_tree.getroot().find('pages') is not None\
else ET.SubElement(manuscript_tree.getroot(), 'pages')
new_id = len(pages_node.findall('page')) + 1
ET.SubElement(pages_node, 'page', attrib={'id': str(new_id), 'number': str(page_number), 'status': status, 'output': file_name})
write_pretty(xml_element_tree=manuscript_tree, file_name=manuscript_file, script_name=__file__, file_type=FILE_TYPE_XML_MANUSCRIPT)
def update_svgposfile_status(file_name, manuscript_file=None, status='changed', append=True):
"""Updates svg position file's status.
"""
if isfile(file_name):
parser = ET.XMLParser(remove_blank_text=True)
file_tree = ET.parse(file_name, parser)
old_status = file_tree.getroot().get('status')
if old_status is None or 'OK' not in old_status.split(':'):
file_tree.getroot().set('status', status)
elif append:
if status not in old_status.split(':'):
new_status = old_status + ':' + status
file_tree.getroot().set('status', new_status)
else:
file_tree.getroot().set('status', new_status)
write_pretty(xml_element_tree=file_tree, file_name=file_name, script_name=__file__, file_type=FILE_TYPE_SVG_WORD_POSITION)
if manuscript_file is not None and isfile(manuscript_file):
page_number = file_tree.getroot().get('number')
update_manuscript_file(manuscript_file, page_number, file_name, status=status)
def usage():
"""prints information on how to use the script
"""
print(main.__doc__)
def main(argv):
"""This program can be used to extract information from all text svg files in a directory.
svgscripts/process_files.py [OPTIONS]
svgscripts/process_files.py [OPTIONS] Directory containing pdfs corresponding to svg files (i.e. PDFDIR/NAME.pdf <-> TEXT_SVG_DIR/NAME.svg).
Directory containing svg files corresponding to pdf files (i.e. PDFDIR/NAME.pdf <-> TEXT_SVG_DIR/NAME.svg).
OPTIONS:
-h|--help: show help
-e|--run-error Rerun error cases.
-g|--check-graphic-svg Check that graphical svg file exists or generate a new svg file.
-n|--number=pageNumber Use this with OPTION -e|--run-error in order to specify an error case.
-t|--title=title: title of the manuscript to which all files belong.
-T|--error-type: error type, use this with OPTION -e|--run-error in order to specify an error case.
-s|--svg-target-dir=svg-target-dir target directory for path svg files, i.e. svg files that can be displayed on the web.
-x|--xml-target-dir=xml-target-dir target directory for xml files.
:return: exit code (int)
"""
title = None
xml_target_dir = ".{}xml".format(sep)
svg_target_dir = ".{}svg".format(sep)
error_handler = MyErrorHandler()
number = None
rerun_errors = False
error_type = None
check_graphic_svg_exists = False
try:
opts, args = getopt.getopt(argv, "hegn:s:t:T:x:", ["help", "run-error", "check-graphic-svg", "number=", "svg-target-dir=", "title=", "error-type=", "xml-target-dir="])
except getopt.GetoptError:
usage()
return 2
for opt, arg in opts:
if opt in ('-h', '--help'):
usage()
return 0
elif opt in ('-e', '--run-error'):
rerun_errors = True
elif opt in ('-g', '--check-graphic-svg'):
check_graphic_svg_exists = True
elif opt in ('-t', '--title'):
title = arg
elif opt in ('-T', '--error-type'):
error_type = arg
elif opt in ('-n', '--number'):
number = arg
elif opt in ('-s', '--svg-target-dir'):
svg_target_dir = arg
elif opt in ('-x', '--xml-target-dir'):
xml_target_dir = arg
if rerun_errors:
return error_handler.run(title=title, page_number=number, error_type=error_type)
if len(args) == 1 and args[0].endswith('.xml'):
source_tree = ET.parse(args[0])
if source_tree.getroot().find('metadata/type').text == FILE_TYPE_XML_MANUSCRIPT:
svg_word_file_tree = ET.parse(source_tree.xpath('//page/@output')[0])
svg_dir = dirname(svg_word_file_tree.xpath('//page/@source')[0])
pdf_dir = dirname(svg_word_file_tree.xpath('//page/pdf/@file')[0])
else:
print('File {} is not of type {}'.format(args[0], FILE_TYPE_XML_MANUSCRIPT))
usage()
return 2
elif len(args) < 1 or\
(len(args) == 1\
and (True not in [ pdffile.endswith('pdf') for pdffile in listdir(args[0]) ]\
or True not in [ svgfile.endswith('svg') for svgfile in listdir(args[0]) ])\
):
print("Please specify both PDFDIR and TEXT_SVG_DIR!")
usage()
return 2
elif len(args) < 2:
pdf_dir, svg_dir = args[0], args[0]
elif isdir(args[0]) and isdir(args[1]):
pdf_dir, svg_dir = args[0], args[1]
if True in [ svgfile.endswith('pdf') for svgfile in listdir(args[1]) ]:
pdf_dir, svg_dir = args[1], args[0]
else:
not_existing = args[0] if not isdir(args[0]) else args[1]
print("ERROR directory {} does not exist!".format(not_existing))
return 2
list_of_svg = [ svgfile for svgfile in listdir(svg_dir) if svgfile.endswith('svg') ]
list_of_pdf = [ pdffile for pdffile in listdir(pdf_dir) if pdffile.endswith('pdf') ]
converter = Converter(target_dir=svg_target_dir, title=title)
- extractor = Extractor(xml_dir=xml_target_dir, title=title, extract_transkription_field_only=True, compare2pdf=True)
+ extractor = Extractor(xml_dir=xml_target_dir, title=title, compare2pdf=True)
exit_status = 0
for svgfile in list_of_svg:
if svgfile.replace('.svg', '.pdf') in list_of_pdf:
title = re.split(r'(^[A-Z]+p*_[A-Z]*_[0-9]*)', svgfile)[1].replace('_', ' ')
if extractor.title is None or extractor.title != title:
extractor.update_title_and_manuscript(title)
if converter.title is None or converter.title != title:
converter.title = title.replace(' ', '_')
if 'page' in svgfile:
page_number = svgfile.replace('.svg','').split('page')[1]
else:
page_number = svgfile.replace('.svg','').split('_')[len(svgfile.replace('.svg','').split('_'))-1]
pdffile = '{}{}{}'.format(pdf_dir, sep, svgfile.replace('.svg', '.pdf'))
if not check_graphic_svg_exists and not is_page_ok(manuscript_file=extractor.manuscript_file, page_number=page_number):
try:
svgfile = '{}{}{}'.format(svg_dir, sep, svgfile)
exit_status = process_file(converter, extractor, svgfile, pdffile, page_number)
except Exception as err:
error_handler.record_error(svgfile, pdffile, title, page_number, error=err)
if not UNITTESTING:
print(Fore.RED)
print('There was an error ->', err)
print(Style.RESET_ALL)
elif not is_svg_ok(manuscript_file=extractor.manuscript_file, page_number=page_number):
update_graphical_svg(converter, svgfile, pdffile, page_number, extractor.get_file_name(svgfile, page_number))
error_handler.write()
return exit_status
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
Index: svgscripts/fix_missing_glyphs.py
===================================================================
--- svgscripts/fix_missing_glyphs.py (revision 72)
+++ svgscripts/fix_missing_glyphs.py (revision 73)
@@ -1,192 +1,192 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This program can be used to fix missing glyphs.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see 1}}}
from colorama import Fore, Style
import getopt
import re
import sys
from os import listdir, sep, path
from os.path import isfile, isdir, dirname
import lxml.etree as ET
if dirname(__file__) not in sys.path:
sys.path.append(dirname(__file__))
from datatypes.page import Page, FILE_TYPE_SVG_WORD_POSITION, FILE_TYPE_XML_MANUSCRIPT
from datatypes.positional_word_part import PositionalWordPart
from datatypes.transkriptionField import TranskriptionField
from datatypes.transkription_position import TranskriptionPosition
from process_files import update_svgposfile_status
sys.path.append('shared_util')
from myxmlwriter import write_pretty
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
UNITTESTING = False
def find_missing_glyph_for_pwp(positional_word_part_node, svg_path_tree, namespaces, xmin=0.0, ymin=0.0):
"""Finds missing glyph for node of a PositionalWordPart.
:return: list of PositionalWordPart
"""
THRESHOLD = 15.5
pwp = PositionalWordPart(node=positional_word_part_node)
word_part_obj = { "x": pwp.left, "y": pwp.top, "text": pwp.text, "matrix": pwp.transform, "class": pwp.style_class }
start_id = int(pwp.id)
threshold = -0.5
positional_word_parts = []
while threshold < THRESHOLD and len(positional_word_parts) < 1:
try:
positional_word_parts = PositionalWordPart.CREATE_POSITIONAL_WORD_PART_LIST(word_part_obj, svg_path_tree, namespaces,\
start_id=start_id, xmin=xmin, ymin=ymin, threshold=threshold, throw_error_if_not_found=True)
except Exception:
threshold += 0.1
return positional_word_parts
def update_word(page, positional_word_part_node, positional_word_parts):
"""Updates word according to new positional_word_parts.
"""
if len(positional_word_parts) > 0:
debug_msg_string = 'update word from ' + __file__
positional_word_part_id = int(positional_word_part_node.get('id'))
transkription_position_id = int(positional_word_part_node.getparent().get('id'))
word_id = int(positional_word_part_node.getparent().getparent().get('id'))
word = page.words[word_id]
transkription_position = word.transkription_positions[transkription_position_id]
transkription_position.positional_word_parts.pop(positional_word_part_id)
positional_word_parts.reverse()
for positional_word_part in positional_word_parts:
transkription_position.positional_word_parts.insert(positional_word_part_id, positional_word_part)
for index, positional_word_part in enumerate(transkription_position.positional_word_parts):
positional_word_part.id = index
transkription_positions = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST_FROM_PWPS(\
transkription_position.positional_word_parts, debug_msg_string=debug_msg_string, transkription_position_id=transkription_position_id)
word.transkription_positions.pop(transkription_position_id)
transkription_positions.reverse()
for new_tp in transkription_positions:
word.transkription_positions.insert(transkription_position_id, new_tp)
text = ''
for index, tp in enumerate(word.transkription_positions):
tp.id = index
tp.writing_process_id = transkription_position.writing_process_id
for pwp in tp.positional_word_parts:
text += pwp.text
if word.text != text:
word.text = text
word.attach_word_to_tree(page.page_tree)
def fix_missing_glyphs(svg_word_pos_file, manuscript_file=None):
"""Finds missing glyphs for xml file of type FILE_TYPE_SVG_WORD_POSITION.
"""
if isfile(svg_word_pos_file):
if not UNITTESTING:
print(Fore.LIGHTBLUE_EX + 'Fixing missing glyphs for file {} ... '.format(svg_word_pos_file), end='')
#print(Style.RESET_ALL)
- page = Page(xml_source_file=svg_word_pos_file)
+ page = Page(svg_word_pos_file)
transkription_field = TranskriptionField(page.svg_file)
svg_path_tree = ET.parse(page.svg_file)
namespaces = { k if k is not None else 'ns': v for k, v in svg_path_tree.getroot().nsmap.items() }
number_of_missing_glyphs = len(page.page_tree.xpath('//' + PositionalWordPart.XML_TAG + '[not(@symbol-id)]'))
for positional_word_part_node in page.page_tree.xpath('//' + PositionalWordPart.XML_TAG + '[not(@symbol-id)]'):
pwps = find_missing_glyph_for_pwp(positional_word_part_node, svg_path_tree, namespaces, xmin=transkription_field.xmin, ymin=transkription_field.ymin)
update_word(page, positional_word_part_node, pwps)
write_pretty(xml_element_tree=page.page_tree, file_name=svg_word_pos_file, script_name=__file__, file_type=FILE_TYPE_SVG_WORD_POSITION)
- page = Page(xml_source_file=svg_word_pos_file)
+ page = Page(svg_word_pos_file)
new_number_of_missing_glyphs = len(page.page_tree.xpath('//' + PositionalWordPart.XML_TAG + '[not(@symbol-id)]'))
if not UNITTESTING:
result_color = Fore.LIGHTBLUE_EX if new_number_of_missing_glyphs == 0 else Fore.MAGENTA
print(result_color + ' {0}/{1}'.format(number_of_missing_glyphs-new_number_of_missing_glyphs, number_of_missing_glyphs), end='')
print(Fore.LIGHTBLUE_EX + ' fixed.', end='')
print(Style.RESET_ALL)
if len(page.page_tree.xpath('//' + PositionalWordPart.XML_TAG + '[not(@symbol-id)]')) == 0:
update_svgposfile_status(svg_word_pos_file, manuscript_file=manuscript_file, status='OK')
def get_filelist_and_manuscript_file(file_a, file_b=None):
"""Returns a file list and a manuscript file (or None)
"""
file_list = []
manuscript_file = None
source_tree = ET.parse(file_a)
if source_tree.getroot().find('metadata/type').text == FILE_TYPE_SVG_WORD_POSITION\
and len([ word_part for word_part in source_tree.xpath('//' + PositionalWordPart.XML_TAG + '[not(@symbol-id)]')]) > 0: # if symbol_ids are missing ...
file_list.append(file_a)
if file_b is not None:
manuscript_file = file_b
elif source_tree.getroot().find('metadata/type').text == FILE_TYPE_XML_MANUSCRIPT:
manuscript_file = file_a
if file_b is not None:
file_list.append(file_b)
else:
file_list = source_tree.xpath('//page[contains(@status, "{}")]/@output'.format(PositionalWordPart.WARN_NO_USE_NODE_FOUND.lower()))
return file_list, manuscript_file
def usage():
"""prints information on how to use the script
"""
print(main.__doc__)
def main(argv):
    """This program can be used to fix missing glyphs.
    svgscripts/fix_missing_glyphs.py [OPTIONS] -File [-File]
    a xml file about a manuscript, containing information about its pages.
    a xml file about a page, containing information about svg word positions.
    OPTIONS:
    -h|--help: show help
    :return: exit code (int)
    """
    # parse the command line; bad options abort with usage info
    try:
        opts, args = getopt.getopt(argv, "h", ["help"])
    except getopt.GetoptError:
        usage()
        return 2
    if any(opt in ('-h', '--help') for opt, _ in opts):
        usage()
        return 0
    if not args:
        usage()
        return 2
    file_a = args[0]
    if not isfile(file_a):
        raise FileNotFoundError('File {} does not exist!'.format(file_a))
    # an optional second file is interpreted as the counterpart of file_a
    file_b = args[1] if len(args) > 1 and isfile(args[1]) else None
    file_list, manuscript_file = get_filelist_and_manuscript_file(file_a, file_b=file_b)
    for svg_word_pos_file in file_list:
        fix_missing_glyphs(svg_word_pos_file, manuscript_file=manuscript_file)
    return 0
# Script entry point: forward the CLI arguments (without the program name)
# to main() and use its return value as the process exit code.
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
Index: svgscripts/datatypes/image.py
===================================================================
--- svgscripts/datatypes/image.py (revision 72)
+++ svgscripts/datatypes/image.py (revision 73)
@@ -1,116 +1,116 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This super class can be used to represent all image types.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>. 1}}}
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
from lxml import etree as ET
from os.path import isfile
import sys
from .attachable_object import AttachableObject
from .text_field import TextField
sys.path.append('py2ttl')
from class_spec import SemanticClass
class Image(AttachableObject,SemanticClass):
    """
    This super class represents all types of images.

    Args:
        file_name (str): name of the image file.
        node (lxml.etree.Element): node containing stored image information.
        URL (str): URL of image file.
        height (float): height of image.
        width (float): width of image.
        text_field (.text_field.TextField): text_field on image representation.
    """
    stringKeys = [ 'file_name', 'URL', 'local_path' ]
    floatKeys = [ 'height', 'width' ]
    XML_TAG = 'image'

    def __init__(self, node=None, file_name=None, local_path=None, URL=None, height=0.0, width=0.0, text_field=None, tag=XML_TAG):
        self.text_field = text_field
        # tag must be set in both branches so attach_object_to_tree works
        # for instances created from a node as well
        self.tag = tag
        if node is not None:
            self.file_name = node.get('file-name')
            self.local_path = node.get('local-path')
            self.URL = node.get('URL')
            # robustness fix: float(None) raised a TypeError when the attribute
            # was missing; fall back to 0.0 (same pattern as SVGImage)
            self.height = float(node.get('height')) if bool(node.get('height')) else 0.0
            self.width = float(node.get('width')) if bool(node.get('width')) else 0.0
            if len(node.findall(TextField.XML_TAG)) > 0:
                self.text_field = TextField(node=node.find(TextField.XML_TAG))
        else:
            self.file_name = file_name
            self.local_path = local_path
            self.URL = URL
            self.height = height
            self.width = width

    def attach_object_to_tree(self, target_tree):
        """Attach object to tree, reusing an existing node with self.tag if present.
        """
        obj_node = target_tree.getroot().find('.//' + self.tag) \
                if(len(target_tree.getroot().findall('.//' + self.tag)) > 0) \
                else ET.SubElement(target_tree.getroot(), self.tag)
        for key in self.floatKeys:
            if self.__dict__[key] is not None:
                # floats are rounded to 3 decimals for a stable xml representation
                obj_node.set(key.replace('_','-'), str(round(self.__dict__[key], 3)))
        for key in self.stringKeys:
            if self.__dict__[key] is not None:
                obj_node.set(key.replace('_','-'), self.__dict__[key])
        if self.text_field is not None:
            self.text_field.attach_object_to_tree(obj_node)

    @classmethod
    def get_semantic_dictionary(cls):
        """ Creates and returns a semantic dictionary as specified by SemanticClass.
        """
        dictionary = {}
        class_dict = cls.get_class_dictionary()
        properties = {}
        for floatKey in Image.floatKeys:
            properties.update(cls.create_semantic_property_dictionary(floatKey, float, cardinality=1))
        properties.update(cls.create_semantic_property_dictionary('file_name', str, cardinality=1))
        #properties.update(cls.create_semantic_property_dictionary('URL', str))
        dictionary.update({'class': class_dict})
        dictionary.update({'properties': properties})
        return dictionary
class SVGImage(Image):
    """This class represents a svg image.
    """
    XML_TAG = 'svg-image'

    def __init__(self, node=None, file_name=None, URL=None, height=0.0, width=0.0, text_field=None, tag=XML_TAG):
        if node is not None and node.tag != self.XML_TAG:
            # legacy node (not a <svg-image>): copy its data over and let the
            # superclass create a fresh representation from plain values
            legacy_node, node = node, None
            file_name = legacy_node.get('file')
            raw_height = legacy_node.get('height')
            raw_width = legacy_node.get('width')
            height = float(raw_height) if raw_height else 0.0
            width = float(raw_width) if raw_width else 0.0
        super(SVGImage, self).__init__(node=node, file_name=file_name, URL=URL,\
                height=height, width=width, text_field=text_field, tag=self.XML_TAG)
Index: svgscripts/datatypes/page_creator.py
===================================================================
--- svgscripts/datatypes/page_creator.py (revision 0)
+++ svgscripts/datatypes/page_creator.py (revision 73)
@@ -0,0 +1,129 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+""" This class can be used to create a page.
+"""
+# Copyright (C) University of Basel 2019 {{{1
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>. 1}}}
+
+__author__ = "Christian Steiner"
+__maintainer__ = __author__
+__copyright__ = 'University of Basel'
+__email__ = "christian.steiner@unibas.ch"
+__status__ = "Development"
+__license__ = "GPL v3"
+__version__ = "0.0.1"
+
+from lxml import etree as ET
+from os.path import isfile
+from progress.bar import Bar
+from svgpathtools import svg2paths2, svg_to_paths
+from svgpathtools.parser import parse_path
+import sys
+import warnings
+
+from .box import Box
+from .image import Image, SVGImage
+from .faksimile_image import FaksimileImage
+from .faksimile_position import FaksimilePosition
+from .lineNumber import LineNumber
+from .line import Line
+from .mark_foreign_hands import MarkForeignHands
+from .matrix import Matrix
+from .path import Path
+from .positional_word_part import PositionalWordPart
+from .super_page import SuperPage
+from .text_connection_mark import TextConnectionMark
+from .text_field import TextField
+from .transkriptionField import TranskriptionField
+from .writing_process import WritingProcess
+from .word import Word
+from .word_insertion_mark import WordInsertionMark
+
+
+FILE_TYPE_SVG_WORD_POSITION = SuperPage.FILE_TYPE_SVG_WORD_POSITION
+FILE_TYPE_XML_MANUSCRIPT = SuperPage.FILE_TYPE_XML_MANUSCRIPT
+
class PageCreator(SuperPage):
    """
    This class represents a page that is being created (as opposed to read).

    Args:
        xml_target_file (str): name of the xml file to which page info will be written.
    """
    UNITTESTING = False
    # warnings re-exported from the datatypes that detect the corresponding problems
    WARNING_MISSING_USE_NODE4PWP = PositionalWordPart.WARN_NO_USE_NODE_FOUND
    WARNING_MISSING_GLYPH_ID4WIM = WordInsertionMark.WARN_NO_GLYPH_ID
    PAGE_RECTO = 'recto'
    PAGE_VERSO = 'verso'

    def __init__(self, xml_target_file, title=None, page_number=None, faksimile_image=None, faksimile_svgFile=None, pdfFile=None, svg_file=None, orientation='North', page_type=PAGE_VERSO, source=None):
        super(PageCreator,self).__init__(xml_target_file, title=title, page_number=page_number, orientation=orientation, page_type=page_type)
        # register all constructor arguments in SuperPage's property dictionary
        self.update_property_dictionary('faksimile_image', faksimile_image)
        self.update_property_dictionary('faksimile_svgFile', faksimile_svgFile)
        self.update_property_dictionary('pdfFile', pdfFile)
        self.update_property_dictionary('svg_file', svg_file)
        self.update_property_dictionary('source', source)
        if svg_file is not None and isfile(svg_file):
            # derive the page dimensions from the svg transkription field
            tf = TranskriptionField(svg_file)
            width = round(tf.documentWidth, 3)
            height = round(tf.documentHeight, 3)
            self.update_property_dictionary('width', width)
            self.update_property_dictionary('height', height)
            self.update_property_dictionary('svg_image', SVGImage(file_name=svg_file, width=width, height=height))
        # drop all content that later processing steps will (re-)create
        for xpath2remove in [ 'word', 'style', 'freehand', LineNumber.XML_TAG, WordInsertionMark.XML_TAG, WritingProcess.XML_TAG, Path.WORD_DELETION_PATH_TAG]:
            for node in self.page_tree.xpath('//' + xpath2remove):
                node.getparent().remove(node)
        self.init_all_properties()

    def create_writing_processes_and_attach2tree(self):
        """Creates three stages of Nietzsche's process of writing.
        """
        self.writing_processes = [ WritingProcess(version=WritingProcess.FIRST_VERSION),\
                WritingProcess(version=WritingProcess.INSERTION_AND_ADDITION),\
                WritingProcess(version=WritingProcess.LATER_INSERTION_AND_ADDITION) ]
        for writing_process in self.writing_processes:
            writing_process.attach_object_to_tree(self.page_tree)
        #for word in self.words:
        #    for transkription_position in word.transkription_positions:
        #        for font_key in transkription_position.positional_word_parts[0].style_class.split(' '):
        #            if font_key in self.fontsizekey2stage_mapping.keys():
        #                transkription_position.writing_process_id = self.fontsizekey2stage_mapping.get(font_key)

    def init_line_numbers(self, line_numbers, document_bottom):
        """Init line numbers.

        NOTE(review): line_numbers appears to contain every second line; the
        lines in between are interpolated from the gaps between their
        neighbours — confirm against callers.
        """
        even_index = 0
        MINABOVE = 1
        self.line_numbers = []
        if len(line_numbers) > 0:
            # synthesize a first line from the top of the page to the first given line
            first_line_bottom = line_numbers[even_index].top - MINABOVE
            self.line_numbers.append(LineNumber(id=1, top=0, bottom=first_line_bottom))
            self.line_numbers.append(line_numbers[even_index])
            even_index += 1
            while even_index < len(line_numbers):
                # interpolate the line between two given lines from their gap
                self.line_numbers.append(LineNumber(id=line_numbers[even_index].id-1,\
                        top=line_numbers[even_index-1].bottom+MINABOVE,\
                        bottom=line_numbers[even_index].top-MINABOVE))
                self.line_numbers.append(line_numbers[even_index])
                even_index += 1
            # final synthesized line extends to the bottom of the document
            self.line_numbers.append(LineNumber(id=line_numbers[even_index-1].id+1,\
                    top=line_numbers[even_index-1].bottom+MINABOVE,\
                    bottom=document_bottom))
        for line_number in self.line_numbers:
            line_number.attach_object_to_tree(self.page_tree)
+
Index: svgscripts/datatypes/faksimile.py
===================================================================
--- svgscripts/datatypes/faksimile.py (revision 72)
+++ svgscripts/datatypes/faksimile.py (revision 73)
@@ -1,152 +1,174 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This class can be used to represent a faksimile page.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>. 1}}}
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
import re
from lxml import etree as ET
from os import path
from os.path import isdir, isfile, sep, basename
from svgpathtools.parser import parse_path
from .faksimile_image import FaksimileImage
from .matrix import Matrix
-from .super_page import SuperPage
from .text_field import TextField
from .word_position import WordPosition
class FaksimilePage:
    """
    This class represents a faksimile page.

    Args:
        xml_source_file (str): name of the xml file that will be instantiated.
        xml_target_file (str): name of the xml file to which page info will be written.
        title (str): title of the faksimile page.
        page_number (str): page number.
        svg_source_file (str): svg file the page was extracted from.
        faksimile_image (FaksimileImage): image of the faksimile page.
        text_field (TextField): text field on the faksimile image.
    """
    XML_TAG = 'faksimile-page'

    def __init__(self, xml_source_file=None, xml_target_file=None, title=None, page_number=None, svg_source_file=None, faksimile_image=None, text_field=None):
        xml_file = xml_source_file if xml_source_file is not None else xml_target_file
        self.title = title
        self.page_number = page_number
        self.xml_file = xml_file
        if xml_file is not None and isfile(xml_file):
            # instantiate from an existing xml file
            parser = ET.XMLParser(remove_blank_text=True)
            self.page_tree = ET.parse(xml_file, parser)
            self.title = self.page_tree.getroot().get('title')
            self.page_number = self.page_tree.getroot().get('page-number')
            self.width = float(self.page_tree.getroot().get('width')) if bool(self.page_tree.getroot().get('width')) else 0.0
            self.height = float(self.page_tree.getroot().get('height')) if bool(self.page_tree.getroot().get('height')) else 0.0
        else:
            # create a fresh tree
            self.page_tree = ET.ElementTree(ET.Element(self.XML_TAG))
            if title is not None:
                self.page_tree.getroot().set('title', title)
            if page_number is not None:
                self.page_tree.getroot().set('page-number', str(page_number))
        if xml_target_file is not None:
            # old word positions will be recreated by the caller
            self.remove_tags_from_page_tree([WordPosition.FAKSIMILE])
        if svg_source_file is not None:
            self.page_tree.getroot().set('svg-source-file', svg_source_file)
        if faksimile_image is not None:
            faksimile_image.attach_object_to_tree(self.page_tree)
        if text_field is not None:
            text_field.attach_object_to_tree(self.page_tree)
        # read the (possibly just attached) state back from the tree
        self.svg_source_file = self.page_tree.getroot().get('svg-source-file')
        self.faksimile_image = FaksimileImage(node=self.page_tree.getroot().find('.//' + FaksimileImage.XML_TAG))\
                if len(self.page_tree.getroot().findall('.//' + FaksimileImage.XML_TAG)) > 0 else None
        self.text_field = TextField(node=self.page_tree.getroot().find('.//' + TextField.XML_TAG))\
                if len(self.page_tree.getroot().findall('.//' + TextField.XML_TAG)) > 0 else None
        self.word_positions = [ WordPosition(node=node) for node in self.page_tree.getroot().findall('.//' + WordPosition.FAKSIMILE) ]\
                if len(self.page_tree.getroot().findall('.//' + WordPosition.FAKSIMILE)) > 0 else []

    def append_word_position(self, word_position):
        """Appends word_position to word_positions and attaches it to page_tree.
        """
        self.word_positions.append(word_position)
        word_position.attach_object_to_tree(self.page_tree)

    @staticmethod
    def GET_FAKSIMILEPAGES(svg_tree, namespaces=None, page_number=''):
        """Creates and returns the faksimile pages contained in a svg file as a list.
        """
        THRESHOLD_X = 10
        if namespaces is None:
            namespaces = { k if k is not None else 'ns': v for k, v in svg_tree.getroot().nsmap.items() }
        source_file_name = svg_tree.docinfo.URL
        image = FaksimileImage.CREATE_IMAGE(svg_tree.getroot().find('.//image', svg_tree.getroot().nsmap), source_file_name)
        xml_dir = '.{}xml'.format(sep)
        faksimile_pages = list()
        title_string = re.sub(r'[,_][0-9]+.*\.svg', '', basename(source_file_name))
        title = title_string.replace('-', ' ')
        # text fields are the rects whose id starts with the title and ends with the page number
        rect_list = [ rect for rect in svg_tree.getroot().findall('.//rect', svg_tree.getroot().nsmap)\
                if rect.get('id', svg_tree.getroot().nsmap).startswith(title_string)\
                and rect.get('id', svg_tree.getroot().nsmap).endswith(str(page_number)) ]
        for text_field_rect in rect_list:
            tf_x = float(text_field_rect.get('x', svg_tree.getroot().nsmap)) - image.x
            tf_y = float(text_field_rect.get('y', svg_tree.getroot().nsmap)) - image.y
            tf_width = float(text_field_rect.get('width', svg_tree.getroot().nsmap))
            tf_height = float(text_field_rect.get('height', svg_tree.getroot().nsmap))
            # renamed from "id": do not shadow the builtin
            rect_id = text_field_rect.get('id', svg_tree.getroot().nsmap)
            target_file_name = xml_dir + sep + rect_id + '.xml' if isdir(xml_dir) else rect_id + '.xml'
            page_number = re.sub(r'.*[,_]', '', rect_id)
            text_field = TextField(id=rect_id, width=tf_width, height=tf_height, x=tf_x, y=tf_y)
            faksimile_page = FaksimilePage(xml_target_file=target_file_name, svg_source_file=source_file_name,\
                    title=title, page_number=page_number, faksimile_image=image, text_field=text_field)
            x_min = text_field.xmin + image.x
            y_min = text_field.ymin + image.y
            # select all word rects/paths whose position falls inside this text field
            rect_titles = svg_tree.getroot().xpath('//ns:rect[@x>"{0}" and @x<"{1}" and @y>"{2}" and @y<"{3}" and @id!="{4}"]/ns:title'.format(\
                    x_min, text_field.xmax + image.x - THRESHOLD_X, y_min, text_field.ymax + image.y, text_field.id), namespaces=namespaces)
            rect_titles += get_paths_inside_rect(svg_tree, '//ns:path/ns:title', x_min, text_field.xmax + image.x - THRESHOLD_X,\
                    y_min, text_field.ymax + image.y, text_field.id, namespaces=namespaces)
            for rect_title in rect_titles:
                rect = rect_title.getparent()
                x, y, height, width = 0.0, 0.0, 0.0, 0.0
                if rect.tag.endswith('path'):
                    # word boundary given as a path: use its bounding box
                    path = parse_path(rect.get('d'))
                    x, xmax, y, ymax = path.bbox()
                    width = xmax - x
                    height = ymax - y
                else:
                    x = float(rect.get('x', svg_tree.getroot().nsmap))
                    y = float(rect.get('y', svg_tree.getroot().nsmap))
                    height = float(rect.get('height', svg_tree.getroot().nsmap))
                    # bug fix: was "width = width=float(...)" (duplicated assignment target)
                    width = float(rect.get('width', svg_tree.getroot().nsmap))
                matrix = None
                if bool(rect.get('transform')):
                    matrix = Matrix(transform_matrix_string=rect.get('transform'))
                faksimile_page.append_word_position(\
                        WordPosition(id=rect.get('id', svg_tree.getroot().nsmap), text=rect_title.text, height=height,\
                        width=width, x=x-x_min, y=y-y_min, matrix=matrix, tag=WordPosition.FAKSIMILE))
            faksimile_pages.append(faksimile_page)
        return faksimile_pages

    def remove_tags_from_page_tree(self, list_of_tags_to_remove):
        """Removes the tags specified in the list from the target tree.
        """
        for xpath2remove in list_of_tags_to_remove:
            for node in self.page_tree.xpath('//' + xpath2remove):
                node.getparent().remove(node)
+
def get_paths_inside_rect(svg_tree, xpath, x_min, x_max, y_min, y_max, not_id, namespaces={}):
    """Returns a list of all nodes selected by xpath whose path bounding box lies
    inside (x_min, x_max, y_min, y_max) and whose path id differs from not_id.

    Note: the selected node may be e.g. a <title> child; the geometry is taken
    from its parent <path> in that case, but the selected node is returned.
    """
    paths = []
    if len(namespaces) == 0:
        namespaces = { k if k is not None else 'ns': v for k, v in svg_tree.getroot().nsmap.items() }
    for selected_node in svg_tree.xpath(xpath, namespaces=namespaces):
        path_node = selected_node if selected_node.tag.endswith('path') else selected_node.getparent()
        # fixed: the original also tested "d != 0", comparing a str to an int,
        # which is always True in Python 3 — a non-empty 'd' is the real condition
        if bool(path_node.get('d')):
            x, xmax, y, ymax = parse_path(path_node.get('d')).bbox()
            if x_min < x < x_max\
            and y_min < y < y_max\
            and path_node.get('id') != not_id:
                paths.append(selected_node)
    return paths
Index: svgscripts/datatypes/manuscript.py
===================================================================
--- svgscripts/datatypes/manuscript.py (revision 72)
+++ svgscripts/datatypes/manuscript.py (revision 73)
@@ -1,98 +1,98 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This class can be used to represent an archival unity of manuscript pages, i.e. workbooks, notebooks, folders of handwritten pages.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>. 1}}}
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
from lxml import etree as ET
from os.path import isfile
import sys
from .page import Page, FILE_TYPE_XML_MANUSCRIPT, FILE_TYPE_SVG_WORD_POSITION
sys.path.append('py2ttl')
from class_spec import SemanticClass
sys.path.append('shared_util')
from myxmlwriter import parse_xml_of_type, xml_has_type
class ArchivalManuscriptUnity(SemanticClass):
    """
    This class represents an archival unity of manuscript pages (workbooks, notebooks and portfolios of handwritten pages).
    @label archival unity of manuscript pages

    Args:
        title (str): title of archival unity
        manuscript_type (str): type of manuscript: 'Arbeitsheft', 'Notizheft', 'Mappe'
    """
    XML_TAG = 'manuscript'
    #RDFS_SUBCLASSOF = 'http://www.knora.org/ontology/0068/nietzsche#Manuscript' DEPRECATED

    def __init__(self, title='', manuscript_type=''):
        self.title = title
        self.manuscript_type = manuscript_type
        # pages are populated lazily by create_cls
        self.pages = []

    def get_name_and_id(self):
        """Return an identification for object as 2-tuple.
        """
        return '', self.title.replace(' ', '_')

    @classmethod
    def get_semantic_dictionary(cls):
        """ Creates a semantic dictionary as specified by SemanticClass.
        """
        dictionary = {}
        class_dict = cls.get_class_dictionary()
        properties = {}
        properties.update(cls.create_semantic_property_dictionary('title', str, 1))
        properties.update(cls.create_semantic_property_dictionary('manuscript_type', str, 1))
        properties.update(cls.create_semantic_property_dictionary('pages', list))
        dictionary.update({'class': class_dict})
        dictionary.update({'properties': properties})
        return dictionary

    @classmethod
    def create_cls(cls, xml_manuscript_file, page_status_list=None, page_xpath=''):
        """Create an instance of ArchivalManuscriptUnity from a xml file of type FILE_TYPE_XML_MANUSCRIPT.

        :return: ArchivalManuscriptUnity
        """
        manuscript_tree = parse_xml_of_type(xml_manuscript_file, FILE_TYPE_XML_MANUSCRIPT)
        title = manuscript_tree.getroot().get('title') if bool(manuscript_tree.getroot().get('title')) else ''
        manuscript_type = manuscript_tree.getroot().get('type') if bool(manuscript_tree.getroot().get('type')) else ''
        manuscript = cls(title=title, manuscript_type=manuscript_type)
        if page_xpath == '':
            # build a page selector that optionally filters by page status
            page_status = ''
            if page_status_list is not None\
            and type(page_status_list) is list\
            and len(page_status_list) > 0:
                page_status = '[' + ' and '.join([ 'contains(@status, "{}")'.format(status) for status in page_status_list ]) + ']'
            page_xpath = '//pages/page{0}/@output'.format(page_status)
        # only instantiate pages whose output file exists and has the expected type
        manuscript.pages = [ Page(page_source)\
                for page_source in manuscript_tree.xpath(page_xpath)\
                if isfile(page_source) and xml_has_type(FILE_TYPE_SVG_WORD_POSITION, xml_source_file=page_source) ]
        return manuscript
Index: svgscripts/datatypes/super_page.py
===================================================================
--- svgscripts/datatypes/super_page.py (revision 72)
+++ svgscripts/datatypes/super_page.py (revision 73)
@@ -1,67 +1,291 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-""" This super class can be used to represent all page types.
+""" This class can be used to represent a super page.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>. 1}}}
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
from lxml import etree as ET
-from os.path import isfile
+from os.path import isfile, basename, dirname
+from progress.bar import Bar
+from svgpathtools import svg2paths2, svg_to_paths
+from svgpathtools.parser import parse_path
+import sys
+import warnings
+from .image import Image, SVGImage
+from .faksimile_image import FaksimileImage
+from .mark_foreign_hands import MarkForeignHands
+from .text_connection_mark import TextConnectionMark
+from .text_field import TextField
+from .writing_process import WritingProcess
class SuperPage:
"""
- This super class represents all types of pages.
+ This super class represents a page.
Args:
- xml_file (str): name of the xml file to be instantiated.
- title (str): title
- page_number (str): page number
+ xml_source_file (str): name of the xml file to be instantiated.
+ xml_target_file (str): name of the xml file to which page info will be written.
+
"""
- def __init__(self, xml_file=None, title=None, page_number=None, tag='super-page'):
- self.tag = tag
- self.title = title
- self.page_number = page_number
+ FILE_TYPE_SVG_WORD_POSITION = 'svgWordPosition'
+ FILE_TYPE_XML_MANUSCRIPT = 'xmlManuscriptFile'
+ PAGE_RECTO = 'recto'
+ PAGE_VERSO = 'verso'
+ STATUS_MERGED_OK = 'faksimile merged'
+ STATUS_POSTMERGED_OK = 'words processed'
+ UNITTESTING = False
+
+ def __init__(self, xml_file, title=None, page_number='', orientation='North', page_type=PAGE_VERSO, should_xml_file_exist=False):
+ self.properties_dictionary = {\
+ 'faksimile_image': (FaksimileImage.XML_TAG, None, FaksimileImage),\
+ 'faksimile_svgFile': ('data-source/@file', None, str),\
+ 'height': ('page/@height', 0.0, float),\
+ 'number': ('page/@number', str(page_number), str),\
+ 'orientation': ('page/@orientation', orientation, str),\
+ 'page_type': ('page/@pageType', page_type, str),\
+ 'pdfFile': ('pdf/@file', None, str),\
+ 'source': ('page/@source', None, str),\
+ 'svg_file': ('svg/@file', None, str),\
+ 'svg_image': (SVGImage.XML_TAG, None, SVGImage),\
+ 'text_field': (FaksimileImage.XML_TAG + '/' + TextField.XML_TAG, None, TextField),\
+ 'title': ('page/@title', title, str),\
+ 'width': ('page/@width', 0.0, float)\
+ }
+ self.online_properties = []
+ self.line_numbers = []
+ self.lines = []
+ self.mark_foreign_hands = []
+ self.page_tree = None
+ self.sonderzeichen_list = []
+ self.style_dict = {}
+ self.text_connection_marks = []
+ self.word_deletion_paths = []
+ self.word_insertion_marks = []
+ self.words = []
+ self.writing_processes = []
self.xml_file = xml_file
- if xml_file is not None and isfile(xml_file):
- parser = ET.XMLParser(remove_blank_text=True)
- self.page_tree = ET.parse(xml_file, parser)
- self.title = self.page_tree.getroot().get('title')
- self.page_number = self.page_tree.getroot().get('page-number')
- self.width = float(self.page_tree.getroot().get('width')) if bool(self.page_tree.getroot().get('width')) else 0.0
- self.height = float(self.page_tree.getroot().get('height')) if bool(self.page_tree.getroot().get('height')) else 0.0
+ if not self.is_page_source_xml_file():
+ msg = f'ERROR: xml_source_file {self.xml_file} is not of type "{FILE_TYPE_SVG_WORD_POSITION}"'
+ raise Exception(msg)
+ self._init_tree(should_xml_file_exist=should_xml_file_exist)
+
    def add_style(self, sonderzeichen_list=[], letterspacing_list=[], style_dict={}, style_node=None):
        """Adds a list of classes that are sonderzeichen and a style dictionary to page.

        If style_node is given, the lists/dict are read from it; otherwise, if
        style_dict is non-empty, a new <style> node is written to the page tree.
        """
        self.sonderzeichen_list = sonderzeichen_list
        self.letterspacing_list = letterspacing_list
        self.style_dict = style_dict
        if style_node is not None:
            # read style information from an existing node
            self.style_dict = { item.get('name'): { key: value for key, value in item.attrib.items() if key != 'name' } for item in style_node.findall('.//class') }
            self.sonderzeichen_list = [ item.get('name') for item in style_node.findall('.//class')\
                    if bool(item.get('font-family')) and 'Sonderzeichen' in item.get('font-family') ]
            self.letterspacing_list = [ item.get('name') for item in style_node.findall('.//class')\
                    if bool(item.get('letterspacing-list')) ]
        elif bool(self.style_dict):
            # write style information to a fresh node
            style_node = ET.SubElement(self.page_tree.getroot(), 'style')
            if len(self.sonderzeichen_list) > 0:
                style_node.set('Sonderzeichen', ' '.join(self.sonderzeichen_list))
            if len(self.letterspacing_list) > 0:
                style_node.set('letterspacing-list', ' '.join(self.letterspacing_list))
            for key in self.style_dict.keys():
                self.style_dict[key]['name'] = key
                ET.SubElement(style_node, 'class', attrib=self.style_dict[key])
        fontsize_dict = { key: float(value.get('font-size').replace('px','')) for key, value in self.style_dict.items() if 'font-size' in value }
        # NOTE(review): fontsizes[0] raises IndexError when no class has a
        # font-size — presumably callers guarantee at least one; confirm.
        fontsizes = sorted(fontsize_dict.values(), reverse=True)
        # create a mapping between fontsizes and word stages
        self.fontsizekey2stage_mapping = {}
        for fontsize_key, value in fontsize_dict.items():
            if value >= fontsizes[0]-1:
                self.fontsizekey2stage_mapping.update({ fontsize_key: WritingProcess.FIRST_VERSION })
            elif value <= fontsizes[len(fontsizes)-1]+1:
                self.fontsizekey2stage_mapping.update({ fontsize_key: WritingProcess.LATER_INSERTION_AND_ADDITION })
            else:
                self.fontsizekey2stage_mapping.update({ fontsize_key: WritingProcess.INSERTION_AND_ADDITION })
+
+ def get_biggest_fontSize4styles(self, style_set={}):
+ """Returns biggest font size from style_dict for a set of style class names.
+
+ [:returns:] (float) biggest font size OR 1 if style_dict is empty
+ """
+ if bool(self.style_dict):
+ sorted_font_sizes = sorted( (float(self.style_dict[key]['font-size'].replace('px','')) for key in style_set if bool(self.style_dict[key].get('font-size'))), reverse=True)
+ return sorted_font_sizes[0] if len(sorted_font_sizes) > 0 else 1
else:
- self.page_tree = ET.ElementTree(ET.Element(self.tag))
- if title is not None:
- self.page_tree.getroot().set('title', title)
- if page_number is not None:
- self.page_tree.getroot().set('page-number', str(page_number))
-
- def remove_tags_from_page_tree(self, list_of_tags_to_remove):
- """Removes the tags specified in the list from the target tree.
+ return 1
+
+ def get_line_number(self, y):
+ """Returns line number id for element at y.
+
+ [:return:] (int) line number id or -1
+ """
+ if len(self.line_numbers) > 0:
+ result_list = [ line_number.id for line_number in self.line_numbers if y >= line_number.top and y <= line_number.bottom ]
+ return result_list[0] if len(result_list) > 0 else -1
+ else:
+ return -1
+
+ def init_all_properties(self, overwrite=False):
+ """Initialize all properties.
+ """
+ for property_key in self.properties_dictionary.keys():
+ if property_key not in self.online_properties:
+ self.init_property(property_key, overwrite=overwrite)
+
    def init_property(self, property_key, value=None, overwrite=False):
        """Initialize a single property.

        Args:
            property_key: key of property in self.__dict__
            value: new value to set to property (None -> read from page tree)
            overwrite: whether or not to update values from xml_file (default: read only)
        """
        if value is None:
            if property_key not in self.online_properties:
                # use the tree value if present, otherwise the registered default
                xpath, value, cls = self.properties_dictionary.get(property_key)
                if len(self.page_tree.xpath('//' + xpath)) > 0:
                    value = self.page_tree.xpath('//' + xpath)[0]
                if value is not None:
                    if cls.__module__ == 'builtins':
                        # plain value: write it back to the tree and coerce to its type
                        self.update_tree(value, xpath)
                        self.__dict__.update({property_key: cls(value)})
                    else:
                        # datatype class: wrap a raw node, keep an existing instance as is
                        value = cls(node=value)\
                                if type(value) != cls\
                                else value
                        self.__dict__.update({property_key: value})
                        self.__dict__.get(property_key).attach_object_to_tree(self.page_tree)
                else:
                    self.__dict__.update({property_key: value})
                self.online_properties.append(property_key)
        elif overwrite or property_key not in self.online_properties:
            # explicit value supplied by the caller
            xpath, default_value, cls = self.properties_dictionary.get(property_key)
            if cls.__module__ == 'builtins':
                self.__dict__.update({property_key: cls(value)})
                self.update_tree(value, xpath)
            else:
                self.__dict__.update({property_key: value})
                self.__dict__.get(property_key).attach_object_to_tree(self.page_tree)
            self.online_properties.append(property_key)
+
+ def is_locked(self):
+ """Return true if page is locked.
"""
- for xpath2remove in list_of_tags_to_remove:
- for node in self.page_tree.xpath('//' + xpath2remove):
+ return len(self.page_tree.xpath('//metadata/lock')) > 0
+
+ def is_page_source_xml_file(self, source_tree=None):
+ """Return true if xml_file is of type FILE_TYPE_SVG_WORD_POSITION.
+ """
+ if not isfile(self.xml_file):
+ return True
+ if source_tree is None:
+ source_tree = ET.parse(self.xml_file)
+ return source_tree.getroot().find('metadata/type').text == self.FILE_TYPE_SVG_WORD_POSITION
+
+ def lock(self, reference_file, message=''):
+ """Lock tree such that ids of words etc. correspond to ids
+ in reference_file, optionally add a message that will be shown.
+ """
+ if not self.is_locked():
+ metadata = self.page_tree.xpath('./metadata')[0]\
+ if len(self.page_tree.xpath('./metadata')) > 0\
+ else ET.SubElement(self.page_tree.getroot(), 'metadata')
+ lock = ET.SubElement(metadata, 'lock')
+ ET.SubElement(lock, 'reference-file').text = reference_file
+ if message != '':
+ ET.SubElement(lock, 'message').text = message
+
+ def unlock(self):
+ """Lock tree such that ids of words etc. correspond to ids
+ in reference_file, optionally add a message that will be shown.
+ """
+ if self.is_locked():
+ lock = self.page_tree.xpath('//metadata/lock')[0]
+ lock.getparent().remove(lock)
+
+ def update_and_attach_words2tree(self, update_function_on_word=None, include_special_words_of_type=[]):
+ """Update word ids and attach them to page.page_tree.
+ """
+ if not self.is_locked():
+ update_function_on_word = [ update_function_on_word ]\
+ if type(update_function_on_word) != list\
+ else update_function_on_word
+ for node in self.page_tree.xpath('.//word|.//' + MarkForeignHands.XML_TAG + '|.//' + TextConnectionMark.XML_TAG):
node.getparent().remove(node)
+ for index, word in enumerate(self.words):
+ word.id = index
+ for func in update_function_on_word:
+ if callable(func):
+ func(word)
+ word.attach_word_to_tree(self.page_tree)
+ for index, mark_foreign_hands in enumerate(self.mark_foreign_hands):
+ mark_foreign_hands.id = index
+ if MarkForeignHands in include_special_words_of_type:
+ for func in update_function_on_word:
+ if callable(func):
+ func(mark_foreign_hands)
+ mark_foreign_hands.attach_word_to_tree(self.page_tree)
+ for index, text_connection_mark in enumerate(self.text_connection_marks):
+ text_connection_mark.id = index
+ if TextConnectionMark in include_special_words_of_type:
+ for func in update_function_on_word:
+ if callable(func):
+ func(text_connection_mark)
+ text_connection_mark.attach_word_to_tree(self.page_tree)
+ else:
+ print('locked')
+
+ def update_property_dictionary(self, property_key, default_value):
+ """Update properties_dictionary.
+ """
+ content = self.properties_dictionary.get(property_key)
+ if content is not None:
+ self.properties_dictionary.update({property_key: (content[0], default_value, content[2])})
+ else:
+ msg = f'ERROR: properties_dictionary does not contain a key {property_key}!'
+ raise Exception(msg)
+
+ def update_tree(self, value, xpath):
+ """Update tree.
+ """
+ node_name = dirname(xpath)
+ node = self.page_tree.xpath('//' + node_name)[0]\
+ if len(self.page_tree.xpath('//' + node_name)) > 0\
+ else ET.SubElement(self.page_tree.getroot(), node_name)
+ node.set(basename(xpath).replace('@', ''), str(value))
+
+ def _init_tree(self, should_xml_file_exist=False):
+ """Initialize page_tree from xml_file if it exists.
+ """
+ if isfile(self.xml_file):
+ parser = ET.XMLParser(remove_blank_text=True)
+ self.page_tree = ET.parse(self.xml_file, parser)
+ elif not should_xml_file_exist:
+ self.page_tree = ET.ElementTree(ET.Element('page'))
+ self.page_tree.docinfo.URL = self.xml_file
+ else:
+ msg = f'ERROR: xml_source_file {self.xml_file} does not exist!'
+ raise FileNotFoundError(msg)
Index: svgscripts/datatypes/page.py
===================================================================
--- svgscripts/datatypes/page.py (revision 72)
+++ svgscripts/datatypes/page.py (revision 73)
@@ -1,495 +1,237 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This class can be used to represent a page.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see 1}}}
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
from lxml import etree as ET
from os.path import isfile
from progress.bar import Bar
from svgpathtools import svg2paths2, svg_to_paths
from svgpathtools.parser import parse_path
import sys
import warnings
from .box import Box
from .image import Image, SVGImage
from .faksimile_image import FaksimileImage
from .faksimile_position import FaksimilePosition
from .lineNumber import LineNumber
from .line import Line
from .mark_foreign_hands import MarkForeignHands
from .matrix import Matrix
from .path import Path
from .positional_word_part import PositionalWordPart
+from .super_page import SuperPage
from .text_connection_mark import TextConnectionMark
from .text_field import TextField
from .transkriptionField import TranskriptionField
from .writing_process import WritingProcess
from .word import Word
from .word_insertion_mark import WordInsertionMark
sys.path.append('py2ttl')
from class_spec import SemanticClass
-FILE_TYPE_SVG_WORD_POSITION = 'svgWordPosition'
-FILE_TYPE_XML_MANUSCRIPT = 'xmlManuscriptFile'
-STATUS_MERGED_OK = 'faksimile merged'
-STATUS_POSTMERGED_OK = 'words processed'
+FILE_TYPE_SVG_WORD_POSITION = SuperPage.FILE_TYPE_SVG_WORD_POSITION
+FILE_TYPE_XML_MANUSCRIPT = SuperPage.FILE_TYPE_XML_MANUSCRIPT
+STATUS_MERGED_OK = SuperPage.STATUS_MERGED_OK
+STATUS_POSTMERGED_OK = SuperPage.STATUS_POSTMERGED_OK
-class Page(SemanticClass):
+class Page(SemanticClass, SuperPage):
"""
This class represents a page.
Args:
xml_source_file (str): name of the xml file to be instantiated.
xml_target_file (str): name of the xml file to which page info will be written.
"""
UNITTESTING = False
- WARNING_MISSING_USE_NODE4PWP = PositionalWordPart.WARN_NO_USE_NODE_FOUND
- WARNING_MISSING_GLYPH_ID4WIM = WordInsertionMark.WARN_NO_GLYPH_ID
- PAGE_RECTO = 'recto'
- PAGE_VERSO = 'verso'
-
- def __init__(self, xml_source_file=None, xml_target_file=None, title=None, page_number=None, faksimile_image=None, faksimile_svgFile=None, pdfFile=None, svg_file=None, orientation='North', page_type=PAGE_VERSO, extract_transkription_field_only=True):
- self.title = title
- self.mark_foreign_hands = []
- self.text_connection_marks = []
- self.line_numbers = []
- self.style_dict = {}
- self.sonderzeichen_list = []
- self.svg_file = None
- self.svg_image = None
- self.pdfFile = None
- self.faksimile_svgFile = None
- self.source = None
- self.number = page_number if page_number is not None else -1
- self.orientation = orientation
- self.page_type = page_type
- self.word_deletion_paths = []
- self.faksimile_image = faksimile_image
- self.text_field = None
- self.lines = []
- if xml_source_file is not None:
- if isfile(xml_source_file):
- parser = ET.XMLParser(remove_blank_text=True)
- self.page_tree = ET.parse(xml_source_file, parser)
- self.title = self.page_tree.getroot().get('title')
- self.number = self.page_tree.getroot().get('number')
- self.source = self.page_tree.getroot().get('source')
- self.orientation = self.page_tree.getroot().get('orientation')
- self.page_type = self.page_tree.getroot().get('pageType')
- self.add_style(style_node=self.page_tree.getroot().find('.//style'))
- self.pdfFile = self.page_tree.xpath('.//pdf/@file')[0]\
- if len(self.page_tree.xpath('.//pdf/@file')) > 0 else None
- self.faksimile_svgFile = self.page_tree.xpath('.//data-source/@file')[0]\
- if len(self.page_tree.xpath('.//data-source/@file')) > 0 else None
- self.svg_image = SVGImage(node=self.page_tree.xpath('.//' + SVGImage.XML_TAG)[0])\
- if len(self.page_tree.xpath('.//' + SVGImage.XML_TAG)) > 0 else None
- if len(self.page_tree.xpath('.//' + FaksimileImage.XML_TAG)) > 0:
- self.faksimile_image = FaksimileImage(node=self.page_tree.xpath('.//' + FaksimileImage.XML_TAG)[0])
- self.text_field = self.faksimile_image.text_field
- self.svg_file = self.page_tree.xpath('.//svg/@file')[0]\
- if len(self.page_tree.xpath('.//svg/@file')) > 0 else None
- self.width = float(self.page_tree.xpath('.//svg/@width')[0])\
- if len(self.page_tree.xpath('.//svg/@width')) > 0 else 0.0
- self.height = float(self.page_tree.xpath('.//svg/@height')[0])\
- if len(self.page_tree.xpath('.//svg/@height')) > 0 else 0.0
- if pdfFile is not None and self.pdfFile is None:
- self.pdfFile = pdfFile
- ET.SubElement(self.page_tree.getroot(), 'pdf', attrib={'file': self.pdfFile})
- if faksimile_svgFile is not None and self.faksimile_svgFile is None:
- self.update_data_source(faksimile_svgFile=faksimile_svgFile)
- if faksimile_image is not None:
- self.faksimile_image = faksimile_image
- self.faksimile_image.attach_object_to_tree(self.page_tree)
- if svg_file is not None and self.svg_file is None:
- self.svg_file = svg_file
- tf = TranskriptionField(svg_file)
- self.width = round(tf.documentWidth, 3)
- self.height = round(tf.documentHeight, 3)
- self.svg_image = SVGImage(file_name=self.svg_file, width=self.width, height=self.height)
- self.svg_image.attach_object_to_tree(self.page_tree)
- if self.svg_image is not None and self.svg_file is None:
- self.svg_file = self.svg_image.file_name
- if self.svg_image is not None and self.width == 0.0:
- self.width = self.svg_image.width
- if self.svg_image is not None and self.height == 0.0:
- self.height = self.svg_image.height
- self.init_node_objects()
- else:
- raise Exception('File "{}" does not exist!'.format(xml_source_file))
- elif xml_target_file is not None:
- self.word_insertion_marks = []
- self.words = []
- self.writing_processes = []
- self.svg_file = svg_file
- self.pdfFile = pdfFile
- self.faksimile_svgFile = faksimile_svgFile
- if isfile(xml_target_file):
- parser = ET.XMLParser(remove_blank_text=True)
- self.page_tree = ET.parse(xml_target_file, parser)
- self.source = self.page_tree.getroot().get('source')
- if bool(self.page_tree.getroot().get('orientation')):
- self.orientation = self.page_tree.getroot().get('orientation')
- elif orientation is not None:
- self.page_tree.getroot().set('orientation', orientation)
- if bool(self.page_tree.getroot().get('title')):
- self.title = self.page_tree.getroot().get('title')
- elif title is not None:
- self.page_tree.getroot().set('title', title)
- if self.svg_file is None:
- self.svg_file = self.page_tree.xpath('.//svg/@file')[0]\
- if len(self.page_tree.xpath('.//svg/@file')) > 0 else None
- self.width = float(self.page_tree.xpath('.//svg/@width')[0])\
- if len(self.page_tree.xpath('.//svg/@width')) > 0 else 0.0
- self.height = float(self.page_tree.xpath('.//svg/@height')[0])\
- if len(self.page_tree.xpath('.//svg/@height')) > 0 else 0.0
- elif len(self.page_tree.xpath('.//svg/@file')) == 0:
- tf = TranskriptionField(svg_file)
- self.width = round(tf.documentWidth, 3)
- self.height = round(tf.documentHeight, 3)
- self.svg_image = SVGImage(file_name=self.svg_file, width=self.width, height=self.height)
- self.svg_image.attach_object_to_tree(self.page_tree)
- #ET.SubElement(self.page_tree.getroot(), 'svg', attrib={'width': str(self.width), 'height': str(self.height), 'file': self.svg_file})
- else:
- self.width = float(self.page_tree.xpath('.//svg/@width')[0])\
- if len(self.page_tree.xpath('.//svg/@width')) > 0 else 0.0
- self.height = float(self.page_tree.xpath('.//svg/@height')[0])\
- if len(self.page_tree.xpath('.//svg/@height')) > 0 else 0.0
- if self.pdfFile is None:
- self.pdfFile = self.page_tree.xpath('.//pdf/@file')[0]\
- if len(self.page_tree.xpath('.//pdf/@file')) > 0 else None
- elif len(self.page_tree.xpath('.//pdf/@file')) == 0:
- ET.SubElement(self.page_tree.getroot(), 'pdf', attrib={'file': self.pdfFile})
- for xpath2remove in [ 'word', 'style', 'freehand', LineNumber.XML_TAG, WordInsertionMark.XML_TAG,\
- WritingProcess.XML_TAG, Path.WORD_DELETION_PATH_TAG ]:
- for node in self.page_tree.xpath('//' + xpath2remove):
- node.getparent().remove(node)
- else:
- self.page_tree = ET.ElementTree(ET.Element('page'))
- self.pdfFile = pdfFile
- self.svg_file = svg_file
- if title is not None:
- self.page_tree.getroot().set('title', title)
- if orientation is not None:
- self.page_tree.getroot().set('orientation', orientation)
- self.page_tree.getroot().set('transkription-field-only', str(extract_transkription_field_only).lower())
- if page_number is not None:
- self.page_tree.getroot().set('number', str(page_number))
- if self.pdfFile is not None:
- ET.SubElement(self.page_tree.getroot(), 'pdf', attrib={'file': self.pdfFile})
- if self.svg_file is not None:
- tf = TranskriptionField(self.svg_file)
- self.width = round(tf.documentWidth, 3)
- self.height = round(tf.documentHeight, 3)
- self.svg_image = SVGImage(file_name=self.svg_file, width=self.width, height=self.height)
- self.svg_image.attach_object_to_tree(self.page_tree)
- #ET.SubElement(self.page_tree.getroot(), 'svg', attrib={'width': str(self.width), 'height': str(self.height), 'file': self.svg_file})
- if self.svg_image is None and self.svg_file is not None:
- self.svg_image = SVGImage(file_name=self.svg_file, width=self.width, height=self.height)
- self.svg_image.attach_object_to_tree(self.page_tree)
-
- def add_style(self, sonderzeichen_list=[], letterspacing_list=[], style_dict={}, style_node=None):
- """Adds a list of classes that are sonderzeichen and a style dictionary to page.
- """
- self.sonderzeichen_list = sonderzeichen_list
- self.letterspacing_list = letterspacing_list
- self.style_dict = style_dict
- if style_node is not None:
- self.style_dict = { item.get('name'): { key: value for key, value in item.attrib.items() if key != 'name' } for item in style_node.findall('.//class') }
- self.sonderzeichen_list = [ item.get('name') for item in style_node.findall('.//class')\
- if bool(item.get('font-family')) and 'Sonderzeichen' in item.get('font-family') ]
- self.letterspacing_list = [ item.get('name') for item in style_node.findall('.//class')\
- if bool(item.get('letterspacing-list')) ]
- elif bool(self.style_dict):
- style_node = ET.SubElement(self.page_tree.getroot(), 'style')
- if len(self.sonderzeichen_list) > 0:
- style_node.set('Sonderzeichen', ' '.join(self.sonderzeichen_list))
- if len(self.letterspacing_list) > 0:
- style_node.set('letterspacing-list', ' '.join(self.letterspacing_list))
- for key in self.style_dict.keys():
- self.style_dict[key]['name'] = key
- ET.SubElement(style_node, 'class', attrib=self.style_dict[key])
- fontsize_dict = { key: float(value.get('font-size').replace('px','')) for key, value in self.style_dict.items() if 'font-size' in value }
- fontsizes = sorted(fontsize_dict.values(), reverse=True)
- # create a mapping between fontsizes and word stages
- self.fontsizekey2stage_mapping = {}
- for fontsize_key, value in fontsize_dict.items():
- if value >= fontsizes[0]-1:
- self.fontsizekey2stage_mapping.update({ fontsize_key: WritingProcess.FIRST_VERSION })
- elif value <= fontsizes[len(fontsizes)-1]+1:
- self.fontsizekey2stage_mapping.update({ fontsize_key: WritingProcess.LATER_INSERTION_AND_ADDITION })
- else:
- self.fontsizekey2stage_mapping.update({ fontsize_key: WritingProcess.INSERTION_AND_ADDITION })
-
- def add_source(self, source):
- """Adds a source to page and attaches it to page_tree.
- """
- self.source = source
- self.page_tree.getroot().set('source', self.source)
-
- def create_writing_processes_and_attach2tree(self):
- """Creates three stages of Nietzsche's process of writing.
- """
- self.writing_processes = [ WritingProcess(version=WritingProcess.FIRST_VERSION),\
- WritingProcess(version=WritingProcess.INSERTION_AND_ADDITION),\
- WritingProcess(version=WritingProcess.LATER_INSERTION_AND_ADDITION) ]
- for writing_process in self.writing_processes:
- writing_process.attach_object_to_tree(self.page_tree)
- #for word in self.words:
- # for transkription_position in word.transkription_positions:
- # for font_key in transkription_position.positional_word_parts[0].style_class.split(' '):
- # if font_key in self.fontsizekey2stage_mapping.keys():
- # transkription_position.writing_process_id = self.fontsizekey2stage_mapping.get(font_key)
-
- def get_biggest_fontSize4styles(self, style_set={}):
- """Returns biggest font size from style_dict for a set of style class names.
-
- [:returns:] (float) biggest font size OR 1 if style_dict is empty
- """
- if bool(self.style_dict):
- sorted_font_sizes = sorted( (float(self.style_dict[key]['font-size'].replace('px','')) for key in style_set if bool(self.style_dict[key].get('font-size'))), reverse=True)
- return sorted_font_sizes[0] if len(sorted_font_sizes) > 0 else 1
- else:
- return 1
- def get_line_number(self, y):
- """Returns line number id for element at y.
-
- [:return:] (int) line number id or -1
- """
- if len(self.line_numbers) > 0:
- result_list = [ line_number.id for line_number in self.line_numbers if y >= line_number.top and y <= line_number.bottom ]
- return result_list[0] if len(result_list) > 0 else -1
- else:
- return -1
+ def __init__(self, xml_source_file, faksimile_image=None, faksimile_svgFile=None):
+ super(Page,self).__init__(xml_source_file)
+ self.update_property_dictionary('faksimile_image', faksimile_image)
+ self.update_property_dictionary('faksimile_svgFile', faksimile_svgFile)
+ self.init_all_properties()
+ self.add_style(style_node=self.page_tree.getroot().find('.//style'))
+ self.init_node_objects()
@classmethod
def get_pages_from_xml_file(cls, xml_file, status_contains='', status_not_contain='', word_selection_function=None):
"""Returns a list of Page instantiating a xml_file of type FILE_TYPE_SVG_WORD_POSITION
or xml_files contained in xml_file of type FILE_TYPE_XML_MANUSCRIPT.
[optional: instantiation depends on the fulfilment of a status_contains
and/or on the selection of some words by a word_selection_function].
"""
source_tree = ET.parse(xml_file)
if source_tree.getroot().find('metadata/type').text == FILE_TYPE_SVG_WORD_POSITION:
- page = cls(xml_source_file=xml_file)
+ page = cls(xml_file)
if word_selection_function is None or len(word_selection_function(page.words)) > 0:
return [ page ]
else:
return []
elif source_tree.getroot().find('metadata/type').text == FILE_TYPE_XML_MANUSCRIPT:
pages = []
xpath = '//page/@output'
if status_contains != '' and status_not_contain != '':
xpath = '//page[contains(@status, "{0}") and not(contains(@status, "{1}"))]/@output'.format(status_contains, status_not_contain)
elif status_contains != '':
xpath = '//page[contains(@status, "{0}")]/@output'.format(status_contains)
elif status_not_contain != '':
xpath = '//page[not(contains(@status, "{0}"))]/@output'.format(status_not_contain)
for xml_source_file in source_tree.xpath(xpath):
if isfile(xml_source_file):
pages += cls.get_pages_from_xml_file(xml_source_file, word_selection_function=word_selection_function)
return pages
else:
return []
@classmethod
def get_semantic_dictionary(cls):
""" Creates a semantic dictionary as specified by SemanticClass.
"""
dictionary = {}
class_dict = cls.get_class_dictionary()
properties = { 'number': { 'class': str, 'cardinality': 1},\
'faksimile_image': { 'class': FaksimileImage, 'cardinality': 1},\
'orientation': { 'class': str, 'cardinality': 1},\
'svg_image': { 'class': SVGImage, 'cardinality': 1}}
properties.update(cls.create_semantic_property_dictionary('text_field', TextField,\
cardinality=1, name='pageIsOnTextField', label='page is on text field',\
comment='Relates a page to the text field on a faksimile image.'))
for key in [ 'lines', 'words', 'writing_processes', 'word_deletion_paths', 'word_insertion_marks']:
properties.update(cls.create_semantic_property_dictionary(key, list))
dictionary.update({'class': class_dict})
dictionary.update({'properties': properties})
return dictionary
-
- def init_line_numbers(self, line_numbers, document_bottom):
- """Init line numbers.
- """
- even_index = 0
- MINABOVE = 1
- self.line_numbers = []
- if len(line_numbers) > 0:
- first_line_bottom = line_numbers[even_index].top - MINABOVE
- self.line_numbers.append(LineNumber(id=1, top=0, bottom=first_line_bottom))
- self.line_numbers.append(line_numbers[even_index])
- even_index += 1
- while even_index < len(line_numbers):
- self.line_numbers.append(LineNumber(id=line_numbers[even_index].id-1,\
- top=line_numbers[even_index-1].bottom+MINABOVE,\
- bottom=line_numbers[even_index].top-MINABOVE))
- self.line_numbers.append(line_numbers[even_index])
- even_index += 1
- self.line_numbers.append(LineNumber(id=line_numbers[even_index-1].id+1,\
- top=line_numbers[even_index-1].bottom+MINABOVE,\
- bottom=document_bottom))
- for line_number in self.line_numbers:
- line_number.attach_object_to_tree(self.page_tree)
def init_node_objects(self):
"""Initialize all node objects.
"""
self.word_insertion_marks = [ WordInsertionMark(wim_node=wim_node) for wim_node in self.page_tree.getroot().xpath('//' + WordInsertionMark.XML_TAG) ]
self.words = [ Word.create_cls(word_node) for word_node in self.page_tree.getroot().xpath('./word') ]
self.mark_foreign_hands = [ MarkForeignHands.create_cls(node) for node in self.page_tree.getroot().xpath('//' + MarkForeignHands.XML_TAG) ]
self.text_connection_marks = [ TextConnectionMark.create_cls(node) for node in self.page_tree.getroot().xpath('//' + TextConnectionMark.XML_TAG) ]
self.line_numbers = [ LineNumber(xml_text_node=line_number_node) for line_number_node in self.page_tree.getroot().xpath('//' + LineNumber.XML_TAG) ]
self.lines = [ Line.create_cls_from_node(node=line_number_node) for line_number_node in self.page_tree.getroot().xpath('//' + LineNumber.XML_TAG) ]
self.writing_processes = [ WritingProcess.create_writing_process_from_xml(node, self.words) for node in self.page_tree.xpath('//' + WritingProcess.XML_TAG) ]
self.word_deletion_paths = [ Path(node=node) for node in self.page_tree.xpath('//' + Path.WORD_DELETION_PATH_TAG) ]
if self.faksimile_image is not None and self.text_field is not None:
for simple_word in self.words + self.mark_foreign_hands + self.text_connection_marks:
simple_word.init_word(self)
for wim in self.word_insertion_marks:
if wim.line_number > -1:
wim.line = [ line for line in self.lines if line.id == wim.line_number ][0]
-
- def is_locked(self):
- """Return true if page is locked.
- """
- return len(self.page_tree.xpath('//metadata/lock')) > 0
-
- def lock(self, reference_file, message=''):
- """Lock tree such that ids of words etc. correspond to ids
- in reference_file, optionally add a message that will be shown.
- """
- if not self.is_locked():
- metadata = self.page_tree.xpath('./metadata')[0]\
- if len(self.page_tree.xpath('./metadata')) > 0\
- else ET.SubElement(self.page_tree.getroot(), 'metadata')
- lock = ET.SubElement(metadata, 'lock')
- ET.SubElement(lock, 'reference-file').text = reference_file
- if message != '':
- ET.SubElement(lock, 'message').text = message
-
- def unlock(self):
- """Lock tree such that ids of words etc. correspond to ids
- in reference_file, optionally add a message that will be shown.
- """
- if self.is_locked():
- lock = self.page_tree.xpath('//metadata/lock')[0]
- lock.getparent().remove(lock)
def update_and_attach_words2tree(self, update_function_on_word=None, include_special_words_of_type=[]):
"""Update word ids and attach them to page.page_tree.
"""
if not self.is_locked():
update_function_on_word = [ update_function_on_word ]\
if type(update_function_on_word) != list\
else update_function_on_word
for node in self.page_tree.xpath('.//word|.//' + MarkForeignHands.XML_TAG + '|.//' + TextConnectionMark.XML_TAG):
node.getparent().remove(node)
for index, word in enumerate(self.words):
word.id = index
for func in update_function_on_word:
if callable(func):
func(word)
word.attach_word_to_tree(self.page_tree)
for index, mark_foreign_hands in enumerate(self.mark_foreign_hands):
mark_foreign_hands.id = index
if MarkForeignHands in include_special_words_of_type:
for func in update_function_on_word:
if callable(update_function_on_word):
func(mark_foreign_hands)
mark_foreign_hands.attach_word_to_tree(self.page_tree)
for index, text_connection_mark in enumerate(self.text_connection_marks):
text_connection_mark.id = index
if TextConnectionMark in include_special_words_of_type:
for func in update_function_on_word:
if callable(update_function_on_word):
func(text_connection_mark)
text_connection_mark.attach_word_to_tree(self.page_tree)
else:
print('locked')
def update_data_source(self, faksimile_svgFile=None, xml_correction_file=None):
"""Update the data source of page.
"""
if faksimile_svgFile is not None:
self.faksimile_svgFile = faksimile_svgFile
data_node = self.page_tree.xpath('.//data-source')[0]\
if len(self.page_tree.xpath('.//data-source')) > 0\
else ET.SubElement(self.page_tree.getroot(), 'data-source')
data_node.set('file', self.faksimile_svgFile)
if xml_correction_file is not None:
data_node.set('xml-corrected-words', xml_correction_file)
def update_line_number_area(self, transkription_field, svg_tree=None):
"""Determines the width of the area where the line numbers are written in the page.source file.
"""
THRESHOLD = 0.4
if svg_tree is None:
svg_tree = ET.parse(self.source)
if len(self.line_numbers) > 1:
line_number = self.line_numbers[9]\
if transkription_field.is_page_verso() and len(self.line_numbers) > 8\
else self.line_numbers[1]
ln_nodes = [ item for item in svg_tree.iterfind('//text', svg_tree.getroot().nsmap)\
if Matrix.IS_NEARX_TRANSKRIPTION_FIELD(item.get('transform'), transkription_field)\
and LineNumber.IS_A_LINE_NUMBER(item)\
and LineNumber(raw_text_node=item, transkription_field=transkription_field).id == line_number.id ]
if len(ln_nodes) > 0:
matrix = Matrix(transform_matrix_string=ln_nodes[0].get('transform'))
if transkription_field.is_page_verso():
transkription_field.add_line_number_area_width(matrix.getX())
elif self.svg_file is not None and isfile(self.svg_file):
svg_path_tree = ET.parse(self.svg_file)
namespaces = { k if k is not None else 'ns': v for k, v in svg_path_tree.getroot().nsmap.items() }
svg_x = matrix.getX()
svg_y = self.line_numbers[1].bottom + transkription_field.ymin
use_nodes = svg_path_tree.xpath('//ns:use[@x>="{0}" and @x<="{1}" and @y>="{2}" and @y<="{3}"]'\
.format(svg_x-THRESHOLD, svg_x+THRESHOLD,svg_y-THRESHOLD, svg_y+THRESHOLD), namespaces=namespaces)
if len(use_nodes) > 0:
symbol_id = use_nodes[0].get('{%s}href' % namespaces['xlink']).replace('#', '')
d_strings = use_nodes[0].xpath('//ns:symbol[@id="{0}"]/ns:path/@d'.format(symbol_id), namespaces=namespaces)
if len(d_strings) > 0 and d_strings[0] != '':
path = parse_path(d_strings[0])
xmin, xmax, ymin, ymax = path.bbox()
width = xmax - xmin
transkription_field.add_line_number_area_width(matrix.getX() + width)
def update_page_type(self, transkription_field=None):
"""Adds a source to page and attaches it to page_tree.
"""
if transkription_field is None:
if self.source is None or not isfile(self.source):
raise FileNotFoundError('Page does not have a source!')
transkription_field = TranskriptionField(self.source)
self.page_type = Page.PAGE_VERSO\
if transkription_field.is_page_verso()\
else Page.PAGE_RECTO
self.page_tree.getroot().set('pageType', self.page_type)
Index: svgscripts/process_words_post_merging.py
===================================================================
--- svgscripts/process_words_post_merging.py (revision 72)
+++ svgscripts/process_words_post_merging.py (revision 73)
@@ -1,326 +1,326 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This program can be used to process words after they have been merged with faksimile data.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see 1}}}
from colorama import Fore, Style
from deprecated import deprecated
from functools import cmp_to_key
import getopt
import inspect
import lxml.etree as ET
import re
import shutil
import string
from svgpathtools import svg2paths2, svg_to_paths
import sys
import tempfile
from operator import attrgetter
import os
from os import listdir, sep, path, setpgrp, devnull
from os.path import exists, isfile, isdir, dirname, basename
from progress.bar import Bar
import warnings
if dirname(__file__) not in sys.path:
sys.path.append(dirname(__file__))
from datatypes.box import Box
from datatypes.mark_foreign_hands import MarkForeignHands
from datatypes.page import Page, STATUS_MERGED_OK, STATUS_POSTMERGED_OK
from datatypes.path import Path
from datatypes.text_connection_mark import TextConnectionMark
from datatypes.transkriptionField import TranskriptionField
from util import back_up
from process_files import update_svgposfile_status
sys.path.append('shared_util')
from myxmlwriter import write_pretty, xml_has_type, FILE_TYPE_SVG_WORD_POSITION, FILE_TYPE_XML_MANUSCRIPT
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
UNITTESTING = False
def categorize_paths(page, transkription_field=None):
"""Categorize all paths that are part of the transkription field.
:return: a dictionary containig a list for each category of path.
"""
if page.source is not None and isfile(page.source):
MAX_HEIGHT_LINES = 1
max_line = sorted(\
[line_number.bottom-line_number.top for line_number in page.line_numbers if line_number.id % 2 == 0],\
reverse=True)[0] + 2 if len(page.line_numbers) > 0 else 17
tr_xmin = transkription_field.xmin if transkription_field is not None else 0.0
tr_ymin = transkription_field.ymin if transkription_field is not None else 0.0
paths, attributes = svg_to_paths.svg2paths(page.source)
allpaths_on_tf = []
allpaths_outside_tf = []
attributes_outside_tf = []
if transkription_field is None:
transkription_field = TranskriptionField(page.source)
for index in range(0, len(paths)):
path = paths[index]
attribute = attributes[index]
if len(path) > 0\
and path != transkription_field.path\
and path.bbox()[0] > tr_xmin\
and path.bbox()[1] < transkription_field.xmax:
allpaths_on_tf.append(Path(id=index, path=path, style_class=attribute.get('class')))
elif len(path) > 0\
and path != transkription_field.path:
allpaths_outside_tf.append(path)
attributes_outside_tf.append(attribute)
path_dict = { 'text_area_deletion_paths': [],\
'deletion_or_underline_paths': [],\
'box_paths': [],\
'dots_paths': [],\
'word_connector_paths': [],\
'uncategorized_paths': [] }
for mypath in allpaths_on_tf:
xmin, xmax, ymin, ymax = mypath.path.bbox()
start_line_number = page.get_line_number(mypath.path.start.imag-tr_ymin)
if abs(xmax-xmin) < 1 and abs(ymax-ymin) < 1:
path_dict.get('dots_paths').append(mypath)
elif abs(ymax-ymin) > MAX_HEIGHT_LINES and abs(ymax-ymin) < max_line and mypath.path.iscontinuous() and mypath.path.isclosed():
path_dict.get('box_paths').append(mypath)
elif abs(ymax-ymin) > MAX_HEIGHT_LINES and abs(ymax-ymin) > max_line and mypath.path.iscontinuous() and not mypath.path.isclosed():
path_dict.get('word_connector_paths').append(mypath)
elif abs(ymax-ymin) < MAX_HEIGHT_LINES:
path_dict.get('deletion_or_underline_paths').append(mypath)
elif start_line_number != -1 and start_line_number != page.get_line_number(mypath.path.end.imag-tr_ymin):
path_dict.get('text_area_deletion_paths').append(mypath)
else:
path_dict.get('uncategorized_paths').append(mypath)
underline_path = mark_words_intersecting_with_paths_as_deleted(page, path_dict.get('deletion_or_underline_paths'), tr_xmin, tr_ymin)
path_dict.update({'underline_path': underline_path})
process_word_boxes(page, path_dict.get('box_paths'), transkription_field,\
paths=allpaths_outside_tf, attributes=attributes_outside_tf, max_line=max_line)
return path_dict
elif not UNITTESTING:
error_msg = 'Svg source file {} does not exist!'.format(page.source)\
if page.source is not None else 'Page does not contain a source file!'
raise FileNotFoundError(error_msg)
return {}
def do_paths_intersect_saveMode(path1, path2):
    """Safely test two svg paths for intersection.

    :return: True if the paths intersect, False if they do not
        or if the intersection test raised an AssertionError.
    """
    try:
        intersecting = path1.intersect(path2, justonemode=True)
    except AssertionError:
        # svgpathtools raises AssertionError for degenerate path pairs;
        # treat that as "no intersection" rather than propagating.
        intersecting = False
    return intersecting
def find_special_words(page, transkription_field=None):
    """Find special words, remove them from words, process their content.

    Single-character words matching the special character lists are turned
    into MarkForeignHands / TextConnectionMark instances on the page and
    removed from page.words; their content is then extracted from the svg
    source's footnote area.

    :param page: the Page whose words are scanned.
    :param transkription_field: optional TranskriptionField; created from
        page.source when omitted.
    :raises FileNotFoundError: if the page has no existing source file.
    """
    if page.source is None or not isfile(page.source):
        raise FileNotFoundError('Page does not have a source!')
    if transkription_field is None:
        transkription_field = TranskriptionField(page.source)
    special_char_list = MarkForeignHands.get_special_char_list()
    special_char_list += TextConnectionMark.get_special_char_list()
    single_char_words = [ word for word in page.words if len(word.text) == 1 and word.text in special_char_list ]
    if not UNITTESTING:
        bar = Bar('find special words', max=len(single_char_words))
    for word in single_char_words:
        if not UNITTESTING:
            bar.next()
        if word.text == MarkForeignHands.CLASS_MARK:
            # renamed local from 'id' to avoid shadowing the builtin
            mark_id = len(page.mark_foreign_hands)
            page.mark_foreign_hands.append(MarkForeignHands.create_cls_from_word(word, id=mark_id))
            page.words.remove(word)
        elif word.text in TextConnectionMark.SPECIAL_CHAR_LIST[0]\
                or (word.text in TextConnectionMark.SPECIAL_CHAR_LIST\
                    and any(style in page.sonderzeichen_list for style\
                            in word.transkription_positions[0].positional_word_parts[0].style_class.split(' '))):
            mark_id = len(page.text_connection_marks)
            page.text_connection_marks.append(TextConnectionMark.create_cls_from_word(word, id=mark_id))
            page.words.remove(word)
    if not UNITTESTING:
        bar.finish()
    svg_tree = ET.parse(page.source)
    page.update_page_type(transkription_field=transkription_field)
    page.update_line_number_area(transkription_field, svg_tree=svg_tree)
    # style classes whose font-family ends with 'Italic' mark foreign-hand content
    italic_classes = [ key for key in page.style_dict\
            if bool(page.style_dict[key].get('font-family')) and page.style_dict[key]['font-family'].endswith('Italic') ]
    if len(page.mark_foreign_hands) > 0:
        MarkForeignHands.find_content(page.mark_foreign_hands, transkription_field, svg_tree, italic_classes=italic_classes,\
                SonderzeichenList=page.sonderzeichen_list)
    if len(page.text_connection_marks) > 0:
        TextConnectionMark.find_content_in_footnotes(page.text_connection_marks, transkription_field, svg_tree,\
                title=page.title, page_number=page.number)
def mark_words_intersecting_with_paths_as_deleted(page, deletion_paths, tr_xmin=0.0, tr_ymin=0.0):
    """Mark every word that intersects one of the deletion paths as deleted
    and register those paths on page.word_deletion_paths.

    [:return:] list of .path.Path that might be word_underline_paths
    """
    show_progress = not UNITTESTING
    if show_progress:
        bar = Bar('mark words that intersect with deletion paths', max=len(page.words))
    for word in page.words:
        if show_progress:
            bar.next()
        word.deleted = False
        for transkription_position in word.transkription_positions:
            word_path = Path.create_path_from_transkription_position(
                transkription_position, tr_xmin=tr_xmin, tr_ymin=tr_ymin)
            hits = [dp for dp in deletion_paths
                    if do_paths_intersect_saveMode(dp.path, word_path.path)]
            if hits:
                transkription_position.deleted = True
                for deletion_path in hits:
                    if deletion_path not in page.word_deletion_paths:
                        deletion_path.tag = Path.WORD_DELETION_PATH_TAG
                        deletion_path.attach_object_to_tree(page.page_tree)
                        page.word_deletion_paths.append(deletion_path)
        word.partition_according_to_deletion()
    if show_progress:
        bar.finish()
    # paths that intersected no word are candidate word underlines
    return list(set(deletion_paths) - set(page.word_deletion_paths))
def post_merging_processing_and_saving(svg_pos_file=None, new_words=None, page=None, manuscript_file=None, target_svg_pos_file=None):
    """Process words after merging with faksimile word positions.

    Either ``page`` or ``svg_pos_file`` must be supplied; the page's special
    words, writing process ids and paths are (re)processed and, outside of
    unit tests, the result is written back with an updated status.

    :param svg_pos_file: xml file with svg word positions (used to create the page if needed).
    :param new_words: merged words replacing page.words (sorted by id) when given.
    :param page: an already instantiated datatypes.page.Page.
    :param manuscript_file: manuscript xml file whose status is updated alongside.
    :param target_svg_pos_file: output file; defaults to svg_pos_file.
    :raises Exception: if neither page nor svg_pos_file is given.
    :raises FileNotFoundError: if the page's svg source does not exist.
    """
    if page is None and svg_pos_file is None:
        raise Exception('ERROR: post_merging_processing_and_saving needs either a Page or a svg_pos_file!')
    if page is None:
        page = Page(svg_pos_file)
    if page.source is None or not isfile(page.source):
        raise FileNotFoundError('Page instantiated from {} does not contain an existing source!'.format(svg_pos_file))
    if svg_pos_file is None:
        # recover the file name from the parsed tree's document info
        svg_pos_file = page.page_tree.docinfo.URL
    if new_words is not None:
        # replace the page's words with the merged words and drop the
        # now-stale word nodes from the xml tree
        page.words = sorted(new_words, key=attrgetter('id'))
        for word_node in page.page_tree.xpath('.//word'):
            word_node.getparent().remove(word_node)
    transkription_field = TranskriptionField(page.source)
    find_special_words(page, transkription_field=transkription_field)
    update_writing_process_ids(page)
    #TODO: find_hyphenated_words(page)
    categorize_paths(page, transkription_field=transkription_field)
    page.update_and_attach_words2tree()
    if not UNITTESTING:
        if target_svg_pos_file is None:
            target_svg_pos_file = svg_pos_file
        # record that both merging and post-merging completed
        status = STATUS_MERGED_OK + ":" + STATUS_POSTMERGED_OK
        update_svgposfile_status(svg_pos_file, manuscript_file=manuscript_file, status=status)
        write_pretty(xml_element_tree=page.page_tree, file_name=target_svg_pos_file, script_name=__file__, file_type=FILE_TYPE_SVG_WORD_POSITION)
def process_word_boxes(page, box_paths, transkription_field, paths=None, attributes=None, max_line=17):
    """Process word boxes: partition words according to word boxes.

    Collects svg paths lying on the margin field, groups the box paths by
    line number, builds Box objects from them and lets every word of the
    page process those boxes.

    :param page: the Page whose words are processed.
    :param box_paths: paths previously categorized as boxes.
    :param transkription_field: the TranskriptionField of the svg source.
    :param paths/attributes: svg paths and their attribute dicts; read from
        page.source when omitted.
    :param max_line: maximal height of a line (used to filter margin paths).
    """
    if not UNITTESTING:
        bar = Bar('process word boxes', max=len(page.words))
    svg_tree = ET.parse(page.source)
    namespaces = { k if k is not None else 'ns': v for k, v in svg_tree.getroot().nsmap.items() }
    allpaths_on_margin_field = []
    if paths is None or attributes is None:
        paths, attributes = svg_to_paths.svg2paths(page.source)
    for index, path in enumerate(paths):
        # skip empty paths BEFORE calling bbox(): the original computed the
        # bbox unconditionally, which fails on empty paths
        if len(path) == 0 or path == transkription_field.path:
            continue
        xmin, xmax, ymin, ymax = path.bbox()
        # margin field lies left of the text on verso pages, right on recto pages
        on_margin_field = (xmax < transkription_field.xmin and transkription_field.is_page_verso())\
                or (xmin > transkription_field.xmax and not transkription_field.is_page_verso())
        if on_margin_field and abs(ymax-ymin) < max_line:
            allpaths_on_margin_field.append(Path(id=index, path=path, style_class=attributes[index].get('class')))
    # group box paths by the line number of their vertical center
    box_line_number_dict = {}
    for box_path in sorted(box_paths, key=lambda path: path.get_median_y()):
        line_number = page.get_line_number(box_path.get_median_y(tr_ymin=transkription_field.ymin))
        box_line_number_dict.setdefault(line_number, []).append(box_path)
    boxes = []
    for line_number, paths_on_line in box_line_number_dict.items():
        box_paths_on_line = sorted(paths_on_line, key=lambda path: path.get_x())
        margin_boxes_on_line = sorted([ margin_box for margin_box in allpaths_on_margin_field\
                if page.get_line_number(margin_box.get_median_y(tr_ymin=transkription_field.ymin)) == line_number ],\
                key=lambda path: path.get_x())
        # NOTE(review): looser threshold on odd line numbers — rationale not
        # visible here, preserved from the original
        threshold = 3 if line_number % 2 == 0 else 1.5
        for box_path in box_paths_on_line:
            box = Box.create_box(box_path, margin_boxes_on_line, svg_tree=svg_tree,\
                    transkription_field=transkription_field, namespaces=namespaces, threshold=threshold)
            if box is not None:
                boxes.append(box)
    for word in page.words:
        if not UNITTESTING:
            bar.next()
        word.process_boxes(boxes, tr_xmin=transkription_field.xmin, tr_ymin=transkription_field.ymin)
    if not UNITTESTING:
        bar.finish()
def update_writing_process_ids(page):
    """Update the writing_process_ids of the words and split accordingly.
    """
    for current_word in page.words:
        current_word.set_writing_process_id_to_transkription_positions(page)
        current_word.partition_according_to_writing_process_id()
def usage():
    """Print information on how to use the script.
    """
    # main's docstring doubles as the command line help text
    print(main.__doc__)
def main(argv):
    """This program can be used to process words after they have been merged with faksimile data.
    svgscripts/process_words_post_merging.py [OPTIONS] a xml file about a manuscript, containing information about its pages.
    a xml file about a page, containing information about svg word positions.
    OPTIONS:
    -h|--help: show help
    :return: exit code (int)
    """
    try:
        options, arguments = getopt.getopt(argv, "h", ["help"])
    except getopt.GetoptError:
        usage()
        return 2
    if any(option in ('-h', '--help') for option, _ in options):
        usage()
        return 0
    if not arguments:
        usage()
        return 2
    file_a = arguments[0]
    if not isfile(file_a):
        raise FileNotFoundError('File {} does not exist!'.format(file_a))
    # pass the file on as manuscript_file only when it really is one
    manuscript_file = file_a if xml_has_type(FILE_TYPE_XML_MANUSCRIPT, xml_source_file=file_a) else None
    for page in Page.get_pages_from_xml_file(file_a, status_contains=STATUS_MERGED_OK, status_not_contain=STATUS_POSTMERGED_OK):
        back_up(page, page.page_tree.docinfo.URL)
        post_merging_processing_and_saving(page=page, manuscript_file=manuscript_file)
    return 0
# Script entry point: forward the command line arguments (without the
# program name) and propagate main's exit code to the shell.
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
Index: svgscripts/extract_footnotes.py
===================================================================
--- svgscripts/extract_footnotes.py (revision 72)
+++ svgscripts/extract_footnotes.py (revision 73)
@@ -1,83 +1,83 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This program can be used to extract footnotes from a svg file.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>. 1}}}
import getopt
import re
import sys
from os import listdir, sep, path
from os.path import isfile, isdir, dirname
import lxml.etree as ET
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
from datatypes.matrix import Matrix
from datatypes.page import Page
from datatypes.transkriptionField import TranskriptionField
from datatypes.footnotes import extract_footnotes_as_strings
def usage():
    """Print information on how to use the script.
    """
    # main's docstring doubles as the command line help text
    print(main.__doc__)
def main(argv):
    """This program can be used to extract footnotes from a svg file.
    svgscripts/extract_footnotes.py [OPTIONS] a svg text file.
    a xml file containing information about the position of the svg words.
    OPTIONS:
    -h|--help: show help
    :return: exit code (int)
    """
    try:
        options, arguments = getopt.getopt(argv, "h", ["help"])
    except getopt.GetoptError:
        usage()
        return 2
    if any(option in ('-h', '--help') for option, _ in options):
        usage()
        return 0
    if not arguments:
        usage()
        return 2
    svg_file = arguments[0]
    if svg_file.endswith('xml'):
        # a word position xml file points at its svg source
        svg_file = Page(svg_file).source
    footnotes = extract_footnotes_as_strings(svg_file=svg_file)
    print(footnotes)
    return 0
# Script entry point: forward the command line arguments (without the
# program name) and propagate main's exit code to the shell.
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
Index: svgscripts/util.py
===================================================================
--- svgscripts/util.py (revision 72)
+++ svgscripts/util.py (revision 73)
@@ -1,419 +1,420 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This program can be used to copy a faksimile svg file with the option of highlighting some word boxes.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>. 1}}}
from colorama import Fore, Style
from datetime import datetime
from functools import cmp_to_key
import getopt
import inspect
import itertools
import lxml.etree as ET
import re
import shutil
import signal
import string
import subprocess
from svgpathtools import svg_to_paths
import sys
import tempfile
import os
from os import listdir, sep, path, setpgrp, devnull, makedirs
from os.path import basename, commonpath, dirname, exists, isfile, isdir, realpath, splitext
import warnings
import wget
import xml.etree.ElementTree as XET
if dirname(__file__) not in sys.path:
sys.path.append(dirname(__file__))
from datatypes.faksimile import FaksimilePage, get_paths_inside_rect
from datatypes.faksimile_image import FaksimileImage
from datatypes.lineNumber import LineNumber
from datatypes.mark_foreign_hands import MarkForeignHands
from datatypes.page import Page
+from datatypes.page_creator import PageCreator
from datatypes.transkriptionField import TranskriptionField
from datatypes.transkription_position import TranskriptionPosition
from datatypes.word import update_transkription_position_ids
from local_config import FAKSIMILE_LOCATION, PDF_READER, SVG_EDITOR, USER_ROOT_LOCATION_DICT
from process_files import update_svgposfile_status
sys.path.append('shared_util')
from myxmlwriter import write_pretty, FILE_TYPE_SVG_WORD_POSITION, FILE_TYPE_XML_MANUSCRIPT
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
# Module-wide flag: when True, progress bars and file writes are suppressed.
UNITTESTING = False
# Defaults used by create_highlighted_svg_file for marked word boxes.
HIGHLIGHT_COLOR = 'red'
OPACITY = '0.5'
class ExternalViewer:
    """This class can be used to show files with external viewers.
    """
    # maps file extensions to the externally configured viewer commands
    file_format_viewer_dict = { '.pdf': PDF_READER, '.svg': SVG_EDITOR }

    @classmethod
    def show_files(cls, single_file=None, list_of_files=None):
        """Opens file(s) with corresponding external viewer(s).

        All files but the last are opened in detached background processes;
        the call blocks on the last file, after which the background viewers
        are terminated.

        :param single_file: a single file name, or a list of file names.
        :param list_of_files: list of file names to open.
        """
        # FIX: the original used a mutable default argument (list_of_files=[])
        # which accumulated file names across successive calls.
        if list_of_files is None:
            list_of_files = []
        DEVNULL = None
        if type(single_file) == list:
            list_of_files = single_file
        elif single_file is not None:
            list_of_files.append(single_file)
        if len(list_of_files) > 1:
            # silence the output of the detached background viewers
            DEVNULL = open(devnull, 'wb')
        process_list = []
        list_of_files.reverse()
        while len(list_of_files) > 0:
            file2open = list_of_files.pop()
            viewer = cls.file_format_viewer_dict.get(splitext(file2open)[1])
            if viewer is not None:
                if len(list_of_files) > 0:
                    # detached process in its own session so the whole
                    # process group can be killed afterwards
                    process_list.append(\
                        subprocess.Popen([viewer, file2open], stdout=DEVNULL, stderr=DEVNULL, preexec_fn=os.setsid))
                else:
                    # block on the last file
                    subprocess.run([viewer, file2open])
        for process in process_list:
            os.killpg(os.getpgid(process.pid), signal.SIGTERM)
        if DEVNULL is not None:
            DEVNULL.close()
def back_up(page: Page, reference_file, bak_dir='./bak') -> str:
    """Back up a xml_source_file.

    :return: target_file_name
    """
    makedirs(bak_dir, exist_ok=True)
    timestamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    target_file_name = '{0}{1}{2}_{3}'.format(bak_dir, sep, basename(page.page_tree.docinfo.URL), timestamp)
    # record which function/file produced the backup in the output's metadata
    script_label = '{0}({1},{2})'.format(__file__, inspect.currentframe().f_code.co_name, reference_file)
    write_pretty(xml_element_tree=page.page_tree, file_name=target_file_name,
                 script_name=script_label, file_type=FILE_TYPE_SVG_WORD_POSITION)
    return target_file_name
def copy_faksimile_svg_file(target_file=None, faksimile_source_file=None, faksimile_tree=None, target_directory=None, abs_image_path=None, local_image_path=None, namespaces=None):
    """Copy a faksimile_svg_file to target_file.

    Writes the svg via xml.etree (XET) after registering the source's
    namespace prefixes, optionally rewriting the embedded image's
    xlink:href / sodipodi:absref attributes.

    :param target_file/target_directory: at least one must be given.
    :param faksimile_source_file/faksimile_tree: at least one must be given.
    :param abs_image_path: new value for the image's sodipodi:absref.
    :param local_image_path: new value for the image's xlink:href.
    :param namespaces: prefix->uri mapping; derived from svg attributes when omitted.
    :raises Exception: if neither source nor target can be determined.
    """
    if faksimile_source_file is None and faksimile_tree is not None:
        faksimile_source_file = faksimile_tree.docinfo.URL
    elif faksimile_source_file is None:
        raise Exception('copy_faksimile_svg_file needs either a faksimile_tree (lxml.etree.ElementTree) or a faksimile_source_file')
    if target_file is not None and target_directory is not None:
        target_file = target_directory + sep + target_file
    elif target_file is None and target_directory is not None:
        target_file = target_directory + sep + basename(faksimile_source_file)
    elif target_file is None:
        raise Exception('copy_faksimile_svg_file needs either a target_file or a target_directory')
    paths, attributes, svg_attributes = svg_to_paths.svg2paths(faksimile_source_file, return_svg_attributes=True)
    # register every xmlns: prefix with xml.etree so the output keeps the
    # source's prefixes; duplicates raise ValueError and are skipped
    for key in [ key for key in svg_attributes.keys() if key.startswith('xmlns:') ]:
        try:
            XET.register_namespace(key.replace('xmlns:', ''), svg_attributes[key])
        except ValueError: pass
    XET.register_namespace('', 'http://www.w3.org/2000/svg')
    if namespaces is None:
        namespaces = { 'ns': svg_attributes['xmlns'], 'xlink': svg_attributes['xmlns:xlink'],\
                'sodipodi': svg_attributes['xmlns:sodipodi'] }
    if faksimile_tree is not None:
        # convert the lxml tree to an xml.etree element for writing
        element = XET.fromstring(ET.tostring(faksimile_tree))\
                if type(faksimile_tree) == ET._ElementTree\
                else XET.fromstring(XET.tostring(faksimile_tree.getroot()))
        target_tree = XET.ElementTree(element)
    else:
        target_tree = XET.parse(faksimile_source_file)
    if (local_image_path is not None or abs_image_path is not None)\
            and len(target_tree.findall('.//ns:image', namespaces=namespaces)) > 0:
        # only the first image node is updated
        image_node = target_tree.findall('.//ns:image', namespaces=namespaces)[0]
        if local_image_path is not None:
            image_node.set('{%s}href' % namespaces['xlink'], local_image_path)
        if abs_image_path is not None:
            image_node.set('{%s}absref' % namespaces['sodipodi'], abs_image_path)
    target_tree.write(target_file)
def copy_faksimile_update_image_location(faksimile_source_file=None, faksimile_tree=None, target_file=None, target_directory=None, overwrite=False):
    """Copy a faksimile_svg_file to target_file and update image location.

    Determines a suitable relative (xlink:href) and absolute
    (sodipodi:absref) path for the embedded faksimile image, downloading the
    image if it is missing locally, then delegates the actual copying to
    copy_faksimile_svg_file. Existing targets are skipped with a warning
    unless overwrite is True.

    :raises Exception: if neither source nor target can be determined.
    """
    if faksimile_source_file is None and faksimile_tree is not None:
        faksimile_source_file = faksimile_tree.docinfo.URL
    elif faksimile_source_file is None:
        raise Exception('copy_faksimile_svg_file needs either a faksimile_tree (lxml.etree.ElementTree) or a faksimile_source_file')
    if target_file is not None and target_directory is not None:
        target_file = target_directory + sep + target_file
    elif target_file is None and target_directory is not None:
        target_file = target_directory + sep + basename(faksimile_source_file)
    elif target_directory is None and target_file is not None:
        target_directory = dirname(target_file)
    elif target_file is None:
        raise Exception('copy_faksimile_svg_file needs either a target_file or a target_directory')
    source_tree = ET.parse(faksimile_source_file) if faksimile_tree is None else faksimile_tree
    namespaces = { k if k is not None else 'ns': v for k, v in source_tree.getroot().nsmap.items() }
    image_nodes = source_tree.xpath('//ns:image', namespaces=namespaces)
    local_image_path = None
    abs_image_path = None
    user_abs_image_path = None
    if len(image_nodes) > 0:
        image = FaksimileImage.CREATE_IMAGE(image_nodes[0], source_file=faksimile_source_file)
        abs_image_path = image.local_path
        # map the image path into a user-specific root when the target
        # directory belongs to a known user location
        for user_name in USER_ROOT_LOCATION_DICT.keys():
            if user_name in target_directory:
                user_abs_image_path = abs_image_path.replace(FAKSIMILE_LOCATION, USER_ROOT_LOCATION_DICT[user_name]).replace('//','/')
                break
        # if target_directory is subdir of FAKSIMILE_LOCATION
        if realpath(target_directory).startswith(realpath(FAKSIMILE_LOCATION)):
            # build a '../..'-style relative path from the target directory
            # to the image's directory
            common_path = commonpath([ realpath(target_directory), realpath(dirname(image.local_path)) ])
            relative_directory = '/'.join(\
                    [ '..' for d in realpath(target_directory).replace(common_path + '/', '').split('/') ])
            local_image_path = relative_directory + realpath(image.local_path).replace(common_path, '')
            if not isfile(target_directory + sep + local_image_path):
                local_image_path = None
        elif abs_image_path is not None:
            local_image_path = abs_image_path
        if abs_image_path is not None and not isfile(abs_image_path):
            # fetch the missing image from its remote URL
            wget.download(image.URL, out=dirname(abs_image_path))
    if not isfile(target_file) or overwrite:
        abs_image_path = user_abs_image_path if user_abs_image_path is not None else abs_image_path
        copy_faksimile_svg_file(target_file=target_file, faksimile_source_file=faksimile_source_file,\
                faksimile_tree=faksimile_tree, abs_image_path=abs_image_path,\
                local_image_path=local_image_path, namespaces=namespaces)
    else:
        msg = 'File {0} not copied to directory {1}, it already contains a file {2}.'.format(faksimile_source_file, target_directory, target_file)
        warnings.warn(msg)
def copy_xml_file_word_pos_only(xml_source_file, target_directory):
    """Copy word positions of a xml file to target directory.

    :return: (str) xml_target_file
    """
    xml_target_file = target_directory + sep + basename(xml_source_file)
    original_page = Page(xml_source_file)
    # create a fresh page that carries only the metadata and the words
    new_page = PageCreator(xml_target_file, title=original_page.title,
                           page_number=original_page.number, orientation=original_page.orientation)
    new_page.words = original_page.words
    new_page.update_and_attach_words2tree()
    script_label = __file__ + '({})'.format(inspect.currentframe().f_code.co_name)
    write_pretty(xml_element_tree=new_page.page_tree, file_name=xml_target_file,
                 script_name=script_label, file_type=FILE_TYPE_SVG_WORD_POSITION)
    return xml_target_file
def create_highlighted_svg_file(faksimile_tree, node_ids, target_file=None, target_directory=None, local_image_path=None, namespaces={}, highlight_color=HIGHLIGHT_COLOR, opacity=OPACITY):
    """Highlights the nodes of a faksimile_tree that are specified by the list of node_ids and writes the tree to a file.
    """
    if len(namespaces) == 0:
        namespaces = { key if key is not None else 'ns': value
                       for key, value in faksimile_tree.getroot().nsmap.items() }
    xpath_template = '//ns:rect[@id="{0}"]|//ns:path[@id="{0}"]'
    for node_id in node_ids:
        for node in faksimile_tree.xpath(xpath_template.format(node_id), namespaces=namespaces):
            node.set('fill', highlight_color)
            node.set('opacity', opacity)
            # clear the style attribute so it cannot override fill/opacity
            node.set('style', '')
    copy_faksimile_update_image_location(target_file=target_file, faksimile_tree=faksimile_tree,
                                         target_directory=target_directory)
def get_empty_node_ids(faksimile_tree, x_min=0.0, x_max=0.0, y_min=0.0, y_max=0.0, text_field_id=None, faksimile_page=None, namespaces={}):
    """Returns a list of ids of rect and path nodes that do not have a title element.

    :param faksimile_tree: the faksimile svg tree to search in.
    :param x_min/x_max/y_min/y_max: bounding window; derived from
        faksimile_page when one is given.
    :param text_field_id: id of the text field node to exclude.
    :param faksimile_page: optional FaksimilePage supplying window and id.
    :param namespaces: prefix->uri mapping; derived from the tree when empty.
    :return: list of node id strings.
    """
    THRESHOLD_X = 10
    if faksimile_page is not None:
        # derive the search window from the page's text field, shifted by the
        # faksimile image's offset
        x_min = faksimile_page.text_field.xmin + faksimile_page.faksimile_image.x
        x_max = faksimile_page.text_field.xmax + faksimile_page.faksimile_image.x - THRESHOLD_X
        y_min = faksimile_page.text_field.ymin + faksimile_page.faksimile_image.y
        y_max = faksimile_page.text_field.ymax + faksimile_page.faksimile_image.y
        text_field_id = faksimile_page.text_field.id
    if len(namespaces) == 0:
        namespaces = { k if k is not None else 'ns': v for k, v in faksimile_tree.getroot().nsmap.items() }
    nodes_without_title = faksimile_tree.xpath('//ns:rect[@x>"{0}" and @x<"{1}" and @y>"{2}" and @y<"{3}" and @id!="{4}" and not(./ns:title)]'.format(\
            x_min, x_max, y_min, y_max, text_field_id), namespaces=namespaces)
    nodes_without_title += get_paths_inside_rect(faksimile_tree, '//ns:path[not(./ns:title)]', x_min, x_max, y_min, y_max, text_field_id, namespaces=namespaces)
    # fixed local name typo ('empyt_node_ids') and replaced the manual
    # append loop with a comprehension
    return [ node_without_title.get('id') for node_without_title in nodes_without_title ]
def get_mismatching_ids(words, faksimile_positions):
    """ Return the list of mismatching words and the list of mismatching faksimile_positions
    as a 2-tuple.
    """
    mismatching_words = []
    mismatching_faksimile_positions = []
    # normalize character variants first so only true mismatches remain
    faksimile_positions, unique_faksimile_words = replace_chars(words, faksimile_positions)
    word_texts = [ word.text for word in words ]
    for word_text in set(word_texts):
        if word_text not in unique_faksimile_words:
            mismatching_words.extend(word for word in words if word.text == word_text)
    for faksimile_text in unique_faksimile_words:
        if faksimile_text not in set(word_texts):
            mismatching_faksimile_positions.extend(
                position for position in faksimile_positions if position.text == faksimile_text)
    return mismatching_words, mismatching_faksimile_positions
def record_changes(original_svg_file, changed_svg_file, node_ids, namespaces={}):
    """Copy changes made to changed_svg_file to original_svg_file.

    For every node id: if the changed file has a title for that node, the
    title text is copied (or created) on the corresponding node of the
    original file; if the changed file has no title, the node is removed
    from the original file. The original file is then rewritten.

    :param node_ids: ids of the rect/path nodes to synchronize.
    :param namespaces: prefix->uri mapping; derived from the changed tree when empty.
    """
    old_tree = ET.parse(original_svg_file)
    new_tree = ET.parse(changed_svg_file)
    if len(namespaces) == 0:
        namespaces = { k if k is not None else 'ns': v for k, v in new_tree.getroot().nsmap.items() }
    for node_id in node_ids:
        new_titles = new_tree.xpath('//ns:rect[@id="{0}"]/ns:title|//ns:path[@id="{0}"]/ns:title'.format(node_id), namespaces=namespaces)
        old_nodes = old_tree.xpath('//ns:rect[@id="{0}"]|//ns:path[@id="{0}"]'.format(node_id), namespaces=namespaces)
        if len(new_titles) > 0 and len(old_nodes) > 0:
            if old_nodes[0].find('ns:title', namespaces=namespaces) is not None:
                # node already has a title: overwrite its text
                old_nodes[0].find('ns:title', namespaces=namespaces).text = new_titles[0].text
            else:
                # node has no title yet: create one with the changed file's title id
                old_title_id_string = new_titles[0].get('id')
                old_title = ET.SubElement(old_nodes[0], 'title', attrib={ 'id': old_title_id_string })
                old_title.text = new_titles[0].text
        elif len(old_nodes) > 0:
            # title was removed in the changed file: drop the node entirely
            for old_node in old_nodes:
                old_node.getparent().remove(old_node)
    copy_faksimile_svg_file(target_file=original_svg_file, faksimile_tree=old_tree)
def record_changes_on_svg_file_to_page(xml_source_file, svg_file, word_ids=None):
    """Copy changes made to svg_file to xml_source_file.

    Reads the (manually edited) svg rect nodes named 'word_<wordId>_<tpId>'
    and records their geometry/title back onto the corresponding
    transkription positions; rects whose inkscape:label points at another
    word's position move that position to the labeled word. Words are then
    split/merged according to their text status and written back.

    :param word_ids: restrict processing to these word ids (all words when None).
    :return: datatypes.page.Page
    """
    svg_tree = ET.parse(svg_file)
    namespaces = { k if k is not None else 'ns': v for k, v in svg_tree.getroot().nsmap.items() }
    transkription_field = TranskriptionField(svg_file)
    page = Page(xml_source_file)
    words = [ word for word in page.words if word.id in word_ids ]\
            if word_ids is not None else page.words
    new_page_words = []
    for word in words:
        word_id = 'word_' + str(word.id) + '_'
        recorded_ids = []
        for transkription_position in word.transkription_positions:
            transkription_position_id = word_id + str(transkription_position.id)
            tp_nodes = svg_tree.xpath('//ns:g[@id="Transkription"]/ns:rect[@id="{0}"]'.format(transkription_position_id), namespaces=namespaces)
            if len(tp_nodes) > 0:
                # copy the rect's geometry/title onto this transkription position
                record_changes_to_transkription_position(tp_nodes[0], transkription_position,\
                        transkription_field.xmin, transkription_field.ymin, namespaces=namespaces)
                recorded_ids.append(transkription_position_id)
        # rects whose id contains this word's prefix but were not matched above
        extra_nodes = [ node for node in\
                svg_tree.xpath('//ns:g[@id="Transkription"]/ns:rect[contains(@id, "{0}")]'.format(word_id), namespaces=namespaces)\
                if node.get('id') not in recorded_ids ]
        if len(extra_nodes) > 0:
            for extra_node in extra_nodes:
                # inkscape:label carries the node's original id, e.g. '#word_3_0'
                old_ids = [ inkscape_id.replace('#','') for inkscape_id in\
                        svg_tree.xpath('//ns:g[@id="Transkription"]/ns:rect[@id="{0}"]/@inkscape:label'.format(extra_node.get('id')),\
                        namespaces=namespaces) ]
                if len(old_ids) > 0 and re.match(r'word_[0-9]+_[0-9]+', old_ids[0]):
                    old_id_list = old_ids[0].split('_')
                    ref_word_id = int(old_id_list[1])
                    # NOTE(review): ref_tp_id stays a string and is compared to
                    # tp.id below — presumably tp.id is a string; confirm.
                    ref_tp_id = old_id_list[2]
                    ref_words = [ word for word in page.words if word.id == ref_word_id ]
                    if len(ref_words) > 0:
                        ref_tps = [ tp for tp in ref_words[0].transkription_positions\
                                if tp.id == ref_tp_id ]
                        if len(ref_tps) > 0:
                            # move the referenced position from its old word to this word
                            ref_words[0].transkription_positions.remove(ref_tps[0])
                            record_changes_to_transkription_position(extra_node,\
                                    ref_tps[0], transkription_field.xmin, transkription_field.ymin, namespaces=namespaces)
                            word.transkription_positions.append(ref_tps[0])
    for word in page.words:
        if word.has_mixed_status('text'):
            # positions disagree on text: split the word per text status
            new_page_words += [ word for word in word.split_according_to_status('text') if word.text is not None and word.text != '' ]
        elif len(word.transkription_positions) > 0:
            new_text = [ tp.text for tp in word.transkription_positions if tp.text is not None and tp.text != '' ]
            if len(new_text) > 0:
                word.text = new_text[0]
            new_page_words.append(word)
    page.words = new_page_words
    page.update_and_attach_words2tree(update_function_on_word=update_transkription_position_ids)
    page.unlock()
    if not UNITTESTING:
        write_pretty(xml_element_tree=page.page_tree, file_name=xml_source_file,\
                script_name=__file__ + ' -> ' + inspect.currentframe().f_code.co_name, file_type=FILE_TYPE_SVG_WORD_POSITION)
    return page
def record_changes_on_xml_file_to_page(xml_source_file, xml_file) -> Page:
    """Copy changes made to xml_file to xml_source_file.

    The source page is backed up first; its words are then rebuilt from the
    changed file's words, applying each word's split_strings (if any) by
    splitting the word successively on every split string.

    :return: datatypes.page.Page
    """
    copy_page = Page(xml_file)
    page = Page(xml_source_file)
    page.unlock()
    back_up(page, xml_file)
    page.words = []
    for word in copy_page.words:
        if word.split_strings is None\
                or len(word.split_strings) == 0:
            page.words.append(word)
        else:
            # split the word on each split string in turn; keep the middle
            # part each time and continue splitting the remainder
            next_word = word
            for split_string in word.split_strings:
                _, new_word, next_word = next_word.split(split_string)
                page.words.append(new_word)
            if next_word is not None:
                page.words.append(next_word)
    page.update_and_attach_words2tree(update_function_on_word=update_transkription_position_ids)
    if not UNITTESTING:
        write_pretty(xml_element_tree=page.page_tree, file_name=xml_source_file,\
                script_name=__file__ + '({0},{1})'.format(inspect.currentframe().f_code.co_name, xml_file), file_type=FILE_TYPE_SVG_WORD_POSITION)
    return page
def record_changes_to_transkription_position(node, transkription_position, xmin=0.0, ymin=0.0, namespaces=None):
    """Record changes made to node to transkription_position.

    Copies the svg rect node's geometry attributes (x/y/width/height, with
    x/y made relative by subtracting xmin/ymin) and its title text onto the
    transkription_position in place; attributes absent from the node leave
    the position untouched.

    :param node: svg rect node carrying the (possibly edited) values.
    :param transkription_position: the position object updated in place.
    :param xmin/ymin: offsets subtracted from the node's absolute coordinates.
    :param namespaces: prefix->uri mapping; derived from node.nsmap when None.
    """
    if namespaces is None:
        namespaces = { k if k is not None else 'ns': v for k, v in node.nsmap.items() }
    if bool(node.get('x')):
        transkription_position.left = float(node.get('x')) - xmin
    if bool(node.get('y')):
        transkription_position.top = float(node.get('y')) - ymin
    if bool(node.get('width')):
        transkription_position.width = float(node.get('width'))
    if bool(node.get('height')):
        transkription_position.height = float(node.get('height'))
    # evaluate the title xpath only once (the original queried it twice)
    title_texts = node.xpath('./ns:title/text()', namespaces=namespaces)
    if len(title_texts) > 0:
        transkription_position.text = title_texts[0]
def replace_chars(words, faksimile_positions, unique_faksimile_words=None):
    """Return unique_faksimile_words and faksimile_positions, with characters changed according to transcription words.

    For each faksimile word text that matches no transcription word, tries
    the character substitutions '"'->'“', 'ss'->'ß' and '-'->'–'; when the
    substituted text matches a transcription word, the entry in
    unique_faksimile_words is replaced.

    :param unique_faksimile_words: list of distinct faksimile texts; derived
        from faksimile_positions (sorted by length) when None.
    :return: (faksimile_positions, unique_faksimile_words) 2-tuple.
    """
    if unique_faksimile_words is None:
        unique_faksimile_words = sorted(set(faksimile_position.text for faksimile_position in faksimile_positions),\
                key=lambda text: len(text))
    for index, word_text in enumerate(unique_faksimile_words):
        if len([ word for word in words if word.text == word_text ]) == 0:
            if re.match(r'.*".*', word_text)\
                    and len([ word for word in words if word.text == word_text.replace('"', '“') ]) > 0:
                unique_faksimile_words[index] = word_text.replace('"', '“')
            elif re.match(r'.*ss.*', word_text)\
                    and len([ word for word in words if word.text == word_text.replace('ss', 'ß') ]) > 0:
                unique_faksimile_words[index] = word_text.replace('ss', 'ß')
            elif re.match(r'.*-.*', word_text)\
                    and len([ word for word in words if word.text == word_text.replace('-', '–') ]) > 0:
                unique_faksimile_words[index] = word_text.replace('-', '–')
            # propagate the (possibly) replaced text to the faksimile positions
            # NOTE(review): unconditional here, so this also runs when no branch
            # above matched (then it rewrites positions with their own text) —
            # confirm this is intended rather than belonging inside the branches.
            for faksimile_position in [ faksimile_position for faksimile_position in faksimile_positions\
                    if faksimile_position.text == word_text ]:
                faksimile_position.text = unique_faksimile_words[index]
        elif word_text == '-'\
                and len([ word for word in words if word.text == '–' ]) > 0:
            # NOTE(review): debug output only — no replacement happens in this case
            print([ word.text for word in words if word.text == word_text ])
            print([ word.text for word in words if word.text == '–' ])
    return faksimile_positions, unique_faksimile_words
Index: svgscripts/convert_wordPositions.py
===================================================================
--- svgscripts/convert_wordPositions.py (revision 72)
+++ svgscripts/convert_wordPositions.py (revision 73)
@@ -1,372 +1,379 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This program can be used to convert the word positions to HTML for testing purposes.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>. 1}}}
import cairosvg
import getopt
from lxml.html import builder as E
from lxml.html import open_in_browser
import lxml
from os import sep, listdir, mkdir, path, remove
from os.path import exists, isfile, isdir, dirname
import re
import sys
from svgpathtools import svg_to_paths
import xml.etree.ElementTree as ET
if dirname(__file__) not in sys.path:
sys.path.append(dirname(__file__))
from datatypes.matrix import Matrix
from datatypes.page import Page
+from datatypes.page_creator import PageCreator
from datatypes.transkriptionField import TranskriptionField
from datatypes.writing_process import WritingProcess
from datatypes.word import Word
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
class Converter:
"""The converter super class.
"""
def __init__(self, page, non_testing=True, show_word_insertion_mark=False):
self.page = page
self.non_testing = non_testing
self.show_word_insertion_mark = show_word_insertion_mark
def _get_transkription_positions(self, transkription_positions, stage_version=''):
"""Returns the transkription_positions of the indicated stage_version.
"""
convertable_transkription_positions = transkription_positions
if stage_version != '':
convertable_transkription_positions = []
if re.match(r'^\d$', stage_version):
writing_process_id = int(stage_version)
for transkription_position in transkription_positions:
if transkription_position.writing_process_id == writing_process_id:
convertable_transkription_positions.append(transkription_position)
elif re.match(r'^\d\+$', stage_version):
version_range = [ *range(int(stage_version.replace('+','')), len(WritingProcess.VERSION_DESCRIPTION)) ]
for transkription_position in transkription_positions:
if transkription_position.writing_process_id in version_range:
convertable_transkription_positions.append(transkription_position)
elif re.match(r'^\d\-\d$', stage_version):
start_stop = [ int(i) for i in re.split(r'-', stage_version) ]
version_range = [ *range(start_stop[0], start_stop[1]+1) ]
for transkription_position in transkription_positions:
if transkription_position.writing_process_id in version_range:
convertable_transkription_positions.append(transkription_position)
return convertable_transkription_positions
def _get_words(self, words, highlighted_words=None):
"""Return the words that will be hightlighted.
"""
return highlighted_words if highlighted_words is not None else words
def convert(self, output_file=None, stage_version='', highlighted_words=None):
"""Prints all words.
"""
first_word_of_line = None
out = sys.stdout
if output_file is not None:
out = open(output_file, 'w')
for word in self.page.words:
if first_word_of_line is None or first_word_of_line.line_number != word.line_number:
out.write('\n')
first_word_of_line = word
if word.line_number % 2 == 0:
out.write(str(word.line_number).zfill(2) + ' ')
else:
out.write(' ')
if stage_version == '' or len(self._get_transkription_positions(word.transkription_positions, stage_version=stage_version)) > 0:
if word.text is not None:
out.write(word.text + ' ')
out.close()
@classmethod
def CREATE_CONVERTER(cls, page, non_testing=True,converter_type='', show_word_insertion_mark=False):
"""Returns a converter of type converter_type.
[:return:] SVGConverter for 'SVG', HTMLConverter for 'HTML', Converter for None
"""
cls_dict = { subclass.__name__: subclass for subclass in cls.__subclasses__() }
cls_key = converter_type + 'Converter'
if bool(cls_dict.get(cls_key)):
return cls_dict.get(cls_key)(page, non_testing, show_word_insertion_mark)
else:
return Converter(page, non_testing, show_word_insertion_mark)
class SVGConverter(Converter):
"""This class can be used to convert a 'svgWordPositions' xml file to a svg file that combines text as path and text-as-text.
"""
BG_COLOR = 'yellow'
OPACITY = '0.2'
def __init__(self, page, non_testing=True, show_word_insertion_mark=False, bg_color=BG_COLOR, opacity=OPACITY):
Converter.__init__(self, page, non_testing, show_word_insertion_mark)
self.bg_color = bg_color
self.opacity = opacity
def convert(self, output_file=None, stage_version='', highlighted_words=None):
"""Converts Page to SVG
"""
title = self.page.title if(self.page.title is not None) else 'Test Page'
title = '{}, S. {}'.format(title, self.page.number) if (self.page.number is not None) else title
- transkription_field = TranskriptionField(self.page.svg_file)
+ svg_file = self.page.svg_file
+ if svg_file is None and self.page.svg_image is not None:
+ svg_file = self.page.svg_image.file_name
+ elif svg_file is None:
+ msg = f'ERROR: xml_source_file {self.page.docinfo.URL} does neither have a svg_file nor a svg_image!'
+ raise Exception(msg)
+ transkription_field = TranskriptionField(svg_file)
if bool(transkription_field.get_svg_attributes('xmlns')):
ET.register_namespace('', transkription_field.get_svg_attributes('xmlns'))
if bool(transkription_field.get_svg_attributes('xmlns:xlink')):
ET.register_namespace('xlink', transkription_field.get_svg_attributes('xmlns:xlink'))
- svg_tree = ET.parse(self.page.svg_file)
+ svg_tree = ET.parse(svg_file)
transkription_node = ET.SubElement(svg_tree.getroot(), 'g', attrib={'id': 'Transkription'})
colors = [ 'yellow', 'orange' ] if self.bg_color == self.BG_COLOR else [ self.bg_color ]
if highlighted_words is not None:
colors = ['yellow']
else:
highlighted_words = []
color_index = 0
for word in self.page.words:
word_id = 'word_' + str(word.id)
for transkription_position in self._get_transkription_positions(word.transkription_positions, stage_version=stage_version):
transkription_position_id = word_id + '_' + str(transkription_position.id)
color = colors[color_index] if word not in highlighted_words else self.bg_color
rect_node = ET.SubElement(transkription_node, 'rect',\
attrib={'id': transkription_position_id, 'x': str(transkription_position.left + transkription_field.xmin),\
'y': str(transkription_position.top + transkription_field.ymin), 'width': str(transkription_position.width),\
'height': str(transkription_position.height), 'fill': color, 'opacity': self.opacity})
if transkription_position.transform is not None:
matrix = transkription_position.transform.clone_transformation_matrix()
matrix.matrix[Matrix.XINDEX] = round(transkription_position.transform.matrix[Matrix.XINDEX] + transkription_field.xmin, 3)
matrix.matrix[Matrix.YINDEX] = round(transkription_position.transform.matrix[Matrix.YINDEX] + transkription_field.ymin, 3)
rect_node.set('transform', matrix.toString())
rect_node.set('x', str(round(transkription_position.left - transkription_position.transform.matrix[Matrix.XINDEX], 3)))
rect_node.set('y', str(round((transkription_position.height-1.5)*-1, 3)))
ET.SubElement(rect_node, 'title').text = word.text
color_index = (color_index + 1) % len(colors)
if output_file is not None:
svg_tree.write(output_file)
class HTMLConverter(Converter):
"""This class can be used to convert a 'svgWordPositions' xml file to a test HTML file.
"""
CSS = """ .highlight0 { background-color: yellow; opacity: 0.2; }
.highlight1 { background-color: pink; opacity: 0.2; }
.foreign { background-color: blue; opacity: 0.4; }
.word-insertion-mark { background-color: orange; opacity: 0.2; }
.deleted { background-color: grey; opacity: 0.2; }
"""
def __init__(self, page, non_testing=True, show_word_insertion_mark=False):
Converter.__init__(self, page, non_testing, show_word_insertion_mark)
def convert(self, output_file=None, stage_version='', highlighted_words=None):
"""Converts Page to HTML
"""
title = self.page.title if(self.page.title is not None) else 'Test Page'
title = '{}, S. {}'.format(title, self.page.number) if (self.page.number is not None) else title
if stage_version != '':
title = title + ', Schreibstufe: ' + stage_version
width = self.page.width
height = self.page.height
style_content = ' position: relative; width: {}px; height: {}px; background-image: url({}); background-size: {}px {}px '\
.format(width, height, path.abspath(self.page.svg_file), width, height)
style = E.STYLE('#transkription {' + style_content + '}', HTMLConverter.CSS)
head = E.HEAD(E.TITLE(title),E.META(charset='UTF-8'), style)
transkription = E.DIV(id="transkription")
counter = 0
for word in self.page.words:
highlight_class = 'highlight' + str(counter)\
if not word.deleted else 'deleted'
earlier_text = '' if word.earlier_version is None else word.earlier_version.text
if earlier_text == '' and len(word.word_parts) > 0:
earlier_versions = [ word for word in word.word_parts if word.earlier_version is not None ]
earlier_text = earlier_versions[0].text if len(earlier_versions) > 0 else ''
if earlier_text != '':
word_title = 'id: {}/line: {}\n0: {}\n1: {}'.format(str(word.id), str(word.line_number), earlier_text, word.text)
else:
word_title = 'id: {}/line: {}\n{}'.format(str(word.id), str(word.line_number), word.text)
for transkription_position in self._get_transkription_positions(word.transkription_positions, stage_version=stage_version):
self._append2transkription(transkription, highlight_class, word_title, transkription_position)
for part_word in word.word_parts:
for part_transkription_position in self._get_transkription_positions(part_word.transkription_positions, stage_version=stage_version):
self._append2transkription(transkription, highlight_class, word_title, part_transkription_position)
counter = (counter + 1) % 2
word_insertion_mark_class = 'word-insertion-mark'
counter = 0
for mark_foreign_hands in self.page.mark_foreign_hands:
highlight_class = 'foreign'
title = 'id: {}/line: {}\n{} {}'.format(str(mark_foreign_hands.id), str(word.line_number),\
mark_foreign_hands.foreign_hands_text, mark_foreign_hands.pen)
for transkription_position in mark_foreign_hands.transkription_positions:
self._append2transkription(transkription, highlight_class, title, transkription_position)
if self.show_word_insertion_mark:
for word_insertion_mark in self.page.word_insertion_marks:
wim_title = 'id: {}/line: {}\nword insertion mark'.format(str(word_insertion_mark.id), str(word_insertion_mark.line_number))
style_content = 'position:absolute; top:{0}px; left:{1}px; width:{2}px; height:{3}px;'.format(\
word_insertion_mark.top, word_insertion_mark.left, word_insertion_mark.width, word_insertion_mark.height)
link = E.A(' ', E.CLASS(word_insertion_mark_class), title=wim_title, style=style_content)
transkription.append(link)
html = E.HTML(head,E.BODY(transkription))
bool(self.non_testing) and open_in_browser(html)
if output_file is not None:
with open(output_file, 'wb') as f:
f.write(lxml.html.tostring(html, pretty_print=True, include_meta_content_type=True, encoding='utf-8'))
f.closed
def _append2transkription(self, transkription, highlight_class, title, transkription_position):
"""Append content to transkription-div.
"""
style_content = 'position:absolute; top:{0}px; left:{1}px; width:{2}px; height:{3}px;'.format(\
transkription_position.top, transkription_position.left, transkription_position.width, transkription_position.height)
if transkription_position.transform is not None:
style_content = style_content + ' transform: {}; '.format(transkription_position.transform.toCSSTransformString())
transform_origin_x = (transkription_position.left-round(transkription_position.transform.getX(), 1))*-1\
if (transkription_position.left-round(transkription_position.transform.getX(), 1))*-1 < 0 else 0
style_content = style_content + ' transform-origin: {}px {}px; '.format(transform_origin_x, transkription_position.height)
link = E.A(' ', E.CLASS(highlight_class), title=title, style=style_content)
transkription.append(link)
def create_pdf_with_highlighted_words(xml_source_file=None, page=None, highlighted_words=None, pdf_file_name='output.pdf', bg_color=SVGConverter.BG_COLOR):
"""Creates a pdf file highlighting some words.
"""
if not pdf_file_name.endswith('pdf'):
pdf_file_name = pdf_file_name + '.pdf'
tmp_svg_file = pdf_file_name.replace('.pdf', '.svg')
create_svg_with_highlighted_words(xml_source_file=xml_source_file, page=page, highlighted_words=highlighted_words,\
svg_file_name=tmp_svg_file, bg_color=bg_color)
if isfile(tmp_svg_file):
cairosvg.svg2pdf(url=tmp_svg_file, write_to=pdf_file_name)
remove(tmp_svg_file)
def create_svg_with_highlighted_words(xml_source_file=None, page=None, highlighted_words=None, svg_file_name='output.svg', bg_color=SVGConverter.BG_COLOR):
"""Creates a svg file highlighting some words.
"""
if page is None and xml_source_file is not None:
- page = Page(xml_source_file=xml_source_file)
+ page = Page(xml_source_file)
converter = SVGConverter(page, bg_color=bg_color)
if not svg_file_name.endswith('svg'):
svg_file_name = svg_file_name + '.svg'
converter.convert(output_file=svg_file_name, highlighted_words=highlighted_words)
def usage():
"""prints information on how to use the script
"""
print(main.__doc__)
def main(argv):
"""This program can be used to convert the word positions to HTML, SVG or TEXT for testing purposes.
svgscripts/convert_wordPositions.py OPTIONS
OPTIONS:
-h|--help: show help
-H|--HTML [default] convert to HTML test file
-o|--output=outputFile save output to file outputFile
-P|--PDF convert to PDF test file
-S|--SVG convert to SVG test file
-s|--svg=svgFile: svg web file
-T|--TEXT convert to TEXT output
-t|--testing execute in test mode, do not write to file or open browser
-w|--word-insertion-mark show word insertion mark on HTML
-v|--version=VERSION show words that belong to writing process VERSION: { 0, 1, 2, 0-1, 0+, etc. }
:return: exit code (int)
"""
convert_to_type = None
svg_file = None
output_file = None
non_testing = True
show_word_insertion_mark = False
page = None
stage_version = ''
try:
opts, args = getopt.getopt(argv, "htHPSTws:o:v:", ["help", "testing", "HTML", "PDF", "SVG", "TEXT", "word-insertion-mark", "svg=", "output=", "version="])
except getopt.GetoptError:
usage()
return 2
for opt, arg in opts:
if opt in ('-h', '--help') or not args:
usage()
return 0
elif opt in ('-v', '--version'):
if re.match(r'^(\d|\d\+|\d\-\d)$', arg):
stage_version = arg
else:
raise ValueError('OPTION -v|--version=VERSION does not work with "{}" as value for VERSION!'.format(arg))
elif opt in ('-w', '--word-insertion-mark'):
show_word_insertion_mark = True
elif opt in ('-P', '--PDF'):
convert_to_type = 'PDF'
elif opt in ('-S', '--SVG'):
convert_to_type = 'SVG'
elif opt in ('-T', '--TEXT'):
convert_to_type = 'TEXT'
elif opt in ('-H', '--HTML'):
convert_to_type = 'HTML'
elif opt in ('-t', '--testing'):
non_testing = False
elif opt in ('-s', '--svg'):
svg_file = arg
elif opt in ('-o', '--output'):
output_file = arg
if len(args) < 1:
usage()
return 2
if convert_to_type is None:
if output_file is not None and len(re.split(r'\.', output_file)) > 1:
output_file_part_list = re.split(r'\.', output_file)
convert_to_type = output_file_part_list[len(output_file_part_list)-1].upper()
else:
convert_to_type = 'HTML'
for word_position_file in args:
if not isfile(word_position_file):
print("'{}' does not exist!".format(word_position_file))
return 2
if convert_to_type == 'PDF':
if output_file is None:
output_file = 'output.pdf'
create_pdf_with_highlighted_words(word_position_file, pdf_file_name=output_file)
else:
if svg_file is not None:
if isfile(svg_file):
- page = Page(xml_source_file=word_position_file, svg_file=svg_file)
+ page = PageCreator(word_position_file, svg_file=svg_file)
else:
print("'{}' does not exist!".format(word_position_file))
return 2
else:
- page = Page(xml_source_file=word_position_file)
+ page = Page(word_position_file)
if page.svg_file is None:
print('Please specify a svg file!')
usage()
return 2
converter = Converter.CREATE_CONVERTER(page, non_testing=non_testing, converter_type=convert_to_type, show_word_insertion_mark=show_word_insertion_mark)
converter.convert(output_file=output_file, stage_version=stage_version)
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
Index: svgscripts/create_task.py
===================================================================
--- svgscripts/create_task.py (revision 72)
+++ svgscripts/create_task.py (revision 73)
@@ -1,313 +1,313 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This program can be used to create a task.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see 1}}}
import abc
from colorama import Fore, Style
import getopt
import inspect
import itertools
import lxml.etree as ET
import re
import shutil
import sys
import os
from os import listdir, sep, makedirs
from os.path import exists, isfile, isdir, dirname, basename, splitext
if dirname(__file__) not in sys.path:
sys.path.append(dirname(__file__))
from convert_wordPositions import create_pdf_with_highlighted_words, create_svg_with_highlighted_words
from util import copy_xml_file_word_pos_only, get_mismatching_ids
from datatypes.page import Page
from datatypes.faksimile import FaksimilePage
#from join_faksimileAndTranskription import STATUS_MERGED_OK
from util import ExternalViewer, create_highlighted_svg_file
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
UNITTESTING = False
HIGHLIGHT_COLOR = 'red'
OPACITY = '0.5'
class Task(metaclass=abc.ABCMeta):
"""This abstract class can be used to create a task.
"""
finish_dir = 'Fertig'
def __init__(self, xml_source_file, target_dir, page=None, faksimile_svgFile=None, dirname=None, description='', edit_transkription=False, edit_xml=False, manual=None, status_contains='', bg_color=HIGHLIGHT_COLOR, opacity=OPACITY):
self.xml_source_file = xml_source_file
self.page = page
if self.page is None:
- self.page = Page(xml_source_file=self.xml_source_file)
+ self.page = Page(self.xml_source_file)
self.faksimile_svgFile = faksimile_svgFile
self.target_dir = target_dir + sep + dirname\
if dirname is not None else target_dir
self.dirname = dirname
self.description = description if description != '' else self.__doc__
self.edit_transkription = edit_transkription
self.edit_xml = edit_xml
self.manual = manual
self.status_contains = status_contains
self.bg_color = bg_color
self.opacity = opacity
self.created_files = []
self.finished_files = []
if isdir(self.target_dir):
self.created_files = [ created_file for created_file in listdir(self.target_dir) if not isdir(created_file) ]
if isdir(self.target_dir + sep + self.finish_dir):
self.finished_files = listdir(self.target_dir + sep + self.finish_dir)
def create(self):
makedirs(self.target_dir + sep + Task.finish_dir, exist_ok=True)
if self.manual is not None and isfile(self.manual):
shutil.copy(self.manual, self.target_dir)
words = self.select_words(self.page.words)
if not self.edit_transkription:
transkription_file = self.target_dir + sep + self.create_file_name(self.page, is_faksimile_svg=False, suffix='.pdf')
create_pdf_with_highlighted_words(page=self.page, highlighted_words=words,\
pdf_file_name=transkription_file, bg_color=self.bg_color)
else:
transkription_file = self.target_dir + sep + self.create_file_name(self.page, is_faksimile_svg=False)
create_svg_with_highlighted_words(page=self.page, highlighted_words=words,\
svg_file_name=transkription_file, bg_color=self.bg_color)
if self.edit_xml:
xml_file = copy_xml_file_word_pos_only(self.page.page_tree.docinfo.URL, self.target_dir)
self.created_files.append(xml_file)
note = self.create_note_about_missing_words()
if note != '':
note_file = self.target_dir + sep + self.create_file_name(self.page, is_faksimile_svg=False, suffix='.txt')
with open(note_file, 'w+') as f:
f.write(note)
f.close()
if isfile(transkription_file):
self.created_files.append(transkription_file)
source_svg_file = self.page.faksimile_svgFile if self.page.faksimile_svgFile is not None\
else self.faksimile_svgFile
if source_svg_file is None:
raise Exception('source_svg_file not specified: neither page nor self have a faksimile_svgFile!')
svg_file = self.target_dir + sep + self.create_file_name(self.page)\
if self.page.title != '' and self.page.number != -1\
else self.target_dir + sep + basename(source_svg_file)
faksimile_tree = ET.parse(source_svg_file)
node_ids = self.get_node_ids()
create_highlighted_svg_file(faksimile_tree, node_ids, target_file=svg_file,\
highlight_color=self.bg_color, opacity=self.opacity)
if isfile(svg_file):
self.created_files.append(svg_file)
def create_file_name(self, page, suffix='.svg', is_faksimile_svg=True):
"""Return a file name for page.
"""
if is_faksimile_svg:
return page.title.replace(' ', '-') + ',{}.svg'.format(str(page.number))
else:
return basename(page.page_tree.docinfo.URL).replace('.xml', suffix)
def create_note_about_missing_words(self):
"""Create a note about missing words for faksimile and transkription ids.
"""
return ''
def contains_file(self, file_name, is_finished=False):
"""Return whether task created a file with basename file_name.
"""
if is_finished:
return len([ finished_file for finished_file in self.finished_files if basename(finished_file) == basename(file_name) ]) > 0
return len([ created_file for created_file in self.created_files if basename(created_file) == basename(file_name) ]) > 0
def get_fullpath4file(self, file_name):
"""Return full path for created file with file_name.
"""
if not self.contains_file(file_name):
return None
return [ created_file for created_file in self.created_files if basename(created_file) == basename(file_name) ][0]
@abc.abstractmethod
def get_node_ids(self):
"""Return node ids for faksimile svg rect.
"""
pass
def has_been_created(self, page):
"""Return true if task has been created.
"""
faksimile_svg = self.create_file_name(page)
transkription_svg = self.create_file_name(page, is_faksimile_svg=False)
xml_file = self.create_file_name(page, is_faksimile_svg=False, suffix='.xml')
return self.contains_file(faksimile_svg)\
or self.contains_file(transkription_svg)\
or self.contains_file(xml_file)\
or self.has_been_finished(page, faksimile_svg=faksimile_svg,\
transkription_svg=transkription_svg, xml_file=xml_file)
def has_been_finished(self, page, faksimile_svg=None, transkription_svg=None, xml_file=None):
"""Return true if task has been finished.
"""
if faksimile_svg is None:
faksimile_svg = self.create_file_name(page)
if transkription_svg is None:
transkription_svg = self.create_file_name(page, is_faksimile_svg=False)
if xml_file is None:
xml_file = self.create_file_name(page, is_faksimile_svg=False, suffix='.xml')
return self.contains_file(faksimile_svg, is_finished=True)\
or self.contains_file(transkription_svg, is_finished=True)\
or self.contains_file(xml_file, is_finished=True)
@abc.abstractmethod
def select_words(self, words):
"""Returns selected words.
"""
pass
class SplitFaksimileWordBoxes(Task):
"""Split faksimile word boxes according to how many boxes a word has on the transkription.
TODO
"""
def __init__(self, xml_source_file, target_dir):
super(SplitFaksimileWordBoxes, self).__int__(xml_source_file, target_dir,\
status_contains=STATUS_MERGED_OK)
def select_words(self, words):
"""Returns selected words. TODO
"""
#TODO create those functions!!!!
#return [ word for word in words if word.hasParts() and word.partsMissFaksimilePostion() ]
return words
class CorrectWords(Task):
"""Correct words from faksimile and from transkription such that they correspond.
"""
def __init__(self, xml_source_file, source_svg_file, target_dir, page=None, unmatched_node_ids=None, edit_xml=True):
super(CorrectWords, self).__init__(xml_source_file, target_dir, page=page, faksimile_svgFile=source_svg_file,\
edit_transkription=True, edit_xml=edit_xml)
self.unmatched_words = []
self.unmatched_faksimile_positions = []
self.unmatched_node_ids = unmatched_node_ids if unmatched_node_ids is not None else []
if self.page is None:
- self.page = Page(xml_source_file=self.xml_source_file)
+ self.page = Page(self.xml_source_file)
self.init_unmatched_words()
def init_unmatched_words(self):
"""Init unmatched ids.
"""
source_svg_file = self.page.faksimile_svgFile if self.page.faksimile_svgFile is not None\
else self.faksimile_svgFile
faksimile_tree = ET.parse(source_svg_file)
faksimile_page = FaksimilePage.GET_FAKSIMILEPAGES(faksimile_tree, page_number=str(self.page.number))[0]
self.unmatched_words, self.unmatched_faksimile_positions = get_mismatching_ids(self.page.words, faksimile_page.word_positions)
def create_note_about_missing_words(self):
"""Create a note about missing words for faksimile and transkription ids.
"""
note = '{0},{1}: nicht übereinstimmende Wörter.\n'.format(self.page.title, str(self.page.number))
if len(self.unmatched_words) > 0:
note += '\nFolgende Transkription-Wörter haben keine Entsprechung bei den Wörtern auf dem Faksimile:\n'
for word in self.unmatched_words:
note += '- "{0}", id="{1}", line_number: {2}\n'.format(word.text, word.id, word.line_number)
if len(self.unmatched_faksimile_positions) > 0:
note += '\nFolgende Faksimile-Wörter haben keine Entsprechung bei den Wörtern der Transkription:\n'
for faksimile_position in self.unmatched_faksimile_positions:
note += '- "{0}", id: {1}\n'.format(faksimile_position.text, faksimile_position.id)
return note
def get_target_filepath(self, page, is_faksimile_svg=True, suffix='.svg', is_finished=False):
"""Return target filepath for page.
"""
if is_finished:
return self.target_dir + sep + self.finish_dir + sep + self.create_file_name(page, is_faksimile_svg=is_faksimile_svg, suffix=suffix)
return self.target_dir + sep + self.create_file_name(page, is_faksimile_svg=is_faksimile_svg, suffix=suffix)
def get_node_ids(self):
"""Return node ids for faksimile svg rect.
"""
return self.unmatched_node_ids
def select_words(self, words):
"""Return words that match unmatched_word_ids.
"""
if len(self.unmatched_words) == 0:
return words
return self.unmatched_words
def usage(func_name):
"""prints information on how to use the script
"""
print(func_name.__doc__)
def main_correct_words(argv):
"""This program can be used to create the task 'CorrectWords' in directory ./correct-words.
svgscripts/copy_faksimile_svg_file.py [OPTIONS]
OPTIONS:
-h|--help: show help
-r|--refdir=dir reference directory
:return: exit code (int)
"""
tmp_dir = './correct-words'
ref_dir = None
try:
opts, args = getopt.getopt(argv, "hr:", ["help", "refdir=" ])
except getopt.GetoptError:
usage(eval(inspect.currentframe().f_code.co_name))
return 2
for opt, arg in opts:
if opt in ('-h', '--help'):
usage(eval(inspect.currentframe().f_code.co_name))
return 0
elif opt in ('-r', '--refdir'):
ref_dir = arg
if len(args) < 1:
usage(eval(inspect.currentframe().f_code.co_name))
return 2
exit_status = 0
for xml_source_file in args:
if isfile(xml_source_file):
- page = Page(xml_source_file=xml_source_file)
+ page = Page(xml_source_file)
if ref_dir is not None and isdir(ref_dir)\
and isfile(ref_dir + sep + basename(xml_source_file)):
- ref_page = Page(xml_source_file=ref_dir + sep + basename(xml_source_file))
+ ref_page = Page(ref_dir + sep + basename(xml_source_file))
page.words = ref_page.words
if page.faksimile_svgFile is not None\
and isfile(page.faksimile_svgFile):
correct_words = CorrectWords(xml_source_file, page.faksimile_svgFile, tmp_dir, page=page)
for faksimile_position in correct_words.unmatched_faksimile_positions:
correct_words.unmatched_node_ids.append(faksimile_position.id)
correct_words.create()
else:
print('Skipping {0}. File does not contain a valid faksimile_svgFile reference!'.format(xml_source_file))
return exit_status
def main(argv):
return main_correct_words(argv)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
Index: svgscripts/extractWordPosition.py
===================================================================
--- svgscripts/extractWordPosition.py (revision 72)
+++ svgscripts/extractWordPosition.py (revision 73)
@@ -1,586 +1,576 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This program can be used to extract the position of the words in a svg file and write them to a xml file.
"""
# Copyright (C) University of Basel 2019 {{{1
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see 1}}}
import inspect
import getopt
from lxml import etree as ET
from os import sep, listdir, mkdir, path
from os.path import exists, isfile, isdir
from progress.bar import Bar
import re
import sys
import warnings
from datatypes.lineNumber import LineNumber
from datatypes.matrix import Matrix
-from datatypes.page import Page
+from datatypes.page_creator import PageCreator, FILE_TYPE_SVG_WORD_POSITION, FILE_TYPE_XML_MANUSCRIPT
from datatypes.pdf import PDFText
from datatypes.transkriptionField import TranskriptionField
from datatypes.transkription_position import TranskriptionPosition
from datatypes.word import Word
from datatypes.word_insertion_mark import WordInsertionMark
sys.path.append('shared_util')
from myxmlwriter import write_pretty
__author__ = "Christian Steiner"
__maintainer__ = __author__
__copyright__ = 'University of Basel'
__email__ = "christian.steiner@unibas.ch"
__status__ = "Development"
__license__ = "GPL v3"
__version__ = "0.0.1"
class Extractor:
"""
This class can be used to extract the word positions in a svg file and write it to a xml file.
Args:
[xml_dir (str): target directory]
[title (str): title of document]
[manuscript_file (str): xml file containing information about the archival unity to which the current page belongs
- [extract_transkription_field_only (Boolean): if true extract_word_position will extract word positions only that
- are part of the transkription field.
"""
UNITTESTING = False
SONDERZEICHEN_LIST = [ 'A', 'B', '{', '}' ]
- def __init__(self, xml_dir=None, title=None, manuscript_file=None, extract_transkription_field_only=False, compare2pdf=False):
+ def __init__(self, xml_dir=None, title=None, manuscript_file=None, compare2pdf=False):
if bool(xml_dir):
self.xml_dir = xml_dir
not isdir(self.xml_dir) and mkdir(self.xml_dir)
else:
self.xml_dir = 'xml' if(isdir('xml')) else ''
self.latest_status = None
self.compare2pdf = compare2pdf
self.xml_dir = self.xml_dir + sep if(bool(self.xml_dir)) else ''
self.title = title
self.manuscript_file = manuscript_file
- self.extract_transkription_field_only = extract_transkription_field_only
self.manuscript_tree = None
if not bool(self.title) and bool(self.manuscript_file) and isfile(self.manuscript_file):
self.manuscript_tree = ET.parse(self.manuscript_file)
self.title = self.manuscript_tree.getroot().get('title')
elif bool(self.manuscript_file):
raise FileNotFoundError('File "{}" does not exist!'.format(self.manuscript_file))
elif bool(self.title):
self.update_title_and_manuscript(self.title, False)
def add_word(self, page, index, word_part_objs, endSign, endX, matrix=None, debug_msg=None, transkription_field=None):
"""Creates transkription_positions and a new word from word_part_objs (i.e. a list of dictionaries about parts of this word).
If word contains a Sonderzeichen as specified by self.SONDERZEICHEN_LIST, word_part_objs will be split and several words are created.
:returns: the new word counter (int)
"""
break_points = []
if(len(page.sonderzeichen_list) > 0): # check for Sonderzeichen and special chars -> mark for word insertion, create break points
for Sonderzeichen in self.SONDERZEICHEN_LIST:
contains_Sonderzeichen = [ dict['text'] == Sonderzeichen and any(sz in dict['class'] for sz in page.sonderzeichen_list) for dict in word_part_objs ]
if True in contains_Sonderzeichen:
break_points += [ (endPoint, endPoint + 1) for endPoint in [i for i, e in enumerate(contains_Sonderzeichen) if e == True ]]
for sz_point in [i for i, e in break_points]:
wim_index = len(page.word_insertion_marks)
x = float(word_part_objs[sz_point]['x'])
y = float(word_part_objs[sz_point]['y'])
if page.svg_file is not None and isfile(page.svg_file) and transkription_field is not None:
svg_path_tree = ET.parse(page.svg_file)
namespaces = { k if k is not None else 'ns': v for k, v in svg_path_tree.getroot().nsmap.items() }
xmin = transkription_field.xmin
ymin = transkription_field.ymin
wim = WordInsertionMark.CREATE_WORD_INSERTION_MARK(svg_path_tree, namespaces, id=wim_index, x=x, y=y, xmin=xmin, ymin=ymin,\
line_number=page.get_line_number(y-1), mark_type=Sonderzeichen)
page.word_insertion_marks.append(wim)
if(bool(re.search(r'\d[A-Za-z]', self.get_word_from_part_obj(word_part_objs)))): # case: digits from line number and chars from words -> create break points
THRESHOLDX = 20 # Threshold between line number and text
last_x = -1
for i, x in enumerate([float(dict['x']) for dict in word_part_objs]):
if(last_x > -1 and (x - last_x > THRESHOLDX)):
break_points.append((i, i))
last_x = x
if(len(break_points) > 0): # if there are break points -> split word_part_obj and add the corresponding words
from_index = 0
for end_point, next_from_index in break_points:
new_word_part_objs = word_part_objs[from_index:end_point]
new_endX = word_part_objs[end_point]['x']
from_index = next_from_index
index = self.add_word(page, index, new_word_part_objs, None, new_endX, matrix=matrix, debug_msg=debug_msg, transkription_field=transkription_field)
if from_index > 0 and from_index < len(word_part_objs):
new_word_part_objs = word_part_objs[from_index:]
index = self.add_word(page, index, new_word_part_objs, endSign, endX, matrix=matrix, debug_msg=debug_msg, transkription_field=transkription_field)
return index
else:
if len(word_part_objs) > 0:
transkription_positions = TranskriptionPosition.CREATE_TRANSKRIPTION_POSITION_LIST(page, word_part_objs, matrix=matrix,\
debug_msg_string=debug_msg, transkription_field=transkription_field)
text = self.get_word_from_part_obj(word_part_objs)
line_number = page.get_line_number((transkription_positions[0].bottom+transkription_positions[0].top)/2)
if line_number == -1:
if len(page.words) > 0:
lastWord = page.words[len(page.words)-1]
lastWord_lastTP = lastWord.transkription_positions[len(lastWord.transkription_positions)-1]
lastTP = transkription_positions[len(transkription_positions)-1]
if transkription_positions[0].left > lastWord_lastTP.left\
and abs(lastWord_lastTP.bottom-lastTP.bottom) < lastTP.height/2:
line_number = lastWord.line_number
else:
line_number = lastWord.line_number+1
newWord = Word(id=index, text=text, line_number=line_number, transkription_positions=transkription_positions)
page.words.append(newWord)
return int(index) + 1
else:
return int(index)
def extractAndWriteInformation(self, file_name, page_number=None, xml_target_file=None, svg_file=None, pdfFile=None, record_warnings=False, warning_filter='default'):
"""Extracts information about positions of text elements and writes them to a xml file.
"""
if isfile(file_name):
if not bool(xml_target_file):
xml_target_file = self.get_file_name(file_name, page_number)
if bool(self.xml_dir) and not bool(path.dirname(xml_target_file)):
xml_target_file = path.dirname(self.xml_dir) + sep + xml_target_file
exit_status = 0
with warnings.catch_warnings(record=record_warnings) as w:
warnings.simplefilter(warning_filter)
page = self.extract_information(file_name, page_number=page_number, xml_target_file=xml_target_file, svg_file=svg_file, pdfFile=pdfFile)
status_message = 'OK'
if w is not None and len(w) > 0:
status_message = 'with warnings'
- if True in [ str(warn.message).startswith(Page.WARNING_MISSING_USE_NODE4PWP) for warn in w ]:
- status_message += ':{}:'.format(Page.WARNING_MISSING_USE_NODE4PWP.lower())
- if True in [ str(warn.message).startswith(Page.WARNING_MISSING_GLYPH_ID4WIM) for warn in w ]:
- status_message += ':{}:'.format(Page.WARNING_MISSING_GLYPH_ID4WIM.lower())
+ if True in [ str(warn.message).startswith(PageCreator.WARNING_MISSING_USE_NODE4PWP) for warn in w ]:
+ status_message += ':{}:'.format(PageCreator.WARNING_MISSING_USE_NODE4PWP.lower())
+ if True in [ str(warn.message).startswith(PageCreator.WARNING_MISSING_GLYPH_ID4WIM) for warn in w ]:
+ status_message += ':{}:'.format(PageCreator.WARNING_MISSING_GLYPH_ID4WIM.lower())
self.latest_status = status_message
exit_status = 1
else:
self.latest_status = None
page.page_tree.getroot().set('status', status_message)
- write_pretty(xml_element_tree=page.page_tree, file_name=xml_target_file, script_name=__file__, file_type='svgWordPosition')
+ write_pretty(xml_element_tree=page.page_tree, file_name=xml_target_file, script_name=__file__, file_type=FILE_TYPE_SVG_WORD_POSITION)
return exit_status
else:
raise FileNotFoundError('\"{}\" is not an existing file!'.format(file_name))
def extract_information(self, file_name, page_number=None, xml_target_file=None, svg_file=None, pdfFile=None):
"""Extracts information about positions of text elements.
- [:returns:] (datatypes.page) the Page containing all information.
+ [:returns:] (PageCreator) the PageCreator containing all information.
"""
if isfile(file_name):
if not bool(xml_target_file):
xml_target_file = self.get_file_name(file_name, page_number)
if bool(self.xml_dir) and not bool(path.dirname(xml_target_file)):
xml_target_file = path.dirname(self.xml_dir) + sep + xml_target_file
- transkription_field = TranskriptionField(file_name) if bool(self.extract_transkription_field_only) else None
+ transkription_field = TranskriptionField(file_name)
svg_tree = ET.parse(file_name)
- page = Page(xml_target_file=xml_target_file, title=self.title, page_number=page_number, pdfFile=pdfFile,\
- svg_file=svg_file, extract_transkription_field_only=self.extract_transkription_field_only)
- page.add_source(file_name)
+ page = PageCreator(xml_target_file, title=self.title, page_number=page_number, pdfFile=pdfFile,\
+ svg_file=svg_file, source=file_name)
sonderzeichen_list, letterspacing_list, style_dict = self.get_style(svg_tree.getroot())
page.add_style(sonderzeichen_list=sonderzeichen_list, letterspacing_list=letterspacing_list, style_dict=style_dict)
if transkription_field is not None:
page.init_line_numbers(self.extract_line_numbers(svg_tree, transkription_field), transkription_field.ymax)
self.extract_word_position(svg_tree, page, transkription_field=transkription_field)
page.create_writing_processes_and_attach2tree()
page.update_and_attach_words2tree()
for word_insertion_mark in page.word_insertion_marks:
# it is not clear if we really need to know this alternative word ordering. See 'TODO.md'
#word_insertion_mark.inserted_words = self.find_inserted_words(page.page_tree, word_insertion_mark)
word_insertion_mark.attach_object_to_tree(page.page_tree)
return page
else:
raise FileNotFoundError('\"{}\" is not an existing file!'.format(file_name))
def extract_line_numbers(self, svg_tree, transkription_field):
"""Extracts line numbers and write them to a xml file.
"""
nodes_near_tf = [ item for item in filter(lambda x: Matrix.IS_NEARX_TRANSKRIPTION_FIELD(x.get('transform'), transkription_field),\
svg_tree.getroot().iterfind('.//text', svg_tree.getroot().nsmap))]
line_numbers = [ LineNumber(raw_text_node=item, transkription_field=transkription_field)\
for item in filter(lambda x: LineNumber.IS_A_LINE_NUMBER(x), nodes_near_tf)]
if len(line_numbers) > 0:
MINABOVE = 3
last_to_position = transkription_field.ymin
for line_number in line_numbers:
above_current_line_bottom = line_number.bottom + transkription_field.ymin - MINABOVE
bottoms = self.get_bottoms(svg_tree.getroot(), from_position=last_to_position, to_position=above_current_line_bottom)
last_to_position = above_current_line_bottom
if len(bottoms) > 0:
current_line_top = float(bottoms[len(bottoms)-1]) - transkription_field.ymin + MINABOVE
line_number.setTop(current_line_top)
return line_numbers
def extract_word_position(self, svg_tree, page, transkription_field=None):
"""Extracts word positions.
"""
counter = 0
word_part_obj = []
endSign = '%'
last_matrix = None
MAXBOTTOMDIFF = 5
MAXXDIFF = 6
if not Extractor.UNITTESTING:
bar = Bar('extracting word positions from text_item', max=len([*self.get_text_items(svg_tree.getroot(), transkription_field=transkription_field)]))
for text_item in self.get_text_items(svg_tree.getroot(), transkription_field=transkription_field):
current_matrix = Matrix(text_item.get('transform'), transkription_field=transkription_field)
# check for line breaks
if (last_matrix is not None and len(word_part_obj) > 0 and (\
Matrix.DO_CONVERSION_FACTORS_DIFFER(last_matrix, current_matrix) or\
(abs(current_matrix.getY() - last_matrix.getY()) > MAXBOTTOMDIFF) or\
(abs(current_matrix.getX() - word_part_obj[len(word_part_obj)-1]['x']) > MAXXDIFF)))\
or (len(word_part_obj) > 0 and self.get_word_object_multi_char_x(word_part_obj[0]) > current_matrix.getX()):
endSign = '%'
if(self.get_word_from_part_obj(word_part_obj) != ''):
debug_msg = 'check for line breaks, diffx: {}, diffy: {}, diff_conversion_matrix: {}'.format(\
round(abs(current_matrix.getX() - word_part_obj[len(word_part_obj)-1]['x']), 3), round(abs(current_matrix.getY() - last_matrix.getY()), 3),\
str(Matrix.DO_CONVERSION_FACTORS_DIFFER(last_matrix, current_matrix)))
counter = self.add_word(page, counter, word_part_obj, endSign, endX, matrix=last_matrix, debug_msg=debug_msg, transkription_field=transkription_field)
word_part_obj = []
endX = current_matrix.getX()
if(len(text_item.findall(".//tspan", svg_tree.getroot().nsmap)) < 1): # case: