Coverage for src/wiktextract/page.py: 87% (266 statements)
1# Code for parsing information from a single Wiktionary page.
2#
3# Copyright (c) 2018-2022 Tatu Ylonen. See file LICENSE and https://ylonen.org
5import re
6from collections import defaultdict
7from copy import copy
8from typing import Any, Callable, Optional, Union
10from mediawiki_langcodes import name_to_code
11from wikitextprocessor.core import (
12 NamespaceDataEntry,
13 PostTemplateFnCallable,
14 TemplateArgs,
15 TemplateFnCallable,
16)
17from wikitextprocessor.node_expand import NodeHandlerFnCallable
18from wikitextprocessor.parser import GeneralNode, NodeKind, WikiNode
20from .clean import clean_value
21from .datautils import data_append, data_extend
22from .import_utils import import_extractor_module
23from .wxr_context import WiktextractContext
25# NodeKind values for subtitles
26LEVEL_KINDS = {
27 NodeKind.LEVEL2,
28 NodeKind.LEVEL3,
29 NodeKind.LEVEL4,
30 NodeKind.LEVEL5,
31 NodeKind.LEVEL6,
32}
35def parse_page(
36 wxr: WiktextractContext, page_title: str, page_text: str
37) -> list[dict[str, Any]]:
38 """Parses the text of a Wiktionary page and returns a list of
39 dictionaries, one for each word/part-of-speech defined on the page
40 for the languages specified by ``capture_language_codes`` (None means
41 all available languages). ``word`` is page title, and ``text`` is
42 page text in Wikimedia format. Other arguments indicate what is
43 captured."""
44 page_extractor_mod = import_extractor_module(wxr.wtp.lang_code, "page")
45 page_data = page_extractor_mod.parse_page(wxr, page_title, page_text)
46 if wxr.config.extract_thesaurus_pages:  # coverage: 46 ↛ 48 never taken (condition always true)
47 inject_linkages(wxr, page_data)
48 if wxr.config.dump_file_lang_code == "en":  # coverage: 48 ↛ 50 never taken (condition always true)
49 process_categories(wxr, page_data)
50 remove_duplicate_data(page_data)
51 return page_data
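# --- Illustrative sketch, not part of the covered module ---------------------
# A minimal example of how parse_page might be driven for one page, assuming a
# configured WiktextractContext `wxr` and the page's raw wikitext in
# `page_text` (both set up elsewhere).
def _example_parse_page(wxr: WiktextractContext, page_text: str) -> None:
    entries = parse_page(wxr, "example", page_text)
    for entry in entries:
        # Each entry describes one word/part-of-speech combination.
        print(entry.get("word"), entry.get("lang_code"), entry.get("pos"))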
54def is_panel_template(wxr: WiktextractContext, template_name: str) -> bool:
55 """Checks if `Template_name` is a known panel template name (i.e., one that
56 produces an infobox in Wiktionary, but this also recognizes certain other
57 templates that we do not wish to expand)."""
58 page_extractor_mod = import_extractor_module(wxr.wtp.lang_code, "page")
59 if (
60 hasattr(page_extractor_mod, "PANEL_TEMPLATES")
61 and template_name in page_extractor_mod.PANEL_TEMPLATES
62 ):
63 return True
64 if hasattr(
65 page_extractor_mod, "PANEL_PREFIXES"
66 ) and template_name.startswith(tuple(page_extractor_mod.PANEL_PREFIXES)):
67 return True
68 return False
71def recursively_extract(
72 contents: Union[WikiNode, str, list[Union[str, WikiNode]]],
73 fn: Callable[[Union[WikiNode, list[WikiNode]]], bool],
74) -> tuple[list[Union[str, WikiNode]], list[Union[str, WikiNode]]]:
75 """Recursively extracts elements from contents for which ``fn`` returns
76 True. This returns two lists, the extracted elements and the remaining
77 content (with the extracted elements removed at each level). Only
78 WikiNode objects can be extracted."""
79 # If contents is a list, process each element separately
80 extracted = []
81 new_contents = []
82 if isinstance(contents, (list, tuple)):
83 for x in contents:
84 e1, c1 = recursively_extract(x, fn)
85 extracted.extend(e1)
86 new_contents.extend(c1)
87 return extracted, new_contents
88 # If content is not WikiNode, just return it as new contents.
89 if not isinstance(contents, WikiNode):
90 return [], [contents]
91 # Check if this content should be extracted
92 if fn(contents):
93 return [contents], []
94 # Otherwise content is WikiNode, and we must recurse into it.
95 kind = contents.kind
96 new_node = copy(contents)
97 new_node.children = []
98 new_node.sarg = ""
99 new_node.largs = []
100 new_node.attrs = {}
101 new_contents.append(new_node)
102 if kind in LEVEL_KINDS or kind == NodeKind.LINK:
103 # Process args and children
104 new_args = []
105 for arg in contents.largs:
106 e1, c1 = recursively_extract(arg, fn)
107 new_args.append(c1)
108 extracted.extend(e1)
109 new_node.largs = new_args
110 e1, c1 = recursively_extract(contents.children, fn)
111 extracted.extend(e1)
112 new_node.children = c1
113 elif kind in {
114 NodeKind.ITALIC,
115 NodeKind.BOLD,
116 NodeKind.TABLE,
117 NodeKind.TABLE_CAPTION,
118 NodeKind.TABLE_ROW,
119 NodeKind.TABLE_HEADER_CELL,
120 NodeKind.TABLE_CELL,
121 NodeKind.PRE,
122 NodeKind.PREFORMATTED,
123 }:
124 # Process only children
125 e1, c1 = recursively_extract(contents.children, fn)
126 extracted.extend(e1)
127 new_node.children = c1
128 elif kind in (NodeKind.HLINE,):  # coverage: 128 ↛ 130 never taken (condition never true)
129 # No arguments or children
130 pass
131 elif kind in (NodeKind.LIST, NodeKind.LIST_ITEM):
132 # Keep args as-is, process children
133 new_node.sarg = contents.sarg
134 e1, c1 = recursively_extract(contents.children, fn)
135 extracted.extend(e1)
136 new_node.children = c1
137 elif kind in {
138 NodeKind.TEMPLATE,
139 NodeKind.TEMPLATE_ARG,
140 NodeKind.PARSER_FN,
141 NodeKind.URL,
142 }:
143 # Process only args
144 new_args = []
145 for arg in contents.largs:
146 e1, c1 = recursively_extract(arg, fn)
147 new_args.append(c1)
148 extracted.extend(e1)
149 new_node.largs = new_args
150 elif kind == NodeKind.HTML:  # coverage: 150 ↛ 158 never taken (condition always true)
151 # Keep attrs and args as-is, process children
152 new_node.attrs = contents.attrs
153 new_node.sarg = contents.sarg
154 e1, c1 = recursively_extract(contents.children, fn)
155 extracted.extend(e1)
156 new_node.children = c1
157 else:
158 raise RuntimeError(f"recursively_extract: unhandled kind {kind}")
159 return extracted, new_contents
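# --- Illustrative sketch, not part of the covered module ---------------------
# One way recursively_extract() might be used: pull all template nodes out of
# a parsed tree while keeping the remaining content, assuming the page has
# already been set up in `wxr.wtp`.
def _example_extract_templates(
    wxr: WiktextractContext, text: str
) -> tuple[list[Union[str, WikiNode]], list[Union[str, WikiNode]]]:
    tree = wxr.wtp.parse(text)
    templates, rest = recursively_extract(
        tree.children,
        lambda node: isinstance(node, WikiNode)
        and node.kind == NodeKind.TEMPLATE,
    )
    return templates, rest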
162def inject_linkages(wxr: WiktextractContext, page_data: list[dict]) -> None:
163 # Inject linkages from thesaurus entries
164 from .thesaurus import search_thesaurus
166 local_thesaurus_ns = wxr.wtp.NAMESPACE_DATA.get("Thesaurus", {}).get("name") # type: ignore[call-overload]
167 for data in page_data:
168 if "pos" not in data: 168 ↛ 169line 168 didn't jump to line 169 because the condition on line 168 was never true
169 continue
170 word = data["word"]
171 lang_code = data["lang_code"]
172 pos = data["pos"]
173 for term in search_thesaurus(
174 wxr.thesaurus_db_conn, # type:ignore[arg-type]
175 word,
176 lang_code,
177 pos, # type: ignore[arg-type]
178 ):
179 for dt in data.get(term.linkage, ()):
180 if dt.get("word") == term.term and ( 180 ↛ 183line 180 didn't jump to line 183 because the condition on line 180 was never true
181 not term.sense or dt.get("sense") == term.sense
182 ):
183 break
184 else:
185 dt = {
186 "word": term.term,
187 "source": f"{local_thesaurus_ns}:{word}",
188 }
189 if len(term.sense) > 0:  # coverage: 189 ↛ 190 never taken (condition never true)
190 dt["sense"] = term.sense
191 if len(term.tags) > 0:  # coverage: 191 ↛ 192 never taken (condition never true)
192 dt["tags"] = term.tags
193 if len(term.raw_tags) > 0:  # coverage: 193 ↛ 194 never taken (condition never true)
194 dt["raw_tags"] = term.raw_tags
195 if len(term.topics) > 0:  # coverage: 195 ↛ 196 never taken (condition never true)
196 dt["topics"] = term.topics
197 if len(term.roman) > 0:  # coverage: 197 ↛ 198 never taken (condition never true)
198 dt["roman"] = term.roman
199 data_append(data, term.linkage, dt)
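# Illustrative note (hypothetical values): each linkage injected above is a
# small dict appended under the term's linkage field (e.g. "synonyms"), roughly
#     {"word": "some term", "source": "Thesaurus:example"}
# with optional "sense", "tags", "raw_tags", "topics" and "roman" keys when
# the thesaurus entry provides them.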
202def process_categories(
203 wxr: WiktextractContext, page_data: list[dict[str, Any]]
204) -> None:
205 # Categories are not otherwise disambiguated, but if there is only
206 # one sense and only one entry in page_data for a given language, move
207 # the categories to that sole sense. Note that categories are commonly
208 # specified for the whole page, so when there are multiple entries in
209 # page_data we don't know which one they belong to (possibly not even
210 # which language).
211 # XXX can Category links be specified globally (i.e., in a different
212 # language's section)?
213 by_lang = defaultdict(list)
214 for data in page_data:
215 by_lang[data["lang"]].append(data)
216 for la, lst in by_lang.items():
217 if len(lst) > 1:
218 # Propagate categories from the last entry for the language to
219 # its other entries. It is common for them to only be specified
220 # in the last part-of-speech.
221 last = lst[-1]
222 for field in ("categories",):
223 if field not in last:
224 continue
225 vals = last[field]
226 for data in lst[:-1]:
227 assert data is not last
228 assert data.get(field) is not vals
229 if data.get("alt_of") or data.get("form_of"): 229 ↛ 230line 229 didn't jump to line 230 because the condition on line 229 was never true
230 continue # Don't add to alt-of/form-of entries
231 data_extend(data, field, vals)
232 continue
233 if len(lst) != 1:  # coverage: 233 ↛ 234 never taken (condition never true)
234 continue
235 data = lst[0]
236 senses = data.get("senses") or []
237 if len(senses) != 1:
238 continue
239 # Only one sense for this language. Move categories and certain other
240 # data to sense.
241 for field in ("categories", "topics", "wikidata", "wikipedia"):
242 if field in data:
243 v = data[field]
244 del data[field]
245 data_extend(senses[0], field, v)
247 # If the last part-of-speech of the last language (i.e., the last item in
248 # page_data) has categories or topics not bound to a sense, propagate
249 # those categories and topics to all entries in page_data. It is common
250 # for categories to be specified at the end of an article, and apparently
251 # these can also apply to different languages.
252 if len(page_data) > 1:
253 last = page_data[-1]
254 for field in ("categories",):
255 if field not in last:
256 continue
257 lst = last[field]
258 for data in page_data[:-1]:
259 if data.get("form_of") or data.get("alt_of"): 259 ↛ 260line 259 didn't jump to line 260 because the condition on line 259 was never true
260 continue # Don't add to form_of or alt_of entries
261 data_extend(data, field, lst)
263 # Remove category links that start with a language name from entries for
264 # different languages
265 rhymes_ns_prefix = (
266 wxr.wtp.NAMESPACE_DATA.get("Rhymes", {}).get("name", "") + ":" # type: ignore[call-overload]
267 )
268 for data in page_data:
269 lang_code = data.get("lang_code")
270 cats = data.get("categories", [])
271 new_cats = []
272 for cat in cats:
273 no_prefix_cat = cat.removeprefix(rhymes_ns_prefix)
274 cat_lang = no_prefix_cat.split(maxsplit=1)[0].split(
275 "/", maxsplit=1
276 )[0]
277 cat_lang_code = name_to_code(cat_lang, "en")
278 if (
279 cat_lang_code != ""
280 and cat_lang_code != lang_code
281 and not (lang_code == "mul" and cat_lang_code == "en")
282 ):
283 continue
284 new_cats.append(cat)
285 if len(new_cats) == 0:
286 if "categories" in data:
287 del data["categories"]
288 else:
289 data["categories"] = new_cats
292def remove_duplicate_data(page_data: list[dict]) -> None:
293 # Remove duplicates from tags, categories, etc.
294 for data in page_data:
295 for field in ("categories", "topics", "tags", "wikidata", "wikipedia"):
296 if field in data:
297 data[field] = list(sorted(set(data[field])))
298 for sense in data.get("senses", ()):
299 if field in sense:
300 sense[field] = list(sorted(set(sense[field])))
302 # If raw_glosses is identical to glosses, remove it
303 # If "empty-gloss" in tags and there are glosses, remove the tag
304 for data in page_data:
305 for s in data.get("senses", []):
306 rglosses = s.get("raw_glosses", ())
307 if not rglosses:
308 continue
309 sglosses = s.get("glosses", ())
310 if sglosses:  # coverage: 310 ↛ 314 never taken (condition always true)
311 tags = s.get("tags", ())
312 while "empty-gloss" in s.get("tags", ()):  # coverage: 312 ↛ 313 never taken (condition never true)
313 tags.remove("empty-gloss")
314 if len(rglosses) != len(sglosses):
315 continue
316 same = True
317 for rg, sg in zip(rglosses, sglosses):
318 if rg != sg:
319 same = False
320 break
321 if same:
322 del s["raw_glosses"]
325def clean_node(
326 wxr: WiktextractContext,
327 sense_data: Optional[Any],
328 wikinode: GeneralNode,
329 template_fn: Optional[TemplateFnCallable] = None,
330 post_template_fn: Optional[PostTemplateFnCallable] = None,
331 node_handler_fn: Optional[NodeHandlerFnCallable] = None,
332 collect_links: bool = False,
333 no_strip=False,
334 no_html_strip=False,
335) -> str:
336 """
337 Expands node or nodes to text, cleaning up HTML tags and duplicate spaces.
339 If `sense_data` is a dictionary, expanded category links will be added to
340 it under the `categories` key. If `collect_links` is `True`, expanded
341 links will be added under the `links` key.
342 """
344 # print("CLEAN_NODE:", repr(value))
345 def clean_template_fn(name: str, ht: TemplateArgs) -> Optional[str]:
346 if template_fn is not None:
347 return template_fn(name, ht)
348 if is_panel_template(wxr, name):
349 return ""
350 return None
352 def clean_node_handler_fn_default(
353 node: WikiNode,
354 ) -> Optional[list[Union[str, WikiNode]]]:
355 assert isinstance(node, WikiNode)
356 kind = node.kind
357 if kind in {
358 NodeKind.TABLE_CELL,
359 NodeKind.TABLE_HEADER_CELL,
360 }:
361 return node.children
362 return None
364 if node_handler_fn is not None:
365 # Use the caller-supplied handler instead of the default defined above
366 clean_node_handler_fn = node_handler_fn
367 else:
368 clean_node_handler_fn = clean_node_handler_fn_default
370 # print("clean_node: value={!r}".format(value))
371 v = wxr.wtp.node_to_html(
372 wikinode,
373 node_handler_fn=clean_node_handler_fn,
374 template_fn=template_fn,
375 post_template_fn=post_template_fn,
376 )
377 # print("##########")
378 # print(f"{wikinode=}")
379 # print("clean_node: v={!r}".format(v))
381 # Capture categories if sense_data has been given. We also track
382 # Lua execution errors here.
383 # If collect_links=True (for glosses), capture links
384 category_ns_data: NamespaceDataEntry = wxr.wtp.NAMESPACE_DATA.get(
385 "Category",
386 {}, # type: ignore[typeddict-item]
387 )
388 category_ns_names: set[str] = {category_ns_data.get("name")} | set(
389 category_ns_data.get("aliases") # type:ignore[assignment,arg-type]
390 )
391 category_ns_names |= {"Category", "category"}
392 category_names_pattern = rf"(?:{'|'.join(category_ns_names)})"
393 if sense_data is not None:
394 # Check for Lua execution error
395 if '<strong class="error">Lua execution error' in v: 395 ↛ 396line 395 didn't jump to line 396 because the condition on line 395 was never true
396 data_append(sense_data, "tags", "error-lua-exec")
397 if '<strong class="error">Lua timeout error' in v: 397 ↛ 398line 397 didn't jump to line 398 because the condition on line 397 was never true
398 data_append(sense_data, "tags", "error-lua-timeout")
399 # Capture Category tags
400 if not collect_links:
401 for m in re.finditer(
402 rf"(?is)\[\[:?\s*{category_names_pattern}\s*:([^]|]+)",
403 v,
404 ):
405 cat = clean_value(wxr, m.group(1))
406 cat = re.sub(r"\s+", " ", cat)
407 cat = cat.strip()
408 if not cat:  # coverage: 408 ↛ 409 never taken (condition never true)
409 continue
410 if not sense_data_has_value(sense_data, "categories", cat):
411 data_append(sense_data, "categories", cat)
412 else:
413 for m in re.finditer(
414 r"(?is)\[\[:?(\s*([^][|:]+):)?\s*([^]|]+)(\|([^]|]+))?\]\]",
415 # 1 2 3 4 5
416 v,
417 ):
418 # Other "Something:rest-of-the-link" prefixed link types could be
419 # handled here if needed.
420 if m.group(2) and m.group(2).strip() in category_ns_names:
421 cat = clean_value(wxr, m.group(3))
422 cat = re.sub(r"\s+", " ", cat)
423 cat = cat.strip()
424 if not cat:  # coverage: 424 ↛ 425 never taken (condition never true)
425 continue
426 if not sense_data_has_value(sense_data, "categories", cat):  # coverage: 426 ↛ 413 never taken (condition always true)
427 data_append(sense_data, "categories", cat)
428 elif not m.group(1):
429 if m.group(5):
430 ltext = clean_value(wxr, m.group(5))
431 ltarget = clean_value(wxr, m.group(3))
432 elif not m.group(3):  # coverage: 432 ↛ 433 never taken (condition never true)
433 continue
434 else:
435 txt = clean_value(wxr, m.group(3))
436 ltext = txt
437 ltarget = txt
438 ltarget = re.sub(r"\s+", " ", ltarget)
439 ltarget = ltarget.strip()
440 ltext = re.sub(r"\s+", " ", ltext)
441 ltext = ltext.strip()
442 if not ltext and not ltarget:  # coverage: 442 ↛ 443 never taken (condition never true)
443 continue
444 if not ltext and ltarget:  # coverage: 444 ↛ 445 never taken (condition never true)
445 ltext = ltarget
446 ltuple = (ltext, ltarget)
447 if not sense_data_has_value(sense_data, "links", ltuple):
448 data_append(sense_data, "links", ltuple)
450 v = clean_value(wxr, v, no_strip=no_strip, no_html_strip=no_html_strip)
451 # print("After clean_value:", repr(v))
453 # Strip any unhandled templates and other stuff. This is mostly intended
454 # to clean up erroneous codings in the original text.
455 # v = re.sub(r"(?s)\{\{.*", "", v)
456 # Some templates create <sup>(Category: ...)</sup>; remove
457 v = re.sub(
458 rf"(?si)\s*(?:<sup>)?\({category_names_pattern}:[^)]+\)(?:</sup>)?",
459 "",
460 v,
461 )
462 # Some templates create question mark in <sup>, e.g.,
463 # some Korean Hanja form
464 v = re.sub(r"\^\?", "", v)
465 return v
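# --- Illustrative sketch, not part of the covered module ---------------------
# A typical call pattern for clean_node(): expand a parsed node to plain text
# while collecting category links and plain links into a sense dictionary.
# `node` is assumed to come from a parsed page tree.
def _example_clean_node(wxr: WiktextractContext, node: WikiNode) -> str:
    sense: dict[str, Any] = {}
    text = clean_node(wxr, sense, node, collect_links=True)
    # sense may now contain "categories" and "links" lists extracted from the
    # expanded wikitext.
    return text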
468def sense_data_has_value(
469 sense_data: dict[str, Any], name: str, value: Any
470) -> bool:
471 """
472 Return True if `value` is contained in the attribute `name` of
473 `sense_data`, or in the value of key `name` if `sense_data` is a dictionary.
474 """
475 if hasattr(sense_data, name):
476 return value in getattr(sense_data, name)
477 elif isinstance(sense_data, dict):  # coverage: 477 ↛ 479 never taken (condition always true)
478 return value in sense_data.get(name, ()) # type:ignore[operator]
479 return False