--- ValBot/Python/check_intrawiki_section_links.py 2022/06/28 22:06:29 1173
+++ ValBot/Python/check_intrawiki_section_links.py 2024/11/18 04:00:08 1194
@@ -1,8 +1,8 @@
# Check Intrawiki Section Links
# by iritscen@yahoo.com
# Looks at each wikilink on a page (or in all the pages in a category) for a section link ('#'),
-# and loads the linked page and verifies that the named section actually exists. The output will
-# use the keywords ADVICE, WARNING or ERROR depending on the nature of issue that it encounters.
+# and loads the linked page and verifies that the named section actually exists. It also
+# understands section links generated through a call to Template:SectionLink.
# Recommended viewing width:
# |---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- --|
@@ -20,229 +20,389 @@ from pywikibot.comms.http import fetch
from pywikibot.specialbots import UploadRobot
from bs4 import BeautifulSoup
-# Array of OniGalore's namespaces
+# Tuple of OniGalore's namespaces
intrawiki_prefixes = ('Image', 'Special', 'Talk', 'User', 'User_talk', 'OniGalore', 'OniGalore_talk', 'File', 'File_talk', 'MediaWiki', 'MediaWiki_talk', 'Template', 'Template_talk', 'Help', 'Help_talk', 'Category', 'Category_talk', 'BSL', 'BSL_talk', 'OBD', 'OBD_talk', 'AE', 'AE_talk', 'Oni2', 'Oni2_talk', 'XML', 'XML_talk')
# URL for main namespace of our wiki
onigalore_url = 'https://wiki.oni2.net/'
-# Interwiki prefixes, for ruling out these links
+# Tuple of interwiki prefixes, for recognizing and passing over such links
interwiki_prefixes = ('acronym', 'cache', 'commons', 'dictionary', 'google', 'metawikimedia', 'mw', 'wikibooks', 'wikidata', 'wikimedia', 'wikinews', 'wikipedia', 'wikiquote', 'wikisource', 'wikispecies', 'wikiversity', 'wikivoyage', 'wikt', 'wiktionary', 'wp')
+# List of chapter names, for substitution into links that use "{{Cn}}" transclusion
+chapter_names = ['CHAPTER_00_._COMBAT_TRAINING', 'CHAPTER_01_._TRIAL_RUN', 'CHAPTER_02_._ENGINES_OF_EVIL', 'CHAPTER_03_._PUZZLE_PIECES', 'CHAPTER_04_._TIGER_BY_THE_TAIL', 'CHAPTER_05_._HOT_PURSUIT', 'CHAPTER_06_._COUNTERATTACK', 'CHAPTER_07_._A_FRIEND_IN_NEED', 'CHAPTER_08_._AN_INNOCENT_LIFE', 'CHAPTER_09_._TRUTH_AND_CONSEQUENCES', 'CHAPTER_10_._CAT_AND_MOUSE', 'CHAPTER_11_._DREAM_DIVER', 'CHAPTER_12_._SINS_OF_THE_FATHER', 'CHAPTER_13_._PHOENIX_RISING', 'CHAPTER_14_._DAWN_OF_THE_CHRYSALIS']
+
+# Tuple of patterns for recognizing wikilinks
+# Pattern 1: Detect "[[anything]]", "[[any:thing]]", "[[any|thing]]", "[[any:thi|ng]]"
+# Pattern 2: Detect "{{SectionLink|Page|Section name}}", "{{SectionLink||Section name}}"
+link_patterns = (r"\[\[[^|\]]*(\||\])", r"\{\{SectionLink\|[^|\}]*\|[^|\}]*\}\}")
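+# (Pattern 1 captures everything up to and including the "|" or "]" that ends the page-name
+# portion of a wikilink; pattern 2 captures an entire SectionLink transclusion.)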
+
+# Initialize globals
+debug = 0
pages_checked = 0
iw_found = 0
advice_issued = 0
-warnings_issued = 0
errors_issued = 0
-page_name = ''
+name_printed = 0
+
+# Prints the name of a page on which something occurred, if it has not been printed before
+def possibly_print(page_name):
+ global debug
+ global name_printed
+
+ if not name_printed and not debug:
+ pywikibot.stdout('')
+ pywikibot.stdout('From page "{}":'.format(page_name))
+ name_printed = 1
+
+# Search a page for the section specified in the link
+def find_section(page_text, page_name, page_slug, print_result):
+ global errors_issued
+ found_section = False
+
+ # Isolate section link or text fragment link
+ target_page_name, anchor_name = page_slug.split('#', 1)
+ target_page_name_human = target_page_name.replace('_', ' ')
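+   # e.g. "Some_page#Some_section" splits into target page "Some_page" ("Some page" for
+   # display) and anchor "Some_section"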
+
+ # First check if this is a text fragment directive, and look for it if so
+ if anchor_name.startswith(':~:text='):
+ if debug: pywikibot.stdout(' Found text fragment directive {} from URL {}.'.format(anchor_name, page_slug))
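+      # Strip the 8-character ":~:text=" prefix, leaving only the directive's search terms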
+ anchor_name = anchor_name[8:]
+ # We're only checking the first text directive, so strip add'l ones if present
+ addl_fragment = anchor_name.find('&text=')
+ if addl_fragment != -1:
+ anchor_name = anchor_name[:addl_fragment]
+ search_terms = anchor_name.split(',')
+ # Delete prefix and suffix terms because they aren't needed
+ if search_terms[0].endswith('-'):
+ search_terms.pop(0)
+ if search_terms[-1].startswith('-'):
+ search_terms.pop()
+ # Remake text directive with the terms separated by spaces as they should be in the page text
+      search_string = ' '.join(search_terms)
+ if debug: pywikibot.stdout(' Converted text fragment to string "{}".'.format(search_string))
+ if search_string in page_text:
+ found_section = True
+ if debug and not print_result: pywikibot.stdout(' Found text fragment!')
+
+   # If we didn't find a text fragment above (or this wasn't one), treat the anchor as a
+   # section link and read the linked page to see if it really has this anchor
+ if found_section == False:
+ if debug: pywikibot.stdout(' Searching for section link {} on page.'.format(anchor_name))
+ soup = BeautifulSoup(page_text, 'html.parser')
+ # Search for a span with this ID
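+      # (MediaWiki typically renders each section heading with a span whose "id" is the anchor name)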
+ for span_tag in soup.findAll('span'):
+ span_name = span_tag.get('id', None)
+ if span_name == anchor_name:
+ if debug and not print_result: pywikibot.stdout(' Found section in a span!')
+ found_section = True
+ break
+ if found_section == False:
+ # Search for a div with this ID
+ for span_tag in soup.findAll('div'):
+ span_name = span_tag.get('id', None)
+ if span_name == anchor_name:
+ if debug and not print_result: pywikibot.stdout(' Found section in a div!')
+ found_section = True
+ break
+ if found_section == False:
+ possibly_print(page_name)
+ pywikibot.stdout(' ERROR: Could not find section "{0}" on page {1}!'.format(anchor_name, target_page_name_human))
+ errors_issued += 1
+ elif debug and print_result:
+ pywikibot.stdout(' The section "{0}" was found on page "{1}".'.format(anchor_name, target_page_name_human))
+
+# For a link that redirected us to another page, extract the name of the target page from
+# the target page's source
+def find_canonical_link(page_text, page_name, page_slug):
+   global errors_issued
+ # Extract link from this markup which contains name of redirected-to page:
+ #
+ # "wgPageName":"Namespace:Page_name",
+ canonical_name = page_text.split('"wgPageName":"')[-1]
+ tag_end = canonical_name.find('",')
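+   # (The split leaves everything after the marker's last occurrence; tag_end marks where
+   # the page name ends)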
+
+ if tag_end == -1:
+ pywikibot.stdout(' ERROR: The link "{}" is a redirect page, but this script could not isolate the target page name.'.format(page_slug))
+      errors_issued += 1
+ else:
+ canonical_name = canonical_name[:tag_end]
+ if len(canonical_name) > 100:
+ # Certain things can cause the trim to fail; report error and avoid slamming the
+ # output with massive page source from a failed trim
+         pywikibot.stdout(' ERROR: The link "{0}" is a redirect to "{1}…" (string overflow).'.format(page_slug, canonical_name[:100]))
+         errors_issued += 1
+ else:
+ canonical_name = canonical_name.replace('_', ' ')
+ if '#' in page_slug:
+ _, anchor_name = page_slug.split('#')
+ if debug: pywikibot.stdout(' The link "{0}" is a redirect to "{1}#{2}", which is a valid page. Checking section link….'.format(page_slug, canonical_name, anchor_name))
+ find_section(page_text, page_name, page_slug, True)
+ else:
+ pywikibot.stdout(' The link "{0}" is a redirect to "{1}", which is a valid page.'.format(page_slug, canonical_name))
+
+# Test an intrawiki link and look for a section link if applicable
+def test_intrawiki_link(iw_url, page_name, page_slug):
+ global advice_issued
+ global errors_issued
+
+ response = fetch(iw_url)
+
+ # One way we tell that a redirect occurred is by checking fetch's history, as it
+ # automatically follows redirects. This will catch formal redirects which come from pages
+ # such as Special:PermanentLink.
+ if response.history != []:
+ permalink1 = 'Special:PermanentLink/'.lower()
+ permalink2 = 'Special:Permalink/'.lower()
+ page_slug_lower = page_slug.lower()
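+      # Compare in lowercase so the capitalization of the prefix in the link doesn't matter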
+ if page_slug_lower.startswith(permalink1) or page_slug_lower.startswith(permalink2):
+ if debug:
+ possibly_print(page_name)
+ pywikibot.stdout(' Got redirection code "{0}" for permanent revision link "{1}". Checking the target page….'.format(response.history[0], page_slug))
+ find_canonical_link(response.text, page_name, page_slug)
+ else:
+ possibly_print(page_name)
+         pywikibot.stdout(' ADVICE: Unrecognized type of redirection (code "{0}") for link "{1}". You should check the link manually.'.format(response.history[0], page_slug))
+ advice_issued += 1
+ elif response.status_code != 200:
+ possibly_print(page_name)
+ pywikibot.stdout(' ERROR: Got response code {0} on URL {1}. The target page may not exist.'.format(response.status_code, iw_url))
+ errors_issued += 1
+ # However the usual way that a redirect occurs is that MediaWiki redirects us sneakily
+ # using JavaScript, while returning code OK 200 as if the link was correct; this happens
+ # when a redirect page is accessed. We must detect these soft redirects by looking at the
+ # page source to find the redirect note inserted at the top of the page for the reader.
+   elif 'Redirected from' in response.text:
+      if debug:
+         possibly_print(page_name)
+         pywikibot.stdout(' Got a soft redirect for link "{}". Checking the target page….'.format(page_slug))
+      find_canonical_link(response.text, page_name, page_slug)
+   # Otherwise this is an ordinary link; check that the linked page really has this section
+   elif '#' in page_slug:
+      find_section(response.text, page_name, page_slug, False)
+
+# Scan the given page text for intrawiki links with section links in them, and test each one
+def scan_for_intrawiki_links(page_text, page_name):
+   global debug
+   global pages_checked
+   global iw_found
+   global advice_issued
+   global errors_issued
+   global name_printed
+   name_printed = 0
+   pages_checked += 1
+
+   # Gather the text of every link matched by our wikilink patterns
+   all_matches = []
+   for pattern in link_patterns:
+      all_matches += [link.group(0) for link in re.finditer(pattern, page_text)]
+
+   for match_text in all_matches:
+      found_iw_match = False
+      iw_url = ""
+      page_name2 = page_name
+
+      # Reduce the matched markup to a "slug" of the form "Page_name#Section_name"
+      if match_text.startswith('{{SectionLink|'):
+         page_slug = match_text[len('{{SectionLink|'):-2].replace('|', '#')
+      else:
+         page_slug = match_text[2:-1]
+
+      # We only care about links which contain a section link
+      if '#' not in page_slug:
+         continue
+
+      # If this is an interwiki link, pass over it
+      is_interwiki = False
+      for prefix in interwiki_prefixes:
+         if prefix + ":" in page_slug:
+            is_interwiki = True
+            break
+      if is_interwiki:
+         continue
+
+      # If the link transcludes a chapter name with "{{Cn}}", substitute the name from
+      # chapter_names so that the link can be tested
+      if '{{' in page_slug:
+         if '{{C' in page_slug:
+            ch_num = re.search(r'(?<={{C)[0-9]+(?=}})', page_slug)
+            if ch_num:
+               ch_num_match = int(ch_num.group(0))
+               if ch_num_match >= 0 and ch_num_match <= 14:
+ ch_name = chapter_names[ch_num_match]
+ replace_pattern = re.compile(r"{{C" + ch_num.group(0) + r"}}")
+ page_slug = replace_pattern.sub(ch_name, page_slug)
+ if debug: pywikibot.stdout(' After performing transclusion, link is now "{}".'.format(page_slug))
+ else:
+ possibly_print(page_name)
+ pywikibot.stdout(' ERROR: Link {0} transcludes a chapter name using an out-of-range number, {1}.'.format(page_slug, ch_num_match))
+ errors_issued += 1
+ continue
+ else:
+ possibly_print(page_name)
+ pywikibot.stdout(' ADVICE: Link {} seems to be transcluding a chapter name, but this script couldn\'t read it.'.format(page_slug))
+ advice_issued += 1
+ continue
+ else:
+ possibly_print(page_name)
+ pywikibot.stdout(' ADVICE: Link {0} seems to use transclusion. This script can understand chapter name transclusions such as "{1}" but it doesn\'t recognize this one so it can\'t be verified. You should check the link manually.'.format(page_slug, "{{C7}}"))
+ advice_issued += 1
+ continue
+
+ # If this is a relative "/" link, use the current page as the basis for the URL. Note
+      # that only a leading slash is looked for, so if there are multiple steps down ("/x/y"),
+ # we're out of luck.
+ if page_slug.startswith('/'):
+ page_slug = page_name + page_slug
+ if debug: pywikibot.stdout(' Changed page_slug to {} on account of "/".'.format(page_slug))
+
+ # If this is a relative "../" link, find the parent page, set ourselves to that page,
+ # then remove the relative portion of the link. Note that this is only performed once,
+      # so if there are multiple steps back ("../../"), we're out of luck.
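+      # e.g. on a hypothetical page "Namespace:Page/Subpage", the link "../#Section" becomes
+      # "Namespace:Page#Section"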
+ if page_slug.startswith('../'):
last_slash = page_name.rfind('/')
page_name2 = page_name[0:last_slash]
- #pywikibot.stdout('Changed page_name to {} on account of "../".'.format(page_name2))
- link_text = link_text[3:len(link_text)]
- #pywikibot.stdout('Changed link_text to {} on account of "../".'.format(link_text))
+ if debug: pywikibot.stdout(' Changed page_name to {} on account of "../".'.format(page_name2))
+ page_slug = page_slug[3:len(page_slug)]
+ if debug: pywikibot.stdout(' Changed page_slug to {} on account of "../".'.format(page_slug))
# If this is now going to be a bare section link for the parent page, don't add a
# slash, otherwise do because we are drilling down to another subpage
- if link_text.startswith('#'):
- link_text = page_name2 + link_text
+ if page_slug.startswith('#'):
+ page_slug = page_name2 + page_slug
else:
- link_text = page_name2 + '/' + link_text
-
- # If this is a bare section link, build URL based on this page
- if link_text.startswith('#'):
+ page_slug = page_name2 + '/' + page_slug
+
+ # If this is a bare section link, build URL based on this page
+ if page_slug.startswith('#'):
iw_url = onigalore_url + page_name2
- iw_found = iw_found + 1
- #pywikibot.stdout('Found link to this very page, {}.'.format(link_text))
+ iw_found += 1
+ if debug: pywikibot.stdout(' Found link to this very page, {}.'.format(page_slug))
found_iw_match = True
- link_text = page_name2 + link_text
-
- # If there's no ":" in the link (before the section link, where a colon would just be
- # part of the text) then it's a Main namespace article, so construct URL
- if found_iw_match == False:
- if not re.search(":.*#", link_text):
- iw_url = onigalore_url + link_text
- iw_found = iw_found + 1
- #pywikibot.stdout('Found link to OniGalore Main namespace page {}.'.format(link_text))
- found_iw_match = True
-
- # If there is a ":", match the prefix against the intrawiki prefixes on OniGalore
- if found_iw_match == False:
+ page_slug = page_name2 + page_slug
+
+ # If there's no ":" in the link (before the section link, where a colon would just be
+ # part of the text) then it's a Main namespace article; proceed with building URL
+ if found_iw_match == False:
+ if not re.search(":.*#", page_slug):
+ iw_url = onigalore_url + page_slug
+ iw_found += 1
+ if debug: pywikibot.stdout(' Link is to a Main namespace page.')
+ found_iw_match = True
+
+ # If there is a ":", match the prefix against the intrawiki prefixes on OniGalore
+ # before building URL
+ if found_iw_match == False:
for prefix in intrawiki_prefixes:
- #pywikibot.stdout('Comparing link against prefix {}.'.format(prefix))
- if prefix + ":" in link_text:
- iw_url = onigalore_url + link_text
- _, post_ns = link_text.split(':', 1)
- #pywikibot.stdout('Found link to OniGalore {0} namespace page {1}.'.format(prefix, post_ns))
- iw_found = iw_found + 1
- found_iw_match = True
- break
-
- # If we didn't match the prefix against any intrawiki prefixes, see if it matches
- # against an interwiki prefix; if so, this link can be ignored
- is_interwiki = False
- if found_iw_match == False:
- for prefix in interwiki_prefixes:
- if prefix + ":" in link_text:
- #pywikibot.stdout('Skipping link {} because it is an interwiki link.'.format(link_text))
- is_interwiki = True
- break
- if is_interwiki:
- continue
-
- # If we still haven't turned this match into a URL, something's gone wrong
- if (found_iw_match == False) or (iw_url == ""):
- pywikibot.stdout('ERROR: Couldn\'t figure out link {}.'.format(link_text))
+ if prefix + ":" in page_slug:
+ iw_url = onigalore_url + page_slug
+ if debug: pywikibot.stdout(' Identified namespace {}.'.format(prefix))
+ iw_found += 1
+ found_iw_match = True
+ break
+
+ # If we still haven't turned this match into a URL, something's gone wrong
+ if (found_iw_match == False) or (iw_url == ""):
+ possibly_print(page_name)
+         pywikibot.stdout(' ERROR: Couldn\'t figure out link {}.'.format(page_slug))
+         errors_issued += 1
continue
- # Test the URL
- iw_url = iw_url.replace(' ', '_')
- #pywikibot.stdout('Reading page at {}...'.format(iw_url))
- response = fetch(iw_url)
-
- # Redirects are followed automatically by fetch() and treated as "200"s; the way we can
- # tell that a redirect occurred is by checking fetch's history
- if response.history != []:
- pywikibot.stdout('WARNING: Got redirection code ({0}) on URL "{1}".'.format(response.history[0], iw_url))
- warnings_issued = warnings_issued + 1
- elif response.status_code != 200:
- pywikibot.stdout('WARNING: Got response code {0} on URL {1}.'.format(response.status_code, iw_url))
- warnings_issued = warnings_issued + 1
- else:
- # Isolate section link
- pre_section, section_name = link_text.split('#', 1)
- #pywikibot.stdout('Searching for section link {} on page.'.format(section_name))
-
- # Convert slash character to the dot-notation hex encoding that MediaWiki uses
- section_name = section_name.replace('/', '.2F')
-
- # Read linked page to see if it really has this anchor link
- soup = BeautifulSoup(response.text, 'html.parser')
- found_section = False
- for span_tag in soup.findAll('span'):
- span_name = span_tag.get('id', None)
- if span_name == section_name:
- #pywikibot.stdout('Found section!')
- found_section = True
- break
- if found_section == False:
- pywikibot.stdout('ERROR: Could not find section {0} on page {1}!'.format(section_name, pre_section))
- errors_issued = errors_issued + 1
+ # Test the URL
+ iw_url = iw_url.replace(' ', '_')
+ if debug: pywikibot.stdout(' Reading page at {}….'.format(iw_url))
+ test_intrawiki_link(iw_url, page_name, page_slug)
+
+# Print a wrap-up message
+def print_summary():
+ global pages_checked
+ global iw_found
+ global advice_issued
+ global errors_issued
+
+ page_str = "pages"
+ if pages_checked == 1:
+ page_str = "page"
+
+ link_str = "links"
+ if iw_found == 1:
+ link_str = "link"
+
+ pywikibot.stdout('Checked {0} {1} and found {2} intrawiki {3}.'.format(pages_checked, page_str, iw_found, link_str))
+ pywikibot.stdout('While attempting to follow section links….')
+
+ if advice_issued == 0:
+ pywikibot.stdout(' No advice on potential problems was issued.')
+ elif advice_issued == 1:
+ pywikibot.stdout(' 1 piece of advice on a potential problem was issued.')
+ else:
+ pywikibot.stdout(' {} pieces of advice on potential problems were issued.'.format(advice_issued))
+
+ error_str = "errors were"
+ if errors_issued == 1:
+ error_str = "error was"
+ pywikibot.stdout(' {0} {1} encountered.'.format(errors_issued, error_str))
+
+# Main function
def main(*args):
- cat_name = ''
- global page_name
-
- local_args = pywikibot.handle_args(args)
- genFactory = pagegenerators.GeneratorFactory()
+ global debug
+ search_cat = ''
+ search_page = ''
+
+ # Process arguments
+ local_args = pywikibot.handle_args(args)
+ for arg in local_args:
+ if arg.startswith('-cat:'):
+ search_cat = arg[5:]
+ elif arg.startswith('-page:'):
+ search_page = arg[6:]
+ elif arg == '-dbg':
+ debug = 1
+ else:
+ pywikibot.stdout('Unknown argument "{}".'.format(arg))
+ return
+
+ site = pywikibot.Site()
+
+ # This line of code enumerates the methods in the 'page' class
+ #pywikibot.stdout(format(dir(page)))
+
+ # Check specified page or loop through specified category and check all pages
+ if search_cat != '':
+ cat_obj = pywikibot.Category(site, search_cat)
+ generator = pagegenerators.CategorizedPageGenerator(cat_obj, recurse=True)
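+      # Preload page text in batches of 100 to reduce the number of API calls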
+ for page in pagegenerators.PreloadingGenerator(generator, 100):
+ if debug: pywikibot.stdout('Checking page {0}'.format(page.title()))
+ scan_for_intrawiki_links(page.text, page.title())
+ elif search_page != '':
+ page = pywikibot.Page(site, search_page)
+ if debug: pywikibot.stdout('Checking page {0}'.format(page.title()))
+ scan_for_intrawiki_links(page.text, page.title())
- for arg in local_args:
- if arg.startswith('-cat:'):
- cat_name = arg[5:]
- elif arg.startswith('-page:'):
- page_name = arg[6:]
-
- site = pywikibot.Site()
-
- # This line of code enumerates the methods in the 'page' class
- #pywikibot.stdout(format(dir(page)))
-
- if cat_name != '':
- cat_obj = pywikibot.Category(site, cat_name)
- generator = pagegenerators.CategorizedPageGenerator(cat_obj, recurse=True)
- for page in pagegenerators.PreloadingGenerator(generator, 100):
- pywikibot.stdout('Checking page {0}'.format(page.title()))
- page_name = page.title()
- scan_for_iw_links(page.text)
- elif page_name != '':
- page = pywikibot.Page(site, page_name)
- pywikibot.stdout('Checking page {0}'.format(page.title()))
- scan_for_iw_links(page.text)
-
- global pages_checked
- global iw_found
- global advice_issued
- global warnings_issued
- global errors_issued
-
- page_str = "pages"
- if pages_checked == 1:
- page_str = "page"
-
- link_str = "links"
- if iw_found == 1:
- link_str = "link"
-
- pywikibot.stdout('Checked {0} {1} and found {2} intrawiki {3}.'.format(pages_checked, page_str, iw_found, link_str))
- pywikibot.stdout('While attempting to follow section links...')
-
- if advice_issued == 0:
- pywikibot.stdout(' No advice on potential problems was issued.')
- elif advice_issued == 1:
- pywikibot.stdout(' 1 piece of advice on a potential problem was issued.')
- else:
- pywikibot.stdout(' {} pieces of advice on potential problems were issued.'.format(advice_issued))
-
- warning_str = "warnings were"
- if warnings_issued == 1:
- warning_str = "warning was"
- pywikibot.stdout(' {0} {1} issued.'.format(warnings_issued, warning_str))
-
- error_str = "errors were"
- if errors_issued == 1:
- error_str = "error was"
- pywikibot.stdout(' {0} {1} encountered.'.format(errors_issued, error_str))
+ # Print the results
+ print_summary()
if __name__ == '__main__':
- main()
+ main()