--- ValBot/Python/check_interwiki_links.py 2023/04/28 00:54:21 1180
+++ ValBot/Python/check_interwiki_links.py 2025/08/29 03:52:17 1197
@@ -1,26 +1,33 @@
# Check Interwiki Links
# by iritscen@yahoo.com
-# Looks at each link on a page (or in all the pages in a category) which uses a registered
+# Looks at each link on a page (or all the pages in a category) which uses a registered
# interwiki prefix and loads the linked page, verifying that it exists and that any section
# link, if present, is valid as well. The output will use the word "ERROR" when it cannot
# validate the interwiki link.
# Recommended viewing width:
# |---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---|
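+#
+# Example invocation (assuming the standard Pywikibot wrapper script; the
+# category name here is hypothetical):
+#   python3 pwb.py check_interwiki_links -cat:"Some Category" -dbg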
-import os
-
-from urllib.parse import urljoin
-
+import bs4
import pywikibot
import re
-import requests # for listing members with dir()
+import requests # for listing members with dir() when debugging
-from pywikibot.bot import QuitKeyboardInterrupt
+from bs4 import BeautifulSoup
from pywikibot import pagegenerators
-from pywikibot.tools.formatter import color_format
+from pywikibot.bot import QuitKeyboardInterrupt
from pywikibot.comms.http import fetch
from pywikibot.specialbots import UploadRobot
-from bs4 import BeautifulSoup
+from pywikibot.tools.formatter import color_format
+from urllib.parse import urljoin
+
+class IWLink:
+ def __init__(self, iw_prefix, prefix_url, full_url, page_name, page_slug, curl_response):
+ self.iw_prefix = iw_prefix # e.g. "wp"
+ self.prefix_url = prefix_url # e.g. "https://en.wikipedia.org/wiki/"
+ self.full_url = full_url # e.g. "https://en.wikipedia.org/wiki/Easter_egg"
+ self.page_name = page_name # "Easter egg"
+ self.page_slug = page_slug # "Easter_egg"
+		self.curl_response = curl_response # an instance of the Response class defined in the Requests library
+
# Parallel arrays based on https://wiki.oni2.net/Special:Interwiki
interwiki_prefixes = ('acronym', 'cache', 'commons', 'dictionary', 'google', 'metawikimedia', 'mw', 'wikibooks', 'wikidata', 'wikimedia', 'wikinews', 'wikipedia', 'wikiquote', 'wikisource', 'wikispecies', 'wikiversity', 'wikivoyage', 'wikt', 'wiktionary', 'wp')
@@ -32,123 +39,205 @@ debug = 0
pages_checked = 0
iw_found = 0
errors_issued = 0
+unintended_redirects_found = 0
+name_printed = 0
+
+# Prints the name of a page on which something occurred, if it has not been printed before
+def possibly_print(page_name):
+ global debug
+ global name_printed
+
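+	# (in debug mode, main() already prints each page name as it is checked)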
+ if not name_printed and not debug:
+ pywikibot.stdout('')
+ pywikibot.stdout('From page "{}":'.format(page_name))
+ name_printed = 1
+
+# Search a page for the section specified in the link
+def find_section(the_link, print_result):
+ global errors_issued
+
+ # Isolate section link
+	target_page_name, anchor_name = the_link.page_slug.split('#', 1)
+ target_page_name_human = target_page_name.replace('_', ' ')
+
+ # Convert dot-notation hex entities to proper characters
+ replacements = [(r'\.22', '"'), (r'\.27', "'"), (r'\.28', '('), (r'\.29', ')')]
+ for pattern, replacement in replacements:
+ anchor_name = re.sub(pattern, replacement, anchor_name)
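+	# e.g. a hypothetical anchor "Easter_egg_.28media.29" becomes "Easter_egg_(media)"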
+
+ # Read linked page to see if it really has this anchor link
+ soup = BeautifulSoup(the_link.curl_response.text, 'html.parser')
+ tags_to_search = ['span', 'div', 'h2', 'h3', 'h4']
+ found_section = False
+ for tag_name in tags_to_search:
+ for the_tag in soup.find_all(tag_name):
+ if the_tag.get('id') == anchor_name:
+ found_section = True
+ break
+ if found_section:
+ break
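+	# Note: depending on the MediaWiki skin and parser version, the anchor id may
+	# sit on the heading tag itself or on a span/div inside it, hence the several
+	# tag types searched above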
+
+ # Tell user what we found
+	if not found_section:
+		possibly_print(the_link.page_name)
+		pywikibot.stdout(' ERROR: Could not find section "{0}" on {1} page "{2}".'.format(anchor_name, the_link.iw_prefix, target_page_name_human))
+		errors_issued = errors_issued + 1
+	elif print_result:
+		pywikibot.stdout(' The section "{0}" was found on {1} page "{2}".'.format(anchor_name, the_link.iw_prefix, target_page_name_human))
+
+# For a link that redirected us to another page, extract the name of the target page from
+# the target page's source
+def find_canonical_link(the_link):
+	global errors_issued
+
+	# Extract the page name from this markup, which gives the canonical URL of the
+	# redirected-to page:
+	# <link rel="canonical" href="https://en.wikipedia.org/wiki/Page_name"/>
+	canonical_name = the_link.curl_response.text.split('<link rel="canonical" href="')[-1]
+	canonical_name = canonical_name[len(the_link.prefix_url):] # trim the URL prefix, leaving the page name
+	tag_end = canonical_name.find('"/>')
+
+ if tag_end == -1:
+ pywikibot.stdout(' ERROR: The {0} link "{1}" is a redirect page, but this script could not isolate the target page name.'.format(the_link.iw_prefix, the_link.page_slug))
+ errors_issued = errors_issued + 1
+ else:
+ canonical_name = canonical_name[:tag_end]
+ if len(canonical_name) > 100:
+ # Certain things can cause the trim to fail; report error and avoid slamming the
+ # output with massive page source from a failed trim
+ pywikibot.stdout(' ERROR: The {0} link "{1}" is a redirect to "{2}…" (string overflow).'.format(the_link.iw_prefix, the_link.page_slug, canonical_name[:100]))
+ errors_issued = errors_issued + 1
+ else:
+ canonical_name = canonical_name.replace('_', ' ')
+			if '#' in the_link.page_slug:
+				_, anchor_name = the_link.page_slug.split('#', 1)
+				pywikibot.stdout(' The {0} link "{1}" is a redirect to "{2}#{3}", which is a valid page. Checking for section on that page….'.format(the_link.iw_prefix, the_link.page_slug, canonical_name, anchor_name))
+				the_link.page_slug = canonical_name + '#' + anchor_name # update page slug so that find_section() uses the right page name in its messages
+ find_section(the_link, True)
+ else:
+ pywikibot.stdout(' The {0} link "{1}" is a redirect to "{2}", which is a valid page.'.format(the_link.iw_prefix, the_link.page_slug, canonical_name))
+
+# Test an interwiki link and look for a section link if applicable
+def test_interwiki_link(the_link):
+ global errors_issued
+ global unintended_redirects_found
+
+ the_link.curl_response = fetch(the_link.full_url)
+
+ # One way we tell that a redirect occurred is by checking fetch's history, as it
+ # automatically follows redirects. This will catch formal redirects which come from pages
+ # such as Special:PermanentLink.
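+	# (fetch() returns a Requests-style response object, so .history lists any
+	# intermediate 30x responses that were followed)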
+	if the_link.curl_response.history:
+ possibly_print(the_link.page_name)
+
+ # If linked page is in all caps, e.g. WP:BEANS, it's likely a deliberate use of a redirect
+ if the_link.page_slug.startswith('WP:') and the_link.page_slug == the_link.page_slug.upper():
+			pywikibot.stdout(' Got redirection code "{0}" for {1} link "{2}". This appears to be a deliberate use of a Wikipedia shortcut. Checking the target page….'.format(the_link.curl_response.history[0].status_code, the_link.iw_prefix, the_link.page_slug))
+ find_canonical_link(the_link)
+ else:
+ permalink1 = 'Special:PermanentLink/'.lower()
+ permalink2 = 'Special:Permalink/'.lower()
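+			# e.g. a link such as "wp:Special:Permalink/12345#Some_Section" (hypothetical revision id)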
+ page_slug_lower = the_link.page_slug.lower()
+ if page_slug_lower.startswith(permalink1) or page_slug_lower.startswith(permalink2):
+				pywikibot.stdout(' Got redirection code "{0}" for {1} permanent revision link "{2}". Checking the target page….'.format(the_link.curl_response.history[0].status_code, the_link.iw_prefix, the_link.page_slug))
+ find_canonical_link(the_link)
+ else:
+				pywikibot.stdout(' ERROR: Unrecognized type of redirection (code "{0}") for {1} link "{2}". You should check the link manually.'.format(the_link.curl_response.history[0].status_code, the_link.iw_prefix, the_link.page_slug))
+ errors_issued = errors_issued + 1
+ elif the_link.curl_response.status_code != 200:
+ possibly_print(the_link.page_name)
+ pywikibot.stdout(' ERROR: Got response code {0} for {1} link "{2}". The page may not exist.'.format(the_link.curl_response.status_code, the_link.iw_prefix, the_link.page_slug))
+ errors_issued = errors_issued + 1
+ # However the usual way that a redirect occurs is that MediaWiki redirects us sneakily
+	# using JavaScript, while returning 200 OK as if the link were correct; this happens
+ # when a redirect page is accessed. We must detect these soft redirects by looking at the
+ # page source to find the redirect note inserted at the top of the page for the reader.
+	elif 'Redirected from <a' in the_link.curl_response.text:
+		possibly_print(the_link.page_name)
+		pywikibot.stdout(' WARNING: Got soft redirect for {0} link "{1}"; this is probably not an intended use of a redirect. Checking the target page….'.format(the_link.iw_prefix, the_link.page_slug))
+		unintended_redirects_found = unintended_redirects_found + 1
+		find_canonical_link(the_link)
+	# Otherwise the page loaded cleanly; verify any section link directly
+	elif '#' in the_link.page_slug:
+		find_section(the_link, False)
-		elif 'Redirected from <a' in response.text:
-			canonical_name = response.text.split('<link rel="canonical" href="')[-1]
-			tag_end = canonical_name.find('"/>')
- if tag_end == -1:
- pywikibot.stdout(' ERROR: The {0} link "{1}" is a redirect page, but this script could not isolate the target page name.', format(prefix, page_title))
- else:
- canonical_name = canonical_name[:tag_end]
- if len(canonical_name) > 100:
- # Certain things can cause the trim to fail; here we avoid slamming
- # the output with massive page source from a failed trim
- pywikibot.stdout(' ERROR: The {0} link "{1}" is a redirect to "{2}…" (string trimmed to 100 chars).'.format(prefix, page_title, canonical_name[:100]))
- else:
- canonical_name = canonical_name.replace('_', ' ')
- pywikibot.stdout(' ERROR: The {0} link "{1}" is a redirect to "{2}".'.format(prefix, page_title, canonical_name))
- errors_issued = errors_issued + 1
- elif '#' in page_title:
- # Isolate section link
- target_page_name, anchor_name = page_title.split('#')
-
- # Convert dot-notation hex entities to proper characters
- anchor_name = anchor_name.replace('.22', '"')
- anchor_name = anchor_name.replace('.27', '\'')
- anchor_name = anchor_name.replace('.28', '(')
- anchor_name = anchor_name.replace('.29', ')')
-
- # Read linked page to see if it really has this anchor link
- soup = BeautifulSoup(response.text, 'html.parser')
- found_section = False
- for span_tag in soup.findAll('span'):
- span_name = span_tag.get('id', None)
- if span_name == anchor_name:
- found_section = True
- break
- if found_section == False:
- if not name_printed and not debug:
- pywikibot.stdout('From page "{}":'.format(page_name))
- name_printed = 1
- target_page_name_human = target_page_name.replace('_', ' ')
- pywikibot.stdout(' ERROR: Could not find section "{0}" on {1} page "{2}".'.format(anchor_name, prefix, target_page_name_human))
- errors_issued = errors_issued + 1
- cur = cur + 1
+# Print a wrap-up message
+def print_summary():
+ global pages_checked
+ global iw_found
+ global errors_issued
+ global unintended_redirects_found
+
+ page_str = "pages"
+ if pages_checked == 1:
+ page_str = "page"
+
+ link_str = "links"
+ if iw_found == 1:
+ link_str = "link"
+
+ pywikibot.stdout('Checked {0} {1} and found {2} interwiki {3}.'.format(pages_checked, page_str, iw_found, link_str))
+
+ error_str = "errors were"
+ if errors_issued == 1:
+ error_str = "error was"
+
+	pywikibot.stdout('{0} {1} encountered in validating these links.'.format(errors_issued, error_str))
+
+	warning_str = "likely-unintended redirects were"
+	if unintended_redirects_found == 1:
+		warning_str = "likely-unintended redirect was"
+
+	pywikibot.stdout('{0} {1} also found.'.format(unintended_redirects_found, warning_str))
+
+# Main function
def main(*args):
global debug
search_cat = ''
search_page = ''
+ # Process arguments
local_args = pywikibot.handle_args(args)
- genFactory = pagegenerators.GeneratorFactory()
-
for arg in local_args:
if arg.startswith('-cat:'):
search_cat = arg[5:]
@@ -157,14 +246,15 @@ def main(*args):
elif arg == '-dbg':
debug = 1
else:
- pywikibot.stdout('Unknown argument "{}".'.format(arg))
+ pywikibot.stdout('Unknown argument "{}". Exiting.'.format(arg))
return
- site = pywikibot.Site()
-
#pywikibot.stdout('The members of the requests.models.Response class are:')
#pywikibot.stdout(format(dir(requests.models.Response)))
-
+ #return
+
+ # Check specified page or loop through specified category and check all pages
+ site = pywikibot.Site()
if search_cat != '':
cat_obj = pywikibot.Category(site, search_cat)
generator = pagegenerators.CategorizedPageGenerator(cat_obj, recurse=True)
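+		# recurse=True means pages in subcategories of the category are also checked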
@@ -176,25 +266,8 @@ def main(*args):
if debug: pywikibot.stdout('Checking page "{}"'.format(page.title()))
scan_for_interwiki_links(page.text, page.title())
- global pages_checked
- global iw_found
- global errors_issued
-
- page_str = "pages"
- if pages_checked == 1:
- page_str = "page"
-
- link_str = "links"
- if iw_found == 1:
- link_str = "link"
-
- pywikibot.stdout('Checked {0} {1} and found {2} interwiki {3}.'.format(pages_checked, page_str, iw_found, link_str))
-
- error_str = "errors were"
- if errors_issued == 1:
- error_str = "error was"
-
- pywikibot.stdout('{0} {1} encountered in validating these links.'.format(errors_issued, error_str))
+ # Print the results
+ print_summary()
if __name__ == '__main__':
main()