--- ValBot/Python/check_interwiki_links.py 2022/03/21 21:22:33 1170
+++ ValBot/Python/check_interwiki_links.py 2025/09/21 21:50:56 1198
@@ -1,152 +1,265 @@
# Check Interwiki Links
# by iritscen@yahoo.com
-# Looks at each link on a page (or in all the pages in a category) which uses a registered
-# interwiki prefix and loads the linked page, verifying that it exists and that any section
-# link, if present, is valid as well. The output will use the word "ERROR" when it cannot
-# validate the interwiki link.
+# Looks at each link on a page (or on all the pages in a category) which uses a registered interwiki prefix and loads the linked page, verifying that it exists and that
+# any section link, if present, is valid as well. The output will use the word "ERROR" when it cannot validate the interwiki link.
# Recommended viewing width:
-# |---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---|
-
-import os
-
-from urllib.parse import urljoin
+# |---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----|
+import bs4
import pywikibot
import re
-import requests
+import requests # for listing members with dir() when debugging
-from pywikibot.bot import QuitKeyboardInterrupt
+from bs4 import BeautifulSoup
from pywikibot import pagegenerators
-from pywikibot.tools.formatter import color_format
+from pywikibot.bot import QuitKeyboardInterrupt
from pywikibot.comms.http import fetch
from pywikibot.specialbots import UploadRobot
-from bs4 import BeautifulSoup
+from pywikibot.tools.formatter import color_format
+from urllib.parse import urljoin
+
+class IWLink:
+ def __init__(self, iw_prefix, prefix_url, full_url, page_name, page_name_only, page_slug, hosting_page, curl_response):
+ self.iw_prefix = iw_prefix # e.g. "wp"
+ self.prefix_url = prefix_url # e.g. "https://en.wikipedia.org/wiki/"
+ self.full_url = full_url # e.g. "https://en.wikipedia.org/wiki/Marathon_(series)#Rampancy"
+ self.page_name = page_name # "Marathon (series)#Rampancy"
+ self.page_name_only = page_name_only # "Marathon (series)"
+ self.page_slug = page_slug # "Marathon_(series)#Rampancy"
+ self.hosting_page = hosting_page # "Easter eggs"; page where the link was found
+ self.curl_response = curl_response # the Response object returned by fetch() (a class defined in the Requests library)
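+# A hypothetical instance built from the sample values in the field comments above would be:
+# IWLink('wp', 'https://en.wikipedia.org/wiki/', 'https://en.wikipedia.org/wiki/Marathon_(series)#Rampancy', 'Marathon (series)#Rampancy', 'Marathon (series)', 'Marathon_(series)#Rampancy', 'Easter eggs', None)
+# (curl_response can start out as None; test_interwiki_link() below fills it in with the result of fetch())
+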
# Parallel arrays based on https://wiki.oni2.net/Special:Interwiki
interwiki_prefixes = ('acronym', 'cache', 'commons', 'dictionary', 'google', 'metawikimedia', 'mw', 'wikibooks', 'wikidata', 'wikimedia', 'wikinews', 'wikipedia', 'wikiquote', 'wikisource', 'wikispecies', 'wikiversity', 'wikivoyage', 'wikt', 'wiktionary', 'wp')
interwiki_urls = ('http://www.acronymfinder.com/~/search/af.aspx?string=exact&Acronym=', 'http://www.google.com/search?q=cache:', 'https://commons.wikimedia.org/wiki/', 'http://www.dict.org/bin/Dict?Database=*&Form=Dict1&Strategy=*&Query=', 'http://www.google.com/search?q=', 'https://meta.wikimedia.org/wiki/', 'https://www.mediawiki.org/wiki/', 'https://en.wikibooks.org/wiki/', 'https://www.wikidata.org/wiki/', 'https://foundation.wikimedia.org/wiki/', 'https://en.wikinews.org/wiki/', 'https://en.wikipedia.org/wiki/', 'https://en.wikiquote.org/wiki/', 'https://wikisource.org/wiki/', 'https://species.wikimedia.org/wiki/', 'https://en.wikiversity.org/wiki/', 'https://en.wikivoyage.org/wiki/', 'https://en.wiktionary.org/wiki/', 'https://en.wiktionary.org/wiki/', 'https://en.wikipedia.org/wiki/')
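+# The two tuples above are parallel: the prefix at index N pairs with the URL at index N, e.g. 'wp' (the last prefix) pairs with 'https://en.wikipedia.org/wiki/' (the last URL), so [[wp:Foo]] is checked against https://en.wikipedia.org/wiki/Foo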
+# Initialize globals
+debug = 0
pages_checked = 0
iw_found = 0
errors_issued = 0
+unintended_redirects_found = 0
+name_printed = 0
-# Searches the given page text for interwiki links
-def scan_for_iw_links(page_text):
- global pages_checked
- global iw_found
- global errors_issued
- pages_checked = pages_checked + 1
- cur = 0
-
- for prefix in interwiki_prefixes:
- # Isolate strings that start with "[[prefix:" and end with "|" or "]"
- iw_link = "\[\[" + prefix + ":[^|\]]*(\||\])"
- for match in re.finditer(iw_link, page_text):
- # Extract just the page title from this regex match
- s = match.start() + 2 + len(prefix) + 1
- e = match.end() - 1
-
- # Sometimes we used a space char. instead of a '_', so fix that before querying
- page_title = page_text[s:e].replace(' ', '_')
-
- # Construct full URL for the particular wiki
- iw_url = interwiki_urls[cur] + page_title
- pywikibot.stdout(' Validating {0} link "{1}"'.format(prefix, page_title))
- iw_found = iw_found + 1
-
- # Adjust URL if this is a foreign-language WP link
- if re.match("^[a-zA-Z]{2}:", page_title):
- lang_code = page_title[0:2] + "."
- # "wp:" is the Wikipedia: namespace, not a language
- if lang_code != "wp." and lang_code != "WP.":
- iw_url = iw_url.replace('en.', lang_code)
- iw_url = iw_url.replace(page_title[0:3], '')
-
- # Test the URL
- #pywikibot.stdout(' Testing URL "{}"'.format(iw_url))
- response = fetch(iw_url)
-
- # Redirects are followed automatically by fetch() and treated as "200"s, so the
- # way we tell that a redirect occurred is by checking the history
- if response.history != []:
- pywikibot.stdout(' ERROR: Got redirection code ({0}) on URL "{1}".'.format(response.history[0], iw_url))
- errors_issued = errors_issued + 1
- elif response.status_code != 200:
- pywikibot.stdout(' ERROR: Got response code {0} on URL "{1}".'.format(response.status_code, iw_url))
- errors_issued = errors_issued + 1
- elif '#' in page_title:
- # Isolate section link
- page_name, anchor_name = page_title.split('#')
-
- # Convert dot-notation hex entities to proper characters
- anchor_name = anchor_name.replace('.22', '"')
- anchor_name = anchor_name.replace('.27', '\'')
- anchor_name = anchor_name.replace('.28', '(')
- anchor_name = anchor_name.replace('.29', ')')
-
- # Read linked page to see if it really has this anchor link
- soup = BeautifulSoup(response.text, 'html.parser')
- found_section = False
- for span_tag in soup.findAll('span'):
- span_name = span_tag.get('id', None)
- if span_name == anchor_name:
- #pywikibot.stdout('Found section!')
- found_section = True
- break
- if found_section == False:
- pywikibot.stdout(' ERROR: Could not find section {0} on page {1}.'.format(anchor_name, page_name))
- errors_issued = errors_issued + 1
- cur = cur + 1
+# Prints the name of a page on which something occurred, if it has not been printed before
+def possibly_print(the_link):
+ global debug
+ global name_printed
+
+ if not name_printed and not debug:
+ pywikibot.stdout('')
+ pywikibot.stdout('From page "{}":'.format(the_link.hosting_page))
+ name_printed = 1
+
+# Search a page for the section specified in the link
+def find_section(the_link, print_result):
+ global errors_issued
+
+ # Isolate section link
+ _, anchor_name = the_link.page_slug.split('#')
+
+ # Convert dot-notation hex entities to proper characters
+ replacements = [(r'\.22', '"'), (r'\.27', "'"), (r'\.28', '('), (r'\.29', ')')]
+ for pattern, replacement in replacements:
+ anchor_name = re.sub(pattern, replacement, anchor_name)
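+ # e.g. a link anchor written as "Marathon_.28series.29" is now "Marathon_(series)", so it can be compared against the element IDs on the target page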
+
+ # Read linked page to see if it really has this anchor link
+ soup = BeautifulSoup(the_link.curl_response.text, 'html.parser')
+ tags_to_search = ['span', 'div', 'h2', 'h3', 'h4']
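+ # e.g. a link ending in "#Rampancy" is considered valid if the target page contains markup such as <span id="Rampancy"> or <h3 id="Rampancy">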
+ found_section = False
+ for tag_name in tags_to_search:
+ for the_tag in soup.find_all(tag_name):
+ if the_tag.get('id') == anchor_name:
+ found_section = True
+ break
+ if found_section:
+ break
+
+ # Tell user what we found
+ if found_section == False:
+ possibly_print(the_link)
+ pywikibot.stdout(' ERROR: Could not find section "{0}" on {1} page "{2}".'.format(anchor_name, the_link.iw_prefix, the_link.page_name))
+ errors_issued = errors_issued + 1
+ elif print_result == True:
+ pywikibot.stdout(' The section "{0}" was found on {1} page "{2}".'.format(anchor_name, the_link.iw_prefix, the_link.page_name))
+
+# For a link that redirected us to another page, extract the name of the target page from the target page's source
+def find_canonical_link(the_link):
+ # Extract the page name from this markup, which gives the canonical URL of the redirected-to page:
+ # <link rel="canonical" href="https://en.wikipedia.org/wiki/Page_name"/>
+ canonical_name = the_link.curl_response.text.split('<link rel="canonical" href="')[-1]
+ canonical_name = canonical_name.replace(the_link.prefix_url, '', 1)
+ tag_end = canonical_name.find('"/>')
+
+ if tag_end == -1:
+ pywikibot.stdout(' ERROR: The {0} link "{1}" is a redirect page, but this script could not isolate the target page name.'.format(the_link.iw_prefix, the_link.page_slug))
+ errors_issued = errors_issued + 1
+ else:
+ canonical_name = canonical_name[:tag_end]
+ if len(canonical_name) > 100:
+ # Certain things can cause the trim to fail; report error and avoid slamming the output with massive page source from a failed trim
+ pywikibot.stdout(' ERROR: The {0} link "{1}" is a redirect to "{2}…" (string overflow).'.format(the_link.iw_prefix, the_link.page_slug, canonical_name[:100]))
+ errors_issued = errors_issued + 1
+ else:
+ the_link.page_name = canonical_name.replace('_', ' ')
+ if '#' in the_link.page_slug:
+ the_link.page_name_only, _ = the_link.page_slug.split('#')
+ pywikibot.stdout(' The {0} link "{1}" is a redirect to "{2}", which is a valid page. Checking for section on that page….'.format(the_link.iw_prefix, the_link.page_name_only, the_link.page_name))
+ find_section(the_link, True)
+ else:
+ pywikibot.stdout(' The {0} link "{1}" is a redirect to "{2}", which is a valid page.'.format(the_link.iw_prefix, the_link.page_slug, the_link.page_name))
+
+# Test an interwiki link and look for a section link if applicable
+def test_interwiki_link(the_link):
+ global errors_issued
+ global unintended_redirects_found
+
+ the_link.curl_response = fetch(the_link.full_url)
+
+ # One way we tell that a redirect occurred is by checking fetch's history, as it automatically follows redirects. This will catch formal redirects which come from
+ # pages such as Special:PermanentLink.
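+ # (e.g. a link like "wp:Special:PermanentLink/12345", with a hypothetical revision ID, lands on the target revision via an HTTP redirect, which then appears in curl_response.history)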
+ if the_link.curl_response.history != []:
+ possibly_print(the_link)
+
+ # If linked page is in all caps, e.g. WP:BEANS, it's likely a deliberate use of a redirect
+ if the_link.page_slug.startswith('WP:') and the_link.page_slug == the_link.page_slug.upper():
+ pywikibot.stdout(' Got redirection code "{0}" for {1} link "{2}". This appears to be a deliberate use of a Wikipedia shortcut. Checking the target page….'.format(the_link.curl_response.history[0], the_link.iw_prefix, the_link.page_slug))
+ find_canonical_link(the_link)
+ else:
+ permalink1 = 'Special:PermanentLink/'.lower()
+ permalink2 = 'Special:Permalink/'.lower()
+ page_slug_lower = the_link.page_slug.lower()
+ if page_slug_lower.startswith(permalink1) or page_slug_lower.startswith(permalink2):
+ pywikibot.stdout(' Got redirection code "{0}" for {1} permanent revision link "{2}". Checking the target page….'.format(the_link.curl_response.history[0], the_link.iw_prefix, the_link.page_slug))
+ find_canonical_link(the_link)
+ else:
+ pywikibot.stdout(' ERROR: Unrecognized type of redirection (code "{0}") for {1} link "{2}". You should check the link manually.'.format(the_link.curl_response.history[0], the_link.iw_prefix, the_link.page_slug))
+ errors_issued = errors_issued + 1
+ elif the_link.curl_response.status_code != 200:
+ possibly_print(the_link)
+ pywikibot.stdout(' ERROR: Got response code {0} for {1} link "{2}". The page may not exist.'.format(the_link.curl_response.status_code, the_link.iw_prefix, the_link.page_slug))
+ errors_issued = errors_issued + 1
+ # However, the usual way that a redirect occurs is that MediaWiki redirects us sneakily using JavaScript while returning "200 OK" as if the link were correct; this
+ # happens when a redirect page is accessed. We must detect these soft redirects by looking at the page source to find the redirect note inserted at the top of the
+ # page for the reader.
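+ # (e.g. fetching a redirect page returns the target page's HTML with a note such as "Redirected from Some page" near the top, even though the HTTP status is 200)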
+ elif 'Redirected from