--- ValBot/Python/check_interwiki_links.py 2022/03/21 21:22:33 1170
+++ ValBot/Python/check_interwiki_links.py 2023/04/28 00:54:21 1180
@@ -13,7 +13,7 @@ from urllib.parse import urljoin
import pywikibot
import re
-import requests
+import requests # for listing members with dir()
from pywikibot.bot import QuitKeyboardInterrupt
from pywikibot import pagegenerators
@@ -27,126 +27,174 @@ interwiki_prefixes = ('acronym', 'cache'
interwiki_urls = ('http://www.acronymfinder.com/~/search/af.aspx?string=exact&Acronym=', 'http://www.google.com/search?q=cache:', 'https://commons.wikimedia.org/wiki/', 'http://www.dict.org/bin/Dict?Database=*&Form=Dict1&Strategy=*&Query=', 'http://www.google.com/search?q=', 'https://meta.wikimedia.org/wiki/', 'https://www.mediawiki.org/wiki/', 'https://en.wikibooks.org/wiki/', 'https://www.wikidata.org/wiki/', 'https://foundation.wikimedia.org/wiki/', 'https://en.wikinews.org/wiki/', 'https://en.wikipedia.org/wiki/', 'https://en.wikiquote.org/wiki/', 'https://wikisource.org/wiki/', 'https://species.wikimedia.org/wiki/', 'https://en.wikiversity.org/wiki/', 'https://en.wikivoyage.org/wiki/', 'https://en.wiktionary.org/wiki/', 'https://en.wiktionary.org/wiki/', 'https://en.wikipedia.org/wiki/')
+# Initialize globals
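+# debug is toggled by the -dbg command-line argument and enables per-page/per-link progress output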
+debug = 0
pages_checked = 0
iw_found = 0
errors_issued = 0
# Searches the given page text for interwiki links
-def scan_for_iw_links(page_text):
- global pages_checked
- global iw_found
- global errors_issued
- pages_checked = pages_checked + 1
- cur = 0
-
- for prefix in interwiki_prefixes:
- # Isolate strings that start with "[[prefix:" and end with "|" or "]"
- iw_link = "\[\[" + prefix + ":[^|\]]*(\||\])"
- for match in re.finditer(iw_link, page_text):
- # Extract just the page title from this regex match
- s = match.start() + 2 + len(prefix) + 1
- e = match.end() - 1
-
- # Sometimes we used a space char. instead of a '_', so fix that before querying
- page_title = page_text[s:e].replace(' ', '_')
-
- # Construct full URL for the particular wiki
- iw_url = interwiki_urls[cur] + page_title
- pywikibot.stdout(' Validating {0} link "{1}"'.format(prefix, page_title))
- iw_found = iw_found + 1
-
- # Adjust URL if this is a foreign-language WP link
- if re.match("^[a-zA-Z]{2}:", page_title):
- lang_code = page_title[0:2] + "."
- # "wp:" is the Wikipedia: namespace, not a language
- if lang_code != "wp." and lang_code != "WP.":
- iw_url = iw_url.replace('en.', lang_code)
- iw_url = iw_url.replace(page_title[0:3], '')
-
- # Test the URL
- #pywikibot.stdout(' Testing URL "{}"'.format(iw_url))
- response = fetch(iw_url)
-
- # Redirects are followed automatically by fetch() and treated as "200"s, so the
- # way we tell that a redirect occurred is by checking the history
- if response.history != []:
- pywikibot.stdout(' ERROR: Got redirection code ({0}) on URL "{1}".'.format(response.history[0], iw_url))
- errors_issued = errors_issued + 1
- elif response.status_code != 200:
- pywikibot.stdout(' ERROR: Got response code {0} on URL "{1}".'.format(response.status_code, iw_url))
- errors_issued = errors_issued + 1
- elif '#' in page_title:
- # Isolate section link
- page_name, anchor_name = page_title.split('#')
-
- # Convert dot-notation hex entities to proper characters
- anchor_name = anchor_name.replace('.22', '"')
- anchor_name = anchor_name.replace('.27', '\'')
- anchor_name = anchor_name.replace('.28', '(')
- anchor_name = anchor_name.replace('.29', ')')
-
- # Read linked page to see if it really has this anchor link
- soup = BeautifulSoup(response.text, 'html.parser')
- found_section = False
- for span_tag in soup.findAll('span'):
- span_name = span_tag.get('id', None)
- if span_name == anchor_name:
- #pywikibot.stdout('Found section!')
- found_section = True
- break
- if found_section == False:
- pywikibot.stdout(' ERROR: Could not find section {0} on page {1}.'.format(anchor_name, page_name))
- errors_issued = errors_issued + 1
- cur = cur + 1
+def scan_for_interwiki_links(page_text, page_name):
+ global debug
+ global pages_checked
+ global iw_found
+ global errors_issued
+ pages_checked = pages_checked + 1
+ cur = 0
+ name_printed = 0
+
+ for prefix in interwiki_prefixes:
+ # Isolate strings that start with "[[prefix:" and end with "|" or "]"
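+ # e.g. when the prefix is "acronym", this matches "[[acronym:NASA|" and "[[acronym:NASA]"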
+ iw_link = r"\[\[" + prefix + r":[^|\]]*(\||\])"
+ for match in re.finditer(iw_link, page_text):
+ # Extract just the page title from this regex match
+ s = match.start() + 2 + len(prefix) + 1
+ e = match.end() - 1
+
+ # Sometimes we used a space char. instead of a '_', so fix that before querying
+ page_title = page_text[s:e].replace(' ', '_')
+
+ # Use only spaces for title when printing it
+ page_title_human = page_title.replace('_', ' ')
+ if debug: pywikibot.stdout(' Validating {0} link "{1}"'.format(prefix, page_title_human))
+ iw_found = iw_found + 1
+
+ # Construct full URL for the particular wiki
+ iw_url = interwiki_urls[cur] + page_title
+
+ # Adjust URL if this is a foreign-language WP link
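+ # e.g. "fr:Paris" should be fetched from https://fr.wikipedia.org/wiki/Paris rather than the English Wikipedia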
+ if re.match("^[a-zA-Z]{2}:", page_title):
+ lang_code = page_title[0:2] + "."
+ # "wp:" is the Wikipedia: namespace, not a language
+ if lang_code != "wp." and lang_code != "WP.":
+ iw_url = iw_url.replace('en.', lang_code)
+ iw_url = iw_url.replace(page_title[0:3], '')
+
+ # Test the URL
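+ # fetch() follows HTTP redirects on its own; any hops it took are recorded in response.history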
+ response = fetch(iw_url)
+
+ # One way we tell that a redirect occurred is by checking the history
+ if response.history != []:
+ if not name_printed and not debug:
+ pywikibot.stdout('From page "{}":'.format(page_name))
+ name_printed = 1
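+ # All-caps "WP:" titles (e.g. "WP:RS") are Wikipedia namespace shortcuts that redirect by design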
+ if page_title.startswith('WP:') and page_title == page_title.upper():
+ pywikibot.stdout(' ERROR: Got redirection code ({0}) for {1} link "{2}", but this appears to be a deliberate use of a Wikipedia shortcut. You should check the link manually.'.format(response.history[0].status_code, prefix, page_title))
+ else:
+ pywikibot.stdout(' ERROR: Got redirection code ({0}) for {1} link "{2}". You should check the link manually.'.format(response.history[0].status_code, prefix, page_title))
+ errors_issued = errors_issued + 1
+ elif response.status_code != 200:
+ if not name_printed and not debug:
+ pywikibot.stdout('From page "{}":'.format(page_name))
+ name_printed = 1
+ pywikibot.stdout(' ERROR: Got response code {0} for {1} link "{2}". The page may not exist.'.format(response.status_code, prefix, page_title))
+ errors_issued = errors_issued + 1
+ # The usual way that a redirect occurs is that MediaWiki redirects us sneakily
+ # using JavaScript, while returning code OK 200 as if the link was correct; we
+ # must detect this from the page source
+ elif 'Redirected from <a' in response.text:
+ # Isolate the target page name from the "(Redirected from <a href=…>Page name</a>)" notice in the page source
+ canonical_name = response.text.split('Redirected from <a', 1)[-1].split('>', 1)[-1]
+ tag_end = canonical_name.find('</a>')
+ if tag_end == -1:
+ pywikibot.stdout(' ERROR: The {0} link "{1}" is a redirect page, but this script could not isolate the target page name.'.format(prefix, page_title))
+ else:
+ canonical_name = canonical_name[:tag_end]
+ if len(canonical_name) > 100:
+ # Certain things can cause the trim to fail; here we avoid slamming
+ # the output with massive page source from a failed trim
+ pywikibot.stdout(' ERROR: The {0} link "{1}" is a redirect to "{2}…" (string trimmed to 100 chars).'.format(prefix, page_title, canonical_name[:100]))
+ else:
+ canonical_name = canonical_name.replace('_', ' ')
+ pywikibot.stdout(' ERROR: The {0} link "{1}" is a redirect to "{2}".'.format(prefix, page_title, canonical_name))
+ errors_issued = errors_issued + 1
+ elif '#' in page_title:
+ # Isolate section link
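+ # e.g. "Foo#Bar" gives a target page of "Foo" and an anchor of "Bar"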
+ target_page_name, anchor_name = page_title.split('#')
+
+ # Convert dot-notation hex entities to proper characters
+ anchor_name = anchor_name.replace('.22', '"')
+ anchor_name = anchor_name.replace('.27', '\'')
+ anchor_name = anchor_name.replace('.28', '(')
+ anchor_name = anchor_name.replace('.29', ')')
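+ # e.g. an anchor written as ".22Foo.22" in the wikitext becomes '"Foo"' so it can match the id in the rendered HTML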
+
+ # Read linked page to see if it really has this anchor link
+ soup = BeautifulSoup(response.text, 'html.parser')
+ found_section = False
+ for span_tag in soup.findAll('span'):
+ span_name = span_tag.get('id', None)
+ if span_name == anchor_name:
+ found_section = True
+ break
+ if found_section == False:
+ if not name_printed and not debug:
+ pywikibot.stdout('From page "{}":'.format(page_name))
+ name_printed = 1
+ target_page_name_human = target_page_name.replace('_', ' ')
+ pywikibot.stdout(' ERROR: Could not find section "{0}" on {1} page "{2}".'.format(anchor_name, prefix, target_page_name_human))
+ errors_issued = errors_issued + 1
+ cur = cur + 1
def main(*args):
- cat_name = ''
- page_name = ''
+ global debug
+ search_cat = ''
+ search_page = ''
+
+ local_args = pywikibot.handle_args(args)
+ genFactory = pagegenerators.GeneratorFactory()
+
+ for arg in local_args:
+ if arg.startswith('-cat:'):
+ search_cat = arg[5:]
+ elif arg.startswith('-page:'):
+ search_page = arg[6:]
+ elif arg == '-dbg':
+ debug = 1
+ else:
+ pywikibot.stdout('Unknown argument "{}".'.format(arg))
+ return
+
+ site = pywikibot.Site()
+
+ #pywikibot.stdout('The members of the requests.models.Response class are:')
+ #pywikibot.stdout(format(dir(requests.models.Response)))
+
+ if search_cat != '':
+ cat_obj = pywikibot.Category(site, search_cat)
+ generator = pagegenerators.CategorizedPageGenerator(cat_obj, recurse=True)
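+ # Preload page text in batches of 100 to reduce the number of API requests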
+ for page in pagegenerators.PreloadingGenerator(generator, 100):
+ if debug: pywikibot.stdout('Checking page "{}"'.format(page.title()))
+ scan_for_interwiki_links(page.text, page.title())
+ elif search_page != '':
+ page = pywikibot.Page(site, search_page)
+ if debug: pywikibot.stdout('Checking page "{}"'.format(page.title()))
+ scan_for_interwiki_links(page.text, page.title())
+
+ global pages_checked
+ global iw_found
+ global errors_issued
+
+ page_str = "pages"
+ if pages_checked == 1:
+ page_str = "page"
+
+ link_str = "links"
+ if iw_found == 1:
+ link_str = "link"
+
+ pywikibot.stdout('Checked {0} {1} and found {2} interwiki {3}.'.format(pages_checked, page_str, iw_found, link_str))
+
+ error_str = "errors were"
+ if errors_issued == 1:
+ error_str = "error was"
- local_args = pywikibot.handle_args(args)
- genFactory = pagegenerators.GeneratorFactory()
-
- for arg in local_args:
- if arg.startswith('-cat:'):
- cat_name = arg[5:]
- elif arg.startswith('-page:'):
- page_name = arg[6:]
-
- site = pywikibot.Site()
-
- #pywikibot.stdout('The members of the requests.models.Response class are:')
- #pywikibot.stdout(format(dir(requests.models.Response)))
-
- if cat_name != '':
- cat_obj = pywikibot.Category(site, cat_name)
- generator = pagegenerators.CategorizedPageGenerator(cat_obj, recurse=True)
- for page in pagegenerators.PreloadingGenerator(generator, 100):
- pywikibot.stdout('Checking page "{}"'.format(page.title()))
- scan_for_iw_links(page.text)
- elif page_name != '':
- page = pywikibot.Page(site, page_name)
- pywikibot.stdout('Checking page "{}"'.format(page.title()))
- scan_for_iw_links(page.text)
-
- global pages_checked
- global iw_found
- global errors_issued
-
- page_str = "pages"
- if pages_checked == 1:
- page_str = "page"
-
- link_str = "links"
- if iw_found == 1:
- link_str = "link"
-
- pywikibot.stdout('Checked {0} {1} and found {2} interwiki {3}.'.format(pages_checked, page_str, iw_found, link_str))
-
- error_str = "errors were"
- if errors_issued == 1:
- error_str = "error was"
-
- pywikibot.stdout('{0} {1} encountered in validating these links.'.format(errors_issued, error_str))
+ pywikibot.stdout('{0} {1} encountered in validating these links.'.format(errors_issued, error_str))
if __name__ == '__main__':
- main()
+ main()