--- ValBot/Python/check_interwiki_links.py	2022/03/21 21:22:33	1170
+++ ValBot/Python/check_interwiki_links.py	2024/09/16 23:08:26	1192
@@ -12,8 +12,9 @@
 import os
 from urllib.parse import urljoin
 import pywikibot
+import bs4
 import re
-import requests
+import requests # for listing members with dir()
 
 from pywikibot.bot import QuitKeyboardInterrupt
 from pywikibot import pagegenerators
@@ -27,126 +28,246 @@ interwiki_prefixes = ('acronym', 'cache'
 interwiki_urls = ('http://www.acronymfinder.com/~/search/af.aspx?string=exact&Acronym=', 'http://www.google.com/search?q=cache:', 'https://commons.wikimedia.org/wiki/', 'http://www.dict.org/bin/Dict?Database=*&Form=Dict1&Strategy=*&Query=', 'http://www.google.com/search?q=', 'https://meta.wikimedia.org/wiki/', 'https://www.mediawiki.org/wiki/', 'https://en.wikibooks.org/wiki/', 'https://www.wikidata.org/wiki/', 'https://foundation.wikimedia.org/wiki/', 'https://en.wikinews.org/wiki/', 'https://en.wikipedia.org/wiki/', 'https://en.wikiquote.org/wiki/', 'https://wikisource.org/wiki/', 'https://species.wikimedia.org/wiki/', 'https://en.wikiversity.org/wiki/', 'https://en.wikivoyage.org/wiki/', 'https://en.wiktionary.org/wiki/', 'https://en.wiktionary.org/wiki/', 'https://en.wikipedia.org/wiki/')
 
+# Initialize globals
+debug = 0
 pages_checked = 0
 iw_found = 0
 errors_issued = 0
+name_printed = 0
 
-# Searches the given page text for interwiki links
-def scan_for_iw_links(page_text):
-   global pages_checked
-   global iw_found
-   global errors_issued
-   pages_checked = pages_checked + 1
-   cur = 0
-
-   for prefix in interwiki_prefixes:
-      # Isolate strings that start with "[[prefix:" and end with "|" or "]"
-      iw_link = "\[\[" + prefix + ":[^|\]]*(\||\])"
-      for match in re.finditer(iw_link, page_text):
-         # Extract just the page title from this regex match
-         s = match.start() + 2 + len(prefix) + 1
-         e = match.end() - 1
-
-         # Sometimes we used a space char. instead of a '_', so fix that before querying
-         page_title = page_text[s:e].replace(' ', '_')
-
-         # Construct full URL for the particular wiki
-         iw_url = interwiki_urls[cur] + page_title
-         pywikibot.stdout(' Validating {0} link "{1}"'.format(prefix, page_title))
-         iw_found = iw_found + 1
-
-         # Adjust URL if this is a foreign-language WP link
-         if re.match("^[a-zA-Z]{2}:", page_title):
-            lang_code = page_title[0:2] + "."
-            # "wp:" is the Wikipedia: namespace, not a language
-            if lang_code != "wp." and lang_code != "WP.":
-               iw_url = iw_url.replace('en.', lang_code)
-               iw_url = iw_url.replace(page_title[0:3], '')
-
-         # Test the URL
-         #pywikibot.stdout(' Testing URL "{}"'.format(iw_url))
-         response = fetch(iw_url)
-
-         # Redirects are followed automatically by fetch() and treated as "200"s, so the
-         # way we tell that a redirect occurred is by checking the history
-         if response.history != []:
-            pywikibot.stdout(' ERROR: Got redirection code ({0}) on URL "{1}".'.format(response.history[0], iw_url))
-            errors_issued = errors_issued + 1
-         elif response.status_code != 200:
-            pywikibot.stdout(' ERROR: Got response code {0} on URL "{1}".'.format(response.status_code, iw_url))
-            errors_issued = errors_issued + 1
-         elif '#' in page_title:
-            # Isolate section link
-            page_name, anchor_name = page_title.split('#')
-
-            # Convert dot-notation hex entities to proper characters
-            anchor_name = anchor_name.replace('.22', '"')
-            anchor_name = anchor_name.replace('.27', '\'')
-            anchor_name = anchor_name.replace('.28', '(')
-            anchor_name = anchor_name.replace('.29', ')')
-
-            # Read linked page to see if it really has this anchor link
-            soup = BeautifulSoup(response.text, 'html.parser')
-            found_section = False
-            for span_tag in soup.findAll('span'):
-               span_name = span_tag.get('id', None)
-               if span_name == anchor_name:
-                  #pywikibot.stdout('Found section!')
-                  found_section = True
-                  break
-            if found_section == False:
-               pywikibot.stdout(' ERROR: Could not find section {0} on page {1}.'.format(anchor_name, page_name))
-               errors_issued = errors_issued + 1
-      cur = cur + 1
+# Prints the name of a page on which something occurred, if it has not been printed before
+def possibly_print(page_name):
+   global debug
+   global name_printed
+
+   if not name_printed and not debug:
+      pywikibot.stdout('')
+      pywikibot.stdout('From page "{}":'.format(page_name))
+      name_printed = 1
+
+# Search a page for the section specified in the link
+def find_section(page_text, page_name, page_slug, prefix, print_result):
+   global errors_issued
+
+   # Isolate section link
+   target_page_name, anchor_name = page_slug.split('#')
+   target_page_name_human = target_page_name.replace('_', ' ')
+
+   # Convert dot-notation hex entities to proper characters
+   anchor_name = anchor_name.replace('.22', '"')
+   anchor_name = anchor_name.replace('.27', '\'')
+   anchor_name = anchor_name.replace('.28', '(')
+   anchor_name = anchor_name.replace('.29', ')')
+
+   # Read linked page to see if it really has this anchor link
+   soup = BeautifulSoup(page_text, 'html.parser')
+   found_section = False
+   for the_tag in soup.findAll('span'): # search for span with ID matching the section name
+      tag_name = the_tag.get('id', None)
+      if tag_name == anchor_name:
+         found_section = True
+         break
+   if found_section == False:
+      for the_tag in soup.findAll('div'): # search for div with ID matching the section name
+         tag_name = the_tag.get('id', None)
+         if tag_name == anchor_name:
+            found_section = True
+            break
+   if found_section == False:
+      for the_tag in soup.findAll('h2'): # search for h2 with ID matching the section name
+         tag_name = the_tag.get('id', None)
+         if tag_name == anchor_name:
+            found_section = True
+            break
+   if found_section == False:
+      for the_tag in soup.findAll('h3'): # search for h3 with ID matching the section name
+         tag_name = the_tag.get('id', None)
+         if tag_name == anchor_name:
+            found_section = True
+            break
+   if found_section == False:
+      for the_tag in soup.findAll('h4'): # search for h4 with ID matching the section name
+         tag_name = the_tag.get('id', None)
+         if tag_name == anchor_name:
+            found_section = True
+            break
+   if found_section == False:
+      possibly_print(page_name)
+      pywikibot.stdout(' ERROR: Could not find section "{0}" on {1} page "{2}".'.format(anchor_name, prefix, target_page_name_human))
+      errors_issued = errors_issued + 1
+   elif print_result == True:
+      pywikibot.stdout(' The section "{0}" was found on {1} page "{2}".'.format(anchor_name, prefix, target_page_name_human))
+
+# For a link that redirected us to another page, extract the name of the target page from
+# the target page's source
+def find_canonical_link(page_text, page_name, page_slug, prefix, prefix_url):
+   global errors_issued
+
+   # Extract link from this markup which contains name of redirected-to page:
+   # <link rel="canonical" href="https://en.wikipedia.org/wiki/Page_name"/>
+   canonical_name = page_text.split('<link rel="canonical" href="')[-1]
+   prefix_length = len(prefix_url)
+   canonical_name = canonical_name[prefix_length:]
+   tag_end = canonical_name.find('"/>')
+
+   if tag_end == -1:
+      pywikibot.stdout(' ERROR: The {0} link "{1}" is a redirect page, but this script could not isolate the target page name.'.format(prefix, page_slug))
+      errors_issued = errors_issued + 1
+   else:
+      canonical_name = canonical_name[:tag_end]
+      if len(canonical_name) > 100:
+         # Certain things can cause the trim to fail; report error and avoid slamming the
+         # output with massive page source from a failed trim
+         pywikibot.stdout(' ERROR: The {0} link "{1}" is a redirect to "{2}…" (string overflow).'.format(prefix, page_slug, canonical_name[:100]))
+         errors_issued = errors_issued + 1
+      else:
+         canonical_name = canonical_name.replace('_', ' ')
+         if '#' in page_slug:
+            _, anchor_name = page_slug.split('#')
+            pywikibot.stdout(' The {0} link "{1}" is a redirect to "{2}#{3}", which is a valid page. Checking section link….'.format(prefix, page_slug, canonical_name, anchor_name))
+            find_section(page_text, page_name, page_slug, prefix, True)
+         else:
+            pywikibot.stdout(' The {0} link "{1}" is a redirect to "{2}", which is a valid page.'.format(prefix, page_slug, canonical_name))
+
+# Test an interwiki link and look for a section link if applicable
+def test_interwiki_link(prefix, prefix_url, iw_url, page_name, page_slug):
+   global errors_issued
+
+   response = fetch(iw_url)
+
+   # One way we tell that a redirect occurred is by checking fetch's history, as it
+   # automatically follows redirects. This will catch formal redirects which come from pages
+   # such as Special:PermanentLink.
+   if response.history != []:
+      possibly_print(page_name)
+
+      if page_slug.startswith('WP:') and page_slug == page_slug.upper():
+         pywikibot.stdout(' Got redirection code "{0}" for {1} link "{2}". This appears to be a deliberate use of a Wikipedia shortcut. Checking the target page….'.format(response.history[0], prefix, page_slug))
+         find_canonical_link(response.text, page_name, page_slug, prefix, prefix_url)
+      else:
+         permalink1 = 'Special:PermanentLink/'.lower()
+         permalink2 = 'Special:Permalink/'.lower()
+         page_slug_lower = page_slug.lower()
+         if page_slug_lower.startswith(permalink1) or page_slug_lower.startswith(permalink2):
+            pywikibot.stdout(' Got redirection code "{0}" for {1} permanent revision link "{2}". Checking the target page….'.format(response.history[0], prefix, page_slug))
+            find_canonical_link(response.text, page_name, page_slug, prefix, prefix_url)
+         else:
+            pywikibot.stdout(' ERROR: Unrecognized type of redirection (code "{0}") for {1} link "{2}". You should check the link manually.'.format(response.history[0], prefix, page_slug))
+            errors_issued = errors_issued + 1
+   elif response.status_code != 200:
+      possibly_print(page_name)
+      pywikibot.stdout(' ERROR: Got response code {0} for {1} link "{2}". The page may not exist.'.format(response.status_code, prefix, page_slug))
+      errors_issued = errors_issued + 1
+   # However the usual way that a redirect occurs is that MediaWiki redirects us sneakily
+   # using JavaScript, while returning code OK 200 as if the link was correct; this happens
+   # when a redirect page is accessed. We must detect these soft redirects by looking at the
+   # page source to find the redirect note inserted at the top of the page for the reader.
+   elif 'Redirected from
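
For reference, the two redirect checks that test_interwiki_link() builds on can be reproduced outside of Pywikibot with plain requests. The following is a minimal sketch, assuming fetch() behaves like requests.get() (redirects followed automatically and recorded in response.history); check_redirect() and the sample URL are illustrative names, not part of the committed script.

# Minimal sketch (not from the diff above): hard redirects show up in
# response.history, while MediaWiki's soft redirects return 200 and leave a
# "Redirected from" note in the page source.
import requests
from bs4 import BeautifulSoup

def check_redirect(url):
   response = requests.get(url)  # follows redirects by default, like fetch()

   if response.history != []:
      # Hard redirect: the followed hops are recorded in response.history
      print('Hard redirect: {0} -> {1} (code {2})'.format(url, response.url, response.history[0].status_code))
   elif response.status_code != 200:
      print('Got response code {0}. The page may not exist.'.format(response.status_code))
   elif 'Redirected from' in response.text:
      # Soft redirect: HTTP 200, but the target page notes the redirection
      soup = BeautifulSoup(response.text, 'html.parser')
      heading = soup.find('h1')
      print('Soft redirect to "{0}".'.format(heading.get_text() if heading else url))
   else:
      print('Link is direct and valid.')

check_redirect('https://en.wikipedia.org/wiki/WP:MOS')  # hypothetical example shortcut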