# Check Interwiki Links
# by iritscen@yahoo.com
# Looks at each link on a page (or on all the pages in a category) that uses a registered
# interwiki prefix, and loads the linked page to verify that it exists and that any section
# link, if present, is valid as well. The output will use the word "ERROR" when it cannot
# validate an interwiki link.
# Recommended viewing width:
# |---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---|
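#
# Example invocation (the script file name is illustrative; run it like any other
# Pywikibot script, e.g. through pwb.py):
#   python pwb.py check_interwiki_links -cat:"Some category"
#   python pwb.py check_interwiki_links -page:"Some page" -dbg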

import re

import pywikibot
import requests # for listing members with dir()

from pywikibot import pagegenerators
from pywikibot.comms.http import fetch
from bs4 import BeautifulSoup

# Parallel arrays based on https://wiki.oni2.net/Special:Interwiki
interwiki_prefixes = ('acronym', 'cache', 'commons', 'dictionary', 'google', 'metawikimedia', 'mw', 'wikibooks', 'wikidata', 'wikimedia', 'wikinews', 'wikipedia', 'wikiquote', 'wikisource', 'wikispecies', 'wikiversity', 'wikivoyage', 'wikt', 'wiktionary', 'wp')

interwiki_urls = ('http://www.acronymfinder.com/~/search/af.aspx?string=exact&Acronym=', 'http://www.google.com/search?q=cache:', 'https://commons.wikimedia.org/wiki/', 'http://www.dict.org/bin/Dict?Database=*&Form=Dict1&Strategy=*&Query=', 'http://www.google.com/search?q=', 'https://meta.wikimedia.org/wiki/', 'https://www.mediawiki.org/wiki/', 'https://en.wikibooks.org/wiki/', 'https://www.wikidata.org/wiki/', 'https://foundation.wikimedia.org/wiki/', 'https://en.wikinews.org/wiki/', 'https://en.wikipedia.org/wiki/', 'https://en.wikiquote.org/wiki/', 'https://wikisource.org/wiki/', 'https://species.wikimedia.org/wiki/', 'https://en.wikiversity.org/wiki/', 'https://en.wikivoyage.org/wiki/', 'https://en.wiktionary.org/wiki/', 'https://en.wiktionary.org/wiki/', 'https://en.wikipedia.org/wiki/')
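
# These two tuples are walked in lockstep by index below, so they must stay the same
# length and in the same order; this guard catches edits that drift out of sync
assert len(interwiki_prefixes) == len(interwiki_urls)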

# Initialize globals
debug = 0
pages_checked = 0
iw_found = 0
errors_issued = 0
name_printed = 0

# Prints the name of a page on which something occurred, if it has not been printed before
def possibly_print(page_name):
    global debug
    global name_printed

    if not name_printed and not debug:
        pywikibot.stdout('')
        pywikibot.stdout('From page "{}":'.format(page_name))
        name_printed = 1

# Search a page for the section specified in the link
def find_section(page_text, page_name, page_slug, prefix, print_result):
    global errors_issued

    # Isolate section link
    target_page_name, anchor_name = page_slug.split('#')
    target_page_name_human = target_page_name.replace('_', ' ')

    # Convert dot-notation hex entities to proper characters
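    # (MediaWiki's legacy anchor encoding escapes some characters as ".XX" hex values,
    # e.g. "(" becomes ".28". Only the characters observed in this wiki's links are
    # handled below; this list is an assumption and may need extending.)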
    anchor_name = anchor_name.replace('.22', '"')
    anchor_name = anchor_name.replace('.27', '\'')
    anchor_name = anchor_name.replace('.28', '(')
    anchor_name = anchor_name.replace('.29', ')')

    # Read linked page to see if it really has this anchor link
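    # (MediaWiki normally emits section headings as <span class="mw-headline" id="...">
    # inside the heading tag, while IDs on <div>s typically come from hand-placed anchors,
    # so both tag types are checked.)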
    soup = BeautifulSoup(page_text, 'html.parser')
    # The anchor is valid if some span or div has it as its ID
    found_section = soup.find(['span', 'div'], id=anchor_name) is not None
    if not found_section:
        possibly_print(page_name)
        pywikibot.stdout('  ERROR: Could not find section "{0}" on {1} page "{2}".'.format(anchor_name, prefix, target_page_name_human))
        errors_issued += 1
    elif print_result:
        pywikibot.stdout('  The section "{0}" was found on {1} page "{2}".'.format(anchor_name, prefix, target_page_name_human))

# For a link that redirected us to another page, extract the name of the target page from
# the target page's source
def find_canonical_link(page_text, page_name, page_slug, prefix, prefix_url):
    global errors_issued

    # Extract link from this markup which contains name of redirected-to page:
    # <link rel="canonical" href="https://en.wikipedia.org/wiki/Page_name"/>
    canonical_name = page_text.split('<link rel="canonical" href="')[-1]
    prefix_length = len(prefix_url)
    canonical_name = canonical_name[prefix_length:]
    # Look for the closing quote rather than '">' so that both '">' and '"/>' forms work
    tag_end = canonical_name.find('"')

    if tag_end == -1:
        pywikibot.stdout('  ERROR: The {0} link "{1}" is a redirect page, but this script could not isolate the target page name.'.format(prefix, page_slug))
        errors_issued += 1
    else:
        canonical_name = canonical_name[:tag_end]
        if len(canonical_name) > 100:
            # Certain things can cause the trim to fail; report error and avoid slamming the
            # output with massive page source from a failed trim
            pywikibot.stdout('  ERROR: The {0} link "{1}" is a redirect to "{2}…" (string overflow).'.format(prefix, page_slug, canonical_name[:100]))
            errors_issued += 1
        else:
            canonical_name = canonical_name.replace('_', ' ')
            if '#' in page_slug:
                _, anchor_name = page_slug.split('#')
                pywikibot.stdout('  The {0} link "{1}" is a redirect to "{2}#{3}", which is a valid page. Checking section link….'.format(prefix, page_slug, canonical_name, anchor_name))
                find_section(page_text, page_name, page_slug, prefix, True)
            else:
                pywikibot.stdout('  The {0} link "{1}" is a redirect to "{2}", which is a valid page.'.format(prefix, page_slug, canonical_name))

# Test an interwiki link and look for a section link if applicable
def test_interwiki_link(prefix, prefix_url, iw_url, page_name, page_slug):
    global errors_issued

    response = fetch(iw_url)

    # One way we tell that a redirect occurred is by checking fetch's history, as it
    # automatically follows redirects. This will catch formal redirects which come from pages
    # such as Special:PermanentLink.
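    # (fetch() is backed by the requests library, so response.history is the list of
    # intermediate Response objects from any redirects that were followed.)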
    if response.history:
        possibly_print(page_name)

        if page_slug.startswith('WP:') and page_slug == page_slug.upper():
            pywikibot.stdout('  Got redirection code "{0}" for {1} link "{2}". This appears to be a deliberate use of a Wikipedia shortcut. Checking the target page….'.format(response.history[0], prefix, page_slug))
            find_canonical_link(response.text, page_name, page_slug, prefix, prefix_url)
        else:
            permalink1 = 'Special:PermanentLink/'.lower()
            permalink2 = 'Special:Permalink/'.lower()
            page_slug_lower = page_slug.lower()
            if page_slug_lower.startswith(permalink1) or page_slug_lower.startswith(permalink2):
                pywikibot.stdout('  Got redirection code "{0}" for {1} permanent revision link "{2}". Checking the target page….'.format(response.history[0], prefix, page_slug))
                find_canonical_link(response.text, page_name, page_slug, prefix, prefix_url)
            else:
                pywikibot.stdout('  ERROR: Unrecognized type of redirection (code "{0}") for {1} link "{2}". You should check the link manually.'.format(response.history[0], prefix, page_slug))
                errors_issued += 1
    elif response.status_code != 200:
        possibly_print(page_name)
        pywikibot.stdout('  ERROR: Got response code {0} for {1} link "{2}". The page may not exist.'.format(response.status_code, prefix, page_slug))
        errors_issued += 1
    # However, the usual way that a redirect occurs is that MediaWiki redirects us sneakily
    # using JavaScript, while returning code OK 200 as if the link were correct; this happens
    # when a redirect page is accessed. We must detect these soft redirects by looking at the
    # page source to find the redirect note inserted at the top of the page for the reader.
    elif 'Redirected from <a' in response.text:
        possibly_print(page_name)
        pywikibot.stdout('  Got silently redirected by {0} link "{1}". Checking the target page….'.format(prefix, page_slug))
        find_canonical_link(response.text, page_name, page_slug, prefix, prefix_url)
    elif '#' in page_slug:
        find_section(response.text, page_name, page_slug, prefix, False)
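    # (If none of the branches above fired, the link returned a plain 200 with no redirect
    # and no section anchor, so there is nothing further to check.)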

# Searches the given page text for interwiki links
def scan_for_interwiki_links(page_text, page_name):
    global debug
    global pages_checked
    global iw_found
    global name_printed
    pages_checked += 1
    cur_prefix = 0
    name_printed = 0

    for prefix in interwiki_prefixes:
        # Isolate strings that start with "[[prefix:" and end with "|" or "]"
        iw_link = r"\[\[" + prefix + r":[^|\]]*(\||\])"
        for match in re.finditer(iw_link, page_text):
            # Extract just the page title from this regex match by skipping "[[", the
            # prefix itself, the ":" separator, and the trailing "|" or "]"
            s = match.start() + 2 + len(prefix) + 1
            e = match.end() - 1

            # Commonly we use spaces instead of underscores, so fix that before querying
            page_slug = page_text[s:e].replace(' ', '_')

            # But use spaces for title when printing it
            page_title_human = page_slug.replace('_', ' ')
            if debug: pywikibot.stdout('  Validating {0} link "{1}"'.format(prefix, page_title_human))
            iw_found += 1

            # Construct full URL for the particular wiki
            iw_url = interwiki_urls[cur_prefix] + page_slug

            # Adjust URL if this is a foreign-language WP link
            if re.match("^[a-zA-Z]{2}:", page_slug):
                lang_code = page_slug[0:2] + "."
                # "wp:" is the Wikipedia: namespace, not a language
                if lang_code != "wp." and lang_code != "WP.":
                    iw_url = iw_url.replace('en.', lang_code)
                    iw_url = iw_url.replace(page_slug[0:3], '')
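                    # (e.g. "[[wikipedia:fr:Paris]]" starts as https://en.wikipedia.org/wiki/fr:Paris
                    # and becomes https://fr.wikipedia.org/wiki/Paris after the two replacements)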

            # Test the URL
            test_interwiki_link(prefix, interwiki_urls[cur_prefix], iw_url, page_name, page_slug)
        cur_prefix += 1

# Print a wrap-up message
def print_summary():
    global pages_checked
    global iw_found
    global errors_issued

    page_str = "pages"
    if pages_checked == 1:
        page_str = "page"

    link_str = "links"
    if iw_found == 1:
        link_str = "link"

    pywikibot.stdout('Checked {0} {1} and found {2} interwiki {3}.'.format(pages_checked, page_str, iw_found, link_str))

    error_str = "errors were"
    if errors_issued == 1:
        error_str = "error was"

    pywikibot.stdout('{0} {1} encountered in validating these links.'.format(errors_issued, error_str))

# Main function
def main(*args):
    global debug
    search_cat = ''
    search_page = ''

    # Process arguments (handle_args() consumes Pywikibot's own global options first)
    local_args = pywikibot.handle_args(args)
    for arg in local_args:
        if arg.startswith('-cat:'):
            search_cat = arg[5:]
        elif arg.startswith('-page:'):
            search_page = arg[6:]
        elif arg == '-dbg':
            debug = 1
        else:
            pywikibot.stdout('Unknown argument "{}". Exiting.'.format(arg))
            return

    #pywikibot.stdout('The members of the requests.models.Response class are:')
    #pywikibot.stdout(format(dir(requests.models.Response)))
    #return

    # Check specified page or loop through specified category and check all pages
    site = pywikibot.Site()
    if search_cat != '':
        cat_obj = pywikibot.Category(site, search_cat)
        generator = pagegenerators.CategorizedPageGenerator(cat_obj, recurse=True)
        for page in pagegenerators.PreloadingGenerator(generator, 100):
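            # (PreloadingGenerator fetches page texts in batches, here 100 at a time,
            # so each page's .text is already loaded when the loop body runs)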
            if debug: pywikibot.stdout('Checking page "{}"'.format(page.title()))
            scan_for_interwiki_links(page.text, page.title())
    elif search_page != '':
        page = pywikibot.Page(site, search_page)
        if debug: pywikibot.stdout('Checking page "{}"'.format(page.title()))
        scan_for_interwiki_links(page.text, page.title())

    # Print the results
    print_summary()

if __name__ == '__main__':
    main()