# Check Interwiki Links
# by iritscen@yahoo.com
# Looks at each link on a page (or on all the pages in a category) which uses a registered
# interwiki prefix and loads the linked page, verifying that it exists and that any section
# link, if present, is valid as well. The output will use the word "ERROR" when it cannot
# validate the interwiki link.
# Recommended viewing width:
# |---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---|

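# Example invocation, assuming this file is saved as check_interwiki_links.py (it can also
# be run through Pywikibot's pwb.py wrapper):
#   python3 check_interwiki_links.py -page:"Page name"
#   python3 check_interwiki_links.py -cat:"Category name" -dbg
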
import re

import pywikibot
import requests # for listing members with dir()

from pywikibot import pagegenerators
from pywikibot.comms.http import fetch
from bs4 import BeautifulSoup

# Parallel arrays based on https://wiki.oni2.net/Special:Interwiki
interwiki_prefixes = ('acronym', 'cache', 'commons', 'dictionary', 'google', 'metawikimedia', 'mw', 'wikibooks', 'wikidata', 'wikimedia', 'wikinews', 'wikipedia', 'wikiquote', 'wikisource', 'wikispecies', 'wikiversity', 'wikivoyage', 'wikt', 'wiktionary', 'wp')

interwiki_urls = ('http://www.acronymfinder.com/~/search/af.aspx?string=exact&Acronym=', 'http://www.google.com/search?q=cache:', 'https://commons.wikimedia.org/wiki/', 'http://www.dict.org/bin/Dict?Database=*&Form=Dict1&Strategy=*&Query=', 'http://www.google.com/search?q=', 'https://meta.wikimedia.org/wiki/', 'https://www.mediawiki.org/wiki/', 'https://en.wikibooks.org/wiki/', 'https://www.wikidata.org/wiki/', 'https://foundation.wikimedia.org/wiki/', 'https://en.wikinews.org/wiki/', 'https://en.wikipedia.org/wiki/', 'https://en.wikiquote.org/wiki/', 'https://wikisource.org/wiki/', 'https://species.wikimedia.org/wiki/', 'https://en.wikiversity.org/wiki/', 'https://en.wikivoyage.org/wiki/', 'https://en.wiktionary.org/wiki/', 'https://en.wiktionary.org/wiki/', 'https://en.wikipedia.org/wiki/')

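# The tuples above must stay in sync: prefix N pairs with URL N. This check guards against
# an edit that adds an entry to one tuple but not the other
assert len(interwiki_prefixes) == len(interwiki_urls)
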
# Initialize globals
debug = 0
pages_checked = 0
iw_found = 0
errors_issued = 0
name_printed = 0

# Prints the name of a page on which something occurred, if it has not been printed before
def possibly_print(page_name):
    global debug
    global name_printed

    if not name_printed and not debug:
        pywikibot.stdout('')
        pywikibot.stdout('From page "{}":'.format(page_name))
        name_printed = 1

# Search a page for the section specified in the link
def find_section(page_text, page_name, page_slug, prefix, print_result):
    global errors_issued

    # Isolate section link
    target_page_name, anchor_name = page_slug.split('#')
    target_page_name_human = target_page_name.replace('_', ' ')

    # Convert dot-notation hex entities to proper characters
    anchor_name = anchor_name.replace('.22', '"')
    anchor_name = anchor_name.replace('.27', '\'')
    anchor_name = anchor_name.replace('.28', '(')
    anchor_name = anchor_name.replace('.29', ')')
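    # e.g. an anchor copied in MediaWiki's legacy dot-encoding as "Fight_.28or_flight.29"
    # decodes to "Fight_(or_flight)", matching the ID in the fetched page's HTML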

    # Read the linked page to see if it really has this anchor link. A section anchor can
    # appear as the ID of a span, div or heading tag, so search all of these tag types.
    soup = BeautifulSoup(page_text, 'html.parser')
    found_section = soup.find(['span', 'div', 'h2', 'h3', 'h4'], id=anchor_name) is not None
    if not found_section:
        possibly_print(page_name)
        pywikibot.stdout(' ERROR: Could not find section "{0}" on {1} page "{2}".'.format(anchor_name, prefix, target_page_name_human))
        errors_issued += 1
    elif print_result:
        pywikibot.stdout(' The section "{0}" was found on {1} page "{2}".'.format(anchor_name, prefix, target_page_name_human))

# For a link that redirected us to another page, extract the name of the target page from
# the target page's source
def find_canonical_link(page_text, page_name, page_slug, prefix, prefix_url):
    global errors_issued

    # Extract the URL from this markup, which contains the name of the redirected-to page:
    # <link rel="canonical" href="https://en.wikipedia.org/wiki/Page_name"/>
    canonical_name = page_text.split('<link rel="canonical" href="')[-1]
    prefix_length = len(prefix_url)
    canonical_name = canonical_name[prefix_length:]
    tag_end = canonical_name.find('"')

    if tag_end == -1:
        pywikibot.stdout(' ERROR: The {0} link "{1}" is a redirect page, but this script could not isolate the target page name.'.format(prefix, page_slug))
        errors_issued += 1
    else:
        canonical_name = canonical_name[:tag_end]
        if len(canonical_name) > 100:
            # Certain things can cause the trim to fail; report an error and avoid slamming
            # the output with massive page source from a failed trim
            pywikibot.stdout(' ERROR: The {0} link "{1}" is a redirect to "{2}…" (string overflow).'.format(prefix, page_slug, canonical_name[:100]))
            errors_issued += 1
        else:
            canonical_name = canonical_name.replace('_', ' ')
            if '#' in page_slug:
                _, anchor_name = page_slug.split('#')
                pywikibot.stdout(' The {0} link "{1}" is a redirect to "{2}#{3}", which is a valid page. Checking section link….'.format(prefix, page_slug, canonical_name, anchor_name))
                find_section(page_text, page_name, page_slug, prefix, True)
            else:
                pywikibot.stdout(' The {0} link "{1}" is a redirect to "{2}", which is a valid page.'.format(prefix, page_slug, canonical_name))

# Test an interwiki link and look for a section link if applicable
def test_interwiki_link(prefix, prefix_url, iw_url, page_name, page_slug):
    global errors_issued

    response = fetch(iw_url)
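    # Note: fetch() hands back a requests.Response-style object here, so response.history
    # holds any intermediate redirect responses and response.text holds the final page's HTML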

    # One way we tell that a redirect occurred is by checking fetch's history, as it
    # automatically follows redirects. This will catch formal redirects which come from pages
    # such as Special:PermanentLink.
    if response.history:
        possibly_print(page_name)

        if page_slug.startswith('WP:') and page_slug == page_slug.upper():
            pywikibot.stdout(' Got redirection code "{0}" for {1} link "{2}". This appears to be a deliberate use of a Wikipedia shortcut. Checking the target page….'.format(response.history[0].status_code, prefix, page_slug))
            find_canonical_link(response.text, page_name, page_slug, prefix, prefix_url)
        else:
            permalink1 = 'Special:PermanentLink/'.lower()
            permalink2 = 'Special:Permalink/'.lower()
            page_slug_lower = page_slug.lower()
            if page_slug_lower.startswith(permalink1) or page_slug_lower.startswith(permalink2):
                pywikibot.stdout(' Got redirection code "{0}" for {1} permanent revision link "{2}". Checking the target page….'.format(response.history[0].status_code, prefix, page_slug))
                find_canonical_link(response.text, page_name, page_slug, prefix, prefix_url)
            else:
                pywikibot.stdout(' ERROR: Unrecognized type of redirection (code "{0}") for {1} link "{2}". You should check the link manually.'.format(response.history[0].status_code, prefix, page_slug))
                errors_issued += 1
    elif response.status_code != 200:
        possibly_print(page_name)
        pywikibot.stdout(' ERROR: Got response code {0} for {1} link "{2}". The page may not exist.'.format(response.status_code, prefix, page_slug))
        errors_issued += 1
    # More commonly, though, MediaWiki resolves a redirect page server-side: it returns
    # 200 OK with the target page's content, as if the link were direct, and inserts a
    # "Redirected from" note at the top of the page for the reader. We must detect these
    # soft redirects by looking for that note in the page source.
    elif 'Redirected from <a' in response.text:
        possibly_print(page_name)
        pywikibot.stdout(' Got silently redirected by {0} link "{1}". Checking the target page….'.format(prefix, page_slug))
        find_canonical_link(response.text, page_name, page_slug, prefix, prefix_url)
    elif '#' in page_slug:
        find_section(response.text, page_name, page_slug, prefix, False)

# Searches the given page text for interwiki links
def scan_for_interwiki_links(page_text, page_name):
    global debug
    global pages_checked
    global iw_found
    global name_printed
    pages_checked += 1
    name_printed = 0

    # Walk the prefixes and their base URLs in lockstep
    for prefix, prefix_url in zip(interwiki_prefixes, interwiki_urls):
        # Isolate strings that start with "[[prefix:" and end with "|" or "]"
        iw_link = r"\[\[" + prefix + r":[^|\]]*(\||\])"
        for match in re.finditer(iw_link, page_text):
            # Extract just the page title from this regex match
            s = match.start() + 2 + len(prefix) + 1
            e = match.end() - 1

            # Commonly we use spaces instead of underscores, so fix that before querying
            page_slug = page_text[s:e].replace(' ', '_')

            # But use spaces for the title when printing it
            page_title_human = page_slug.replace('_', ' ')
            if debug: pywikibot.stdout(' Validating {0} link "{1}"'.format(prefix, page_title_human))
            iw_found += 1

            # Construct full URL for the particular wiki
            iw_url = prefix_url + page_slug

            # Adjust URL if this is a foreign-language WP link
            if re.match("^[a-zA-Z]{2}:", page_slug):
                lang_code = page_slug[0:2].lower() + "."
                # "wp:" is the Wikipedia: namespace, not a language
                if lang_code != "wp.":
                    iw_url = iw_url.replace('en.', lang_code)
                    iw_url = iw_url.replace(page_slug[0:3], '')
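            # e.g. "[[wp:fr:Paris]]" yields https://fr.wikipedia.org/wiki/Paris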

            # Test the URL
            test_interwiki_link(prefix, prefix_url, iw_url, page_name, page_slug)

# Print a wrap-up message
def print_summary():
    global pages_checked
    global iw_found
    global errors_issued

    page_str = "pages"
    if pages_checked == 1:
        page_str = "page"

    link_str = "links"
    if iw_found == 1:
        link_str = "link"

    pywikibot.stdout('Checked {0} {1} and found {2} interwiki {3}.'.format(pages_checked, page_str, iw_found, link_str))

    error_str = "errors were"
    if errors_issued == 1:
        error_str = "error was"

    pywikibot.stdout('{0} {1} encountered in validating these links.'.format(errors_issued, error_str))

# Main function
def main(*args):
    global debug
    search_cat = ''
    search_page = ''

    # Process arguments
    local_args = pywikibot.handle_args(args)
    for arg in local_args:
        if arg.startswith('-cat:'):
            search_cat = arg[5:]
        elif arg.startswith('-page:'):
            search_page = arg[6:]
        elif arg == '-dbg':
            debug = 1
        else:
            pywikibot.stdout('Unknown argument "{}". Exiting.'.format(arg))
            return

    #pywikibot.stdout('The members of the requests.models.Response class are:')
    #pywikibot.stdout(format(dir(requests.models.Response)))
    #return

    # Check the specified page, or loop through the specified category and check all its pages
    site = pywikibot.Site()
    if search_cat != '':
        cat_obj = pywikibot.Category(site, search_cat)
        generator = pagegenerators.CategorizedPageGenerator(cat_obj, recurse=True)
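        # PreloadingGenerator fetches page text in batches (100 pages at a time here) to
        # cut down on the number of API round-trips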
        for page in pagegenerators.PreloadingGenerator(generator, 100):
            if debug: pywikibot.stdout('Checking page "{}"'.format(page.title()))
            scan_for_interwiki_links(page.text, page.title())
    elif search_page != '':
        page = pywikibot.Page(site, search_page)
        if debug: pywikibot.stdout('Checking page "{}"'.format(page.title()))
        scan_for_interwiki_links(page.text, page.title())
    else:
        pywikibot.stdout('No -cat or -page argument given. Exiting.')
        return

    # Print the results
    print_summary()

if __name__ == '__main__':
    main()