root/Oni2/ValBot/Python/check_interwiki_links.py

Comparing ValBot/Python/check_interwiki_links.py (file contents):
Revision 1169 by iritscen, Mon Feb 21 23:59:20 2022 UTC vs.
Revision 1191 by iritscen, Sat Jul 6 22:01:44 2024 UTC

# Line 1 | Line 1
1 + # Check Interwiki Links
2 + # by iritscen@yahoo.com
3 + # Looks at each link on a page (or on all the pages in a category) that uses a registered
4 + # interwiki prefix, and loads the linked page to verify that it exists and that any section
5 + # link, if present, is valid as well. The output uses the word "ERROR" when it cannot
6 + # validate an interwiki link.
7 + # Recommended viewing width:
8 + # |---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---|
9 +
10   import os
11  
12   from urllib.parse import urljoin
13  
14   import pywikibot
15 + from bs4 import BeautifulSoup
16   import re
17 + import requests # for listing members with dir()
18  
19   from pywikibot.bot import QuitKeyboardInterrupt
20   from pywikibot import pagegenerators
# Line 17 | Line 28 | interwiki_prefixes = ('acronym', 'cache'
28  
29   interwiki_urls = ('http://www.acronymfinder.com/~/search/af.aspx?string=exact&Acronym=', 'http://www.google.com/search?q=cache:', 'https://commons.wikimedia.org/wiki/', 'http://www.dict.org/bin/Dict?Database=*&Form=Dict1&Strategy=*&Query=', 'http://www.google.com/search?q=', 'https://meta.wikimedia.org/wiki/', 'https://www.mediawiki.org/wiki/', 'https://en.wikibooks.org/wiki/', 'https://www.wikidata.org/wiki/', 'https://foundation.wikimedia.org/wiki/', 'https://en.wikinews.org/wiki/', 'https://en.wikipedia.org/wiki/', 'https://en.wikiquote.org/wiki/', 'https://wikisource.org/wiki/', 'https://species.wikimedia.org/wiki/', 'https://en.wikiversity.org/wiki/', 'https://en.wikivoyage.org/wiki/', 'https://en.wiktionary.org/wiki/', 'https://en.wiktionary.org/wiki/', 'https://en.wikipedia.org/wiki/')
30  
31 + # Initialize globals
32 + debug = 0
33   pages_checked = 0
34   iw_found = 0
35 < problems_found = 0
35 > errors_issued = 0
36 > name_printed = 0
37 >
38 > # Print the name of the page on which something noteworthy occurred, unless it has already been printed
39 > def possibly_print(page_name):
40 >   global debug
41 >   global name_printed
42 >  
43 >   if not name_printed and not debug:
44 >      pywikibot.stdout('')
45 >      pywikibot.stdout('From page "{}":'.format(page_name))
46 >      name_printed = 1
47 >
48 > # Search a page for the section specified in the link
49 > def find_section(page_text, page_name, page_slug, prefix, print_result):
50 >   global errors_issued
51 >
52 >   # Isolate section link
53 >   target_page_name, anchor_name = page_slug.split('#')
54 >   target_page_name_human = target_page_name.replace('_', ' ')
55 >  
56 >   # Convert dot-notation hex entities to proper characters
57 >   anchor_name = anchor_name.replace('.22', '"')
58 >   anchor_name = anchor_name.replace('.27', '\'')
59 >   anchor_name = anchor_name.replace('.28', '(')
60 >   anchor_name = anchor_name.replace('.29', ')')
61 >  
62 >   # Read linked page to see if it really has this anchor link
63 >   soup = BeautifulSoup(page_text, 'html.parser')
64 >   found_section = False
65 >   for the_tag in soup.findAll('span'): # search for span with ID matching the section name
66 >      tag_name = the_tag.get('id', None)
67 >      if tag_name == anchor_name:
68 >         found_section = True
69 >         break
70 >   if found_section == False:
71 >      for the_tag in soup.findAll('div'): # search for div with ID matching the section name
72 >         tag_name = the_tag.get('id', None)
73 >         if tag_name == anchor_name:
74 >            found_section = True
75 >            break
76 >   if found_section == False:
77 >      for the_tag in soup.findAll('h2'): # search for h2 with ID matching the section name
78 >         tag_name = the_tag.get('id', None)
79 >         if tag_name == anchor_name:
80 >            found_section = True
81 >            break
82 >   if found_section == False:
83 >      for the_tag in soup.findAll('h3'): # search for h3 with ID matching the section name
84 >         tag_name = the_tag.get('id', None)
85 >         if tag_name == anchor_name:
86 >            found_section = True
87 >            break
88 >   if found_section == False:
89 >      for the_tag in soup.findAll('h4'): # search for h4 with ID matching the section name
90 >         tag_name = the_tag.get('id', None)
91 >         if tag_name == anchor_name:
92 >            found_section = True
93 >            break
94 >   if found_section == False:
95 >      possibly_print(page_name)
96 >      pywikibot.stdout('   ERROR: Could not find section "{0}" on {1} page "{2}".'.format(anchor_name, prefix, target_page_name_human))
97 >      errors_issued = errors_issued + 1
98 >   elif print_result == True:
99 >      pywikibot.stdout('   The section "{0}" was found on {1} page "{2}".'.format(anchor_name, prefix, target_page_name_human))
100 >
101 > # For a link that redirected us to another page, extract the name of the target page from
102 > # the target page's source
103 > def find_canonical_link(page_text, page_name, page_slug, prefix, prefix_url):
104 >   global errors_issued
105 >   # Extract link from this markup, which contains the name of the redirected-to page:
105 >   # <link rel="canonical" href="https://en.wikipedia.org/wiki/Page_name"/>
106 >   canonical_name = page_text.split('<link rel="canonical" href="')[-1]
107 >   prefix_length = len(prefix_url)
108 >   canonical_name = canonical_name[prefix_length:]
109 >   tag_end = canonical_name.find('"') # the href value ends at the closing quote
110 >  
111 >   if tag_end == -1:
112 >      pywikibot.stdout('   ERROR: The {0} link "{1}" is a redirect page, but this script could not isolate the target page name.'.format(prefix, page_slug))
113 >      errors_issued = errors_issued + 1
114 >   else:
115 >      canonical_name = canonical_name[:tag_end]
116 >      if len(canonical_name) > 100:
117 >         # Certain things can cause the trim to fail; report error and avoid slamming the
118 >         # output with massive page source from a failed trim
119 >         pywikibot.stdout('   ERROR: The {0} link "{1}" is a redirect to "{2}…" (string overflow).'.format(prefix, page_slug, canonical_name[:100]))
120 >         errors_issued = errors_issued + 1
121 >      else:
122 >         canonical_name = canonical_name.replace('_', ' ')
123 >         if '#' in page_slug:
124 >            _, anchor_name = page_slug.split('#')
125 >            pywikibot.stdout('   The {0} link "{1}" is a redirect to "{2}#{3}", which is a valid page. Checking section link….'.format(prefix, page_slug, canonical_name, anchor_name))
126 >            find_section(page_text, page_name, page_slug, prefix, True)
127 >         else:
128 >            pywikibot.stdout('   The {0} link "{1}" is a redirect to "{2}", which is a valid page.'.format(prefix, page_slug, canonical_name))
129 >
130 > # Test an interwiki link and look for a section link if applicable
131 > def test_interwiki_link(prefix, prefix_url, iw_url, page_name, page_slug):
132 >   global errors_issued
133 >  
134 >   response = fetch(iw_url)
135 >
136 >   # One way we tell that a redirect occurred is by checking fetch's history, as it
137 >   # automatically follows redirects. This will catch formal redirects which come from pages
138 >   # such as Special:PermanentLink.
139 >   if response.history != []:
140 >      possibly_print(page_name)
141 >        
142 >      if page_slug.startswith('WP:') and page_slug == page_slug.upper():
143 >         pywikibot.stdout('   Got redirection code "{0}" for {1} link "{2}". This appears to be a deliberate use of a Wikipedia shortcut. Checking the target page….'.format(response.history[0], prefix, page_slug))
144 >         find_canonical_link(response.text, page_name, page_slug, prefix, prefix_url)
145 >      else:
146 >         permalink1 = 'Special:PermanentLink/'.lower()
147 >         permalink2 = 'Special:Permalink/'.lower()
148 >         page_slug_lower = page_slug.lower()
149 >         if page_slug_lower.startswith(permalink1) or page_slug_lower.startswith(permalink2):
150 >            pywikibot.stdout('   Got redirection code "{0}" for {1} permanent revision link "{2}". Checking the target page….'.format(response.history[0], prefix, page_slug))
151 >            find_canonical_link(response.text, page_name, page_slug, prefix, prefix_url)
152 >         else:
153 >            pywikibot.stdout('   ERROR: Unrecognized type of redirection (code "{0}") for {1} link "{2}". You should check the link manually.'.format(response.history[0], prefix, page_slug))
154 >            errors_issued = errors_issued + 1
155 >   elif response.status_code != 200:
156 >      possibly_print(page_name)
157 >      pywikibot.stdout('   ERROR: Got response code {0} for {1} link "{2}". The page may not exist.'.format(response.status_code, prefix, page_slug))
158 >      errors_issued = errors_issued + 1
159 >   # However, the more common case is a soft redirect: when a redirect page is accessed,
160 >   # MediaWiki quietly serves the target page's content while returning code 200 (OK) as
161 >   # if the link were correct. We must detect these soft redirects by looking at the page
162 >   # source to find the redirect note inserted at the top of the page for the reader.
163 >   elif 'Redirected from <a' in response.text:
164 >      possibly_print(page_name)
165 >      pywikibot.stdout('   Got silently redirected by {0} link "{1}". Checking the target page….'.format(prefix, page_slug))
166 >      find_canonical_link(response.text, page_name, page_slug, prefix, prefix_url)
167 >   elif '#' in page_slug:
168 >      find_section(response.text, page_name, page_slug, prefix, False)
169  
170   # Searches the given page text for interwiki links
171 < def scan_for_iw_links(page_text):
172 <    global pages_checked
173 <    global iw_found
174 <    global problems_found
175 <    pages_checked = pages_checked + 1
176 <    cur = 0
177 <
178 <    for prefix in interwiki_prefixes:
179 <        # Isolate strings that start with "[[prefix:" and end with "|" or "]"
180 <        iw_link = "\[\[" + prefix + ":[^|\]]*(\||\])"
181 <        for match in re.finditer(iw_link, page_text):
182 <            # Extract just the page title from this regex match
183 <            s = match.start() + 2 + len(prefix) + 1
184 <            e = match.end() - 1
185 <
186 <            # Sometimes we used a space char. instead of a '_', so fix that before querying
187 <            page_title = page_text[s:e].replace(' ', '_')
188 <
189 <            # Construct full URL for the particular wiki
190 <            iw_url = interwiki_urls[cur] + page_title
191 <            pywikibot.output('Found {0} link {1}.'.format(prefix, page_title))
192 <            iw_found = iw_found + 1
193 <
194 <            # Adjust URL if this is a foreign-language WP link
195 <            if re.match("^[a-zA-Z]{2}:", page_title):
196 <                lang_code = page_title[0:2] + "."
197 <                # "wp:" is the Wikipedia: namespace, not a language
198 <                if lang_code != "wp." and lang_code != "WP.":
199 <                    iw_url = iw_url.replace('en.', lang_code)
200 <                    iw_url = iw_url.replace(page_title[0:3], '')
201 <
202 <            # Test the URL
203 <            #pywikibot.output('Testing URL {}...'.format(iw_url))
204 <            response = fetch(iw_url)
205 <
206 <            # Redirects are followed automatically by fetch() and treated as "200"s, so the
207 <            # way we tell that a redirect occurred is by checking the history
208 <            if response.history != []:
209 <                pywikibot.output('WARNING: Redirected from {}.'.format(response.history))
210 <                problems_found = problems_found + 1
211 <            elif response.status_code != 200:
212 <                #pywikibot.output('WARNING: Got response code {}.'.format(response.status_code)) # commented out because fetch() already prints such a msg
213 <                problems_found = problems_found + 1
214 <            elif '#' in page_title:
215 <                # Isolate section link
216 <                pywikibot.output('Detected section link on page {0}.'.format(page_title))
217 <                page_name, anchor_name = page_title.split('#')
218 <                
219 <                # Convert dot-notation hex entities to proper characters
220 <                anchor_name = anchor_name.replace('.22', '"')
221 <                anchor_name = anchor_name.replace('.27', '\'')
222 <                anchor_name = anchor_name.replace('.28', '(')
223 <                anchor_name = anchor_name.replace('.29', ')')
224 <                
225 <                # Read linked page to see if it really has this anchor link
226 <                soup = BeautifulSoup(response.text, 'html.parser')
227 <                found_section = False
228 <                for tag in soup.findAll('a'):
229 <                    link = tag.get('href', None)
84 <                    if not link:
85 <                        #pywikibot.output('It is not a link.')
86 <                        continue
87 <                    #pywikibot.output('Got link {0}.'.format(link))
88 <                    if not link.startswith('#'):
89 <                        continue
90 <                        
91 <                    if link == '#' + anchor_name:
92 <                        pywikibot.output('Found section link!')
93 <                        found_section = True
94 <                        break
95 <                if found_section == False:
96 <                    pywikibot.output('Could not find section {0} on page {1}.'.format(anchor_name, page_name))
97 <                    problems_found = problems_found + 1
98 <        cur = cur + 1
171 > def scan_for_interwiki_links(page_text, page_name):
172 >   global debug
173 >   global pages_checked
174 >   global iw_found
175 >   global name_printed
176 >   pages_checked = pages_checked + 1
177 >   cur_prefix = 0
178 >   name_printed = 0
179 >
180 >   for prefix in interwiki_prefixes:
181 >      # Isolate strings that start with "[[prefix:" and end with "|" or "]"
182 >      iw_link = r"\[\[" + prefix + r":[^|\]]*(\||\])"
183 >      for match in re.finditer(iw_link, page_text):
184 >         # Extract just the page title from this regex match
185 >         s = match.start() + 2 + len(prefix) + 1
186 >         e = match.end() - 1
187 >
188 >         # Commonly we use spaces instead of underscores, so fix that before querying
189 >         page_slug = page_text[s:e].replace(' ', '_')
190 >
191 >         # But use spaces for title when printing it
192 >         page_title_human = page_slug.replace('_', ' ')
193 >         if debug: pywikibot.stdout('   Validating {0} link "{1}"'.format(prefix, page_title_human))
194 >         iw_found = iw_found + 1
195 >
196 >         # Construct full URL for the particular wiki
197 >         iw_url = interwiki_urls[cur_prefix] + page_slug
198 >
199 >         # Adjust URL if this is a foreign-language WP link
200 >         if re.match("^[a-zA-Z]{2}:", page_slug):
201 >            lang_code = page_slug[0:2] + "."
202 >            # "wp:" is the Wikipedia: namespace, not a language
203 >            if lang_code != "wp." and lang_code != "WP.":
204 >               iw_url = iw_url.replace('en.', lang_code)
205 >               iw_url = iw_url.replace(page_slug[0:3], '')
206 >
207 >         # Test the URL
208 >         test_interwiki_link(prefix, interwiki_urls[cur_prefix], iw_url, page_name, page_slug)
209 >      cur_prefix = cur_prefix + 1
210 >
211 > # Print a wrap-up message
212 > def print_summary():
213 >   global pages_checked
214 >   global iw_found
215 >   global errors_issued
216 >
217 >   page_str = "pages"
218 >   if pages_checked == 1:
219 >      page_str = "page"
220 >
221 >   link_str = "links"
222 >   if iw_found == 1:
223 >      link_str = "link"
224 >
225 >   pywikibot.stdout('Checked {0} {1} and found {2} interwiki {3}.'.format(pages_checked, page_str, iw_found, link_str))
226 >
227 >   error_str = "errors were"
228 >   if errors_issued == 1:
229 >      error_str = "error was"
230  
231 < def main(*args):
101 <    cat_name = ''
102 <    page_name = ''
231 >   pywikibot.stdout('{0} {1} encountered in validating these links.'.format(errors_issued, error_str))
232  
233 <    local_args = pywikibot.handle_args(args)
234 <    genFactory = pagegenerators.GeneratorFactory()
233 > # Main function
234 > def main(*args):
235 >   global debug
236 >   search_cat = ''
237 >   search_page = ''
238 >
239 >   # Process arguments
240 >   local_args = pywikibot.handle_args(args)
241 >   for arg in local_args:
242 >      if arg.startswith('-cat:'):
243 >         search_cat = arg[5:]
244 >      elif arg.startswith('-page:'):
245 >         search_page = arg[6:]
246 >      elif arg == '-dbg':
247 >         debug = 1
248 >      else:
249 >         pywikibot.stdout('Unknown argument "{}". Exiting.'.format(arg))
250 >         return
251 >
252 >   #pywikibot.stdout('The members of the requests.models.Response class are:')
253 >   #pywikibot.stdout(format(dir(requests.models.Response)))
254 >   #return
255 >  
256 >   # Check specified page or loop through specified category and check all pages
257 >   site = pywikibot.Site()
258 >   if search_cat != '':
259 >      cat_obj = pywikibot.Category(site, search_cat)
260 >      generator = pagegenerators.CategorizedPageGenerator(cat_obj, recurse=True)
261 >      for page in pagegenerators.PreloadingGenerator(generator, 100):
262 >         if debug: pywikibot.stdout('Checking page "{}"'.format(page.title()))
263 >         scan_for_interwiki_links(page.text, page.title())
264 >   elif search_page != '':
265 >      page = pywikibot.Page(site, search_page)
266 >      if debug: pywikibot.stdout('Checking page "{}"'.format(page.title()))
267 >      scan_for_interwiki_links(page.text, page.title())
268  
269 <    for arg in local_args:
270 <        if arg.startswith('-cat:'):
109 <            cat_name = arg[5:]
110 <        elif arg.startswith('-page:'):
111 <            page_name = arg[6:]
112 <
113 <    site = pywikibot.Site()
114 <
115 <    # This line of code enumerates the methods in the 'page' class
116 <    #pywikibot.stdout(format(dir(page)))
117 <
118 <    if cat_name != '':
119 <        cat_obj = pywikibot.Category(site, cat_name)
120 <        generator = pagegenerators.CategorizedPageGenerator(cat_obj, recurse=True)
121 <        for page in pagegenerators.PreloadingGenerator(generator, 100):
122 <            pywikibot.stdout('Checking page {0}'.format(page.title()))
123 <            scan_for_iw_links(page.text)
124 <    elif page_name != '':
125 <        page = pywikibot.Page(site, page_name)
126 <        pywikibot.stdout('Checking page {0}'.format(page.title()))
127 <        scan_for_iw_links(page.text)
128 <
129 <    global pages_checked
130 <    global iw_found
131 <    global problems_found
132 <    pywikibot.stdout('Checked {0} page(s) and found {1} interwiki link(s) with {2} problem(s).'.format(pages_checked, iw_found, problems_found))
269 >   # Print the results
270 >   print_summary()
271  
272   if __name__ == '__main__':
273 <    main()
273 >   main()
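
A note on find_section() above: MediaWiki emits section anchors as id attributes on
span, div, and heading tags, which is why the function probes each tag type in turn.
A minimal sketch of an equivalent single-pass lookup, assuming the same html.parser
backend (anchor_exists() is a hypothetical helper, not part of this revision):

from bs4 import BeautifulSoup

def anchor_exists(page_html, anchor_name):
   # Search span/div/h2/h3/h4 tags for an id matching the section anchor,
   # mirroring the five sequential loops in find_section()
   soup = BeautifulSoup(page_html, 'html.parser')
   return soup.find(['span', 'div', 'h2', 'h3', 'h4'], id=anchor_name) is not None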

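Similarly, the string splitting in find_canonical_link() leans on the tag
<link rel="canonical" href=".../Page_name"/> in the page head. A short sketch of the
same extraction done with bs4 instead of split(), assuming page_html is the fetched
source; canonical_target() is a hypothetical helper that returns None where the
original reports an isolation error:

from bs4 import BeautifulSoup

def canonical_target(page_html, prefix_url):
   # Find the canonical <link> tag and strip the wiki's base URL, leaving
   # just the target page name with spaces restored
   soup = BeautifulSoup(page_html, 'html.parser')
   link_tag = soup.find('link', rel='canonical')
   if link_tag is None or not link_tag.get('href', '').startswith(prefix_url):
      return None
   return link_tag['href'][len(prefix_url):].replace('_', ' ')
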
Diff Legend

- Removed lines
+ Added lines
< Changed lines (old)
> Changed lines (new)
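
For reference, the redirect semantics that test_interwiki_link() relies on can be
reproduced with the requests library alone: redirects are followed automatically, so a
non-empty response.history is the evidence that a formal redirect occurred, while a
MediaWiki soft redirect arrives as a plain 200 whose body carries the "Redirected
from" notice. A minimal standalone sketch (the URL is only an example):

import requests

response = requests.get('https://en.wikipedia.org/wiki/WP:V')  # example shortcut link
if response.history:
   # Formal redirect: history holds the intermediate Response objects
   print('Redirected; first hop returned', response.history[0].status_code)
elif response.status_code != 200:
   print('Page may not exist; got code', response.status_code)
elif 'Redirected from <a' in response.text:
   print('Soft redirect: the wiki served the target page directly')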