root/Oni2/ValBot/Python/check_interwiki_links.py

Comparing ValBot/Python/check_interwiki_links.py (file contents):
Revision 1169 by iritscen, Mon Feb 21 23:59:20 2022 UTC vs.
Revision 1174 by iritscen, Tue Jun 28 22:11:41 2022 UTC

# Line 1 | Line 1
1 + # Check Interwiki Links
2 + # by iritscen@yahoo.com
3 + # Looks at each link on a page (or on all the pages in a category) that uses a registered
4 + # interwiki prefix and loads the linked page, verifying that it exists and that any section
5 + # link, if present, is valid as well. The output uses the word "ERROR" when it cannot
6 + # validate an interwiki link.
7 + # Recommended viewing width:
8 + # |---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---|
9 +
10   import os
11  
12   from urllib.parse import urljoin
13  
14   import pywikibot
15   import re
16 + import requests # for listing members with dir()
17  
18   from pywikibot.bot import QuitKeyboardInterrupt
19   from pywikibot import pagegenerators
# Line 19 | Line 29 | interwiki_urls = ('http://www.acronymfin
29  
30   pages_checked = 0
31   iw_found = 0
32 < problems_found = 0
32 > errors_issued = 0
33  
34   # Searches the given page text for interwiki links
35   def scan_for_iw_links(page_text):
36      global pages_checked
37      global iw_found
38 <    global problems_found
38 >    global errors_issued
39      pages_checked = pages_checked + 1
40      cur = 0
41  
# Line 40 | Line 50 | def scan_for_iw_links(page_text):
50              # Sometimes we used a space char. instead of a '_', so fix that before querying
51              page_title = page_text[s:e].replace(' ', '_')
52  
53 +            # Use only spaces for title when printing it
54 +            page_title_human = page_title.replace('_', ' ')
55 +            pywikibot.stdout('   Validating {0} link "{1}"'.format(prefix, page_title_human))
56 +            iw_found = iw_found + 1
57 +
58              # Construct full URL for the particular wiki
59              iw_url = interwiki_urls[cur] + page_title
45            pywikibot.output('Found {0} link {1}.'.format(prefix, page_title))
46            iw_found = iw_found + 1
60  
61              # Adjust URL if this is a foreign-language WP link
62              if re.match("^[a-zA-Z]{2}:", page_title):
# Line 54 | Line 67 | def scan_for_iw_links(page_text):
67                      iw_url = iw_url.replace(page_title[0:3], '')
68  
69              # Test the URL
57            #pywikibot.output('Testing URL {}...'.format(iw_url))
70              response = fetch(iw_url)
71  
72 <            # Redirects are followed automatically by fetch() and treated as "200"s, so the
61 <            # way we tell that a redirect occurred is by checking the history
72 >            # One way we tell that a redirect occurred is by checking the history
73              if response.history != []:
74 <                pywikibot.output('WARNING: Redirected from {}.'.format(response.history))
75 <                problems_found = problems_found + 1
74 >                pywikibot.stdout('   ERROR: Got redirection code ({0}) on URL "{1}".'.format(response.history[0].status_code, iw_url))
75 >                errors_issued = errors_issued + 1
76              elif response.status_code != 200:
77 <                #pywikibot.output('WARNING: Got response code {}.'.format(response.status_code)) # commented out because fetch() already prints such a msg
78 <                problems_found = problems_found + 1
77 >                pywikibot.stdout('   ERROR: Got response code {0} on URL "{1}".'.format(response.status_code, iw_url))
78 >                errors_issued = errors_issued + 1
79 >            # The usual way that a redirect occurs is that MediaWiki redirects us sneakily
80 >            # using JavaScript, while returning a 200 OK code as if the link were correct;
81 >            # we must detect this from the page source
82 >            elif 'Redirected from <a' in response.text:
83 >                # Extract link from this source which contains name of redirected-to page:
84 >                # <link rel="canonical" href="https://en.wikipedia.org/wiki/Page_name"/>
85 >                canonical_name = response.text.split('<link rel="canonical" href="')[-1]
86 >                prefix_length = len(interwiki_urls[cur])
87 >                canonical_name = canonical_name[prefix_length:]
88 >                tag_end = canonical_name.find('"/>')
89 >                if tag_end == -1:
90 >                    pywikibot.stdout('   ERROR: This is a redirect page (but I could not isolate the correct page name).')
91 >                else:
92 >                    canonical_name = canonical_name[:tag_end]
93 >                    if len(canonical_name) > 100:
94 >                        # Certain things can cause the trim to fail; here we avoid slamming
95 >                        # the output with massive page source from a failed trim
96 >                        pywikibot.stdout('   ERROR: This is a redirect to "{}" (string trimmed to 100 chars due to excessive length).'.format(canonical_name[:100]))
97 >                    else:
98 >                        canonical_name = canonical_name.replace('_', ' ')
99 >                        pywikibot.stdout('   ERROR: This is a redirect to "{}".'.format(canonical_name))
100 >                errors_issued = errors_issued + 1
101              elif '#' in page_title:
102                  # Isolate section link
70                pywikibot.output('Detected section link on page {0}.'.format(page_title))
103                  page_name, anchor_name = page_title.split('#')
104                  
105                  # Convert dot-notation hex entities to proper characters
# Line 79 | Line 111 | def scan_for_iw_links(page_text):
111                  # Read linked page to see if it really has this anchor link
112                  soup = BeautifulSoup(response.text, 'html.parser')
113                  found_section = False
114 <                for tag in soup.findAll('a'):
115 <                    link = tag.get('href', None)
116 <                    if not link:
85 <                        #pywikibot.output('It is not a link.')
86 <                        continue
87 <                    #pywikibot.output('Got link {0}.'.format(link))
88 <                    if not link.startswith('#'):
89 <                        continue
90 <                        
91 <                    if link == '#' + anchor_name:
92 <                        pywikibot.output('Found section link!')
114 >                for span_tag in soup.findAll('span'):
115 >                    span_name = span_tag.get('id', None)
116 >                    if span_name == anchor_name:
117                          found_section = True
118                          break
119                  if found_section == False:
120 <                    pywikibot.output('Could not find section {0} on page {1}.'.format(anchor_name, page_name))
121 <                    problems_found = problems_found + 1
120 >                    pywikibot.stdout('   ERROR: Could not find section {0} on page {1}.'.format(anchor_name, page_name))
121 >                    errors_issued = errors_issued + 1
122          cur = cur + 1
123  
124   def main(*args):
# Line 112 | Line 136 | def main(*args):
136  
137      site = pywikibot.Site()
138  
139 <    # This line of code enumerates the methods in the 'page' class
140 <    #pywikibot.stdout(format(dir(page)))
139 >    #pywikibot.stdout('The members of the requests.models.Response class are:')
140 >    #pywikibot.stdout(format(dir(requests.models.Response)))
141  
142      if cat_name != '':
143          cat_obj = pywikibot.Category(site, cat_name)
144          generator = pagegenerators.CategorizedPageGenerator(cat_obj, recurse=True)
145          for page in pagegenerators.PreloadingGenerator(generator, 100):
146 <            pywikibot.stdout('Checking page {0}'.format(page.title()))
146 >            pywikibot.stdout('Checking page "{}"'.format(page.title()))
147              scan_for_iw_links(page.text)
148      elif page_name != '':
149          page = pywikibot.Page(site, page_name)
150 <        pywikibot.stdout('Checking page {0}'.format(page.title()))
150 >        pywikibot.stdout('Checking page "{}"'.format(page.title()))
151          scan_for_iw_links(page.text)
152  
153      global pages_checked
154      global iw_found
155 <    global problems_found
156 <    pywikibot.stdout('Checked {0} page(s) and found {1} interwiki link(s) with {2} problem(s).'.format(pages_checked, iw_found, problems_found))
155 >    global errors_issued
156 >
157 >    page_str = "pages"
158 >    if pages_checked == 1:
159 >        page_str = "page"
160 >
161 >    link_str = "links"
162 >    if iw_found == 1:
163 >        link_str = "link"
164 >
165 >    pywikibot.stdout('Checked {0} {1} and found {2} interwiki {3}.'.format(pages_checked, page_str, iw_found, link_str))
166 >
167 >    error_str = "errors were"
168 >    if errors_issued == 1:
169 >        error_str = "error was"
170 >
171 >    pywikibot.stdout('{0} {1} encountered in validating these links.'.format(errors_issued, error_str))
172  
173   if __name__ == '__main__':
174      main()

Diff Legend

Removed lines
+ Added lines
< Changed lines (old)
> Changed lines (new)
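
Notes on the new checks

The revision's first two checks operate at the HTTP level. Since fetch() follows redirects automatically and reports the final page as a success, a non-empty response.history is the only HTTP-level sign that a redirect occurred. A minimal sketch of the same two checks, using plain requests in place of pywikibot's fetch() and a hypothetical URL:

import requests

response = requests.get('https://en.wikipedia.org/wiki/Example')  # hypothetical URL

if response.history:
    # The intermediate responses collected in .history reveal the redirect
    # that requests followed silently before landing on the final page
    print('ERROR: Got redirection code {0} on URL "{1}".'.format(
        response.history[0].status_code, response.url))
elif response.status_code != 200:
    print('ERROR: Got response code {0} on URL "{1}".'.format(
        response.status_code, response.url))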
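
The soft-redirect check extracts the redirect target by splitting the page source on the literal string '<link rel="canonical" href="'. The same lookup can be done with BeautifulSoup, which the script already uses for its section checks; the sketch below is illustrative only, and the helper name and wiki_url_prefix parameter are hypothetical:

from bs4 import BeautifulSoup

def canonical_page_name(page_html, wiki_url_prefix):
    soup = BeautifulSoup(page_html, 'html.parser')
    for tag in soup.find_all('link'):
        # 'rel' is a multi-valued attribute, so BeautifulSoup returns a list
        if 'canonical' in tag.get('rel', []):
            href = tag.get('href', '')
            if href.startswith(wiki_url_prefix):
                # Strip the wiki's URL prefix and return a readable title
                return href[len(wiki_url_prefix):].replace('_', ' ')
    return None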
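
The section-link validation relies on MediaWiki (as of these revisions) wrapping each section heading in a <span> whose id matches the anchor name, e.g. <span class="mw-headline" id="Anchor_name">. The loop over soup.findAll('span') can be condensed to a single find(); a minimal sketch, with a hypothetical helper name:

from bs4 import BeautifulSoup

def section_exists(page_html, anchor_name):
    # A span whose id equals the anchor name means the section link is valid
    soup = BeautifulSoup(page_html, 'html.parser')
    return soup.find('span', id=anchor_name) is not None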