from threading import Thread
from queue import Queue
from urllib.robotparser import RobotFileParser
from urllib.error import URLError
from urllib.parse import urlparse
from ssl import CertificateError
from random import sample, randrange, randint
import re
from datetime import datetime, timedelta

import asyncio
import aiohttp
import async_timeout

from bs4 import BeautifulSoup, Comment

from profiles.models import BrowserFingerprint

# Ugly hack to use this module alone instead of integrating it with Django
# from django.conf import settings

# Direct bookmark links ("#...") point back into the current html page;
# url_getter skips them so the crawler does not follow this kind of bookmark.

HARD_LIMIT = 20     # Maximum number of pages crawled per run
MAX_PER_PAGE = 10   # Upper bound on the number of links sampled from each page

FOOTER_URL = re.compile(".*footer.*")

SEARCH_ENGINE = []


class Settings:
    USER_AGENT = 'Default User'

settings = Settings()


def url_getter(html, current_page, root_url):
    links_list = []  # The final result
    soup = BeautifulSoup(html, "html.parser")
    # Get only the body
    body = soup.find('body')
    if not body:
        return links_list
    # Remove the footer
    if body.footer:
        body.footer.decompose()
    # Remove all comments
    comments = soup.findAll(text=lambda text: isinstance(text, Comment))
    for comment in comments:
        comment.extract()

    footers = soup.findAll(id=FOOTER_URL)
    for footer in footers:
        footer.extract()

    # Collect the links, skipping bookmark links pointing to the current page.
    links = map(lambda link: link.get("href", ""), body.find_all("a"))
    for link in links:
        if link:  # Edge case: no href found.
            if link.startswith("http"):
                links_list.append(link)
            elif link.startswith('/'):  # Internal link, relative to the root url
                links_list.append(root_url + link)
            elif link.startswith("#"):
                continue
            else:
                links_list.append(current_page + "/" + link)

    ## uniqifier that works with python <= 3.6
    #seen = set()
    #links_list = [x for x in links_list if x not in seen and not seen.add(x)]

    # uniqifier that works only with python >= 3.6
    links_list = list(dict.fromkeys(links_list))

    forbidden_words = ['login', 'agreement', 'mailto']
    links_list = [link for link in links_list
                  if not any(word in link.lower() for word in forbidden_words)]

    return links_list
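
# Illustrative example of url_getter (not executed at runtime), assuming the
# tiny hand-written snippet below: relative links are resolved against the
# root url, while "#" bookmark links are dropped.
#
#     url_getter('<body><a href="/about">About</a><a href="#top">Top</a></body>',
#                'https://example.com/page', 'https://example.com')
#     # -> ['https://example.com/about']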

class WebsiteSchedulerMeta(type):
    """ Meta-class for WebsiteScheduler, allowing a singleton class-like
    interface, but spawning one instance per canonical website URL """

    _instances = {}
    _canonicalize = re.compile(r'(https?://)?([^/]+)(/?|$)')

    def canonical_url(cls, url):
        """ Canonicalize a url """
        return cls._canonicalize.search(url).groups()[1]

    def __call__(cls, url, *args, **kwargs):
        canonical = cls.canonical_url(url)
        if canonical not in cls._instances:
            cls._instances[canonical] = \
                super(WebsiteSchedulerMeta, cls) \
                .__call__(canonical, *args, **kwargs)
        return cls._instances[canonical]


class WebsiteScheduler(metaclass=WebsiteSchedulerMeta):
    """ Schedule the accesses to a website as of robots.txt """

    def __init__(self, name, user_agent):
        self.name = name
        self.last_crawled = datetime.fromtimestamp(0)
        self.dead = False
        self.can_fetch_b = False
        self.user_agent = (user_agent if user_agent is not None
                           else settings.USER_AGENT)
        if any(self.urlroot() in item for item in SEARCH_ENGINE):
            print("found a search engine for %s" % self.urlroot())
            self.crawl_delay = timedelta(seconds=5)
            self.can_fetch_b = True
        else:
            try:
                robots_url = self.urlroot() + 'robots.txt'
                self.robot_parser = RobotFileParser(robots_url)
                self.robot_parser.read()  # TODO async?
            except (URLError, CertificateError):
                try:
                    robots_url = self.unsafe_urlroot() + 'robots.txt'
                    self.robot_parser = RobotFileParser(robots_url)
                    self.robot_parser.read()
                except URLError:  # Almost surely an offline website.
                    self.dead = True
                    self.crawl_delay = 0
            except Exception as e:
                print(e)
                raise e

            if not self.robot_parser.default_entry:
                self.dead = True

            if not self.dead:
                delay = self.robot_parser.crawl_delay(self.user_agent)
                if delay is None:
                    req_rate = self.robot_parser.request_rate(self.user_agent)
                    if req_rate is None:
                        delay = 5
                    else:
                        # Spread the allowed requests evenly over the time window.
                        delay = req_rate.seconds / req_rate.requests
                self.crawl_delay = timedelta(seconds=delay)

    def urlroot(self):
        ''' Get the root url for this website '''
        return 'https://{}/'.format(self.name)

    def unsafe_urlroot(self):
        ''' Get the root url for this website, over plain http '''
        return 'http://{}/'.format(self.name)

    def fetch_delay(self):
        ''' Get the delay needed before fetching a page is possible '''
        can_fetch_time = self.last_crawled + self.crawl_delay
        if can_fetch_time < datetime.now():
            return timedelta(0)
        return can_fetch_time - datetime.now()

    def can_fetch(self, url):
        ''' Check whether this program can fetch a given page '''
        return ((self.can_fetch_b)
                or ((not self.dead)
                    and self.robot_parser.can_fetch(self.user_agent, url)))

    def fetching(self):
        ''' Tell the scheduler that a page is being fetched now '''
        self.last_crawled = datetime.now()


class CrawlingThread(Thread):
    """ A separate thread for the crawling task. This is needed to use asyncio,
    since the thread will need its own event loop. """

    def __init__(self, user, url, engine_list, queue):
        global SEARCH_ENGINE
        SEARCH_ENGINE = engine_list

        # Pick a random browser fingerprint and reuse its headers for the crawl.
        nb_fingerprint = len(BrowserFingerprint.objects.all())
        fingerprint = BrowserFingerprint.objects.all()[
            randint(0, nb_fingerprint - 1)]
        self.headers = fingerprint.serialize_headers()

        self.queue = queue
        super(CrawlingThread, self).__init__()
        self.url = url

    def run(self):
        tasks = []
        #tasks.append(async_crawler("http://plus.google.com/+Python"))
        #tasks.append(async_crawler('https://python.org/'))
        tasks.append(async_crawler(self.url, self.queue, self.headers))

        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        loop.run_until_complete(asyncio.wait(tasks))
        loop.close()


class PageGetter:
    """ Asynchronously get a webpage, abiding by robots.txt """

    headers = None

    def __init__(self, session, url, user_agent):
        self.url = url
        self.session = session
        self.user_agent = user_agent

    async def get(self, ssl=True):
        """ Actually retrieve the webpage """
        scheduler = WebsiteScheduler(self.url, self.user_agent)
        if not scheduler.can_fetch(self.url):
            return None

        delay = scheduler.fetch_delay()
        while delay > timedelta(0):
            await asyncio.sleep(delay.total_seconds())
            delay = scheduler.fetch_delay()
        scheduler.fetching()
        async with async_timeout.timeout(10):
            async with self.session.get(self.url, verify_ssl=ssl) as resp:
                print("Resp status %s" % resp.status)
                try:
                    return await resp.text()
                except UnicodeDecodeError:
                    return None


async def async_print(url):
    """ Debug function to follow what's actually happening """
    async with aiohttp.ClientSession() as session:
        html = await PageGetter(session, url,
                                settings.USER_AGENT).get(ssl=False)

        print('GOT {}HTML for {}'.format(
            'None ' if html is None else '',
            url,
        ))
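
# Illustrative helper, not part of the original module and not used by the
# crawler itself: a minimal sketch of driving PageGetter directly to fetch a
# single page with an explicit user agent. The name fetch_single_page and the
# default user agent are assumptions for the example.
async def fetch_single_page(url, user_agent=None):
    """ Fetch one page through PageGetter, for manual testing. """
    user_agent = user_agent if user_agent is not None else settings.USER_AGENT
    async with aiohttp.ClientSession(headers={'User-Agent': user_agent}) as session:
        # Honors robots.txt and crawl delays via WebsiteScheduler.
        return await PageGetter(session, url, user_agent).get(ssl=False)
# Example use (e.g. from a REPL): asyncio.run(fetch_single_page('https://example.org/'))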

async def async_crawler(url, queue, headers=None):
    if headers is None:
        headers = {
            'User-Agent': settings.USER_AGENT,
        }
    queued = [url]
    crawled = []
    while queued and (len(crawled) < HARD_LIMIT):
        async with aiohttp.ClientSession(headers=headers) as session:
            try:
                url = queued.pop(0)
            except IndexError:
                print("Error queue is empty")
                return crawled
            parsed_url = urlparse(url)
            print("Crawling {}".format(url))
            # Reuse the user agent from the crawl headers for robots.txt checks.
            html = await PageGetter(
                session,
                url,
                headers.get('User-Agent', settings.USER_AGENT),
            ).get(ssl=False)
            if html:
                new_urls = url_getter(
                    html,
                    url,
                    parsed_url.scheme + "://" + parsed_url.netloc
                )
                crawled += [url]
                if new_urls:
                    sampled = sample(
                        new_urls,
                        randrange(min(MAX_PER_PAGE, len(new_urls)))
                    )
                    queued += [sample_url for sample_url in sampled
                               if sample_url not in queued
                               and sample_url not in crawled]
            else:
                print("No html received")
    print(crawled)
    queue.put(crawled)


if __name__ == '__main__':
    queue = Queue()
    crawl = CrawlingThread(None,
                           "https://google.com/search?q=fabriquer+masque+manif",
                           ["https://google.com/search/"],
                           queue)
    crawl.start()
    crawl.join()
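    # The list of crawled URLs can then be read back from the queue, e.g.:
    #   result = queue.get()   # blocks until async_crawler() puts its result
    #   print(result)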