A web scraper built to search for specific information on a given compound (and its synonyms)

Added documentation for the SourceLoader

+17
+17
sourceloader.py
··· 1 1 import inspect 2 2 import os 3 3 import re 4 + 4 5 from FourmiCrawler.sources.source import Source 5 6 6 7 ··· 8 9 sources = [] 9 10 10 11 def __init__(self, rel_dir="FourmiCrawler/sources"): 12 + """ 13 + The initiation of a SourceLoader, selects and indexes a directory for usable sources. 14 + :param rel_dir: A relative path to a directory. 15 + """ 11 16 path = os.path.dirname(os.path.abspath(__file__)) 12 17 path += "/" + rel_dir 13 18 known_parser = set() ··· 21 26 known_parser.add(cls) 22 27 23 28 def include(self, source_names): 29 + """ 30 + This function excludes all sources that don't match the given regular expressions. 31 + :param source_names: A list of regular expression (strings) 32 + """ 24 33 new = set() 25 34 for name in source_names: 26 35 new.update([src for src in self.sources if re.match(name, src.__class__.__name__)]) 27 36 self.sources = list(new) 28 37 29 38 def exclude(self, source_names): 39 + """ 40 + This function excludes all sources that match the given regular expressions. 41 + :param source_names: A list of regular expression (strings) 42 + """ 30 43 exclude = [] 31 44 for name in source_names: 32 45 exclude.extend([src for src in self.sources if re.match(name, src.__class__.__name__)]) 33 46 self.sources = [src for src in self.sources if src not in exclude] 34 47 35 48 def __str__(self): 49 + """ 50 + This function returns a string with all sources currently available in the SourceLoader. 51 + :return: a string with all available sources. 52 + """ 36 53 string = "" 37 54 for src in self.sources: 38 55 string += "Source: " + src.__class__.__name__