A web scraper built to search for specific information about a given compound (and its pseudonyms)

Merge branch 'develop' into feature/executable

Conflicts:
sourceloader.py

+404 -33
+1 -3
FourmiCrawler/items.py
··· 1 - # Define here the models for your scraped items 2 - # 3 - # See documentation in: 1 + # For more information on item definitions, see the Scrapy documentation in: 4 2 # http://doc.scrapy.org/en/latest/topics/items.html 5 3 6 4 from scrapy.item import Item, Field
+19 -4
FourmiCrawler/pipelines.py
··· 1 - # Define your item pipelines here 2 - # 3 - # Don't forget to add your pipeline to the ITEM_PIPELINES setting 4 - # See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html 1 + # For more information on item pipelines, see the Scrapy documentation in: 2 + # http://doc.scrapy.org/en/latest/topics/item-pipeline.html 5 3 import re 4 + 6 5 from scrapy.exceptions import DropItem 7 6 7 + class RemoveNonePipeline(object): 8 + 9 + def __init__(self): 10 + self.known_values = set() 11 + 12 + def process_item(self, item, spider): 13 + """ 14 + Processing the items so None values are replaced by empty strings 15 + :param item: The incoming item 16 + :param spider: The spider which scraped the spider 17 + :return: :raise DropItem: Returns the item if unique or drops them if it's already known 18 + """ 19 + for key in item: 20 + if item[key] is None: 21 + item[key] = "" 22 + return item 8 23 9 24 class DuplicatePipeline(object): 10 25
+3 -2
FourmiCrawler/settings.py
··· 11 11 SPIDER_MODULES = ['FourmiCrawler'] 12 12 NEWSPIDER_MODULE = 'FourmiCrawler' 13 13 ITEM_PIPELINES = { 14 - 'FourmiCrawler.pipelines.AttributeSelectionPipeline': 100, 15 - 'FourmiCrawler.pipelines.DuplicatePipeline': 200, 14 + "FourmiCrawler.pipelines.RemoveNonePipeline": 100, 15 + 'FourmiCrawler.pipelines.AttributeSelectionPipeline': 200, 16 + 'FourmiCrawler.pipelines.DuplicatePipeline': 300, 16 17 } 17 18 FEED_URI = 'results.json' 18 19 FEED_FORMAT = 'jsonlines'
+273
FourmiCrawler/sources/NIST.py
··· 1 + from source import Source 2 + from scrapy import log 3 + from scrapy.http import Request 4 + from scrapy.selector import Selector 5 + from FourmiCrawler.items import Result 6 + import re 7 + 8 + # [TODO]: values can be '128.', perhaps remove the dot in that case? 9 + # [TODO]: properties have references and comments which do not exist in the 10 + # Result item, but should be included eventually. 11 + 12 + class NIST(Source): 13 + """NIST Scraper plugin 14 + 15 + This plugin manages searching for a chemical on the NIST website 16 + and parsing the resulting page if the chemical exists on NIST. 17 + """ 18 + website = "http://webbook.nist.gov/*" 19 + 20 + search = 'cgi/cbook.cgi?Name=%s&Units=SI&cTP=on' 21 + 22 + ignore_list = set() 23 + 24 + def __init__(self): 25 + Source.__init__(self) 26 + 27 + def parse(self, response): 28 + sel = Selector(response) 29 + 30 + title = sel.xpath('head/title/text()').extract()[0] 31 + if title == 'Name Not Found': 32 + log.msg('NIST: Chemical not found!', level=log.ERROR) 33 + return 34 + if title not in self.ignore_list: 35 + self.ignore_list.update(title) 36 + log.msg('NIST emit synonym: %s' % title, level=log.DEBUG) 37 + self._spider.get_synonym_requests(title) 38 + 39 + requests = [] 40 + 41 + requests.extend(self.parse_generic_info(sel)) 42 + 43 + symbol_table = {} 44 + tds = sel.xpath('//table[@class="symbol_table"]/tr/td') 45 + for (symbol_td, name_td) in zip(tds[::2], tds[1::2]): 46 + symbol = ''.join(symbol_td.xpath('node()').extract()) 47 + name = name_td.xpath('text()').extract()[0] 48 + symbol_table[symbol] = name 49 + log.msg('NIST symbol: |%s|, name: |%s|' % (symbol, name), 50 + level=log.DEBUG) 51 + 52 + for table in sel.xpath('//table[@class="data"]'): 53 + summary = table.xpath('@summary').extract()[0] 54 + if summary == 'One dimensional data': 55 + log.msg('NIST table: Aggregrate data', level=log.DEBUG) 56 + requests.extend( 57 + self.parse_aggregate_data(table, symbol_table)) 58 + elif 
table.xpath('tr/th="Initial Phase"').extract()[0] == '1': 59 + log.msg('NIST table; Enthalpy/entropy of phase transition', 60 + level=log.DEBUG) 61 + requests.extend(self.parse_transition_data(table, summary)) 62 + elif table.xpath('tr[1]/td'): 63 + log.msg('NIST table: Horizontal table', level=log.DEBUG) 64 + elif summary == 'Antoine Equation Parameters': 65 + log.msg('NIST table: Antoine Equation Parameters', 66 + level=log.DEBUG) 67 + requests.extend(self.parse_antoine_data(table, summary)) 68 + elif len(table.xpath('tr[1]/th')) == 5: 69 + log.msg('NIST table: generic 5 columns', level=log.DEBUG) 70 + # Symbol (unit) Temperature (K) Method Reference Comment 71 + requests.extend(self.parse_generic_data(table, summary)) 72 + elif len(table.xpath('tr[1]/th')) == 4: 73 + log.msg('NIST table: generic 4 columns', level=log.DEBUG) 74 + # Symbol (unit) Temperature (K) Reference Comment 75 + requests.extend(self.parse_generic_data(table, summary)) 76 + else: 77 + log.msg('NIST table: NOT SUPPORTED', level=log.WARNING) 78 + continue #Assume unsupported 79 + return requests 80 + 81 + def parse_generic_info(self, sel): 82 + """Parses: synonyms, chemical formula, molecular weight, InChI, 83 + InChiKey, CAS number 84 + """ 85 + ul = sel.xpath('body/ul[li/strong="IUPAC Standard InChI:"]') 86 + li = ul.xpath('li') 87 + 88 + raw_synonyms = ul.xpath('li[strong="Other names:"]/text()').extract() 89 + for synonym in raw_synonyms[0].strip().split(';\n'): 90 + log.msg('NIST synonym: %s' % synonym, level=log.DEBUG) 91 + self.ignore_list.update(synonym) 92 + self._spider.get_synonym_requests(synonym) 93 + 94 + data = {} 95 + 96 + raw_formula = ul.xpath('li[strong/a="Formula"]//text()').extract() 97 + data['Chemical formula'] = ''.join(raw_formula[2:]).strip() 98 + 99 + raw_mol_weight = ul.xpath('li[strong/a="Molecular weight"]/text()') 100 + data['Molecular weight'] = raw_mol_weight.extract()[0].strip() 101 + 102 + raw_inchi = ul.xpath('li[strong="IUPAC Standard InChI:"]//tt/text()') 
103 + data['IUPAC Standard InChI'] = raw_inchi.extract()[0] 104 + 105 + raw_inchikey = ul.xpath('li[strong="IUPAC Standard InChIKey:"]' 106 + '/tt/text()') 107 + data['IUPAC Standard InChIKey'] = raw_inchikey.extract()[0] 108 + 109 + raw_cas_number = ul.xpath('li[strong="CAS Registry Number:"]/text()') 110 + data['CAS Registry Number'] = raw_cas_number.extract()[0].strip() 111 + 112 + requests = [] 113 + for key, value in data.iteritems(): 114 + result = Result({ 115 + 'attribute': key, 116 + 'value': value, 117 + 'source': 'NIST', 118 + 'reliability': 'Unknown', 119 + 'conditions': '' 120 + }) 121 + requests.append(result) 122 + 123 + return requests 124 + 125 + def parse_aggregate_data(self, table, symbol_table): 126 + """Parses the table(s) which contain possible links to individual 127 + data points 128 + """ 129 + results = [] 130 + for tr in table.xpath('tr[td]'): 131 + extra_data_url = tr.xpath('td[last()][a="Individual data points"]' 132 + '/a/@href').extract() 133 + if extra_data_url: 134 + request = Request(url=self.website[:-1] + extra_data_url[0], 135 + callback=self.parse_individual_datapoints) 136 + results.append(request) 137 + continue 138 + data = [] 139 + for td in tr.xpath('td'): 140 + data.append(''.join(td.xpath('node()').extract())) 141 + 142 + name = symbol_table[data[0]] 143 + condition = '' 144 + 145 + m = re.match(r'(.*) at (.*)', name) 146 + if m: 147 + name = m.group(1) 148 + condition = m.group(2) 149 + 150 + result = Result({ 151 + 'attribute': name, 152 + 'value': data[1] + ' ' + data[2], 153 + 'source': 'NIST', 154 + 'reliability': 'Unknown', 155 + 'conditions': condition 156 + }) 157 + log.msg('NIST: |%s|' % data, level=log.DEBUG) 158 + results.append(result) 159 + return results 160 + 161 + @staticmethod 162 + def parse_transition_data(table, summary): 163 + """Parses the table containing properties regarding phase changes""" 164 + results = [] 165 + 166 + tr_unit = ''.join(table.xpath('tr[1]/th[1]/node()').extract()) 167 + m = 
re.search(r'\((.*)\)', tr_unit) 168 + unit = '!' 169 + if m: 170 + unit = m.group(1) 171 + 172 + for tr in table.xpath('tr[td]'): 173 + tds = tr.xpath('td/text()').extract() 174 + result = Result({ 175 + 'attribute': summary, 176 + 'value': tds[0] + ' ' + unit, 177 + 'source': 'NIST', 178 + 'reliability': 'Unknown', 179 + 'conditions': '%s K, (%s -> %s)' % (tds[1], tds[2], tds[3]) 180 + }) 181 + results.append(result) 182 + 183 + 184 + return results 185 + 186 + @staticmethod 187 + def parse_generic_data(table, summary): 188 + """Parses the common tables of 4 and 5 rows. Assumes they are of the 189 + form: 190 + Symbol (unit)|Temperature (K)|Method|Reference|Comment 191 + Symbol (unit)|Temperature (K)|Reference|Comment 192 + """ 193 + results = [] 194 + 195 + tr_unit = ''.join(table.xpath('tr[1]/th[1]/node()').extract()) 196 + m = re.search(r'\((.*)\)', tr_unit) 197 + unit = '!' 198 + if m: 199 + unit = m.group(1) 200 + 201 + for tr in table.xpath('tr[td]'): 202 + tds = tr.xpath('td/text()').extract() 203 + result = Result({ 204 + 'attribute': summary, 205 + 'value': tds[0] + ' ' + unit, 206 + 'source': 'NIST', 207 + 'reliability': 'Unknown', 208 + 'conditions': '%s K' % tds[1] 209 + }) 210 + results.append(result) 211 + return results 212 + 213 + @staticmethod 214 + def parse_antoine_data(table, summary): 215 + """Parse table containing parameters for the Antione equation""" 216 + results = [] 217 + 218 + for tr in table.xpath('tr[td]'): 219 + tds = tr.xpath('td/text()').extract() 220 + result = Result({ 221 + 'attribute': summary, 222 + 'value': 'A=%s, B=%s, C=%s' % (tds[1], tds[2], tds[3]), 223 + 'source': 'NIST', 224 + 'reliability': 'Unknown', 225 + 'conditions': '%s K' % tds[0] 226 + }) 227 + results.append(result) 228 + 229 + return results 230 + 231 + def parse_individual_datapoints(self, response): 232 + """Parses the page linked from aggregate data""" 233 + sel = Selector(response) 234 + table = sel.xpath('//table[@class="data"]')[0] 235 + 236 + results = 
[] 237 + 238 + name = table.xpath('@summary').extract()[0] 239 + condition = '' 240 + m = re.match(r'(.*) at (.*)', name) 241 + if m: 242 + name = m.group(1) 243 + condition = m.group(2) 244 + 245 + tr_unit = ''.join(table.xpath('tr[1]/th[1]/node()').extract()) 246 + m = re.search(r'\((.*)\)', tr_unit) 247 + unit = '!' 248 + if m: 249 + unit = m.group(1) 250 + 251 + for tr in table.xpath('tr[td]'): 252 + tds = tr.xpath('td/text()').extract() 253 + uncertainty = '' 254 + m = re.search('Uncertainty assigned by TRC = (.*?) ', tds[-1]) 255 + if m: 256 + uncertainty = '+- %s ' % m.group(1) 257 + # [TODO]: get the plusminus sign working in here 258 + result = Result({ 259 + 'attribute': name, 260 + 'value': '%s %s%s' % (tds[0], uncertainty, unit), 261 + 'source': 'NIST', 262 + 'reliability': 'Unknown', 263 + 'conditions': condition 264 + }) 265 + results.append(result) 266 + 267 + return results 268 + 269 + def new_compound_request(self, compound): 270 + if compound not in self.ignore_list: 271 + self.ignore_list.update(compound) 272 + return Request(url=self.website[:-1] + self.search % compound, 273 + callback=self.parse)
+19 -2
FourmiCrawler/sources/source.py
··· 7 7 _spider = None 8 8 9 9 def __init__(self): 10 + """ 11 + Initiation of a new Source 12 + """ 10 13 pass 11 14 12 - def parse(self, reponse): 13 - log.msg("The parse function of the empty parser was used.", level=log.WARNING) 15 + def parse(self, response): 16 + """ 17 + This function should be able to parse all Scrapy Response objects with a URL matching the website Regex. 18 + :param response: A Scrapy Response object 19 + :return: A list of Result items and new Scrapy Requests 20 + """ 21 + log.msg("The parse function of the empty source was used.", level=log.WARNING) 14 22 pass 15 23 16 24 def new_compound_request(self, compound): 25 + """ 26 + This function should return a Scrapy Request for the given compound request. 27 + :param compound: A compound name. 28 + :return: A new Scrapy Request 29 + """ 17 30 # return Request(url=self.website[:-1] + compound, callback=self.parse) 18 31 pass 19 32 20 33 def set_spider(self, spider): 34 + """ 35 + A Function to save the associated spider. 36 + :param spider: A FourmiSpider object 37 + """ 21 38 self._spider = spider
+46 -14
FourmiCrawler/spider.py
··· 1 + import re 2 + 1 3 from scrapy.spider import Spider 2 4 from scrapy import log 3 - import re 4 5 5 6 6 7 class FourmiSpider(Spider): 8 + """ 9 + A spider writen for the Fourmi Project which calls upon all available sources to request and scrape data. 10 + """ 7 11 name = "FourmiSpider" 8 - __parsers = [] 12 + __sources = [] 9 13 synonyms = [] 10 14 11 15 def __init__(self, compound=None, selected_attributes=[".*"], *args, **kwargs): 16 + """ 17 + Initiation of the Spider 18 + :param compound: compound that will be searched. 19 + :param selected_attributes: A list of regular expressions that the attributes should match. 20 + """ 12 21 super(FourmiSpider, self).__init__(*args, **kwargs) 13 22 self.synonyms.append(compound) 14 23 self.selected_attributes = selected_attributes; 15 24 16 - def parse(self, reponse): 17 - for parser in self.__parsers: 18 - if re.match(parser.website, reponse.url): 19 - log.msg("Url: " + reponse.url + " -> Source: " + parser.website, level=log.DEBUG) 20 - return parser.parse(reponse) 25 + def parse(self, response): 26 + """ 27 + The function that is called when a response to a request is available. This function distributes this to a 28 + source which should be able to handle parsing the data. 29 + :param response: A Scrapy Response object that should be parsed 30 + :return: A list of Result items and new Request to be handled by the scrapy core. 31 + """ 32 + for source in self.__sources: 33 + if re.match(source.website, response.url): 34 + log.msg("Url: " + response.url + " -> Source: " + source.website, level=log.DEBUG) 35 + return source.parse(response) 21 36 return None 22 37 23 38 def get_synonym_requests(self, compound): 39 + """ 40 + A function that generates new Scrapy Request for each source given a new synonym of a compound. 
41 + :param compound: A compound name 42 + :return: A list of Scrapy Request objects 43 + """ 24 44 requests = [] 25 - for parser in self.__parsers: 45 + for parser in self.__sources: 26 46 parser_requests = parser.new_compound_request(compound) 27 47 if parser_requests is not None: 28 48 requests.append(parser_requests) 29 49 return requests 30 50 31 51 def start_requests(self): 52 + """ 53 + The function called by Scrapy for it's first Requests 54 + :return: A list of Scrapy Request generated from the known synonyms using the available sources. 55 + """ 32 56 requests = [] 33 57 for synonym in self.synonyms: 34 58 requests.extend(self.get_synonym_requests(synonym)) 35 59 return requests 36 60 37 - def add_parsers(self, parsers): 38 - for parser in parsers: 39 - self.add_parser(parser) 61 + def add_sources(self, sources): 62 + """ 63 + A function to add a new Parser objects to the list of available sources. 64 + :param sources: A list of Source Objects. 65 + """ 66 + for parser in sources: 67 + self.add_source(parser) 40 68 41 - def add_parser(self, parser): 42 - self.__parsers.append(parser) 43 - parser.set_spider(self) 69 + def add_source(self, source): 70 + """ 71 + A function add a new Parser object to the list of available parsers. 72 + :param source: A Source Object 73 + """ 74 + self.__sources.append(source) 75 + source.set_spider(self)
+27 -5
fourmi.py
··· 33 33 from sourceloader import SourceLoader 34 34 35 35 36 - def setup_crawler(searchable, settings, source_loader, attributes): 37 - spider = FourmiSpider(compound=searchable, selected_attributes=attributes) 38 - spider.add_parsers(source_loader.sources) 36 + def setup_crawler(compound, settings, source_loader, attributes): 37 + """ 38 + This function prepares and start the crawler which starts the actual search on the internet 39 + :param compound: The compound which should be searched 40 + :param settings: A scrapy settings object 41 + :param source_loader: A fully functional SourceLoader object which contains only the sources that should be used. 42 + :param attributes: A list of regular expressions which the attribute names should match. 43 + """ 44 + spider = FourmiSpider(compound=compound, selected_attributes=attributes) 45 + spider.add_sources(source_loader.sources) 39 46 crawler = Crawler(settings) 40 47 crawler.signals.connect(reactor.stop, signal=signals.spider_closed) 41 48 crawler.configure() ··· 44 51 45 52 46 53 def scrapy_settings_manipulation(docopt_arguments): 54 + """ 55 + This function manipulates the Scrapy settings that normally would be set in the settings file. In the Fourmi 56 + project these are command line arguments. 57 + :param docopt_arguments: A dictionary generated by docopt containing all CLI arguments. 58 + """ 47 59 settings = get_project_settings() 48 - # [todo] - add at least a warning for files that already exist 60 + 49 61 if docopt_arguments["--output"] != 'result.*format*': 50 62 settings.overrides["FEED_URI"] = docopt_arguments["--output"] 51 63 elif docopt_arguments["--format"] == "jsonlines": ··· 60 72 61 73 62 74 def start_log(docopt_arguments): 75 + """ 76 + This function starts the logging functionality of Scrapy using the settings given by the CLI. 77 + :param docopt_arguments: A dictionary generated by docopt containing all CLI arguments. 
78 + """ 63 79 if docopt_arguments["--log"] is not None: 64 80 if docopt_arguments["--verbose"]: 65 81 log.start(logfile=docopt_arguments["--log"], logstdout=False, loglevel=log.DEBUG) ··· 73 89 74 90 75 91 def search(docopt_arguments, source_loader): 92 + """ 93 + The function that facilitates the search for a specific compound. 94 + :param docopt_arguments: A dictionary generated by docopt containing all CLI arguments. 95 + :param source_loader: An initiated SourceLoader object pointed at the directory with the sources. 96 + """ 76 97 start_log(docopt_arguments) 77 98 settings = scrapy_settings_manipulation(docopt_arguments) 78 99 setup_crawler(docopt_arguments["<compound>"], settings, source_loader, docopt_arguments["--attributes"].split(',')) 79 100 reactor.run() 80 101 81 102 103 + # The start for the Fourmi Command Line interface. 82 104 if __name__ == '__main__': 83 - arguments = docopt.docopt(__doc__, version='Fourmi - V0.3.0') 105 + arguments = docopt.docopt(__doc__, version='Fourmi - V0.4.0') 84 106 loader = SourceLoader() 85 107 86 108 if arguments["--include"]:
+16 -3
sourceloader.py
··· 2 2 import sys 3 3 import os 4 4 import re 5 + 5 6 from FourmiCrawler.sources.source import Source 6 7 7 8 8 9 class SourceLoader: 9 10 sources = [] 10 11 11 - def __init__(self, rel_dir="FourmiCrawler\\sources"): 12 + def __init__(self, rel_dir="FourmiCrawler/sources"): 12 13 13 14 if hasattr(sys,'frozen'): 14 15 path = os.path.dirname(sys.executable) 15 16 else: 16 17 path = os.path.dirname(os.path.abspath(__file__)) 17 18 18 - path += "\\" + rel_dir 19 + path += "/" + rel_dir 19 20 known_parser = set() 20 21 21 22 for py in [f[:-3] for f in os.listdir(path) if f.endswith('.py') and f != '__init__.py']: 22 - mod = __import__('.'.join([rel_dir.replace('\\', "."), py]), fromlist=[py]) 23 + mod = __import__('.'.join([rel_dir.replace('/', "."), py]), fromlist=[py]) 23 24 classes = [getattr(mod, x) for x in dir(mod) if inspect.isclass(getattr(mod, x))] 24 25 for cls in classes: 25 26 if issubclass(cls, Source) and cls not in known_parser: ··· 27 28 # known_parser.add(cls) 28 29 29 30 def include(self, source_names): 31 + """ 32 + This function excludes all sources that don't match the given regular expressions. 33 + :param source_names: A list of regular expression (strings) 34 + """ 30 35 new = set() 31 36 for name in source_names: 32 37 new.update([src for src in self.sources if re.match(name, src.__class__.__name__)]) 33 38 self.sources = list(new) 34 39 35 40 def exclude(self, source_names): 41 + """ 42 + This function excludes all sources that match the given regular expressions. 43 + :param source_names: A list of regular expression (strings) 44 + """ 36 45 exclude = [] 37 46 for name in source_names: 38 47 exclude.extend([src for src in self.sources if re.match(name, src.__class__.__name__)]) 39 48 self.sources = [src for src in self.sources if src not in exclude] 40 49 41 50 def __str__(self): 51 + """ 52 + This function returns a string with all sources currently available in the SourceLoader. 53 + :return: a string with all available sources. 
54 + """ 42 55 string = "" 43 56 for src in self.sources: 44 57 string += "Source: " + src.__class__.__name__