···11-# Define here the models for your scraped items
22-#
33-# See documentation in:
11+# For more information on item definitions, see the Scrapy documentation in:
42# http://doc.scrapy.org/en/latest/topics/items.html
5364from scrapy.item import Item, Field
+19-4
FourmiCrawler/pipelines.py
···11-# Define your item pipelines here
22-#
33-# Don't forget to add your pipeline to the ITEM_PIPELINES setting
44-# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
11+# For more information on item pipelines, see the Scrapy documentation in:
22+# http://doc.scrapy.org/en/latest/topics/item-pipeline.html
53import re
44+65from scrapy.exceptions import DropItem
class RemoveNonePipeline(object):
    """Item pipeline that normalises missing values.

    NOTE: the original docstring claimed this pipeline dropped duplicate
    items (a copy-paste from DuplicatePipeline); it does not. It only
    rewrites None values. The unused ``known_values`` set from that same
    copy-paste has been removed.
    """

    def process_item(self, item, spider):
        """
        Replace every None value in the item with an empty string.
        :param item: The incoming item
        :param spider: The spider which scraped the item (unused)
        :return: the same item, with None values replaced by ''
        """
        for key in item:
            if item[key] is None:
                item[key] = ""
        return item
823924class DuplicatePipeline(object):
1025
···11+from source import Source
22+from scrapy import log
33+from scrapy.http import Request
44+from scrapy.selector import Selector
55+from FourmiCrawler.items import Result
66+import re
77+88+# [TODO]: values can be '128.', perhaps remove the dot in that case?
99+# [TODO]: properties have references and comments which do not exist in the
1010+# Result item, but should be included eventually.
class NIST(Source):
    """NIST Scraper plugin

    This plugin manages searching for a chemical on the NIST website
    and parsing the resulting page if the chemical exists on NIST.
    """
    website = "http://webbook.nist.gov/*"

    # Search URL suffix; %s is filled with the compound name.
    search = 'cgi/cbook.cgi?Name=%s&Units=SI&cTP=on'

    # Synonyms/compounds already handled, to avoid duplicate requests.
    ignore_list = set()

    def __init__(self):
        Source.__init__(self)

    def parse(self, response):
        """
        Dispatch a NIST compound page to the table-specific parsers.
        :param response: A Scrapy Response object for a NIST page
        :return: A list of Result items and new Scrapy Requests, or None
            when the chemical was not found on NIST
        """
        sel = Selector(response)

        title = sel.xpath('head/title/text()').extract()[0]
        if title == 'Name Not Found':
            log.msg('NIST: Chemical not found!', level=log.ERROR)
            return
        if title not in self.ignore_list:
            # BUGFIX: was ignore_list.update(title), which added the
            # individual characters of the string, so the membership test
            # above never saw the full title and duplicates were re-emitted.
            self.ignore_list.add(title)
            log.msg('NIST emit synonym: %s' % title, level=log.DEBUG)
            self._spider.get_synonym_requests(title)

        requests = []

        requests.extend(self.parse_generic_info(sel))

        # Build a symbol -> full property name map from the symbol table,
        # used later to label rows of the aggregate-data table.
        symbol_table = {}
        tds = sel.xpath('//table[@class="symbol_table"]/tr/td')
        for (symbol_td, name_td) in zip(tds[::2], tds[1::2]):
            symbol = ''.join(symbol_td.xpath('node()').extract())
            name = name_td.xpath('text()').extract()[0]
            symbol_table[symbol] = name
            log.msg('NIST symbol: |%s|, name: |%s|' % (symbol, name),
                    level=log.DEBUG)

        for table in sel.xpath('//table[@class="data"]'):
            summary = table.xpath('@summary').extract()[0]
            if summary == 'One dimensional data':
                log.msg('NIST table: Aggregrate data', level=log.DEBUG)
                requests.extend(
                    self.parse_aggregate_data(table, symbol_table))
            elif table.xpath('tr/th="Initial Phase"').extract()[0] == '1':
                log.msg('NIST table; Enthalpy/entropy of phase transition',
                        level=log.DEBUG)
                requests.extend(self.parse_transition_data(table, summary))
            elif table.xpath('tr[1]/td'):
                # NOTE(review): this branch precedes the Antoine check, so an
                # Antoine table whose first row held <td> cells would be
                # classified as "horizontal" and skipped — confirm ordering.
                log.msg('NIST table: Horizontal table', level=log.DEBUG)
            elif summary == 'Antoine Equation Parameters':
                log.msg('NIST table: Antoine Equation Parameters',
                        level=log.DEBUG)
                requests.extend(self.parse_antoine_data(table, summary))
            elif len(table.xpath('tr[1]/th')) == 5:
                log.msg('NIST table: generic 5 columns', level=log.DEBUG)
                # Symbol (unit) Temperature (K) Method Reference Comment
                requests.extend(self.parse_generic_data(table, summary))
            elif len(table.xpath('tr[1]/th')) == 4:
                log.msg('NIST table: generic 4 columns', level=log.DEBUG)
                # Symbol (unit) Temperature (K) Reference Comment
                requests.extend(self.parse_generic_data(table, summary))
            else:
                log.msg('NIST table: NOT SUPPORTED', level=log.WARNING)
                continue  # Assume unsupported
        return requests

    def parse_generic_info(self, sel):
        """Parses: synonyms, chemical formula, molecular weight, InChI,
        InChiKey, CAS number
        :param sel: A Selector over the whole NIST compound page
        :return: A list of Result items, one per extracted attribute
        """
        ul = sel.xpath('body/ul[li/strong="IUPAC Standard InChI:"]')

        raw_synonyms = ul.xpath('li[strong="Other names:"]/text()').extract()
        for synonym in raw_synonyms[0].strip().split(';\n'):
            log.msg('NIST synonym: %s' % synonym, level=log.DEBUG)
            # BUGFIX: was update(synonym), which added the characters of the
            # synonym string rather than the synonym itself.
            self.ignore_list.add(synonym)
            self._spider.get_synonym_requests(synonym)

        data = {}

        raw_formula = ul.xpath('li[strong/a="Formula"]//text()').extract()
        data['Chemical formula'] = ''.join(raw_formula[2:]).strip()

        raw_mol_weight = ul.xpath('li[strong/a="Molecular weight"]/text()')
        data['Molecular weight'] = raw_mol_weight.extract()[0].strip()

        raw_inchi = ul.xpath('li[strong="IUPAC Standard InChI:"]//tt/text()')
        data['IUPAC Standard InChI'] = raw_inchi.extract()[0]

        raw_inchikey = ul.xpath('li[strong="IUPAC Standard InChIKey:"]'
                                '/tt/text()')
        data['IUPAC Standard InChIKey'] = raw_inchikey.extract()[0]

        raw_cas_number = ul.xpath('li[strong="CAS Registry Number:"]/text()')
        data['CAS Registry Number'] = raw_cas_number.extract()[0].strip()

        requests = []
        for key, value in data.iteritems():
            result = Result({
                'attribute': key,
                'value': value,
                'source': 'NIST',
                'reliability': 'Unknown',
                'conditions': ''
            })
            requests.append(result)

        return requests

    def parse_aggregate_data(self, table, symbol_table):
        """Parses the table(s) which contain possible links to individual
        data points
        :param table: a Selector over one "One dimensional data" table
        :param symbol_table: dict mapping property symbols to full names
        :return: A list of Result items and Requests for the linked
            individual-data-point pages
        """
        results = []
        for tr in table.xpath('tr[td]'):
            extra_data_url = tr.xpath('td[last()][a="Individual data points"]'
                                      '/a/@href').extract()
            if extra_data_url:
                # website[:-1] strips the trailing '*' of the URL pattern.
                request = Request(url=self.website[:-1] + extra_data_url[0],
                                  callback=self.parse_individual_datapoints)
                results.append(request)
                continue
            data = []
            for td in tr.xpath('td'):
                data.append(''.join(td.xpath('node()').extract()))

            name = symbol_table[data[0]]
            condition = ''

            # Names such as "Density at 298 K" carry the condition inline.
            m = re.match(r'(.*) at (.*)', name)
            if m:
                name = m.group(1)
                condition = m.group(2)

            result = Result({
                'attribute': name,
                'value': data[1] + ' ' + data[2],
                'source': 'NIST',
                'reliability': 'Unknown',
                'conditions': condition
            })
            log.msg('NIST: |%s|' % data, level=log.DEBUG)
            results.append(result)
        return results

    @staticmethod
    def parse_transition_data(table, summary):
        """Parses the table containing properties regarding phase changes
        :param table: a Selector over the phase-transition table
        :param summary: the table's summary attribute, used as attribute name
        :return: A list of Result items
        """
        results = []

        # The unit is embedded in the first header, e.g. "Quantity (kJ/mol)".
        tr_unit = ''.join(table.xpath('tr[1]/th[1]/node()').extract())
        m = re.search(r'\((.*)\)', tr_unit)
        unit = '!'
        if m:
            unit = m.group(1)

        for tr in table.xpath('tr[td]'):
            tds = tr.xpath('td/text()').extract()
            result = Result({
                'attribute': summary,
                'value': tds[0] + ' ' + unit,
                'source': 'NIST',
                'reliability': 'Unknown',
                'conditions': '%s K, (%s -> %s)' % (tds[1], tds[2], tds[3])
            })
            results.append(result)

        return results

    @staticmethod
    def parse_generic_data(table, summary):
        """Parses the common tables of 4 and 5 rows. Assumes they are of the
        form:
        Symbol (unit)|Temperature (K)|Method|Reference|Comment
        Symbol (unit)|Temperature (K)|Reference|Comment
        :param table: a Selector over the data table
        :param summary: the table's summary attribute, used as attribute name
        :return: A list of Result items
        """
        results = []

        tr_unit = ''.join(table.xpath('tr[1]/th[1]/node()').extract())
        m = re.search(r'\((.*)\)', tr_unit)
        unit = '!'
        if m:
            unit = m.group(1)

        for tr in table.xpath('tr[td]'):
            tds = tr.xpath('td/text()').extract()
            result = Result({
                'attribute': summary,
                'value': tds[0] + ' ' + unit,
                'source': 'NIST',
                'reliability': 'Unknown',
                'conditions': '%s K' % tds[1]
            })
            results.append(result)
        return results

    @staticmethod
    def parse_antoine_data(table, summary):
        """Parse table containing parameters for the Antione equation
        :param table: a Selector over the Antoine-parameters table
        :param summary: the table's summary attribute, used as attribute name
        :return: A list of Result items
        """
        results = []

        for tr in table.xpath('tr[td]'):
            tds = tr.xpath('td/text()').extract()
            result = Result({
                'attribute': summary,
                'value': 'A=%s, B=%s, C=%s' % (tds[1], tds[2], tds[3]),
                'source': 'NIST',
                'reliability': 'Unknown',
                'conditions': '%s K' % tds[0]
            })
            results.append(result)

        return results

    def parse_individual_datapoints(self, response):
        """Parses the page linked from aggregate data
        :param response: A Scrapy Response object for the data-point page
        :return: A list of Result items
        """
        sel = Selector(response)
        table = sel.xpath('//table[@class="data"]')[0]

        results = []

        name = table.xpath('@summary').extract()[0]
        condition = ''
        m = re.match(r'(.*) at (.*)', name)
        if m:
            name = m.group(1)
            condition = m.group(2)

        tr_unit = ''.join(table.xpath('tr[1]/th[1]/node()').extract())
        m = re.search(r'\((.*)\)', tr_unit)
        unit = '!'
        if m:
            unit = m.group(1)

        for tr in table.xpath('tr[td]'):
            tds = tr.xpath('td/text()').extract()
            uncertainty = ''
            m = re.search('Uncertainty assigned by TRC = (.*?) ', tds[-1])
            if m:
                uncertainty = '+- %s ' % m.group(1)
            # [TODO]: get the plusminus sign working in here
            result = Result({
                'attribute': name,
                'value': '%s %s%s' % (tds[0], uncertainty, unit),
                'source': 'NIST',
                'reliability': 'Unknown',
                'conditions': condition
            })
            results.append(result)

        return results

    def new_compound_request(self, compound):
        """
        Build the initial search Request for a compound, once per compound.
        :param compound: A compound name
        :return: A Scrapy Request, or None if the compound was seen before
        """
        if compound not in self.ignore_list:
            # BUGFIX: was update(compound); see parse() above.
            self.ignore_list.add(compound)
            return Request(url=self.website[:-1] + self.search % compound,
                           callback=self.parse)
+19-2
FourmiCrawler/sources/source.py
···77 _spider = None
def __init__(self):
    """Create a new Source instance; no state is initialised here."""
def parse(self, response):
    """
    This function should be able to parse all Scrapy Response objects with a URL matching the website Regex.
    :param response: A Scrapy Response object
    :return: A list of Result items and new Scrapy Requests
    """
    # Base implementation: subclasses override this; reaching it is a sign
    # of a misconfigured source, so emit a warning.
    log.msg("The parse function of the empty source was used.", level=log.WARNING)
def new_compound_request(self, compound):
    """
    This function should return a Scrapy Request for the given compound request.
    :param compound: A compound name.
    :return: A new Scrapy Request
    """
    # Subclasses typically implement something along the lines of:
    # return Request(url=self.website[:-1] + compound, callback=self.parse)
    pass
def set_spider(self, spider):
    """
    Remember the spider this source belongs to.
    :param spider: A FourmiSpider object
    """
    self._spider = spider
+46-14
FourmiCrawler/spider.py
···11+import re
22+13from scrapy.spider import Spider
24from scrapy import log
33-import re
class FourmiSpider(Spider):
    """
    A spider written for the Fourmi Project which calls upon all available
    sources to request and scrape data.
    """
    name = "FourmiSpider"

    def __init__(self, compound=None, selected_attributes=None, *args, **kwargs):
        """
        Initiation of the Spider
        :param compound: compound that will be searched.
        :param selected_attributes: A list of regular expressions that the
            attributes should match; defaults to [".*"] (match everything).
        """
        super(FourmiSpider, self).__init__(*args, **kwargs)
        # BUGFIX: __sources and synonyms used to be mutable class attributes,
        # shared (and accumulated) across all FourmiSpider instances.
        self.__sources = []
        self.synonyms = [compound]
        # Avoid the mutable-default-argument pitfall; None means "match all".
        self.selected_attributes = [".*"] if selected_attributes is None else selected_attributes

    def parse(self, response):
        """
        The function that is called when a response to a request is available.
        This function distributes this to a source which should be able to
        handle parsing the data.
        :param response: A Scrapy Response object that should be parsed
        :return: A list of Result items and new Requests to be handled by the
            scrapy core, or None when no source claims the URL.
        """
        for source in self.__sources:
            # source.website is a regular expression matching the source's URLs.
            if re.match(source.website, response.url):
                log.msg("Url: " + response.url + " -> Source: " + source.website, level=log.DEBUG)
                return source.parse(response)
        return None

    def get_synonym_requests(self, compound):
        """
        A function that generates new Scrapy Requests for each source given a
        new synonym of a compound.
        :param compound: A compound name
        :return: A list of Scrapy Request objects
        """
        requests = []
        for source in self.__sources:
            source_request = source.new_compound_request(compound)
            if source_request is not None:
                requests.append(source_request)
        return requests

    def start_requests(self):
        """
        The function called by Scrapy for its first Requests
        :return: A list of Scrapy Requests generated from the known synonyms
            using the available sources.
        """
        requests = []
        for synonym in self.synonyms:
            requests.extend(self.get_synonym_requests(synonym))
        return requests

    def add_sources(self, sources):
        """
        A function to add new Source objects to the list of available sources.
        :param sources: A list of Source objects.
        """
        for source in sources:
            self.add_source(source)

    def add_source(self, source):
        """
        A function to add a single Source object to the list of available
        sources and register this spider with it.
        :param source: A Source object
        """
        self.__sources.append(source)
        source.set_spider(self)
+27-5
fourmi.py
···3333from sourceloader import SourceLoader
343435353636-def setup_crawler(searchable, settings, source_loader, attributes):
3737- spider = FourmiSpider(compound=searchable, selected_attributes=attributes)
3838- spider.add_parsers(source_loader.sources)
3636+def setup_crawler(compound, settings, source_loader, attributes):
3737+ """
3838+ This function prepares and start the crawler which starts the actual search on the internet
3939+ :param compound: The compound which should be searched
4040+ :param settings: A scrapy settings object
4141+ :param source_loader: A fully functional SourceLoader object which contains only the sources that should be used.
4242+ :param attributes: A list of regular expressions which the attribute names should match.
4343+ """
4444+ spider = FourmiSpider(compound=compound, selected_attributes=attributes)
4545+ spider.add_sources(source_loader.sources)
3946 crawler = Crawler(settings)
4047 crawler.signals.connect(reactor.stop, signal=signals.spider_closed)
4148 crawler.configure()
···445145524653def scrapy_settings_manipulation(docopt_arguments):
5454+ """
5555+ This function manipulates the Scrapy settings that normally would be set in the settings file. In the Fourmi
5656+ project these are command line arguments.
5757+ :param docopt_arguments: A dictionary generated by docopt containing all CLI arguments.
5858+ """
4759 settings = get_project_settings()
4848- # [todo] - add at least a warning for files that already exist
6060+4961 if docopt_arguments["--output"] != 'result.*format*':
5062 settings.overrides["FEED_URI"] = docopt_arguments["--output"]
5163 elif docopt_arguments["--format"] == "jsonlines":
···607261736274def start_log(docopt_arguments):
7575+ """
7676+ This function starts the logging functionality of Scrapy using the settings given by the CLI.
7777+ :param docopt_arguments: A dictionary generated by docopt containing all CLI arguments.
7878+ """
6379 if docopt_arguments["--log"] is not None:
6480 if docopt_arguments["--verbose"]:
6581 log.start(logfile=docopt_arguments["--log"], logstdout=False, loglevel=log.DEBUG)
def search(docopt_arguments, source_loader):
    """
    The function that facilitates the search for a specific compound.
    :param docopt_arguments: A dictionary generated by docopt containing all CLI arguments.
    :param source_loader: An initiated SourceLoader object pointed at the directory with the sources.
    """
    start_log(docopt_arguments)
    settings = scrapy_settings_manipulation(docopt_arguments)
    attributes = docopt_arguments["--attributes"].split(',')
    setup_crawler(docopt_arguments["<compound>"], settings, source_loader, attributes)
    reactor.run()
8010181102103103+# The start for the Fourmi Command Line interface.
82104if __name__ == '__main__':
8383- arguments = docopt.docopt(__doc__, version='Fourmi - V0.3.0')
105105+ arguments = docopt.docopt(__doc__, version='Fourmi - V0.4.0')
84106 loader = SourceLoader()
8510786108 if arguments["--include"]:
+16-3
sourceloader.py
···22import sys
33import os
44import re
55+56from FourmiCrawler.sources.source import Source
677889class SourceLoader:
910 sources = []
10111111- def __init__(self, rel_dir="FourmiCrawler\\sources"):
1212+ def __init__(self, rel_dir="FourmiCrawler/sources"):
12131314 if hasattr(sys,'frozen'):
1415 path = os.path.dirname(sys.executable)
1516 else:
1617 path = os.path.dirname(os.path.abspath(__file__))
17181818- path += "\\" + rel_dir
1919+ path += "/" + rel_dir
1920 known_parser = set()
20212122 for py in [f[:-3] for f in os.listdir(path) if f.endswith('.py') and f != '__init__.py']:
2222- mod = __import__('.'.join([rel_dir.replace('\\', "."), py]), fromlist=[py])
2323+ mod = __import__('.'.join([rel_dir.replace('/', "."), py]), fromlist=[py])
2324 classes = [getattr(mod, x) for x in dir(mod) if inspect.isclass(getattr(mod, x))]
2425 for cls in classes:
2526 if issubclass(cls, Source) and cls not in known_parser:
···2728 # known_parser.add(cls)
def include(self, source_names):
    """
    This function excludes all sources that don't match the given regular expressions.
    :param source_names: A list of regular expression (strings)
    """
    matched = set()
    for pattern in source_names:
        for src in self.sources:
            if re.match(pattern, src.__class__.__name__):
                matched.add(src)
    self.sources = list(matched)
def exclude(self, source_names):
    """
    This function excludes all sources that match the given regular expressions.
    :param source_names: A list of regular expression (strings)
    """
    blacklist = [src
                 for pattern in source_names
                 for src in self.sources
                 if re.match(pattern, src.__class__.__name__)]
    self.sources = [src for src in self.sources if src not in blacklist]
40494150 def __str__(self):
5151+ """
5252+ This function returns a string with all sources currently available in the SourceLoader.
5353+ :return: a string with all available sources.
5454+ """
4255 string = ""
4356 for src in self.sources:
4457 string += "Source: " + src.__class__.__name__