New approach for the competition collector (Wettkampfsammler)
Changes to be committed:
- minor corrections/improvements
    modified: homepage/redesign2018/markdownExperiment/Makefile
    modified: homepage/redesign2018/markdownExperiment/src/jsonSd/cwsvJudo.json
- new approach for the competition collector
    new file: wkOrg/src/wkScraper/scrapyDocAuthorSpider.py
    new file: wkOrg/src/wkScraper/scrapyDocQuoteSpider.py
    new file: wkOrg/src/wkScraper/scrapyJvsKalender.py
wkOrg/src/wkScraper/scrapyDocAuthorSpider.py (new file, 26 lines)
@@ -0,0 +1,26 @@
import scrapy


class AuthorSpider(scrapy.Spider):
    name = 'author'

    start_urls = ['http://quotes.toscrape.com/']

    def parse(self, response):
        # follow links to author pages
        for href in response.css('.author + a::attr(href)'):
            yield response.follow(href, self.parse_author)

        # follow pagination links
        for href in response.css('li.next a::attr(href)'):
            yield response.follow(href, self.parse)

    def parse_author(self, response):
        def extract_with_css(query):
            return response.css(query).extract_first().strip()

        yield {
            'name': extract_with_css('h3.author-title::text'),
            'birthdate': extract_with_css('.author-born-date::text'),
            'bio': extract_with_css('.author-description::text'),
        }
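
For quick local testing, a single-file spider like this can be run without a full Scrapy project. Below is a minimal sketch using Scrapy's CrawlerProcess; the runner file name and the output file authors.json are illustrative, and the FEEDS setting assumes Scrapy 2.1 or newer:

# runAuthorSpider.py -- hypothetical runner, not part of this commit
from scrapy.crawler import CrawlerProcess

from scrapyDocAuthorSpider import AuthorSpider

process = CrawlerProcess(settings={
    # write every item the spider yields to a JSON file
    'FEEDS': {'authors.json': {'format': 'json'}},
})
process.crawl(AuthorSpider)
process.start()  # blocks until the crawl has finished

The same effect is available on the command line via: scrapy runspider scrapyDocAuthorSpider.py -o authors.json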
wkOrg/src/wkScraper/scrapyDocQuoteSpider.py (new file, 22 lines)
@@ -0,0 +1,22 @@
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

import scrapy


class QuotesSpider(scrapy.Spider):
    name = "quotes"
    start_urls = [
        'http://quotes.toscrape.com/tag/humor/',
    ]

    def parse(self, response):
        for quote in response.css('div.quote'):
            yield {
                'text': quote.css('span.text::text').extract_first(),
                'author': quote.xpath('span/small/text()').extract_first(),
            }

        next_page = response.css('li.next a::attr("href")').extract_first()
        if next_page is not None:
            yield response.follow(next_page, self.parse)
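
The selectors above can be exercised outside a crawl with parsel, the selector library Scrapy is built on. A minimal sketch; the inline HTML is a hand-written stand-in that only mimics the quotes.toscrape.com markup:

# selector smoke test -- the HTML snippet is an assumption, not real site output
from parsel import Selector

html = '''
<div class="quote">
  <span class="text">"A day without sunshine is like, you know, night."</span>
  <span>by <small class="author">Steve Martin</small></span>
</div>
'''
sel = Selector(text=html)
for quote in sel.css('div.quote'):
    print(quote.css('span.text::text').extract_first())      # the quote text
    print(quote.xpath('span/small/text()').extract_first())  # the author name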
wkOrg/src/wkScraper/scrapyJvsKalender.py (new file, 37 lines)
@@ -0,0 +1,37 @@
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

import scrapy


class QuotesSpider(scrapy.Spider):
    # NOTE: name clashes with the spider in scrapyDocQuoteSpider.py if both
    # files end up in the same Scrapy project
    name = "quotes"
    start_urls = [
        # 'https://judoverbandsachsen.de/kalender/',
        "https://judoverbandsachsen.de/kalender/?term_id%5B%5D=48&altersklassen%5B%5D=m-U09&altersklassen%5B%5D=w-U09",
    ]

    def eventExtract(self, response, url, callbackHandler):
        # currently unused helper; follow() is a Response method, so a
        # response must be passed in (the original called the non-existent
        # self.follow())
        yield response.follow(url, callbackHandler)

    def parseJvsKalenderEvent(self, response):
        def extractingJvsEvent(query):
            # placeholder until the detail-page selectors are worked out
            return "someStuff"

        yield {
            'stuff': extractingJvsEvent("someSelector"),
        }

    def parse(self, response):
        for post in response.css('div.posts'):
            url = post.css('div.col-4>a::attr(href)').extract_first()
            # details = yield response.follow( url, self.parseJvsKalenderEvent )

            # collapse runs of whitespace in the scraped text fields
            yield {
                'date': " ".join(post.css('div.col-2>time::text').extract_first().split()),
                'name': " ".join(post.css('div.col-4>a::text').extract_first().split()),
                'url': url,
                'ort': " ".join(post.css('div.col-3::text').extract_first().split()),
            }

            yield response.follow(url, self.parseJvsKalenderEvent)
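
The commented-out details = yield ... line points at the open problem here: data scraped on the detail page cannot be returned from response.follow() directly. Scrapy's mechanism for this is the cb_kwargs request argument (available since Scrapy 1.7). A sketch of how the two methods could hand a partially filled item along; the dict name item is an assumption, and the rest of the spider is unchanged:

    def parse(self, response):
        for post in response.css('div.posts'):
            url = post.css('div.col-4>a::attr(href)').extract_first()
            item = {
                'name': " ".join(post.css('div.col-4>a::text').extract_first().split()),
                'url': url,
            }
            # hand the partially filled item to the detail-page callback
            yield response.follow(url, self.parseJvsKalenderEvent,
                                  cb_kwargs={'item': item})

    def parseJvsKalenderEvent(self, response, item):
        item['stuff'] = "someStuff"  # add detail-page fields, then emit once
        yield item

This way each calendar entry is emitted exactly once, complete with its detail-page fields, instead of as two separate items.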