New approach for the competition collector (Wettkampfsammler)

Changes to be committed:
- minor corrections/improvements
	modified:   homepage/redesign2018/markdownExperiment/Makefile
	modified:   homepage/redesign2018/markdownExperiment/src/jsonSd/cwsvJudo.json
- New approach for the competition collector
	new file:   wkOrg/src/wkScraper/scrapyDocAuthorSpider.py
	new file:   wkOrg/src/wkScraper/scrapyDocQuoteSpider.py
	new file:   wkOrg/src/wkScraper/scrapyJvsKalender.py
Author: marko
Date: 2018-09-20 14:38:52 +02:00
parent 3ef14f9998
commit 8af477fca8
5 changed files with 90 additions and 5 deletions

homepage/redesign2018/markdownExperiment/Makefile

@@ -172,14 +172,14 @@ build/graphiken/cwsvLogoWappen/cwsvLogoWappen.png: graphiken/cwsvLogoWappen.xcf
 build/graphiken/cwsvJudoLogoWappen/cwsvJudoLogoWappen.%w.png: build/graphiken/cwsvJudoLogoWappen/cwsvJudoLogoWappen.png
 	mkdir -p $(dir $@)
 	convert -layers merge -background transparent -resize $*x $< $@
-	zopflipng --iterations=500 --filters=01234mepb --lossy_8bit --lossy_transparent $@ $@
-# zopflipng -my $@ $@
+	pngquant --speed 1 --ext .png --force $@
+	zopflipng -y --iterations=500 --filters=01234mepb --lossy_8bit --lossy_transparent $@ $@
 build/graphiken/cwsvLogoWappen/cwsvLogoWappen.%w.png: build/graphiken/cwsvLogoWappen/cwsvLogoWappen.png
 	mkdir -p $(dir $@)
 	convert -layers flatten -background transparent -resize $*x $< $@
-	zopflipng --iterations=500 --filters=01234mepb --lossy_8bit --lossy_transparent $@ $@
-# zopflipng -my $@ $@
+	pngquant --speed 1 --ext .png --force $@
+	zopflipng -y --iterations=500 --filters=01234mepb --lossy_8bit --lossy_transparent $@ $@
 .PHONY: favIcons

homepage/redesign2018/markdownExperiment/src/jsonSd/cwsvJudo.json

@@ -3,7 +3,7 @@
"@type" : "SportsClub",
"name": "Chemnitzer Freizeit- und Wohngebietssportverein - Abteilung Judo",
"url": "http://cwsvjudo.bplaced.net",
"image": "http://cwsvjudo.bplaced.net/ressourcen/graphiken/logos/cwsvJudoLogo.x512.png",
"image": "http://cwsvjudo.bplaced.net/ressourcen/graphiken/logos/cwsvJudoLogoWappen.512w.png",
"email": "cwsv.sb@web.de",
"telephone": "+493712823370",
"sameAs": [

wkOrg/src/wkScraper/scrapyDocAuthorSpider.py

@@ -0,0 +1,26 @@
import scrapy


class AuthorSpider(scrapy.Spider):
    name = 'author'

    start_urls = ['http://quotes.toscrape.com/']

    def parse(self, response):
        # follow links to author pages
        for href in response.css('.author + a::attr(href)'):
            yield response.follow(href, self.parse_author)

        # follow pagination links
        for href in response.css('li.next a::attr(href)'):
            yield response.follow(href, self.parse)

    def parse_author(self, response):
        def extract_with_css(query):
            return response.css(query).extract_first().strip()

        yield {
            'name': extract_with_css('h3.author-title::text'),
            'birthdate': extract_with_css('.author-born-date::text'),
            'bio': extract_with_css('.author-description::text'),
        }
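The two scrapyDoc* spiders mirror the Scrapy tutorial and can be tried outside a full project, for example with "scrapy runspider scrapyDocAuthorSpider.py -o authors.json", or driven from a plain script as in the minimal sketch below (the Scrapy 1.x feed settings and the output file name authors.json are assumptions for illustration, not part of the commit):

# Minimal run sketch (assumptions: executed next to scrapyDocAuthorSpider.py,
# Scrapy 1.x installed; "authors.json" is only an example output path).
from scrapy.crawler import CrawlerProcess

from scrapyDocAuthorSpider import AuthorSpider

process = CrawlerProcess(settings={
    'FEED_FORMAT': 'json',      # Scrapy 1.x feed export settings
    'FEED_URI': 'authors.json',
})
process.crawl(AuthorSpider)
process.start()                 # blocks until the crawl has finished

The same pattern works for the QuotesSpider in scrapyDocQuoteSpider.py.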

wkOrg/src/wkScraper/scrapyDocQuoteSpider.py

@@ -0,0 +1,22 @@
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import scrapy


class QuotesSpider(scrapy.Spider):
    name = "quotes"
    start_urls = [
        'http://quotes.toscrape.com/tag/humor/',
    ]

    def parse(self, response):
        for quote in response.css('div.quote'):
            yield {
                'text': quote.css('span.text::text').extract_first(),
                'author': quote.xpath('span/small/text()').extract_first(),
            }

        next_page = response.css('li.next a::attr("href")').extract_first()
        if next_page is not None:
            yield response.follow(next_page, self.parse)

wkOrg/src/wkScraper/scrapyJvsKalender.py

@@ -0,0 +1,37 @@
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import scrapy


class QuotesSpider(scrapy.Spider):
    # NOTE: the name duplicates the spider in scrapyDocQuoteSpider.py;
    # spider names must be unique if both files end up in the same Scrapy project.
    name = "quotes"
    start_urls = [
        # 'https://judoverbandsachsen.de/kalender/',
        "https://judoverbandsachsen.de/kalender/?term_id%5B%5D=48&altersklassen%5B%5D=m-U09&altersklassen%5B%5D=w-U09",
    ]

    def eventExtract(self, url, callbackHandler):
        # unused draft helper; Spider has no follow(), so build the request directly
        yield scrapy.Request(url, callback=callbackHandler)

    def parseJvsKalenderEvent(self, response):
        # placeholder for parsing a single event detail page
        def extractingJvsEvent(query):
            return "someStuff"

        yield {
            'stuff': extractingJvsEvent("someSelector"),
        }

    def parse(self, response):
        for post in response.css('div.posts'):
            url = post.css('div.col-4>a::attr(href)').extract_first()
            # details = yield response.follow( url, self.parseJvsKalenderEvent )
            yield {
                'date': " ".join(post.css('div.col-2>time::text').extract_first().split()),
                'name': " ".join(post.css('div.col-4>a::text').extract_first().split()),
                'url': url,
                'ort': " ".join(post.css('div.col-3::text').extract_first().split()),
            }
            # follow each event to its detail page (stub callback above)
            yield response.follow(url, self.parseJvsKalenderEvent)
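The commented-out "details = yield response.follow(...)" line marks the open question of how to combine the list-page fields with the event detail page: yielding a follow-up request from parse only schedules it, and whatever parseJvsKalenderEvent yields arrives as a separate item. A common Scrapy pattern is to attach the partially filled record to the request and complete it in the detail callback; the sketch below assumes that pattern (the class name JvsKalenderDetailSketch, the callback parse_event_detail, and the selector div.entry-content are illustrative placeholders, not taken from the site):

# Sketch only: reuse the list-page selectors from above, but carry the partial
# record to the detail page via the request meta dict and yield one merged
# item per event from the detail callback.
import scrapy


class JvsKalenderDetailSketch(scrapy.Spider):
    name = "jvsKalenderDetailSketch"   # placeholder name
    start_urls = [
        "https://judoverbandsachsen.de/kalender/?term_id%5B%5D=48&altersklassen%5B%5D=m-U09&altersklassen%5B%5D=w-U09",
    ]

    def parse(self, response):
        for post in response.css('div.posts'):
            url = post.css('div.col-4>a::attr(href)').extract_first()
            item = {
                'date': " ".join(post.css('div.col-2>time::text').extract_first().split()),
                'name': " ".join(post.css('div.col-4>a::text').extract_first().split()),
                'url': url,
                'ort': " ".join(post.css('div.col-3::text').extract_first().split()),
            }
            # instead of yielding the partial item here, pass it along with the request
            yield response.follow(url, callback=self.parse_event_detail,
                                  meta={'item': item})

    def parse_event_detail(self, response):
        item = response.meta['item']
        # placeholder selector: the real detail-page markup still has to be inspected
        item['details'] = " ".join(response.css('div.entry-content ::text').extract())
        yield item

Once the real detail-page selectors are known, a callback like this would replace the separate parseJvsKalenderEvent stub.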