# -*- coding: utf-8 -*-
import logging

from lncrawl.core.crawler import Crawler

logger = logging.getLogger(__name__)


class Dobelyuwai(Crawler):
    has_mtl = True
    base_url = "https://dobelyuwai.wordpress.com/"

    def initialize(self) -> None:
        self.cleaner.bad_text_regex.update(["Prev", "ToC", "Next"])

    def read_novel_info(self):
        logger.debug("Visiting %s", self.novel_url)
        soup = self.get_soup(self.novel_url)

        possible_title = soup.select_one('meta[property="og:title"]')
        assert possible_title, "No novel title"
        self.novel_title = possible_title["content"]
        logger.info("Novel title: %s", self.novel_title)

        possible_novel_cover = soup.select_one('meta[property="og:image"]')
        if possible_novel_cover:
            self.novel_cover = self.absolute_url(possible_novel_cover["content"])
            if "blank.jpg" in self.novel_cover:
                self.novel_cover = None
        logger.info("Novel cover: %s", self.novel_cover)

        # try:
        #     self.novel_author = soup.select_one('div.entry-content > p:nth-child(2)').text.strip()
        # except Exception as e:
        #     logger.warning('Failed to get novel author. Error: %s', e)
        # logger.info('%s', self.novel_author)

        # Removes non-TOC links from the bottom of the page.
        toc_parts = soup.select_one("div.entry-content")
        for notoc in toc_parts.select(
            ".sharedaddy, .inline-ad-slot, .code-block, script, .adsbygoogle"
        ):
            notoc.extract()

        # Extract volume-wise chapter entries. Restricting the href to the
        # site's own domain stops external links from being selected as chapters.
        chapters = soup.select(
            'div.entry-content a[href*="https://dobelyuwai.wordpress.com/2"]'
        )

        for a in chapters:
            chap_id = len(self.chapters) + 1
            vol_id = 1 + len(self.chapters) // 100
            if len(self.volumes) < vol_id:
                self.volumes.append({"id": vol_id})
            self.chapters.append(
                {
                    "id": chap_id,
                    "volume": vol_id,
                    "url": self.absolute_url(a["href"]),
                    "title": a.text.strip() or ("Chapter %d" % chap_id),
                }
            )

    def download_chapter_body(self, chapter):
        soup = self.get_soup(chapter["url"])
        body_parts = soup.select_one("div.entry-content")

        # Removes bad text from chapters.
        # Fixes images, so they can be downloaded.
        # all_imgs = soup.find_all('img')
        # for img in all_imgs:
        #     if img.has_attr('data-orig-file'):
        #         src_url = img['src']
        #         parent = img.parent
        #         img.extract()
        #         new_tag = soup.new_tag("img", src=src_url)
        #         parent.append(new_tag)

        return self.cleaner.extract_contents(body_parts)
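

# ---------------------------------------------------------------------------
# Illustrative sketch only, not part of the crawler: the loop in
# read_novel_info() groups chapters into volumes of 100 via integer division.
# The guarded block below replays that arithmetic on made-up data with no
# network access; the URLs are placeholders, not real chapter links.
if __name__ == "__main__":
    sample_links = [
        "https://dobelyuwai.wordpress.com/2020/01/chapter-%d/" % i
        for i in range(1, 251)
    ]
    chapters = []
    volumes = []
    for href in sample_links:
        chap_id = len(chapters) + 1
        vol_id = 1 + len(chapters) // 100  # chapters 1-100 -> vol 1, 101-200 -> vol 2, ...
        if len(volumes) < vol_id:
            volumes.append({"id": vol_id})
        chapters.append({"id": chap_id, "volume": vol_id, "url": href})
    print("volumes:", [v["id"] for v in volumes])  # [1, 2, 3]
    print("last chapter:", chapters[-1]["id"], "in volume", chapters[-1]["volume"])  # 250 in volume 3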