From faaf98658f7a4f217868116dfb55fdeb484fa61d Mon Sep 17 00:00:00 2001
From: po9eakyfz <3055861661@qq.com>
Date: Tue, 23 Apr 2024 19:07:07 +0800
Subject: [PATCH] ADD file via upload

---
 2.py | 69 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 69 insertions(+)
 create mode 100644 2.py

diff --git a/2.py b/2.py
new file mode 100644
index 0000000..186eb8f
--- /dev/null
+++ b/2.py
@@ -0,0 +1,69 @@
+import csv
+import os
+
+import requests
+from lxml import etree
+
+INDEX_URL = "https://www.xingyueboke.com/sudongpozhuan/"
+headers = {
+    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
+                  "AppleWebKit/537.36 (KHTML, like Gecko) "
+                  "Chrome/122.0.0.0 Safari/537.36 Edg/122.0.0.0"
+}
+
+
+def get_source(url):
+    """Fetch a page and return its decoded HTML, or an empty string on failure."""
+    response = requests.get(url, headers=headers)
+    if response.status_code == 200:
+        response.encoding = "utf-8"
+        return response.text
+    print("Request failed, status code {}".format(response.status_code))
+    return ""
+
+
+def get_chapter_urls(start_source):
+    """Extract the chapter links from the book's index page."""
+    selector = etree.HTML(start_source)
+    return selector.xpath('//div[@class="book-list clearfix"]/ul/li/a/@href')
+
+
+def get_article(article_html):
+    """Parse a chapter page into (title, body text)."""
+    selector = etree.HTML(article_html)
+    title = selector.xpath('//h1/text()')[0]
+    content = selector.xpath('string(//div[@id="nr1"]/div)')
+    return title, content
+
+
+def save(title, content):
+    """Write one chapter to 苏东坡传/<title>.txt."""
+    path = "苏东坡传/" + title + ".txt"
+    directory = os.path.dirname(path)
+    if not os.path.exists(directory):
+        os.makedirs(directory)
+    with open(path, "w", encoding="utf-8") as f:
+        f.write(content)
+
+
+def save_csv(articles_list):
+    """Write an index CSV with one (url, title, length) row per chapter."""
+    if not os.path.exists("苏东坡传"):
+        os.makedirs("苏东坡传")
+    with open("苏东坡传/苏东坡传.csv", "w", encoding="utf-8", newline="") as f:
+        writer = csv.writer(f)
+        writer.writerows(articles_list)
+
+
+index_source = get_source(INDEX_URL)
+chapter_urls = get_chapter_urls(index_source)
+
+articles_list = []
+for chapter_url in chapter_urls:
+    article_html = get_source(chapter_url)
+    if article_html:
+        title, content = get_article(article_html)
+        print(title)
+        save(title, content)
+        articles_list.append([chapter_url, title, len(content)])
+save_csv(articles_list)
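
The fetch helper in the patch fails hard on flaky connections: requests.get with no timeout can hang indefinitely, and a single non-200 response silently drops a chapter. A minimal hardening sketch, assuming the `headers` dict from the patch is in scope; the retry count, delay, and timeout values are illustrative, not part of the patch:

import time

import requests

def get_source_with_retry(url, retries=3, delay=1.0):
    """Like get_source, but with a connect/read timeout and simple retries."""
    for attempt in range(retries):
        try:
            response = requests.get(url, headers=headers, timeout=10)
        except requests.RequestException:
            time.sleep(delay)  # transient network error: back off and retry
            continue
        if response.status_code == 200:
            response.encoding = "utf-8"
            return response.text
        time.sleep(delay)  # non-200 status: back off before the next attempt
    print("Request failed after {} attempts: {}".format(retries, url))
    return ""

Swapping this in for get_source inside the chapter loop would leave the rest of the script unchanged, since it keeps the same return contract: HTML text on success, an empty string on failure.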
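
The XPath in get_chapter_urls returns hrefs exactly as they appear in the index markup. The test URL in the original upload (https://www.xingyueboke.com/sudongpozhuan/85210.html) suggests they are absolute, but if the site ever emitted relative links the chapter loop would break. A defensive variant using the standard library's urljoin, assuming `etree` and `INDEX_URL` from the patch are in scope (urljoin leaves already-absolute URLs untouched):

from urllib.parse import urljoin

def get_chapter_urls(start_source, base_url=INDEX_URL):
    """Extract chapter links, resolving any relative hrefs against the index URL."""
    selector = etree.HTML(start_source)
    hrefs = selector.xpath('//div[@class="book-list clearfix"]/ul/li/a/@href')
    return [urljoin(base_url, href) for href in hrefs]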