#!/usr/bin/env python
# coding: utf-8

import json
import os
import re
import time
from random import randint

import cloudscraper
import requests
from bs4 import BeautifulSoup, Comment

from .tomd import Tomd


def replace_chinese(text, old_chinese, new_chinese):
    """Replace every occurrence of ``old_chinese`` in ``text`` with ``new_chinese``."""
    # Ensure old_chinese is a string
    if not isinstance(old_chinese, str):
        raise ValueError("old_chinese must be a string")
    try:
        # Match the (escaped) Chinese text with a case-insensitive regex
        pattern = re.compile(re.escape(old_chinese), re.IGNORECASE)
        return pattern.sub(new_chinese, text)
    except re.error as e:
        print(f"Regex error: {e}")
        return text

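# A minimal usage sketch for replace_chinese (the sample strings below are
# illustrative only, not taken from any real article):
#
#   replace_chinese("博客备份工具", "备份", "导出")    # -> "博客导出工具"
#   replace_chinese("no match here", "备份", "导出")   # -> text returned unchanged
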
def result_file(folder_username, file_name, folder_name):
    """Build (and create if necessary) the output path <folder_name>/<folder_username>/<file_name>."""
    folder = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", folder_name, folder_username)
    if not os.path.exists(folder):
        try:
            os.makedirs(folder)
        except Exception:
            pass
        path = os.path.join(folder, file_name)
        # Touch the file so later writes always have an existing target
        open(path, "w").close()
    else:
        path = os.path.join(folder, file_name)
    return path


def get_headers(cookie_path: str):
    """Read request headers from a text file with one "Name: value" pair per line."""
    cookies = {}
    with open(cookie_path, "r", encoding="utf-8") as f:
        cookie_list = f.readlines()
    for line in cookie_list:
        # Split only on the first ":" so header values that contain ":" survive intact
        cookie = line.split(":", 1)
        cookies[cookie[0]] = cookie[1].strip()
    return cookies

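# The headers file is assumed to look like a plain "Name: value" list; the path
# and values below are placeholders, not real credentials:
#
#   User-Agent: Mozilla/5.0 ...
#   Cookie: uuid_tt_dd=...; UserName=...
#
#   get_headers("cookie.txt")
#   # -> {"User-Agent": "Mozilla/5.0 ...", "Cookie": "uuid_tt_dd=...; UserName=..."}
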
def delete_ele(soup: BeautifulSoup, tags: list):
    """Remove every element matching any of the given CSS selectors."""
    for ele in tags:
        for useless_tag in soup.select(ele):
            useless_tag.decompose()


def delete_ele_attr(soup: BeautifulSoup, attrs: list):
    """Strip the given attributes from every element in the document."""
    for attr in attrs:
        for useless_attr in soup.find_all():
            del useless_attr[attr]

def delete_blank_ele(soup: BeautifulSoup, eles_except: list):
    """
    Remove every element whose text is empty from a BeautifulSoup object,
    except for the element names listed in ``eles_except``.

    Parameters:
        soup (BeautifulSoup): the BeautifulSoup object to clean.
        eles_except (list): element names that must never be removed.

    Returns:
        None. The BeautifulSoup object is modified in place.
    """
    for useless_attr in soup.find_all():
        try:
            if useless_attr.name not in eles_except and useless_attr.text == "":
                useless_attr.decompose()
        except Exception:
            pass

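# A minimal sketch of how the three helpers above clean a fragment before it is
# converted to Markdown (the HTML snippet is made up for illustration):
#
#   demo = BeautifulSoup('<div class="x"><svg></svg><p id="p1">hi</p><span></span></div>',
#                        "html.parser")
#   delete_ele(demo, ["svg"])                # drop unwanted tags by CSS selector
#   delete_ele_attr(demo, ["class", "id"])   # strip presentation attributes
#   delete_blank_ele(demo, ["br", "hr"])     # remove now-empty elements
#   print(demo)                              # -> <div><p>hi</p></div>
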
class CSDN(object):
    def __init__(self, username, folder_name, cookie_path):
        self.headers = get_headers(cookie_path)
        self.s = requests.Session()
        self.username = username
        self.TaskQueue = list()
        self.folder_name = folder_name
        self.url_num = 1

    def start2(self):
        """Collect (title, url) pairs from the new-style blog home page into TaskQueue."""
        num = 0
        articles = [None]
        while len(articles) > 0:
            num += 1
            url = u'https://lanceli.blog.csdn.net/?type=blog'
            print(url)
            # url = u'https://' + self.username + u'.blog.csdn.net/article/list/' + str(num)
            scraper = cloudscraper.create_scraper()  # returns a CloudScraper instance
            response = scraper.get(url, headers=self.headers)
            # response = self.s.get(url=url, headers=self.headers)
            html = response.text
            soup = BeautifulSoup(html, "html.parser")
            articles = soup.find_all('article', attrs={"class": "blog-list-box"})
            for article in articles:
                article_title = article('h4')[0].text
                article_href = article('a')[0]['href']
                self.TaskQueue.append((article_title, article_href))
            # The URL above is not paginated, so stop after the first page to
            # avoid re-fetching the same listing forever.
            break

    def start(self):
        """Walk the paginated article list and push (title, url) pairs onto TaskQueue."""
        num = 0
        articles = [None]
        while len(articles) > 0:
            num += 1
            url = u'https://blog.csdn.net/' + self.username + '/article/list/' + str(num)
            scraper = cloudscraper.create_scraper()  # returns a CloudScraper instance
            response = scraper.get(url, headers=self.headers)
            # response = self.s.get(url=url, headers=self.headers)
            html = response.text
            soup = BeautifulSoup(html, "html.parser")
            articles = soup.find_all('div', attrs={"class": "article-item-box csdn-tracking-statistics"})
            for article in articles:
                article_title = article.a.text.strip().replace(' ', ':')
                article_href = article.a['href']
                self.TaskQueue.append((article_title, article_href))

    def get_md(self, url):
        """Fetch one article page and return (markdown, page_title)."""
        scraper = cloudscraper.create_scraper()  # returns a CloudScraper instance
        # response = self.s.get(url=url, headers=self.headers)
        html = scraper.get(url, headers=self.headers).text
        soup = BeautifulSoup(html, 'lxml')
        content = soup.select_one("#mainBox > main > div.blog-content-box")
        if content is None:
            return "", soup.title.text
        # Remove HTML comments
        for useless_tag in content(text=lambda text: isinstance(text, Comment)):
            useless_tag.extract()
        # Remove useless tags
        tags = ["svg", "ul", ".hljs-button.signin"]
        delete_ele(content, tags)
        # Remove tag attributes
        attrs = ["class", "name", "id", "onclick", "style", "data-token", "rel"]
        delete_ele_attr(content, attrs)
        # Remove blank tags
        # eles_except = ["br", "hr"]
        # delete_blank_ele(content, eles_except)
        # Convert to markdown
        md = Tomd(str(content)).markdown
        # Debug dump of the converted markdown
        file = open('test.html', 'w+', encoding='utf-8')
        file.write(str(md))
        file.close()
        return md, soup.title.text

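    # Tomd usage as assumed by get_md above: the bundled converter takes an HTML
    # string and exposes the result on .markdown (a minimal, illustrative snippet):
    #
    #   Tomd("<h2>Title</h2><p>hello <strong>world</strong></p>").markdown
    #   # -> "## Title" followed by "hello **world**" (exact whitespace depends on tomd)
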
    def write_readme(self):
        """Write a README.md that lists every queued article as a numbered link."""
        print("+" * 100)
        print("[++] Start crawling the blog posts of {} ......".format(self.username))
        print("+" * 100)
        readme_path = result_file(self.username, file_name="README.md", folder_name=self.folder_name)
        with open(readme_path, 'w', encoding='utf-8') as readme_file:
            readme_head = "# Blog posts of " + self.username + "\n"
            readme_file.write(readme_head)
            self.TaskQueue.reverse()
            for (article_title, article_href) in self.TaskQueue:
                text = str(self.url_num) + '. [' + article_title + '](' + article_href + ')\n'
                readme_file.write(text)
                self.url_num += 1
        self.url_num = 1

    def get_all_articles(self):
        """Pop every queued article, convert it to Markdown, save it and push it to the remote API."""
        while len(self.TaskQueue) > 0:
            (article_title, article_href) = self.TaskQueue.pop()
            time.sleep(randint(10, 25))
            file_name = re.sub(r'[\/::*?"<>|\n]', '-', article_title) + ".md"
            article_path = result_file(folder_username=self.username, file_name=file_name, folder_name=self.folder_name)
            article_title = article_title.replace('\n', ' ')
            article_title = article_title.replace('"', ' ')
            article_title = article_title.replace('\'', ' ')
            article_title = article_title.replace('\r', ' ')
            article_title = article_title.replace('\t', ' ')
            md_head = "# " + article_title + "\n"
            md, article = self.get_md(article_href)
            # Retry until the article body could actually be extracted
            while md == "":
                time.sleep(randint(5, 25))
                md, article = self.get_md(article_href)
            md = '[Source](www.csdn.net)\r\n ' + md_head + md
            print("[++++] Processing URL: {}".format(article_href))
            # https://www.testingcloud.club/sapi/api/article_tree
            with open(article_path, "w", encoding="utf-8") as article_file:
                article_file.write(md)
            requests.put("https://www.testingcloud.club/sapi/api/article_tree", json.dumps({
                "title": (article_title),
                "content": (md),
                "spend_time": 1,
                "father": 2500,
                "level": 1,
                "is_public": 1,
                "author": "admin"
            }))
            self.url_num += 1


def spider(username: str, cookie_path: str, folder_name: str = "blog"):
    """
    Entry point that starts the CSDN blog crawler.

    Parameters:
        username (str): CSDN user name.
        cookie_path (str): path to the file holding the CSDN login cookies.
        folder_name (str, optional): folder in which crawled articles are saved, "blog" by default.

    Steps:
        1. Check for, and if needed create, the output folder.
        2. Initialise the CSDN crawler object.
        3. Start the crawler and collect the article list.
        4. Write the README file.
        5. Fetch and convert every article.
    """
    if not os.path.exists(folder_name):
        os.makedirs(folder_name)
    csdn = CSDN(username, folder_name, cookie_path)
    csdn.start()
    csdn.write_readme()
    csdn.get_all_articles()


def onearticle(href: str, cookie: str, folder_name: str = "blog"):
    """Fetch a single article by URL and push it to the remote API."""
    if not os.path.exists(folder_name):
        os.makedirs(folder_name)
    csdn = CSDN('username', folder_name, cookie)
    md, title = csdn.get_md(href)
    # Retry until the article body could actually be extracted
    while md == "":
        time.sleep(randint(5, 25))
        md, title = csdn.get_md(href)
    print("[++++] Processing URL: {}".format(href))
    # https://www.testingcloud.club/sapi/api/article_tree
    requests.put("https://www.testingcloud.club/sapi/api/article_tree", json.dumps({
        "title": (title),
        "content": (md),
        "spend_time": 1,
        "father": 2500,
        "level": 1,
        "is_public": 1,
        "author": "admin"
    }))


def onearticlewith(href: str, cookie: str, father: int = 2500, folder_name: str = "blog"):
    """Fetch a single article by URL and push it to the remote API under the given parent node."""
    if not os.path.exists(folder_name):
        os.makedirs(folder_name)
    csdn = CSDN('username', folder_name, cookie)
    md, title = csdn.get_md(href)
    # Retry until the article body could actually be extracted
    while md == "":
        time.sleep(randint(5, 25))
        md, title = csdn.get_md(href)
    print("[++++] Processing URL: {}".format(href))
    # https://www.testingcloud.club/sapi/api/article_tree
    requests.put("https://www.testingcloud.club/sapi/api/article_tree", json.dumps({
        "title": (title),
        "content": (md),
        "spend_time": 1,
        "father": father,
        "level": 1,
        "is_public": 1,
        "author": "admin"
    }))
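
# A minimal usage sketch. The user name, cookie path and article URL below are
# placeholders, and the import line assumes this file is the package's csdn
# module; this module uses a relative import, so it is meant to be imported
# from the package rather than run directly:
#
#   from .csdn import spider, onearticle
#
#   spider("example_user", cookie_path="cookie.txt", folder_name="blog")
#   onearticle("https://blog.csdn.net/example_user/article/details/123456789",
#              cookie="cookie.txt")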