# NOTE: removed web-page scraping artifacts that preceded this file
# (repository topic hint, "221 lines", "8.9 KiB", "1 week ago" timestamps).
# 详解Python + Selenium 批量采集微信公众号搭建自己的微信公众号每日AI简报告别信息焦虑
# https://blog.csdn.net/k352733625/article/details/149222945
# 微信爬爬猫---公众号文章抓取代码分析
# https://blog.csdn.net/yajuanpi4899/article/details/121584268
# (scraping artifact removed)
"""
# 查看selenium版本
pip show selenium
4.34.2
1 week ago
1 week ago
# 查看Chrome浏览器版本
chrome://version/
138.0.7204.101 (正式版本) 64
# 下载驱动包
https://googlechromelabs.github.io/chrome-for-testing/
https://storage.googleapis.com/chrome-for-testing-public/138.0.7204.94/win64/chromedriver-win64.zip
"""
import asyncio
import datetime
import json
import logging
import random
import re
import sys
import time

import requests
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service as ChromeService

from Util.PostgreSQLUtil import init_postgres_pool
from Util.WxGzhUtil import init_wechat_browser, get_article_content
# Single logging configuration for this module (duplicates removed elsewhere).
logger = logging.getLogger('WxGzh')
logger.setLevel(logging.INFO)

# Attach exactly one stream handler, even if this module is imported twice.
if not logger.handlers:
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logger.addHandler(handler)
async def get_wechat_sources():
    """Fetch the list of WeChat official accounts from table t_wechat_source.

    Returns:
        list[dict]: one dict per row of t_wechat_source.
    """
    # Acquire the pool BEFORE the try block: if init_postgres_pool() raises,
    # the original code hit a NameError on pool.close() in the finally clause.
    pool = await init_postgres_pool()
    try:
        async with pool.acquire() as conn:
            rows = await conn.fetch('SELECT * FROM t_wechat_source')
            return [dict(row) for row in rows]
    finally:
        # The pool is created per call, so always release it.
        await pool.close()
async def is_article_exist(pool, article_url):
    """Return True if an article with this URL is already in t_wechat_articles.

    Args:
        pool: asyncpg-style connection pool (``acquire()`` async context manager).
        article_url: URL used as the deduplication key.

    Returns:
        bool: True when a row exists; False when absent, or on a query error
        (best-effort by design — a DB hiccup must not abort the crawl loop).
    """
    try:
        async with pool.acquire() as conn:
            row = await conn.fetchrow('''
                SELECT 1
                FROM t_wechat_articles
                WHERE url = $1 LIMIT 1
            ''', article_url)
            return row is not None
    except Exception as e:
        # Use the module logger (was root `logging`) for consistent formatting.
        logger.error(f"检查文章存在性失败: {e}")
        return False  # 出错时默认返回False避免影响正常流程
async def save_article_to_db(pool, article_title, account_name, article_url, publish_time, content, source_id):
    """Insert one crawled article into t_wechat_articles, skipping duplicates.

    Args:
        pool: asyncpg-style connection pool.
        article_title: article title.
        account_name: official-account display name (stored as `source`).
        article_url: canonical article URL (deduplication key).
        publish_time: datetime the article was published/updated.
        content: extracted article body text.
        source_id: id of the row in t_wechat_source this article came from.
    """
    # Deduplicate on URL before inserting.
    if await is_article_exist(pool, article_url):
        logger.info(f"文章已存在,跳过保存: {article_url}")
        return

    # TODO: feed the article into lightrag here to build the knowledge base.

    try:
        async with pool.acquire() as conn:
            await conn.execute('''
                INSERT INTO t_wechat_articles
                (title, source, url, publish_time, content, source_id)
                VALUES ($1, $2, $3, $4, $5, $6)
            ''', article_title, account_name, article_url,
                publish_time, content, source_id)
    except Exception as e:
        # Use the module logger (was root `logging`) for consistent formatting.
        logger.error(f"保存文章失败: {e}")
if __name__ == '__main__':
    # Restore the mp.weixin.qq.com session captured by a previous manual login.
    with open('cookies.txt', 'r', encoding='utf-8') as f:
        content = f.read()
    # Saved as JSON by the login helper.
    cookies = json.loads(content)

    # Selenium stores an optional 'expiry' unix timestamp with the cookie;
    # requests cannot use it, so validate it and strip it before reuse.
    expiry = cookies.get("expiry")  # .get(): no KeyError when absent
    if expiry:
        expiry_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(expiry))
        # Abort early when the saved session is stale.
        if time.time() > expiry:
            logger.error("Cookie已过期")
            sys.exit(1)
        del cookies["expiry"]
        logger.info(f"cookies的过期时间一般是4天cookies过期时间{expiry_date}")

    options = Options()
    options.add_argument('-headless')  # 无头参数,调试时可以注释掉

    # Request headers mimicking the WeChat desktop browser.
    header = {
        "HOST": "mp.weixin.qq.com",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36 QBCore/4.0.1301.400 QQBrowser/9.0.2524.400 Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2875.116 Safari/537.36 NetType/WIFI MicroMessenger/7.0.20.1781(0x6700143B) WindowsWechat(0x63010200)",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.8,en-US;q=0.6,en;q=0.5;q=0.4",
        "Connection": "keep-alive"
    }
    service = ChromeService(executable_path=r"C:\Windows\System32\chromedriver.exe")

    # Shared browser instance used by get_article_content() to render pages.
    driver = init_wechat_browser()

    # The platform redirects an authenticated request to a URL carrying
    # token=...; that token is required by every admin API call below.
    url = 'https://mp.weixin.qq.com'
    response = requests.get(url=url, allow_redirects=False, cookies=cookies)
    token = None
    if 'Location' in response.headers:
        redirect_url = response.headers.get("Location")
        logger.info(f"重定向URL:{redirect_url}")
        token_match = re.findall(r'token=(\d+)', redirect_url)
        if token_match:
            token = token_match[0]
            logger.info(f"获取到的token:{token}")
    if token is None:
        # Previously `token` stayed undefined here and the script later died
        # with NameError; fail fast with a clear message instead.
        logger.error("未能获取token,cookies可能已失效")
        driver.quit()
        sys.exit(1)

    article_urls = []

    # Load the list of official accounts to crawl from the database.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        gzlist = loop.run_until_complete(get_wechat_sources())
    finally:
        loop.close()

    # Crawl the latest articles of each account.
    for item in gzlist:
        account_name = item["account_name"]
        account_id = item["account_id"]
        source_id = item["id"]  # renamed from `id` (shadowed the builtin)

        # Step 1: resolve the account's fakeid via the searchbiz endpoint.
        # 搜索微信公众号的接口地址
        search_url = 'https://mp.weixin.qq.com/cgi-bin/searchbiz?'
        # 参数: token、随机数random、搜索的微信公众号名字
        query_id = {
            'action': 'search_biz',
            'token': token,
            'lang': 'zh_CN',
            'f': 'json',
            'ajax': '1',
            'random': random.random(),
            'query': account_name,
            'begin': '0',
            'count': '5'
        }
        search_response = requests.get(search_url, cookies=cookies, headers=header, params=query_id)
        # Guard against an empty result (the original indexed [0] blindly).
        biz_list = search_response.json().get('list') or []
        if not biz_list:
            logger.error(f"未搜索到公众号: {account_name}")
            continue
        # 取搜索结果中的第一个公众号
        lists = biz_list[0]
        # fakeid identifies the account in the article-list API below.
        fakeid = lists.get('fakeid')
        logger.info("fakeid:" + fakeid)

        # Step 2: list the account's recent articles.
        appmsg_url = 'https://mp.weixin.qq.com/cgi-bin/appmsg?'
        query_id_data = {
            'token': token,
            'lang': 'zh_CN',
            'f': 'json',
            'ajax': '1',
            'random': random.random(),
            'action': 'list_ex',
            'begin': '0',  # 不同页此参数变化变化规则为每页加5
            'count': '5',
            'query': '',
            'fakeid': fakeid,
            'type': '9'
        }
        query_fakeid_response = requests.get(appmsg_url, cookies=cookies, headers=header, params=query_id_data)
        fakeid_list = query_fakeid_response.json().get('app_msg_list') or []
        # `article` (not `item`): the original shadowed the outer loop variable.
        for article in fakeid_list:
            article_url = article.get('link')
            article_title = article.get('title')
            publish_time = datetime.datetime.fromtimestamp(int(article.get("update_time")))

            # 过滤掉试卷,致知物理中有大量试卷,我做教育资讯的不关心试卷
            if '试卷' in article_title:
                continue

            logger.info(f"正在处理文章: {article_title} ({publish_time})")
            logger.info(f"正在获取文章: {article_title}内容...")
            content = get_article_content(article_url)
            logger.info(f"成功获取文章: {article_title}内容。")

            # Persist via a short-lived event loop + pool per article.
            pool = None
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            try:
                pool = loop.run_until_complete(init_postgres_pool())
                loop.run_until_complete(
                    save_article_to_db(pool, article_title, account_name, article_url,
                                       publish_time, content, source_id))
            finally:
                if pool is not None:  # init may have failed; avoid NameError
                    loop.run_until_complete(pool.close())
                loop.close()

        # Throttle between accounts to avoid being rate limited.
        time.sleep(1)

    # 关闭浏览器
    driver.quit()