# 1. Douban auto-reply script: uses urllib with a cookie-aware opener so the
#    login cookie is carried across requests.
# 2. An alternative approach uses sess = requests.session(); see:
#    https://blog.csdn.net/sxf1061700625/article/details/94128278
# https://blog.csdn.net/qq_31235811/article/details/88652122
#https://blog.csdn.net/weixin_37598106/article/details/72852220
#http://www.py3study.com/Article/details/id/354.html
#https://www.jianshu.com/p/a8d94c92249e
import random
import time
from urllib import request,parse
from http import cookiejar
# One shared cookie-aware opener for the whole script: the session cookie
# set by login() is automatically replayed on every later request.
cookie = cookiejar.CookieJar()
handler = request.HTTPCookieProcessor(cookie)
opener = request.build_opener(handler)
from bs4 import BeautifulSoup
# Topic pages that will each receive one auto-posted comment.
base_url = [
    r'https://www.douban.com/group/topic/167669608/',
    r'https://www.douban.com/group/topic/167911345/',
]
# Pool of canned replies; post_comments() picks one at random per topic.
pars = [
    "爱教了。 ",
    "非常感谢,对我帮助太大了。 ",
    "我们私聊吧",
    "对的,也这么觉得!",
]
# Desktop-Chrome User-Agent so requests look like an ordinary browser.
headers = {
    'User-Agent': (
        'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
        '(KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
    ),
}
def login():
    """Log in to Douban via the mobile basic-login JSON endpoint.

    Posts the credentials with the module-level cookie-aware ``opener`` so
    the session cookie from a successful login is reused by every later
    request, then prints the raw response body for manual inspection.
    """
    url = 'https://accounts.douban.com/j/mobile/login/basic'
    data = {
        'ck': '',
        'name': '135xxxxxxxx',    # account name / phone number (placeholder)
        'password': 'xxxxxxxx',   # account password (placeholder)
        'remember': 'on',
        'ticket': '',
    }
    # Encode the form fields as URL-encoded bytes for the POST body.
    data = parse.urlencode(data).encode()
    req = request.Request(url, data=data, headers=headers, method='POST')
    # Close the response explicitly instead of leaking the connection.
    with opener.open(req) as rsp:
        print(rsp.read())
def post_comments(url):
    """Scrape the anti-CSRF 'ck' token from a topic page, then POST one
    randomly chosen canned reply from ``pars`` to that topic.

    url: topic page URL; assumed to end with '/' so that appending
    'add_comment' yields the comment endpoint — TODO confirm for all inputs.
    """
    print("开始获取ck..........")
    req = request.Request(url, headers=headers)
    # Close the GET response once the page has been read and parsed.
    with opener.open(req) as html_ck:
        soup = BeautifulSoup(html_ck.read(), 'lxml')
    # The ck token is the last 4 characters of the href on the final link
    # inside the first <tbody> of the page.
    ck = soup.find_all('tbody')[0].find_all("a")[-1]['href'][-4:]
    print("已经成功获取ck: ", ck, )
    print("开始发贴..........")
    one_par = random.choice(pars)  # idiomatic random pick, replaces randint indexing
    data = {
        'ck': ck,
        # 'ref_cid': '1692237050',
        'rv_comment': one_par,
        'start': 0,
        'submit_btn': '发送',
    }
    data = parse.urlencode(data).encode()
    url = url + "add_comment"
    req = request.Request(url, data=data, headers=headers, method='POST')
    # Close the POST response too; its body is not needed (the old unused
    # `html` local has been dropped).
    with opener.open(req):
        pass
    print("Post successfully!")
if __name__ == '__main__':
    # Log in once, then reply to each topic, pausing between posts so the
    # requests are not fired in rapid succession.
    login()
    for topic_url in base_url:
        post_comments(topic_url)
        time.sleep(5)