'''
Scrape Weibo advanced-search results with Selenium and write one CSV row
per post with the fields:

author_id (author ID); author_url (author profile URL);
text_time (publish time); text_content (post text); text_url (post URL);
text_type (post / comment);
like_num (number of likes); comment_num (number of comments)
'''
from selenium import webdriver
from selenium.webdriver.common.by import By
import selenium.webdriver.support.ui as ui
import pandas as pd
import datetime as dt
import time
from tqdm import tqdm
import csv


def login():
    # Open the login page and block until the user finishes logging in
    # manually (e.g. by scanning the QR code): once login succeeds, the
    # page title changes to the Weibo home-page title.
    driver.get('https://weibo.com/login.php')
    driver.maximize_window()
    time.sleep(3)
    title = driver.title
    print(title)
    while title != "微博 – 随时随地发现新鲜事":
        time.sleep(1)
        title = driver.title
        print(title)
    time.sleep(1)
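
# A minimal alternative sketch (an assumption, not the author's code): the
# otherwise-unused "ui" import above suggests an explicit wait may have been
# intended, which WebDriverWait expresses without a hand-rolled polling loop.
# login_with_wait is a hypothetical helper; it is defined but never called.
def login_with_wait(timeout=300):
    driver.get('https://weibo.com/login.php')
    driver.maximize_window()
    # Poll the page title until manual login (e.g. QR scan) completes.
    ui.WebDriverWait(driver, timeout).until(
        lambda d: d.title == "微博 – 随时随地发现新鲜事"
    )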


def get_url(_key, begin, end):
    # Build one search URL per hour of each day in (begin, end): Weibo's
    # advanced search returns at most 50 pages per query, so slicing the
    # time range hour by hour keeps each query small enough to cover.
    url_list = []
    t0 = "https://s.weibo.com/weibo?q=" + _key + "&xsort=hot&suball=1&timescope=custom:"
    be = dt.datetime.strptime(str(begin), '%Y/%m/%d')
    en = dt.datetime.strptime(str(end), '%Y/%m/%d')
    delta = dt.timedelta(days=1)
    i = be + delta
    while i < en:
        for j in range(0, 24):  # one URL per one-hour window
            t = t0 + i.strftime('%Y-%m-%d') + '-' + str(j) + ":"
            t = t + i.strftime('%Y-%m-%d') + '-' + str(j + 1) + "&page="
            url_list.append(t)
        i += delta
    return url_list
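
# Sanity check (illustrative values only, not part of the scraper): each
# generated URL covers a single one-hour window and ends with "&page=" so a
# page number can be appended, e.g.
#   get_url("test", "2023/1/1", "2023/1/3")[0]
#   == "https://s.weibo.com/weibo?q=test&xsort=hot&suball=1&timescope=custom:2023-01-02-0:2023-01-02-1&page="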


driver = webdriver.Chrome()
login()

# One row per case to scrape; see the illustrative layout below.
pd_data = pd.read_csv("data/data_case.csv", encoding="gbk")
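
# Illustrative layout of data/data_case.csv (column names inferred from the
# unpacking below; the values here are made up): up to three search keywords,
# begin/end dates in %Y/%m/%d form, two further dates this script does not
# use, the output CSV name, and a have_reptiled crawl-status flag.
#   name,key1,key2,key3,begin,end,fbegin,fend,csvname,have_reptiled
#   case1,keywordA,keywordB,,2023/1/1,2023/1/10,2023/1/1,2023/1/10,data/case1.csv,2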

for name, key1, key2, key3, begin, end, fbegin, fend, csvname, have_reptiled in pd_data.values:
    if have_reptiled <= 1:  # only process cases whose flag is greater than 1
        continue
    url_list = []
    time.sleep(3)
    # A missing keyword is read by pandas as NaN, which str() renders "nan".
    if str(key1) != "nan":
        url_list += get_url(str(key1), begin, end)
    if str(key2) != "nan":
        url_list += get_url(str(key2), begin, end)
    if str(key3) != "nan":
        url_list += get_url(str(key3), begin, end)
    print(url_list)

    k = 0
    seen = set()  # (author, time) pairs already written, used to skip duplicates
    fp = open(str(csvname), 'w', encoding='utf-8')
    fp.write('author_id,author_url,text_time,text_content,text_url,text_type,like_num,comment_num\n')
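
    # The "csv" import at the top suggests csv.writer may have been intended;
    # it would also quote embedded commas safely instead of relying on the
    # full-width-comma replacement done below. A sketch, kept as a comment:
    #   writer = csv.writer(fp)
    #   writer.writerow(['author_id', 'author_url', 'text_time', 'text_content',
    #                    'text_url', 'text_type', 'like_num', 'comment_num'])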

    for u in tqdm(url_list):  # tqdm shows progress over the hourly URLs
        err = 0
        for p in range(1, 51):  # the search UI caps results at 50 pages
            driver.get(u + str(p))
            time.sleep(1)
            div_list = driver.find_elements(By.XPATH, "/html/body/div[1]/div[2]/div/div[2]/div[1]/div[2]/div")

            if len(div_list) == 0:
                # Either the page failed to load or the results ran out;
                # give up on this URL after two empty pages.
                print("Something went wrong or no more results")
                err = err + 1
                if err >= 2:
                    break
                else:
                    continue

            for div in div_list:
                print("No. ", k)
                # Author name and profile link
                now = div.find_element(By.XPATH, "./div/div[1]/div[2]/div/div[2]/a")
                aid = now.text
                aurl = now.get_attribute('href')
                print("author_id: ", aid)
                print("author_url: ", aurl)
                # Publish time and permalink of the post
                now = div.find_element(By.XPATH, "./div/div[1]/div[2]/div[2]/a")
                _time = now.text
                turl = now.get_attribute('href')
                print("text_time: ", _time)
                print("text_url: ", turl)
                # The hour-by-hour queries can return the same post twice;
                # skip anything whose (author, time) pair is already written.
                if (aid, _time) in seen:
                    print("Already crawled!")
                    continue
                # Post text; drop newlines and swap ASCII commas for
                # full-width ones so the row stays one valid CSV line.
                now = div.find_element(By.XPATH, "./div/div[1]/div[2]/p")
                content = now.text
                content = content.replace('\n', '').replace(',', ',')
                print("text_content: ", content)
                comment = div.find_element(By.XPATH, "./div/div[2]/ul/li[2]/a").text
                like = div.find_element(By.XPATH, "./div/div[2]/ul/li[3]/a").text
                print("comment_num: ", comment)
                print("like_num: ", like)

                seen.add((aid, _time))
                k = k + 1
                # text_type is hard-coded to 1, i.e. an original post.
                fp.write(aid + "," + aurl + "," + _time + "," + content + "," + turl + ",1," + like + "," + comment + "\n")

    fp.close()  # finish this case's CSV before moving on to the next row
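
# A minimal sketch (an assumption, not part of the scraping flow above) of
# calling Weibo's mymblog ajax endpoint with the requests library instead of
# a browser, reusing the logged-in Selenium session's cookies. fetch_mymblog
# is a hypothetical helper, defined but never called; the uid and page
# defaults are illustrative values.
def fetch_mymblog(uid="6723451248", page=2):
    import requests
    # Reuse the browser's login cookies so the endpoint treats us as logged in.
    cookies = {c['name']: c['value'] for c in driver.get_cookies()}
    resp = requests.get(
        "https://weibo.com/ajax/statuses/mymblog",
        params={"uid": uid, "page": page, "feature": 0},
        cookies=cookies,
        headers={"User-Agent": "Mozilla/5.0"},
    )
    return resp.json()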