'''
CSV columns:

author ID author_id; author URL author_url;

post time text_time; content text_content; post URL text_url; post/comment text_type;

like count like_num; comment count comment_num;
'''
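# NOTE: the element lookups below use the Selenium 3 API
# (find_element_by_xpath / find_elements_by_xpath), which is no longer
# available in Selenium 4; run with selenium<4 or migrate the calls to
# find_element(By.XPATH, ...).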
from selenium import webdriver
import pandas as pd
import selenium.webdriver.support.ui as ui
import time
import re
import os


# Log in to Weibo: open the login page and wait for a manual login to complete
def login():
    driver.get('https://weibo.com/login.php')
    driver.maximize_window()
    time.sleep(3)
    title = driver.title
    print(title)
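    # Poll the page title until it changes to the logged-in home-page title,
    # which signals that the manual login has finished.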
    while (title != "微博 – 随时随地发现新鲜事"):
        time.sleep(1)
        title = driver.title
        print(title)
    time.sleep(1)


# Open Chrome and log in to Weibo
driver = webdriver.Chrome()
wait = ui.WebDriverWait(driver, 10)
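# Regex used below to pull the digit run out of the reply-count text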
kk = re.compile(r'\d+')
login()


for i in range(28):
    # Load the scraped Weibo posts for this batch
    csv_name = "weibo_case" + str(i) + ".csv"
    print(csv_name)
    if (not os.path.exists(csv_name)):
        continue
    pd_data = pd.read_csv(csv_name, encoding="utf-8")
    urls = pd_data["text_url"].tolist()
    comments = pd_data["comment_num"].tolist()
    Num = len(urls) + 1  # running count of rows scraped so far
    fp = open(csv_name, 'a', encoding='utf-8')
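    # Comment rows are appended to the same CSV the posts were read from;
    # rows are assembled by hand below, without csv-module quoting.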

    for k in range(len(urls)):
        print(k, "_of_", len(urls))
        time.sleep(1)
        # Skip posts without a numeric comment count
        if (comments[k]=="评论"):
            continue
        #if (int(comments[k])<100):
            #continue
        url_name = urls[k]
        driver.get(url_name)
        time.sleep(2)
        # Start scraping comments for this post
        author_id = []
        text_comment = []
        no_fresh = 0
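        # no_fresh counts consecutive passes that add no new comments;
        # the scroll loop below gives up once it reaches 5.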
        for _ in range(2000):
            # All comment <div> nodes currently rendered in the viewport
            div_list = driver.find_elements_by_xpath('//*[@id="scroller"]/div[1]/div')
            no_fresh += 1
            for div in div_list:
                _time = div.find_element_by_xpath('./div/div/div/div[1]/div[2]/div[2]/div[1]').text  # comment time
                name = div.find_element_by_xpath('./div/div/div/div[1]/div[2]/div[1]/a[1]').text  # commenter's display name
                aurl = div.find_element_by_xpath('./div/div/div/div[1]/div[2]/div[1]/a[1]').get_attribute('href') # commenter's profile URL
                comment = div.find_element_by_xpath('./div/div/div/div[1]/div[2]/div[1]/span').text  # comment text
                comment = comment.replace(',', ',')
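                # Fullwidth commas keep the comment text from breaking the
                # comma-separated row written at the end of this loop.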
                if ((name in author_id) and (comment in text_comment) or (len(name)<=1)): # skip duplicates and empty names
                    #print("Already scraped!")
                    continue

                ele = div.find_elements_by_xpath('./div/div/div/div[1]/div[2]/div[2]/div[2]/div[4]/button/span[2]') # like count
                if (len(ele) == 1):
                    like = ele[0].text
                else:
                    like = 0

                ele = div.find_elements_by_xpath('./div/div/div/div[2]/div/div/div/span') # reply count
                reply = 0
                if (len(ele) == 1):
                    x = re.findall(kk, ele[0].text)  # pull out the digits with the regex
                    if (len(x) == 1):
                        reply = int(x[0])

                print("No. ", Num, "(", k, "_of_", len(urls), ")")
                print("Time:", _time)
                print("Name:", name)
                print("Comment:", comment)
                print("Like:", like)
                print("Reply:", reply)

                # Done with this comment: record it and append one CSV row
                text_comment.append(comment)
                author_id.append(name)
                Num += 1
                no_fresh = 0
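                # Column order follows the header in the docstring: author_id, author_url,
                # text_time, text_content, text_url (left blank), text_type (2, presumably
                # marking a comment row), like_num, comment_num.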
                fp.write(name + "," + aurl + "," + _time + "," + comment + ",,2," + str(like) + "," + str(reply) + "\n")

            if (no_fresh>=5):
                break
            else:
                driver.execute_script("window.scrollBy(0,500)")  # scroll down to load more comments
                time.sleep(2)

    fp.close()  # close this batch's CSV before moving on to the next file