爬虫库
使用简单的requests库,这是一个阻塞的库,速度比较慢。 解析使用XPATH表达式 总体采用类的形式
多线程
使用concurrent.future并发模块,建立线程池,把future对象扔进去执行即可实现并发爬取效果
数据存储
使用Python ORM sqlalchemy保存到数据库,也可以使用自带的csv模块存在CSV中。
API接口
因为API接口存在数据保护情况,一个电影的每一个分类只能抓取前25页,全部评论、好评、中评、差评所有分类能爬100页,每页有20个数据,即最多为两千条数据。
因为时效性原因,不保证代码能爬到数据,只是给大家一个参考思路,上代码 :
import csv
import random
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime

import pymysql
import requests
from lxml import etree

from models import create_session, Comments
11
# Pool of desktop browser User-Agent strings; one is picked at random per
# request (see CommentFetcher._random_UA) to make the crawler look like
# different clients.
USERAGENT = [
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
    'Opera/9.80 (Windows NT 6.1; U; zh-cn) Presto/2.9.168 Version/11.50',
    'Mozilla/5.0 (Windows; U; Windows NT 6.1; ) AppleWebKit/534.12 (KHTML, like Gecko) Maxthon/3.0 Safari/534.12',
]
21
22
class CommentFetcher:
    """Fetch one page of Douban movie short comments and persist it.

    Each instance targets one (movie, offset, rating-category) page of the
    comments JSON API, extracts the embedded HTML with XPath, and saves the
    rows either to MySQL via SQLAlchemy (``save_to_database``) or to a
    shared CSV file (``save_to_csv``). Instances are meant to be submitted
    to a thread pool, so no mutable state is shared between them.
    """

    # Header template; __init__ makes a per-instance copy so that
    # _random_UA never mutates a dict shared across worker threads.
    headers = {'User-Agent': ''}
    # cookie must be copied from a logged-in browser session.
    cookie = ''
    cookies = {'cookie': cookie}
    # XPath root selecting one comment entry.
    base_node = '//div[@class="comment-item"]'
    # Serializes CSV writes across all worker threads.
    _csv_lock = threading.Lock()

    def __init__(self, movie_id, start, type=''):
        '''
        :movie_id: Douban movie id
        :start: offset of the first record, 0-480
        :type: rating filter -- all: '', good: 'h', medium: 'm', bad: 'l'
        '''
        self.movie_id = movie_id
        self.start = start
        self.type = type
        # Per-instance copy: the class-level dict would otherwise be
        # mutated concurrently by every thread in _random_UA.
        self.headers = dict(self.headers)
        self.url = self._build_url()
        # One DB session per fetcher; closed in save_to_database.
        self.session = create_session()

    def _build_url(self):
        """Return the comments-API URL for this movie/offset/category.

        NOTE(review): the original string contained a stray ``\\&`` before
        ``status`` which produced an invalid URL; it is removed here.
        """
        return (
            'https://movie.douban.com/subject/{id}/comments'
            '?start={start}&limit=20&sort=new_score'
            '&status=P&percent_type={type}&comments_only=1'
        ).format(
            id=str(self.movie_id),
            start=str(self.start),
            type=self.type,
        )

    def _random_UA(self):
        # Rotate the User-Agent on every request to look less bot-like.
        self.headers['User-Agent'] = random.choice(USERAGENT)

    def _get(self):
        """GET the API endpoint.

        :returns: the HTML fragment embedded in the JSON payload, or ''
                  on any network / JSON / ban error (best-effort crawl).
        """
        self._random_UA()
        html = ''
        try:
            # timeout so a banned/slow request cannot hang a pool thread
            # forever (requests has no default timeout).
            res = requests.get(self.url, cookies=self.cookies,
                               headers=self.headers, timeout=10)
            html = res.json()['html']
        except Exception:
            print('IP被封,请使用代理IP')
        print('正在获取{} 开始的记录'.format(self.start))
        return html

    def _parse(self):
        """Populate the parallel result lists from the fetched page.

        On an empty response every list stays empty, so the save_*
        methods write nothing instead of crashing on ``etree.HTML('')``.
        """
        self.id = []
        self.username = []
        self.user_center = []
        self.vote = []
        self.star = []
        self.time = []
        self.content = []

        res = self._get()
        if not res:
            # _get failed (ban / bad JSON); nothing to parse.
            return

        dom = etree.HTML(res)
        # comment id
        self.id = dom.xpath(self.base_node + '/@data-cid')
        # reviewer name
        self.username = dom.xpath(self.base_node + '/div[@class="avatar"]/a/@title')
        # reviewer profile URL
        self.user_center = dom.xpath(self.base_node + '/div[@class="avatar"]/a/@href')
        # upvote count
        self.vote = dom.xpath(self.base_node + '//span[@class="votes"]/text()')
        # star rating (title attribute of the rating span)
        self.star = dom.xpath(self.base_node + '//span[contains(@class,"rating")]/@title')
        # publication time (the trailing space in "comment-time " is real)
        self.time = dom.xpath(self.base_node + '//span[@class="comment-time "]/@title')
        # short comment body text
        self.content = dom.xpath(self.base_node + '//span[@class="short"]/text()')

    def save_to_database(self):
        """Parse the page and insert every comment into the database.

        BUG FIX: the original returned 'finish' and closed the session
        inside the loop, so only the FIRST record was ever saved. Now all
        records are processed, the session is closed once, and 'finish'
        is returned at the end.
        """
        self._parse()
        try:
            for i in range(len(self.id)):
                try:
                    comment = Comments(
                        id=int(self.id[i]),
                        username=self.username[i],
                        user_center=self.user_center[i],
                        vote=int(self.vote[i]),
                        star=self.star[i],
                        time=datetime.strptime(self.time[i], '%Y-%m-%d %H:%M:%S'),
                        content=self.content[i],
                    )
                    self.session.add(comment)
                    self.session.commit()
                except pymysql.err.IntegrityError:
                    # Duplicate primary key: skip the row, keep going.
                    print('数据重复,不做任何处理')
                    self.session.rollback()
                except Exception:
                    # Any other per-row failure: roll back and continue.
                    self.session.rollback()
        finally:
            # Close exactly once, after all rows were attempted.
            self.session.close()
        return 'finish'

    def save_to_csv(self):
        """Parse the page and append every comment to comment.csv.

        BUG FIX: the original opened the file in 'w' mode, so each of the
        ~100 concurrent tasks clobbered the output of the others. Rows are
        now appended under a class-wide lock, with newline='' as the csv
        module requires.
        """
        self._parse()
        with self._csv_lock:
            with open('comment.csv', 'a', encoding='utf-8', newline='') as f:
                writer = csv.writer(f, dialect='excel')
                for i in range(len(self.id)):
                    writer.writerow([
                        int(self.id[i]),
                        self.username[i],
                        self.user_center[i],
                        int(self.vote[i]),
                        self.time[i],
                        self.content[i],
                    ])
        return 'finish'
130
131
if __name__ == '__main__':
    # 4 categories x 25 pages x 20 comments == the ~2000-record API cap
    # described above; each page is fetched by one pool task.
    with ThreadPoolExecutor(max_workers=4) as executor:
        futures = []
        for percent_type in ['', 'h', 'm', 'l']:
            for page in range(25):
                fetcher = CommentFetcher(movie_id=26266893,
                                         start=page * 20,
                                         type=percent_type)
                futures.append(executor.submit(fetcher.save_to_csv))

        for f in as_completed(futures):
            try:
                # as_completed only yields finished futures, so the old
                # f.done() check was redundant; result() re-raises any
                # exception raised inside the worker.
                if f.result() == 'finish':
                    print('{} 成功保存数据'.format(str(f)))
            except Exception as e:
                # Surface worker failures instead of silently calling
                # cancel() on an already-finished future (a no-op).
                print('任务失败: {}'.format(e))
查看更多关于Python多线程豆瓣影评API接口爬虫的详细内容...
声明:本文来自网络,不代表【好得很程序员自学网】立场,转载请注明出处:http://haodehen.cn/did170483