Practicing the ssr3 case on the Scrape Center scraping platform

The ssr3 case site is protected by HTTP Basic Authentication, which makes it a good exercise for HTTP authentication. Both the username and the password are admin.

Part of the ssr series.
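As background, HTTP Basic Authentication is very simple: the client base64-encodes username:password and sends the result in an Authorization request header. A minimal sketch of what any client (browser or script) ends up sending:

import base64

# Basic Auth just base64-encodes "username:password" into a header value;
# there is no encryption, so it is only safe over HTTPS.
token = base64.b64encode(b'admin:admin').decode('ascii')
print(f'Authorization: Basic {token}')
# Output: Authorization: Basic YWRtaW46YWRtaW4=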

Scraping targets

  • Detail page URL
  • Movie title
  • Rating
  • Plot summary

Local storage

  • CSV file

Authentication

Visiting https://ssr3.scrape.center in a browser pops up an authentication dialog:
[Screenshot ssr3.png: the browser's Basic Auth login prompt]

Accessing without credentials

If a crawler requests the page directly:

import requests

url = 'https://ssr3.scrape.center'

response = requests.get(url)
print(response.text)

then the returned result is as follows:

<html>
<head><title>401 Authorization Required</title></head>
<body>
<center><h1>401 Authorization Required</h1></center>
<hr><center>nginx</center>
</body>
</html>

This means the request was rejected with HTTP status 401 because it carried no authorization credentials.
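This is easy to confirm programmatically. Along with the 401 status code, the server sends a WWW-Authenticate response header, which is what tells the browser to pop up the login dialog (the exact realm string depends on the server configuration):

import requests

response = requests.get('https://ssr3.scrape.center')
print(response.status_code)                      # 401
print(response.headers.get('WWW-Authenticate'))  # e.g. Basic realm="..." (server-dependent)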

Accessing with credentials

Authenticating with urllib

The authentication code given by Cui Qingcai:

from urllib.request import HTTPBasicAuthHandler, HTTPPasswordMgrWithDefaultRealm, build_opener
from urllib.error import URLError


username = 'admin'
password = 'admin'
url = 'https://ssr3.scrape.center'

p = HTTPPasswordMgrWithDefaultRealm()
p.add_password(None, url, username, password)
auth_handler = HTTPBasicAuthHandler(p)
opener = build_opener(auth_handler)

try:
    result = opener.open(url)
    html = result.read().decode('utf-8')
    print(html)
except URLError as e:
    print(e.reason)
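For a one-off request, the same effect can be had without the handler chain by attaching the Authorization header by hand. This is just a sketch of the low-level equivalent; the handler-based version above is preferable when many requests share the same credentials:

import base64
from urllib.request import Request, urlopen

url = 'https://ssr3.scrape.center'
token = base64.b64encode(b'admin:admin').decode('ascii')
request = Request(url, headers={'Authorization': f'Basic {token}'})
with urlopen(request) as response:
    print(response.read().decode('utf-8')[:200])  # first 200 characters of the page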

Authenticating with requests

With requests you only need to pass a (username, password) tuple to the auth parameter:

import requests

url = 'https://ssr3.scrape.center'
res = requests.get(url, auth=('admin', 'admin'))
print(res.text)
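The tuple is shorthand for requests.auth.HTTPBasicAuth, so the call above is equivalent to:

import requests
from requests.auth import HTTPBasicAuth

url = 'https://ssr3.scrape.center'
res = requests.get(url, auth=HTTPBasicAuth('admin', 'admin'))
print(res.status_code)  # 200 once the credentials are accepted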

Full code

Scrape all the movies and save them to a CSV file.

urllib version

import re
import csv
import time
from urllib.error import URLError
from urllib.request import HTTPBasicAuthHandler, HTTPPasswordMgrWithDefaultRealm, build_opener


def match(mode: str) -> list:
    # Run a regex over the module-level html and return all matches.
    return re.findall(mode, html, re.S)


def auth(url):
    username = 'admin'
    password = 'admin'

    p = HTTPPasswordMgrWithDefaultRealm()
    p.add_password(None, url, username, password)
    auth_handler = HTTPBasicAuthHandler(p)
    opener = build_opener(auth_handler)

    try:
        result = opener.open(url)
        html = result.read().decode('utf-8')
        time.sleep(1)  # pause 1 second so requests are not sent too fast
        return html
    except URLError as e:
        print(e.reason)


def writer_csv(data_list):
    '''
    Append one row of scraped data to data.csv.
    '''
    with open('./data.csv', 'a+', newline='', encoding='utf-8-sig') as f:
        writer = csv.writer(f)
        writer.writerow(data_list)


def get_detail(detail_url_list):
    '''
    Scrape the URL, movie title, rating, and plot summary of each detail page.
    '''
    global html
    for detail in detail_url_list:
        detail_url = f'https://ssr3.scrape.center{detail}'
        print(f'Scraping detail page {detail_url}')
        html = auth(detail_url)
        movies_name = match(r'class="m-b-sm">(.*?)</h2>')
        rating = match(r'm-b-n-sm">\n *(.*?)</p>')
        plot_summary = match(r'<p data-v-63864230="">\n *(.*?)\n *</p></div>')

        data_list = [
            detail_url,
            movies_name[0],
            rating[0],
            plot_summary[0]
        ]
        writer_csv(data_list)


def get_list_page(page):
    '''
    Scrape the detail-page links from one list page for further processing.
    '''
    global html
    page_url = f'https://ssr3.scrape.center/page/{page}'
    html = auth(page_url)
    detail_url_list = match(r'data-v-7f856186="" href="(.*?)"')
    get_detail(detail_url_list)


def main():
    for page in range(1, 11):
        get_list_page(page)


if __name__ == '__main__':
    main()
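One optional refinement (my own addition; the column names are chosen for illustration): write a header row once before scraping so the CSV is self-describing:

import csv

# Run once before main(); mode 'w' truncates any previous data.csv.
with open('./data.csv', 'w', newline='', encoding='utf-8-sig') as f:
    csv.writer(f).writerow(['detail_url', 'movie_name', 'rating', 'plot_summary'])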

requests version

import re
import csv
import time

import requests


headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36 Edg/99.0.1150.36'
}


def match(mode: str) -> list:
    # Run a regex over the module-level html and return all matches.
    return re.findall(mode, html, re.S)


def writer_csv(data_list):
    '''
    Append one row of scraped data to data.csv.
    '''
    with open('./data.csv', 'a+', newline='', encoding='utf-8-sig') as f:
        writer = csv.writer(f)
        writer.writerow(data_list)


def get_detail(detail_url_list):
    '''
    Scrape the URL, movie title, rating, and plot summary of each detail page.
    '''
    global html
    for detail in detail_url_list:
        detail_url = f'https://ssr3.scrape.center{detail}'
        print(f'Scraping detail page {detail_url}')
        time.sleep(1)  # pause 1 second so requests are not sent too fast
        response = requests.get(
            detail_url, headers=headers, auth=('admin', 'admin'))
        html = response.text
        movies_name = match(r'class="m-b-sm">(.*?)</h2>')
        rating = match(r'm-b-n-sm">\n *(.*?)</p>')
        plot_summary = match(r'<p data-v-63864230="">\n *(.*?)\n *</p></div>')

        data_list = [
            detail_url,
            movies_name[0],
            rating[0],
            plot_summary[0]
        ]
        writer_csv(data_list)


def get_list_page(page):
    '''
    Scrape the detail-page links from one list page for further processing.
    '''
    global html
    time.sleep(1)  # pause 1 second so requests are not sent too fast
    page_url = f'https://ssr3.scrape.center/page/{page}'
    response = requests.get(
        page_url, headers=headers, auth=('admin', 'admin'))
    html = response.text
    detail_url_list = match(r'data-v-7f856186="" href="(.*?)"')
    get_detail(detail_url_list)


def main():
    for page in range(1, 11):
        get_list_page(page)


if __name__ == '__main__':
    main()
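A possible refinement for the requests version (a sketch, not part of the original code): a requests.Session reuses the underlying TCP connection and lets the credentials and headers be set once for all ten list pages and their detail pages:

import requests

session = requests.Session()
session.auth = ('admin', 'admin')                      # applied to every request
session.headers.update({'User-Agent': 'Mozilla/5.0'})  # placeholder UA string

response = session.get('https://ssr3.scrape.center/page/1')
print(response.status_code)  # 200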

Output

Part of data.csv:

https://ssr3.scrape.center/detail/1,霸王别姬 - Farewell My Concubine,9.5,影片借一出《霸王别姬》的京戏,牵扯出三个人之间一段随时代风云变幻的爱恨情仇。段小楼(张丰毅 饰)与程蝶衣(张国荣 饰)是一对打小一起长大的师兄弟,两人一个演生,一个饰旦,一向配合天衣无缝,尤其一出《霸王别姬》,更是誉满京城,为此,两人约定合演一辈子《霸王别姬》。但两人对戏剧与人生关系的理解有本质不同,段小楼深知戏非人生,程蝶衣则是人戏不分。段小楼在认为该成家立业之时迎娶了名妓菊仙(巩俐 饰),致使程蝶衣认定菊仙是可耻的第三者,使段小楼做了叛徒,自此,三人围绕一出《霸王别姬》生出的爱恨情仇战开始随着时代风云的变迁不断升级,终酿成悲剧。
https://ssr3.scrape.center/detail/2,这个杀手不太冷 - Léon,9.5,里昂(让·雷诺 饰)是名孤独的职业杀手,受人雇佣。一天,邻居家小姑娘马蒂尔德(纳塔丽·波特曼 饰)敲开他的房门,要求在他那里暂避杀身之祸。原来邻居家的主人是警方缉毒组的眼线,只因贪污了一小包毒品而遭恶警(加里·奥德曼 饰)杀害全家的惩罚。马蒂尔德 得到里昂的留救,幸免于难,并留在里昂那里。里昂教小女孩使枪,她教里昂法文,两人关系日趋亲密,相处融洽。 女孩想着去报仇,反倒被抓,里昂及时赶到,将女孩救回。混杂着哀怨情仇的正邪之战渐次升级,更大的冲突在所难免……
https://ssr3.scrape.center/detail/3,肖申克的救赎 - The Shawshank Redemption,9.5,20世纪40年代末,小有成就的青年银行家安迪(蒂姆·罗宾斯 饰)因涉嫌杀害妻子及她的情人而锒铛入狱。在这座名为肖申克的监狱内,希望似乎虚无缥缈,终身监禁的惩罚无疑注定了安迪接下来灰暗绝望的人生。未过多久,安迪尝试接近囚犯中颇有声望的瑞德(摩根·弗里曼 饰),请求对方帮自己搞来小锤子。以此为契机,二人逐渐熟稔,安迪也仿佛在鱼龙混杂、罪恶横生、黑白混淆的牢狱中找到属于自己的求生之道。他利用自身的专业知识,帮助监狱管理层逃税、洗黑钱,同时凭借与瑞德的交往在犯人中间也渐渐受到礼遇。表面看来,他已如瑞德那样对那堵高墙从憎恨转变为处之泰然,但是对自由的渴望仍促使他朝着心中的希望和目标前进。而关于其罪行的真相,似乎更使这一切朝前推进了一步……
...
...