The original code had a small problem; after fixing it, I managed to add multithreading. Running each version for one minute, the multithreaded one crawled 120 more images, so the efficiency gain is real.
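For reference, here is one minimal way the one-minute comparison could be measured. This is only a sketch: count_downloads is a hypothetical helper, not part of either crawler, and it assumes the crawler is already running in another terminal and saving into D:/妹子图.

import os
import time

def count_downloads(dirs="D:/妹子图", seconds=60):
    # Hypothetical helper: count how many new files appear in the download
    # folder over a fixed window while the crawler runs separately.
    before = len(os.listdir(dirs))
    time.sleep(seconds)
    return len(os.listdir(dirs)) - before

if __name__ == '__main__':
    print("images saved in one minute:", count_downloads())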
First, the plain single-threaded version:
import requests
import os
import re
import random

dirs = "D:/妹子图"
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 UBrowser/6.2.4094.1 Safari/537.36'}

def mkdir():
    # Create the download directory if needed, then switch into it
    if not os.path.exists(dirs):
        os.mkdir(dirs)
        os.chdir(dirs)
        return True
    else:
        print("Download folder already exists")
        os.chdir(dirs)
        return False

def get_max_page():
    # Read the current page number shown on the listing page (the last page)
    url = "https://www.mzitu.com/zipai/"
    r = requests.get(url, headers=headers)
    result = re.findall("<span aria-current='page' class='page-numbers current'>(.*?)</span>", r.text, re.S)
    return result[0]

def find_onepage_imgs(url):
    # Return a list of (submission time, image URL) tuples for one comment page
    r = requests.get(url, headers=headers)
    result = re.findall('<div class="comment-meta commentmetadata"><a href=".*?">(.*?)</a>.*?</div>.*?<p><img class="lazy".*?data-original="(.*?)".*?</p>', r.text, re.S)
    # print(result[0][0].split())
    return result

def download_img(img, path, i):
    # Append a small random number so images submitted at the same time do not overwrite each other
    with open(path + str(int(random.random() * 100)) + ".jpg", 'wb+') as f:
        print("Downloading image from page " + str(i) + ", submitted at " + path)
        f.write(img.content)

if __name__ == '__main__':
    mkdir()
    print(os.getcwd())
    max_page = get_max_page()
    for i in range(int(max_page), 1, -1):
        onepage_list = find_onepage_imgs("http://www.mzitu.com/zipai/comment-page-" + str(i) + "/#comments")
        for j in onepage_list:
            # Build a compact timestamp string from the submission time to use as the filename prefix
            path = j[0].split()
            path = path[0][0:4] + path[0][5:7] + path[0][8:10] + path[1] + path[2][0:2] + path[2][3:5]
            img = requests.get(j[1])
            download_img(img, path, i)
One thing I have to mention: previously the image filename was the submission time shown on the site, and since submission times can repeat, I had to check whether a filename already existed and append an index to it. Now I simply tack a random number onto the end of the filename, which saves all that trouble. A rough sketch of that older duplicate check is shown below.
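The following is only a sketch of that kind of check, not the original code; unique_name is a hypothetical helper. The random-suffix approach used above skips the existence check entirely, at the price of a small residual collision chance, since the suffix only ranges from 0 to 99.

import os

def unique_name(path):
    # Hypothetical helper: if "<path>.jpg" is taken, fall back to
    # "<path>_1.jpg", "<path>_2.jpg", ... until a free name is found.
    name = path + ".jpg"
    n = 1
    while os.path.exists(name):
        name = path + "_" + str(n) + ".jpg"
        n += 1
    return name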
Below is the multithreaded crawler:
import requests
import os
import re
import random
import threading

dirs = "D:/妹子图"
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 UBrowser/6.2.4094.1 Safari/537.36'}

def mkdir():
    # Create the download directory if needed, then switch into it
    if not os.path.exists(dirs):
        os.mkdir(dirs)
        os.chdir(dirs)
        return True
    else:
        print("Download folder already exists")
        os.chdir(dirs)
        return False

def get_max_page():
    # Read the current page number shown on the listing page (the last page)
    url = "https://www.mzitu.com/zipai/"
    r = requests.get(url, headers=headers)
    result = re.findall("<span aria-current='page' class='page-numbers current'>(.*?)</span>", r.text, re.S)
    return result[0]

def find_onepage_imgs(url):
    # Return a list of (submission time, image URL) tuples for one comment page
    r = requests.get(url, headers=headers)
    result = re.findall(
        '<div class="comment-meta commentmetadata"><a href=".*?">(.*?)</a>.*?</div>.*?<p><img class="lazy".*?data-original="(.*?)".*?</p>',
        r.text, re.S)
    # print(result[0][0].split())
    return result

def download_img(img, path, i):
    # Append a small random number so images submitted at the same time do not overwrite each other
    with open(path + str(int(random.random() * 100)) + ".jpg", 'wb+') as f:
        print("Downloading image from page " + str(i) + ", submitted at " + path)
        f.write(img.content)

if __name__ == '__main__':
    mkdir()
    print(os.getcwd())
    max_page = get_max_page()
    for i in range(int(max_page), 1, -1):
        onepage_list = find_onepage_imgs("http://www.mzitu.com/zipai/comment-page-" + str(i) + "/#comments")
        threads = []
        for j in onepage_list:
            # Build a compact timestamp string from the submission time to use as the filename prefix
            path = j[0].split()
            path = path[0][0:4] + path[0][5:7] + path[0][8:10] + path[1] + path[2][0:2] + path[2][3:5]
            img = requests.get(j[1])
            # One worker thread per image on this page
            t = threading.Thread(target=download_img, args=(img, path, i))
            threads.append(t)
        # Start every thread for this page, then wait for all of them before
        # moving on; iterate over the thread objects so the page counter i is untouched
        for t in threads:
            t.start()
        for t in threads:
            t.join()
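One thing worth noting about the version above: the images are still fetched with requests.get in the main loop, so only the printing and the file writes run in the worker threads. As a variation, the fetch itself can be moved into the workers and the one-thread-per-image pattern replaced by a bounded pool. This is only a sketch of that alternative, reusing headers, find_onepage_imgs and download_img from above; fetch_and_save and crawl_page_pooled are hypothetical names, and this is not the code used for the one-minute comparison.

from concurrent.futures import ThreadPoolExecutor

def fetch_and_save(img_url, path, page_no):
    # Fetch the image inside the worker so the network requests overlap too
    img = requests.get(img_url, headers=headers)
    download_img(img, path, page_no)

def crawl_page_pooled(page_url, page_no, max_workers=8):
    # Hand every image on one comment page to a bounded thread pool
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        for submitted, img_url in find_onepage_imgs(page_url):
            parts = submitted.split()
            path = parts[0][0:4] + parts[0][5:7] + parts[0][8:10] + parts[1] + parts[2][0:2] + parts[2][3:5]
            pool.submit(fetch_and_save, img_url, path, page_no)

A bounded pool also keeps the number of simultaneous connections predictable instead of spawning one thread per image on a busy page.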