#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import requests
import os
from urlparse import urlsplit
from os.path import basename

def getHtml(url):
    session = requests.Session()
    # Simulate a normal browser visit
    header = {
        'User-Agent': "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
        'Accept-Encoding': 'gzip, deflate'}
    res = session.get(url, headers=header)
    if res.status_code == 200:
        content = res.content
    else:
        content = ''
    return content

def mkdir(path):
    if not os.path.exists(path):
        print 'Creating folder:', path
        os.makedirs(path)
        return True
    else:
        print 'Images are stored in:', os.getcwd() + os.sep + path
        return False

def download_pic(img_lists, dir_name):
    print "There are {num} images in total".format(num=len(img_lists))
    for image_url in img_lists:
        response = requests.get(image_url, stream=True)
        if response.status_code == 200:
            image = response.content
        else:
            continue
        file_name = dir_name + os.sep + basename(urlsplit(image_url)[2])
        try:
            # the with-block closes the file for us, so no explicit close is needed
            with open(file_name, "wb") as picture:
                picture.write(image)
        except IOError:
            print "IO Error"
            return
        print "Finished downloading {pic_name}!".format(pic_name=file_name)

def getAllImg(html):
    # Use a regular expression to filter the image URLs out of the page source
    # reg = r'data-actualsrc="(.*?)">'
    reg = r'https://pic\d\.zhimg\.com/[a-fA-F0-9]{5,32}_\w+\.jpg'
    imgre = re.compile(reg, re.S)
    tmp_list = imgre.findall(html)  # every image URL found anywhere in the page
    # drop the avatars and de-duplicate; keep the data-original content
    tmp_list = list(set(tmp_list))  # de-duplicate
    imglist = []
    for item in tmp_list:
        if item.endswith('r.jpg'):
            imglist.append(item)
    print 'num : %d' % (len(imglist))
    return imglist

if __name__ == '__main__':
    question_id = 35990613
    zhihu_url = "https://www.zhihu.com/question/{qid}".format(qid=question_id)
    html_content = getHtml(zhihu_url)
    path = 'zhihu_pic'
    mkdir(path)  # create the local folder
    img_list = getAllImg(html_content)  # collect the image URLs
    download_pic(img_list, path)  # save the images
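
The first version only sees images embedded in the HTML that Zhihu serves for the question page itself, so it misses the answers that are normally loaded by clicking "more". The second version below drives that load-more mechanism directly by POSTing to the QuestionAnswerListV2 node, one page of answers at a time. A minimal sketch of a single page request, assuming the endpoint and parameters used in the script below:

import json
import requests

postdata = {
    'method': 'next',
    # params is itself a JSON string: which question, the page size, and the offset
    'params': json.dumps({'url_token': 26037846, 'pagesize': '10', 'offset': 10}),
}
page = requests.post('https://www.zhihu.com/node/QuestionAnswerListV2',
                     headers={'User-Agent': 'Mozilla/5.0'},  # any browser-like UA
                     data=postdata)
answers = json.loads(page.text)['msg']  # a list of HTML fragments, one per answer

The full script repeats this request with a growing offset until the msg list comes back empty.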

#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import json
import requests
import os
from urlparse import urlsplit
from os.path import basename


headers = {
    'User-Agent': "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
    'Accept-Encoding': 'gzip, deflate'}

def mkdir(path):
    if not os.path.exists(path):
        print 'Creating folder:', path
        os.makedirs(path)
        return True
    else:
        print 'Images are stored in:', os.getcwd() + os.sep + path
        return False

def download_pic(img_lists, dir_name):
    print "There are {num} images in total".format(num=len(img_lists))
    for image_url in img_lists:
        response = requests.get(image_url, stream=True)
        if response.status_code == 200:
            image = response.content
        else:
            continue
        file_name = dir_name + os.sep + basename(urlsplit(image_url)[2])
        try:
            # the with-block closes the file for us, so no explicit close is needed
            with open(file_name, "wb") as picture:
                picture.write(image)
        except IOError:
            print "IO Error"
            continue
        print "Finished downloading {pic_name}!".format(pic_name=file_name)

def get_image_url(qid, headers):
    # Use a regular expression to filter the image URLs out of each answer
    # reg = r'data-actualsrc="(.*?)">'
    tmp_url = "https://www.zhihu.com/node/QuestionAnswerListV2"
    size = 10
    image_urls = []
    session = requests.Session()
    # Loop to automate the "more" clicks needed to fetch all answers; each page is one batch of answers.
    while True:
        postdata = {'method': 'next', 'params': '{"url_token":' +
                    str(qid) + ',"pagesize": "10",' + '"offset":' + str(size) + "}"}
        page = session.post(tmp_url, headers=headers, data=postdata)
        ret = json.loads(page.text)
        answers = ret['msg']
        size += 10
        if not answers:
            print "Done collecting image URLs, pages: ", (size - 10) / 10
            return image_urls
        # reg = r'https://pic\d.zhimg.com/[a-fA-F0-9]{5,32}_\w+.jpg'
        imgreg = re.compile('data-original="(.*?)"', re.S)
        for answer in answers:
            tmp_list = []
            url_items = re.findall(imgreg, answer)
            for item in url_items:  # strip the escape characters '\\' from the image URLs
                image_url = item.replace("\\", "")
                tmp_list.append(image_url)
            # drop the avatars and de-duplicate; keep the data-original content
            tmp_list = list(set(tmp_list))  # de-duplicate
            for item in tmp_list:
                if item.endswith('r.jpg'):
                    print item
                    image_urls.append(item)
        print 'size: %d, num : %d' % (size, len(image_urls))

if __name__ == '__main__':
    question_id = 26037846
    zhihu_url = "https://www.zhihu.com/question/{qid}".format(qid=question_id)
    path = 'zhihu_pic'
    mkdir(path)  # create the local folder
    img_list = get_image_url(question_id, headers)  # collect the image URLs
    download_pic(img_list, path)  # save the images
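
Both versions pass stream=True to requests.get but then read response.content, which buffers the whole image in memory anyway. If the images are large, a chunked write keeps memory flat; here is a minimal sketch using requests' iter_content (the download_one name is just for illustration):

import requests

def download_one(image_url, file_name):
    # Stream the response and write it out in 1 KiB chunks instead of
    # buffering the entire image via response.content.
    response = requests.get(image_url, stream=True)
    if response.status_code != 200:
        return False
    with open(file_name, 'wb') as picture:
        for chunk in response.iter_content(chunk_size=1024):
            if chunk:
                picture.write(chunk)
    return True
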
That covers how to use requests in Python to grab the most valuable content from Zhihu. Did you pick up some new knowledge or skills? If you would like to learn more or broaden your knowledge, feel free to follow our industry news channel.