import requests
from requests.adapters import HTTPAdapter
import re
from urllib import parse
import os
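
# Scrape stock.tuchong.com image search results for each keyword and download
# every picture into a folder named after that keyword.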
def getpiclist(kw):
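    """Search stock.tuchong.com for `kw` and return a list of unique image
    URLs, or the string "not find url" if the search request fails."""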
    # Request headers copied from a browser session so the search page responds normally
    headers = {
        'authority': 'stock.tuchong.com',
        'method': 'GET',
        'path': '/search?term=' + kw + '&use=0&source=extbaidudkey68',
        'scheme': 'https',
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'zh-CN,zh;q=0.9',
        'cache-control': 'max-age=0',
        'cookie': '_ga=GA1.2.286933693.1548990624; weilisessionid=e54ec13b6b6a18a62bf33ab9d0400623; wluuid=WLGEUST-970FA996-29E6-75FD-8FF6-A428AD814CCC; wlsource=extbaidudkey68; qimo_seosource_e7dfc0b0-b3b6-11e7-b58e-df773034efe4=%E5%85%B6%E4%BB%96%E7%BD%91%E7%AB%99; qimo_seokeywords_e7dfc0b0-b3b6-11e7-b58e-df773034efe4=%E6%9C%AA%E7%9F%A5; href=https%3A%2F%2Fstock.tuchong.com%2Fsearch%3Fterm%3D%25E5%25B8 Percent -b3b6-11e7-b58e-df773034efe4=79be0962-4966-11e9-9fd8-1d264daba2e8; webp_enabled=0; pageViewNum=4',
        'referer': 'https://tc.ftsm-vip.com/?source=extbaidudkey68&utm_source=extbaidudkey68',
        'upgrade-insecure-requests': '1',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
    }
    url = "https://stock.tuchong.com/free/search/?term=" + kw
    response = requests.get(url, headers=headers)
    if response.status_code == 200:
        # print(response.text)
        # Pull every imageId out of the JSON embedded in the search page
        picl = re.findall(r'"imageId":"(\d+)"', response.text)
        # print(piclist)
        piclist = []
        for pic in picl:
            # Build the full-size image URL and skip duplicates
            urls = "https://p3a.pstatp.com/weili/l/" + pic + ".jpg"
            if urls not in piclist:
                piclist.append(urls)
    else:
        piclist = "not find url"
    return piclist

def downloadpic(url, savepath, s):
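    """Fetch one image with the shared session `s` and write it to `savepath`;
    request errors are caught and reported instead of raised."""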
    try:
        # The image host expects browser-like headers
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'Host': 'p3a.pstatp.com',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
        }
        pic = s.get(url, timeout=5, headers=headers)
        # Use the last path segment (e.g. 12345.jpg) as the file name
        picname = url.rsplit("/", 1)[1]
        fp = open(savepath + picname, 'wb')
        fp.write(pic.content)
        fp.close()
    except requests.exceptions.ConnectionError:
        print(url, "[Error] The current picture cannot be downloaded")
    except requests.exceptions.ReadTimeout:
        print(url, "[Error] Timeout")
    except requests.exceptions.ChunkedEncodingError:
        print(url, "[Error] The remote host forcibly closed an existing connection")
    except requests.exceptions.RequestException as e:
        print(url, "[Error]", e)

def main():
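    """Build a retrying session per keyword, fetch the image URL list, and
    download each picture into its keyword folder."""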
words = ["Peach"] #The name to download
for word in words:
kw = parse.quote(str(word))
savepath = "./" + word + "/< span style="color: #800000;">"
if not os.path.exists(savepath):
os.makedirs(savepath)
s = requests.Session()
s.mount('http://< span style="color: #800000;">', HTTPAdapter(max_retries=2))
s.mount('https://< span style="color: #800000;">', HTTPAdapter(max_retries=2))
picturelist = getpiclist(kw)
if type(picturelist) is list:
for picurl in picturelist:
print(f"Downloading {word} picture {picturelist.index(picurl)}{picurl}")
downloadpic(picurl, savepath, s)
else:
print("Crawl completed")
break
if __name__ == "__main__":
main()