Python爬虫实现爬取京东手机页面的图片(实例代码)
实例如下所示:
__author__ = 'FredZhao'

import os
from urllib.request import urlretrieve

import requests
from bs4 import BeautifulSoup


class Picture():
    """Crawl JD.com's mobile-phone listing pages and download the product images."""

    def __init__(self):
        # Browser-like User-Agent so jd.com serves the normal page
        # instead of rejecting the scripted request.
        self.headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36'}
        # Phone-category listing URL; get_img() appends the page number.
        self.base_url = 'https://list.jd.com/list.html?cat=9987,653,655&page='
        # Images are saved in a folder next to this script.
        self.base_path = os.path.dirname(__file__)

    def makedir(self, name):
        """Create directory `name` under base_path (if missing) and chdir into it.

        NOTE(review): get_img() passes bare filenames to urlretrieve(), so the
        downloads depend on the working directory set here.
        """
        path = os.path.join(self.base_path, name)
        if not os.path.exists(path):
            os.makedirs(path)
            print("File has been created.")
        else:
            print('OK! The file is existed. You do not need create a new one.')
        os.chdir(path)

    def request(self, url):
        """GET `url` with the browser headers and return the requests Response."""
        r = requests.get(url, headers=self.headers)
        return r

    def get_img(self, page):
        """Download every product image found on listing page number `page`."""
        r = self.request(self.base_url + str(page))
        plist = BeautifulSoup(r.text, 'lxml').find('div', id='plist')
        items = plist.find_all('li', class_='gl-item')
        print(len(items))
        self.makedir('pictures')
        for num, item in enumerate(items, start=1):
            print(num)
            img = item.find('div', class_='p-img').find('img')
            print('This is %s picture' % num)
            # JD lazy-loads most thumbnails: the real URL is either in 'src'
            # or, for below-the-fold items, in 'data-lazy-img'.  'src' wins
            # when both are present (same preference as the original code).
            src = img.get('src') or img.get('data-lazy-img')
            if src:
                url = 'https:' + src
                file_name = src.split('/')[-1]
                urlretrieve(url, filename=file_name)


if __name__ == '__main__':
    picture = Picture()
    for i in range(2):  # number of listing pages to crawl
        picture.get_img(i + 1)
以上这篇Python爬虫实现爬取京东手机页面的图片(实例代码)就是小编分享给大家的全部内容了,希望能给大家一个参考,也希望大家多多支持毛票票。