How to implement a crawler with requests and lxml
The approach is as follows:
# the requests module fetches the page
# lxml's html module builds a selector from the response (i.e. parses it)
# from lxml import html
# import requests
# response = requests.get(url).content
# selector = html.fromstring(response)
# hrefs = selector.xpath("/html/body//div[@class='feed-item _j_feed_item']/a/@href")
# we use url = 'https://www.mafengwo.cn/gonglve/ziyouxing/2033.html' as the example
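Before the full script, here is a minimal, self-contained sketch of that fromstring/xpath pattern; the HTML snippet and class name are made up purely for illustration:

    # minimal demo: parse an HTML string and pull hrefs out with xpath
    # (the markup below is an invented stand-in for a real response body)
    from lxml import html

    doc = "<html><body><div class='feed-item'><a href='/a/1.html'>one</a></div></body></html>"
    selector = html.fromstring(doc)
    hrefs = selector.xpath("//div[@class='feed-item']/a/@href")
    print(hrefs)  # ['/a/1.html']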
# python 2.7
import requests
from lxml import html
import os
# get the urls of the sub-pages from the index page
def get_page_urls(url):
    response = requests.get(url).content
    # build the selector with lxml's html module
    selector = html.fromstring(response)
    urls = []
    for i in selector.xpath("/html/body//div[@class='feed-item _j_feed_item']/a/@href"):
        urls.append(i)
    return urls
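One hedged caveat: the extracted @href values may be site-relative rather than absolute. If they are, they need resolving against the page URL before being requested; a small sketch (in Python 2.7 urljoin lives in the urlparse module):

    # assumption: some hrefs may be relative paths like '/gonglve/...'
    from urlparse import urljoin  # Python 3: from urllib.parse import urljoin

    def to_absolute(base_url, hrefs):
        # urljoin leaves absolute urls untouched and resolves relative ones
        return [urljoin(base_url, h) for h in hrefs]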
# get title from a child page's html (div[@class='title'])
def get_page_a_title(url):
    '''url is ziyouxing's a@href'''
    response = requests.get(url).content
    selector = html.fromstring(response)
    # got the xpath via Chrome's dev tools --> /html/body//div[@class='title']/text()
    a_title = selector.xpath("/html/body//div[@class='title']/text()")
    return a_title
# build the page selector (via lxml's html module)
def get_selector(url):
    response = requests.get(url).content
    selector = html.fromstring(response)
    return selector
# analyzing the html with Chrome's developer tools shows that the text we need lives mainly in div[@class='l-topic'] and div[@class='p-section']
# get the required text content
def get_page_content(selector):
    # /html/body/div[2]/div[2]/div[1]/div[@class='l-topic']/p/text()
    page_title = selector.xpath("//div[@class='l-topic']/p/text()")
    # /html/body/div[2]/div[2]/div[1]/div[2]/div[15]/div[@class='p-section']/text()
    page_content = selector.xpath("//div[@class='p-section']/text()")
    return page_title, page_content
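Keep in mind that xpath('.../text()') returns a list of text nodes, not a single string, so the results need joining before they can be written to a file (the entry-point code below does exactly that). A helper along those lines, purely as a sketch:

    # join the text-node lists returned by xpath into one writable string
    def flatten_text(page_title, page_content):
        title = ''.join(page_title).strip()
        content = '\n'.join(p.strip() for p in page_content)
        return title + '\n' + content + '\n\n'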
# get the image urls on the page
def get_image_urls(selector):
    imagesrcs = selector.xpath("//img[@class='_j_lazyload']/@src")
    return imagesrcs
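The _j_lazyload class hints that these images are lazy-loaded. On many lazy-loading pages the real image URL sits in a data-* attribute while @src only holds a placeholder; whether that applies here is an assumption, but a defensive variant would look like:

    # assumption: lazy-loaded <img> tags may keep the real url in data-src
    def get_image_urls_safe(selector):
        srcs = []
        for img in selector.xpath("//img[@class='_j_lazyload']"):
            src = img.get('data-src') or img.get('src')  # prefer the data attribute
            if src:
                srcs.append(src)
        return srcs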
# get the image's title
def get_image_title(selector, num):
    # num starts from 2
    title_xpath = "/html/body/div[2]/div[2]/div[1]/div[2]/div[" + str(num) + "]/span[@class='img-an']/text()"
    if selector.xpath(title_xpath):  # xpath returns a list; empty means no title
        image_title = selector.xpath(title_xpath)[0]
    else:
        image_title = "map" + str(num)  # no title found, so make one up
    return image_title
# download the images
def downloadimages(selector, number):
    '''number is used for counting'''
    urls = get_image_urls(selector)
    num = 2
    amount = len(urls)
    for url in urls:
        image_title = get_image_title(selector, num)
        path = "/home/WorkSpace/tour/words/result" + str(number) + "/"
        if not os.path.exists(path):
            os.makedirs(path)  # create the result directory, not the file itself
        filename = path + image_title + ".jpg"
        print('downloading %s image %s' % (number, image_title))
        with open(filename, 'wb') as f:
            f.write(requests.get(url).content)
        num += 1
    print("downloaded %s images in total" % amount)
# entry point: start the crawler and save the fetched data to files
if __name__ == '__main__':
    url = 'https://www.mafengwo.cn/gonglve/ziyouxing/2033.html'
    urls = get_page_urls(url)
    # turn to get response from html
    number = 1
    for i in urls:
        selector = get_selector(i)
        # download images
        downloadimages(selector, number)
        # get text and write it into a file
        page_title, page_content = get_page_content(selector)
        # xpath returns lists of text nodes, so join them before writing
        result = ''.join(page_title) + '\n' + ''.join(page_content) + '\n\n'
        path = "/home/WorkSpace/tour/words/result" + str(number) + "/"
        if not os.path.exists(path):
            os.makedirs(path)
        filename = path + str(number) + ".txt"
        with open(filename, 'wb') as f:
            # text nodes come back as unicode; encode before writing bytes
            f.write(result.encode('utf-8'))
        print(result)
        number += 1
That wraps up the crawler. Always analyze the html structure carefully before crawling a page; some pages are generated by js. This page is fairly simple and needed no js handling; future posts will cover that topic.