Let's start by looking at the example code:

from time import sleep

import faker
import requests
from lxml import etree

fake = faker.Faker()

base_url = "http://angelimg.spbeen"

def get_next_link(url):
    # Parse the current page and return the absolute URL of the "next" link, or False if there is none
    content = downloadHtml(url)
    html = etree.HTML(content)
    next_url = html.xpath("//a[@class='ch next']/@href")
    if next_url:
        return base_url + next_url[0]
    else:
        return False

def downloadHtml(url):
    # Fetch a page with a random User-Agent and a fixed Referer
    user_agent = fake.user_agent()
    headers = {'User-Agent': user_agent, "Referer": "http://angelimg.spbeen/"}
    response = requests.get(url, headers=headers)
    return response.text

def getImgUrl(content):
    # Extract the image URL and the page title from the article page
    html = etree.HTML(content)
    img_url = html.xpath('//*[@id="content"]/a/img/@src')
    title = html.xpath(".//div[@class='article']/h2/text()")
    return img_url[0], title[0]

def saveImg(title, img_url):
    # Download the image and write it to disk, named after the page title
    if img_url is not None and title is not None:
        with open("txt/" + str(title) + ".jpg", 'wb') as f:
            user_agent = fake.user_agent()
            headers = {'User-Agent': user_agent, "Referer": "http://angelimg.spbeen/"}
            content = requests.get(img_url, headers=headers)
            # request_view(content)
            f.write(content.content)

def request_view(response):
    # Debug helper: dump the response to a temporary HTML file and open it in the browser
    import webbrowser
    request_url = response.url
    # Inject a <base> tag pointing at the requested URL so relative links still resolve
    # when the saved page is opened locally (the exact tag strings here are an assumption)
    base_url = '<head><base href="%s">' % request_url
    base_url = base_url.encode()
    content = response.content.replace(b"<head>", base_url)
    tem_html = open('tmp.html', 'wb')
    tem_html.write(content)
    tem_html.close()
    webbrowser.open_new_tab('tmp.html')

def crawl_img(url):
    content = downloadHtml(url)
    res = getImgUrl(content)
    title = res[1]
    img_url = res[0]
    saveImg(title, img_url)

if __name__ == "__main__":
    url = "http://angelimg.spbeen/ang/4968/1"
    while url:
        print(url)
        crawl_img(url)
        url = get_next_link(url)
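
The while url: loop above is what makes the crawler move on automatically: each pass downloads one page, saves its image, and then asks get_next_link() for the following URL until no "next" link remains. Note that the script imports sleep but never calls it; a minimal sketch of the same loop with a polite delay between requests (the crawl_all name and the one-second default are assumptions for illustration, not part of the original code) could look like this:

def crawl_all(start_url, delay=1.0):
    # Follow "next page" links until get_next_link() returns False
    url = start_url
    while url:
        print(url)
        crawl_img(url)            # download and save the image on the current page
        sleep(delay)              # pause between requests so the target site is not hammered
        url = get_next_link(url)

# Example: crawl_all("http://angelimg.spbeen/ang/4968/1")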

Here is how a Python crawler automatically loads the next page of text in a loop:

from bs4 import BeautifulSoup
import requests
import time
from lxml import etree
import os

# This demo shows how to use BeautifulSoup to scrape some text
def start():
    # Send the request
    html = requests.get('http://www.baidu')
    # Set the encoding
    html.encoding = html.apparent_encoding
    # Create the soup object
    soup = BeautifulSoup(html.text, 'html.parser')
    print(type(soup))
    print('Printing the elements')
    print(soup.prettify())
    # Store the title; there is no completion hint for this chained access, it is used directly
    title = soup.head.title.string
    print(title)
    # Write the text to a file
    with open(r'C:/Users/a/Desktop/a.txt', 'w') as f:
        f.write(title)
    print(time.localtime())

url_2 = 'http://news.gdzjdaily/zjxw/politics/sz_4.shtml'

def get_html_from_bs4(url):
    # response = requests.get(url, headers=data, proxies=ip).content.decode('utf-8')
    response = requests.get(url).content.decode('utf-8')
    soup = BeautifulSoup(response, 'html.parser')
    # The ninth <a> inside #displaypagenum holds the "next page" href
    next_page = soup.select('#displaypagenum a:nth-of-type(9)')[0].get('href')
    print(next_page)
    next2 = 'http://news.gdzjdaily/zjxw/politics/' + next_page

def get_html_from_etree(url):
    response = requests.get(url).content.decode('utf-8')
    html = etree.HTML(response)
    next_page = html.xpath('.//a[@class="PageNum"][8]/@href')[0]
    print(next_page)
    # next2 = 'http://news.gdzjdaily/zjxw/politics/' + next_page

get_html_from_etree(url_2)

if __name__ == '__main__':
    start()
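
get_html_from_bs4 and get_html_from_etree above only extract and print the next-page link once. To get the same automatic "next page" loop as in the first example, that extraction has to be fed back into the request until no further link is found. The sketch below builds on the same XPath; the follow_pages name, the max_pages safety limit, and the urljoin-based handling of relative hrefs are assumptions for illustration rather than part of the original code:

from urllib.parse import urljoin

def follow_pages(start_url, max_pages=10):
    # Repeatedly fetch a page, pull out the "next page" href, and continue from there
    url = start_url
    for _ in range(max_pages):
        print(url)
        response = requests.get(url).content.decode('utf-8')
        html = etree.HTML(response)
        next_page = html.xpath('.//a[@class="PageNum"][8]/@href')
        if not next_page:
            break
        # The href may be relative, so resolve it against the current page's URL
        url = urljoin(url, next_page[0])

# Example: follow_pages(url_2)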

That concludes this article on implementing next-page fetching in a Python crawler. For more on getting the next page with a Python crawler, search the earlier articles or keep browsing the related articles, and we hope you will continue to support us.
