Contents

Part 1: Getting Key Information from the Page

Part 2: Scraping the Page Data

Part 3: Full Source Code


Part 1: Getting Key Information from the Page

On the page you want to scrape, press F12 to open the browser's developer tools and inspect the page content.

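If you'd rather do the same inspection from Python instead of the browser, a minimal sketch is shown below. It assumes the page is publicly reachable and that the daily table uses the ul class "thrui" targeted by the scraping code in Part 2; the bare Mozilla/5.0 User-Agent here is a placeholder, and the site may still reject it, which is exactly what the header rotation in Part 2 addresses.

import urllib.request
from bs4 import BeautifulSoup

# Quick structural peek: fetch one month's page and print the first day's row,
# so you can see which <div> holds the date, temperatures, weather, and wind
req = urllib.request.Request("http://lishi.tianqi.com/wuhan/202210.html",
                             headers={"User-Agent": "Mozilla/5.0"})
html = urllib.request.urlopen(req).read()
soup = BeautifulSoup(html, "html.parser")
table = soup.select_one('ul[class="thrui"]')
if table is not None and table.select("li"):
    print(table.select("li")[0].prettify())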

Part 2: Scraping the Page Data

1. Import the modules

import requests
from bs4 import BeautifulSoup
import urllib.request
import random

2. Set request headers to avoid 403 Forbidden errors

# Rotate through a pool of User-Agent strings to avoid 403 Forbidden responses
my_headers = [
    "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/537.75.14",
    "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Win64; x64; Trident/6.0)",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11",
    "Opera/9.25 (Windows NT 5.1; U; en)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)",
    "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12",
    "Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9",
    "Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7",
    "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0",
]

3. Fetch the page content

# Fetch a page, picking a random User-Agent for each request
def get_content(url, headers):
    random_header = random.choice(headers)
    req = urllib.request.Request(url)
    req.add_header("User-Agent", random_header)
    req.add_header("Host", "lishi.tianqi.com")
    req.add_header("Referer", "http://lishi.tianqi.com/")
    content = urllib.request.urlopen(req).read()
    return content
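As a side note, requests is imported at the top but never used. The same fetch could be written with it, since requests bundles all headers into one dict and sets Host automatically; the sketch below is an alternative, not the author's code, and get_content_requests is a hypothetical name.

import random
import requests

def get_content_requests(url, headers):
    # Hypothetical requests-based equivalent of get_content()
    resp = requests.get(url,
                        headers={"User-Agent": random.choice(headers),
                                 "Referer": "http://lishi.tianqi.com/"},
                        timeout=10)
    resp.raise_for_status()  # raise on 403/404 instead of parsing an error page
    return resp.content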

4. Extract the target data and export it to CSV

# Links to three months of weather history
urls = ["http://lishi.tianqi.com/wuhan/202210.html",
        "http://lishi.tianqi.com/wuhan/202211.html",
        "http://lishi.tianqi.com/wuhan/202212.html"]

file = open('weather.csv', 'w')
for url in urls:
    response = get_content(url, my_headers)
    soup = BeautifulSoup(response, 'html.parser')
    weather_list = soup.select('ul[class="thrui"]')
    for weather in weather_list:
        # Each <li> is one day; each <div> inside it is one field
        for day in weather.select('li'):
            row = ""
            for div in day.select('div'):
                row += div.string + ','
            file.write(row + '\n')
file.close()
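Note that building each row by string concatenation breaks if a field ever contains a comma, and div.string returns None for nested tags, which would crash the += line. A more defensive variant using the standard csv module is sketched below, under the same assumption about the page structure:

import csv

with open('weather.csv', 'w', newline='', encoding='utf-8') as f:
    writer = csv.writer(f)  # handles quoting of commas inside fields
    for url in urls:
        soup = BeautifulSoup(get_content(url, my_headers), 'html.parser')
        for day in soup.select('ul[class="thrui"] li'):
            # get_text() tolerates nested markup where .string would be None
            writer.writerow(div.get_text(strip=True) for div in day.select('div'))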

Test:

After running the program, a weather.csv file is generated; copy it to your desktop and open it to check whether the data was scraped successfully.


The weather data from the page has been captured successfully.
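To sanity-check the file without leaving Python, you can print its first few rows (a throwaway snippet; adjust the encoding if your system wrote the file with a different locale default):

# Print the first five rows of weather.csv as a quick check
with open('weather.csv') as f:
    for i, line in enumerate(f):
        if i >= 5:
            break
        print(line.rstrip())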

Part 3: Full Source Code

import requests
from bs4 import BeautifulSoup
import urllib.request
import random

# Rotate through a pool of User-Agent strings to avoid 403 Forbidden responses
my_headers = [
    "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/537.75.14",
    "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Win64; x64; Trident/6.0)",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11",
    "Opera/9.25 (Windows NT 5.1; U; en)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)",
    "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12",
    "Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9",
    "Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7",
    "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0",
]

# Fetch a page, picking a random User-Agent for each request
def get_content(url, headers):
    random_header = random.choice(headers)
    req = urllib.request.Request(url)
    req.add_header("User-Agent", random_header)
    req.add_header("Host", "lishi.tianqi.com")
    req.add_header("Referer", "http://lishi.tianqi.com/")
    content = urllib.request.urlopen(req).read()
    return content

# Links to three months of weather history
urls = ["http://lishi.tianqi.com/wuhan/202210.html",
        "http://lishi.tianqi.com/wuhan/202211.html",
        "http://lishi.tianqi.com/wuhan/202212.html"]

file = open('weather.csv', 'w')
for url in urls:
    response = get_content(url, my_headers)
    soup = BeautifulSoup(response, 'html.parser')
    weather_list = soup.select('ul[class="thrui"]')
    for weather in weather_list:
        # Each <li> is one day; each <div> inside it is one field
        for day in weather.select('li'):
            row = ""
            for div in day.select('div'):
                row += div.string + ','
            file.write(row + '\n')
file.close()