1 Basics

There are generally four steps; note that the response body may be text or JSON.

import requests

#1. Specify the URL
url='https://www.sogou.com'
#2. Send the request; get() returns a response object
response=requests.get(url=url)
#3. Extract the response data; .text returns it as a string
page_text=response.text
#print(page_text)  # HTML source code
#4. Persist the data
with open('./sogou.html','w',encoding='utf-8') as f:
    f.write(page_text)
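When an endpoint returns JSON rather than HTML, .json() parses the body into Python objects directly. A minimal sketch; httpbin.org is used purely as an illustrative JSON endpoint and is not part of the original examples:

import requests

# httpbin.org/get echoes the request back as a JSON document
response=requests.get('https://httpbin.org/get')
data=response.json()  # parse the JSON body into a dict
print(data['url'])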

2 UA Spoofing

import requests
#Web page collector
#User-Agent: the identity string of the client sending the request
#UA spoofing: disguise the script as a browser
url='https://www.sogou.com/web'  # anything after '?' can be dropped; params fills it in
headers={
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.84 Safari/537.36'
}
kw=input('Enter a search keyword: ')
param={
    'query':kw
}
response=requests.get(url=url,params=param,headers=headers)
page_text=response.text
file_name=kw+'.html'
with open(file_name,'w',encoding='utf-8') as f:
    f.write(page_text)
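requests URL-encodes the params dict and appends it after '?', which is why the query string can be left out of the base URL. A quick way to confirm the final URL (a minimal, self-contained sketch):

import requests

# requests builds the query string itself, so the final URL becomes
# https://www.sogou.com/web?query=python
response=requests.get('https://www.sogou.com/web',params={'query':'python'})
print(response.url)  # the fully encoded request URL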

3 POST Requests

#Reverse-engineering Baidu Translate
#POST request (carries form parameters)
import requests
import json

post_url='https://fanyi.baidu.com/sug'
headers={
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.84 Safari/537.36'
}
word=input('Enter a word: ')
data={
    'kw':word
}
response=requests.post(url=post_url,data=data,headers=headers)
text=response.json()
print(text)
file_name=word+'.json'
with open(file_name,'w',encoding='utf-8') as fp:
    json.dump(text,fp=fp,ensure_ascii=False)
print('over')
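.json() raises an error if the body is not valid JSON, so it is safer to check the status code and Content-Type before parsing. A minimal sketch, assuming the post_url, data, and headers defined above:

response=requests.post(url=post_url,data=data,headers=headers)
response.raise_for_status()  # raise on a 4xx/5xx status
if 'json' in response.headers.get('Content-Type',''):
    text=response.json()
else:
    text=response.text  # fall back to the raw body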

4 Scraping Douban Movie Data

import requests
import json
url='https://movie.douban.com/j/chart/top_list'
params={
    'type':'24',
    'interval_id':'100:90',
    'action': '',
    'start':1,  # index of the first movie to fetch
    'limit':'20'  # number of movies per request; can be raised, e.g. to 200
}
headers={
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.84 Safari/537.36'
}
response=requests.get(url=url,params=params,headers=headers)
list_data=response.json()
with open('./douban.json','w',encoding='utf-8') as fp:
    json.dump(list_data,fp=fp,ensure_ascii=False)
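To collect more than one page, advance start by limit on each request. A minimal sketch, assuming the url, params, and headers above and that the endpoint returns a JSON list, as the variable name list_data suggests; all_movies is introduced here for illustration:

all_movies=[]
for start in range(0,100,20):  # five pages of 20 movies each
    params['start']=start
    page=requests.get(url=url,params=params,headers=headers).json()
    all_movies.extend(page)
print(len(all_movies))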

5 Scraping KFC Store Addresses by City

The city name has to be sent in the request body, so a POST query is used, much as in Section 3.

import requests
import json
url='http://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=keyword'
data={
'cname':'',
'pid':'',
'keyword':'北京',
'pageIndex':'1',
'pageSize':'10'
}
headers={
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.84 Safari/537.36'
}
response=requests.post(url=url,data=data,headers=headers)
list_data=response.text
print('finish')
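The response body is JSON-formatted text, so it can be parsed and paged through pageIndex. A minimal sketch, assuming the url, data, and headers above; the output file kfc.json is a name introduced here:

pages=[]
for page_index in range(1,4):  # first three pages, for illustration
    data['pageIndex']=str(page_index)
    response=requests.post(url=url,data=data,headers=headers)
    pages.append(json.loads(response.text))  # body is JSON text
with open('./kfc.json','w',encoding='utf-8') as fp:
    json.dump(pages,fp=fp,ensure_ascii=False)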

6 Scraping Data from the National Medical Products Administration (NMPA)

Site: scxk.nmpa.gov.cn:81/xk/

Inspecting the page shows that the detail records are loaded via Ajax. Step one is therefore to pull the company IDs from the XHR listing requests; each company's details are then requested by ID (the URL is the same, only the id parameter differs).

import json
import requests

#Dynamically loaded data. Step 1: collect the IDs from the listing pages
url='http://scxk.nmpa.gov.cn:81/xk/itownet/portalAction.do?method=getXkzsList'
headers={
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.84 Safari/537.36'
}
id_list=[]
all_data_list=[]
for page in range(1,300):  # walk listing pages 1-299
    data={
    'on':'true',
    'page':page,
    'pageSize':'15',
    'productName':'',
    'conditionType':'1',
    'applyname':'',
    'applysn': ''
    }
    json_ids=requests.post(url=url,headers=headers,data=data).json()
    for dic in json_ids['list']:
        id_list.append(dic['ID'])
print(len(id_list))

#Step 2: fetch each company's detail record by its ID
post_url='http://scxk.nmpa.gov.cn:81/xk/itownet/portalAction.do?method=getXkzsById'
for company_id in id_list:  # 'id' would shadow the built-in, so use company_id
    data={
        'id':company_id
    }
    detail_json=requests.post(url=post_url,headers=headers,data=data).json()
    print(detail_json)
    all_data_list.append(detail_json)
with open('./alldata.json','w',encoding='utf-8') as fp:
    json.dump(all_data_list,fp=fp,ensure_ascii=False)
print('over')
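Fetching 299 listing pages plus one detail request per ID is a lot of traffic, and a single malformed response will crash the bare .json() call. A minimal hardening sketch, assuming the post_url, headers, id_list, and all_data_list above; the error skipping and the pause are additions, not part of the original approach:

import time

for company_id in id_list:
    try:
        detail_json=requests.post(url=post_url,headers=headers,
                                  data={'id':company_id},timeout=10).json()
    except (requests.RequestException,ValueError):
        continue  # skip network failures and non-JSON bodies
    all_data_list.append(detail_json)
    time.sleep(0.5)  # pause briefly between requests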