日韩性视频-久久久蜜桃-www中文字幕-在线中文字幕av-亚洲欧美一区二区三区四区-撸久久-香蕉视频一区-久久无码精品丰满人妻-国产高潮av-激情福利社-日韩av网址大全-国产精品久久999-日本五十路在线-性欧美在线-久久99精品波多结衣一区-男女午夜免费视频-黑人极品ⅴideos精品欧美棵-人人妻人人澡人人爽精品欧美一区-日韩一区在线看-欧美a级在线免费观看

歡迎訪問 生活随笔!

生活随笔

當前位置: 首頁 > 运维知识 > 数据库 >内容正文

数据库

mysql汽车品牌系列_爬取汽车之家汽车品牌型号系列数据

發布時間:2023/12/31 数据库 23 豆豆
生活随笔 收集整理的這篇文章主要介紹了 mysql汽车品牌系列_爬取汽车之家汽车品牌型号系列数据 小編覺得挺不錯的,現在分享給大家,幫大家做個參考.

需要安裝python3,並安裝代碼開頭引用的幾個庫,僅供學習和參考。如嫌麻煩,請直接下載https://download.csdn.net/download/weixin_36691991/11032522

import re

import json

import requests

from lxml import etree

import os

import urllib3.exceptions

import pymysql

import time

# ---------------------------------------------------------------------------
# Endpoints
# ---------------------------------------------------------------------------
# JS file whose single statement assigns the full brand/series tree as JSON.
main_url = 'https://car.autohome.com.cn/javascript/NewSpecCompare.js'
# Pages (one per brand initial letter) that carry the brand logo <img> tags.
photo_url = 'https://www.autohome.com.cn/grade/carhtml/'
# JSONP endpoint returning the spec groups for one series id.
type_type_url = "https://car.autohome.com.cn/duibi/ashx/specComparehandler.ashx?callback=jsonpCallback&type=1&seriesid="

http = urllib3.PoolManager()

# Browser-like UA for the image download; kept byte-identical to the original.
heades = {
    "User-Agent": "Mozilla / 5.0(Windows NT 10.0;Win64;x64) AppleWebKit / 537.36(KHTML, likeGecko) Chrome / 71.0.3578.98Safari / 537.36"
}


def _ensure_dir(path):
    """Create *path* (and parents) if it does not exist yet; return it."""
    if not os.path.exists(path):
        os.makedirs(path, mode=0o777)
    return path


def _load_brand_tree(folder):
    """Download the brand/series JS, archive its JSON to data.json, return the parsed list."""
    html = requests.get(main_url).text
    # The JS body is a single `var x = <json>;` statement — grab the RHS.
    data = re.findall(r'=(.*?);', html, re.S)[0]
    with open(folder + "data.json", 'w', encoding='utf-8') as f:
        f.write(data)
    return json.loads(data)


def _find_logo(brand_name, initial):
    """Return the absolute URL of the brand's logo image, or '' if not found.

    The photo page lists every brand sharing the same initial letter; match by
    exact brand name against the <dl><dt> title next to each <img>.
    """
    page = requests.get(photo_url + initial + "_photo.html").text
    selecter = etree.HTML(page)
    imgs = selecter.xpath('//dl/dt/a/img/@src')
    titles = selecter.xpath('//dl/dt/div/a/text()')
    for title, img in zip(titles, imgs):
        if title == brand_name:
            # src is protocol-relative ("//car2.autoimg.cn/..."); make it a
            # proper absolute URL — the original's bare `img.strip('//')` form
            # was what made urllib3 raise LocationParseError on download.
            return 'https://' + img.lstrip('/')
    return ''


def _series_spec_groups(folder, series):
    """Fetch the spec-group names ('sl') for one series; archive the raw JSON.

    The endpoint answers with JSONP — `jsonpCallback({...})` — so only the
    object literal between the parentheses is kept.
    """
    body = requests.get(type_type_url + str(series['I'])).text
    payload = "{" + re.findall(r'\({(.*?)}\)', body, re.S)[0] + "}"
    json_file = series['N'].replace('/', '')  # series names may contain '/'
    with open(folder + json_file + ".json", 'w+', encoding='utf-8') as f:
        f.write(payload)
    names = []
    for group in json.loads(payload)['List']:
        # Each group dict mixes scalar metadata with list-valued spec entries;
        # only the list values carry the names we want.
        for value in group.values():
            if isinstance(value, list):
                names.extend(v['N'] for v in value)
    return names


def _download_logo(url, dest_dir, brand_name):
    """Download *url* into dest_dir/<brand_name>.png; return the local path, or '' on failure."""
    if not url:
        return ''
    try:
        req = http.request('GET', url, headers=heades)
        file_name = dest_dir + "/" + brand_name + ".png"
        with open(file_name, 'wb') as f:
            f.write(req.data)
        time.sleep(1)  # be polite to the image CDN
        return file_name
    except urllib3.exceptions.LocationParseError as e:
        print(e)
        return ''


def _store(cursor, brand):
    """Insert one brand plus its types and spec groups (parameterized SQL)."""
    cursor.execute("insert into brand(name,ini,img)values(%s,%s,%s)",
                   (brand['name'], brand['ini'], brand['img']))
    b_pid = cursor.lastrowid
    for m_t in brand['type']:
        print((b_pid, m_t['name']))
        cursor.execute("insert into type(b_id,name)values(%s,%s)",
                       (b_pid, m_t['name']))
        t_pid = cursor.lastrowid
        # A series with no spec groups still gets one placeholder row,
        # matching the original's KeyError fallback.
        for m_s in m_t.get('sl', ['']):
            print((t_pid, m_s))
            cursor.execute("insert into slis(t_id,name)values(%s,%s)",
                           (t_pid, m_s))


def main():
    """Scrape autohome.com.cn brand/series/spec data and store it in MySQL.

    Flow per brand: resolve the logo URL, collect every series and its spec
    groups, download the logo to ./file/brand/, then insert brand -> type ->
    slis rows and commit.  For study/reference only.
    """
    folder = _ensure_dir(os.getcwd() + '/file/')
    logo_dir = _ensure_dir(os.getcwd() + '/file/brand')
    # One connection for the whole run (the original reconnected per brand).
    conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',
                           passwd='root', db='weiqing', charset='utf8')
    cursor = conn.cursor()
    try:
        for data in _load_brand_tree(folder):
            brands = {'name': data['N'], 'ini': data['L']}
            types = []
            # data['List'] groups series by country/sub-brand; flatten both levels.
            for tss in data['List']:
                for t in tss['List']:
                    print(t['N'])
                    types.append({
                        'name': t['N'],
                        'seriesid': t['I'],
                        'sl': _series_spec_groups(folder, t),
                    })
            brands['type'] = types
            brands['img'] = _download_logo(
                _find_logo(brands['name'], brands['ini']), logo_dir, brands['name'])
            print((brands['name'], brands['ini'], brands['img']))
            _store(cursor, brands)
            conn.commit()
            print(brands['name'] + "====" + brands['ini'] + "======" + brands['img'])
            # NOTE: the original called exit() here, aborting after the first
            # brand — removed so the full brand list is processed.
    finally:
        cursor.close()
        conn.close()


main()  # run on import, matching the original flat-script behavior

總結

以上是生活随笔為你收集整理的mysql汽车品牌系列_爬取汽车之家汽车品牌型号系列数据的全部內容,希望文章能夠幫你解決所遇到的問題。

如果覺得生活随笔網站內容還不錯,歡迎將生活随笔推薦給好友。