
Python crawler: scraping product details from JD.com!



Recently, because of a work requirement, I needed to scrape product information for several categories from JD.com. Here I record the pitfalls I hit along the way and include the full code at the end. This is for mutual learning only; if you spot mistakes, please point them out~~

  1. Paging through JD search results.
    A JD search page first loads 30 products when it opens, and loads another 30 once you scroll down. The first 30 products correspond to page=1 and the next 30 to page=2. So you can page through results with the requests library by simply incrementing page by 1, or you can use selenium to scroll to the bottom until everything has loaded and then jump to the next full page with page = 2n-1 (see the first sketch after this list).
  2. Getting the price and reviews from the product detail page.
    After obtaining the product detail page URLs as above, I found that requesting a detail URL directly did not return the price or the reviews. Capturing the traffic with Charles showed that these two pieces are loaded by separate JS requests that return two JSON payloads, so the price and review data can be extracted by parsing that JSON (see the second sketch after this list).
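
A minimal sketch of the page-parameter arithmetic from point 1 (the keyword below is just an illustrative placeholder; real requests also need the headers built by Get_Header() in the full code):

from urllib.parse import quote

keyword = quote('零食')        # illustrative search keyword, URL-encoded
for half_page in range(1, 7):  # 6 half-page requests = 3 full result pages
    # Each value of the page parameter covers half a visible search page:
    # items 1-30 are page=1, items 31-60 (loaded on scroll) are page=2, and so on.
    url = f"https://search.jd.com/Search?keyword={keyword}&wq={keyword}&page={half_page}"
    print(url)   # in the real crawler this URL is passed to Get_Product_Url_R()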

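And a minimal sketch of point 2, parsing the two separately-loaded endpoints (the SKU below is a made-up placeholder; in the full code it comes from each item's data-sku attribute, and real requests may also need the headers/cookies built by Get_Header()):

import json
import requests

sku = '100012043978'   # placeholder SKU, for illustration only
price_url = f"https://p.3.cn/prices/mgets?&skuIds=J_{sku}"
comment_url = (f"https://club.jd.com/comment/productPageComments.action?"
               f"callback=fetchJSON_comment98&score=7&sortType=5&page=0"
               f"&pageSize=10&isShadowSku=0&productId={sku}")

# The price endpoint returns a plain JSON list, e.g. [{"p": "59.90", ...}]
price = json.loads(requests.get(price_url).text)[0]['p']

# The comment endpoint returns JSONP wrapped in fetchJSON_comment98(...),
# so the callback wrapper has to be stripped before json.loads()
raw = requests.get(comment_url).text
summary = json.loads(raw.replace('fetchJSON_comment98(', '').replace(');', ''))['productCommentSummary']
print(price, summary['commentCount'], summary['goodCount'], summary['goodRate'])
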
The full code is as follows:

from bs4 import BeautifulSoup
import requests
import random, json
from selenium import webdriver
from time import sleep
from urllib.parse import quote
import pandas as pd

def Get_Header(re_url):
    # Build a list of candidate request headers (Referer set to re_url) and return one at random.
    # The Cookie is a session cookie copied from a logged-in browser; replace it with your own.
    list_header = [
        {
            'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'cache-control':'max-age=0',
            'Cookie':'__jda=122270672.1744496012.1577155345.1598497068.1598508599.11; __jdu=1744496012; shshshfp=075cb5bfc884ea12ffa496ed64bd02f9; shshshfpa=c831f236-6ab3-aa6d-14d2-b2ef1c6bd04e-1588213838; shshshfpb=bF5VGKKJpLPVkXI1nPyFvHQ%3D%3D; unpl=V2_ZzNtbUEESxB1CRJRLklZB2JREV4RUkUcJQARVikYWQ1uAxZfclRCFnQUR11nGloUZwIZWURcRhJFCEdkeBBVAWMDE1VGZxBFLV0CFSNGF1wjU00zQwBBQHcJFF0uSgwDYgcaDhFTQEJ2XBVQL0oMDDdRFAhyZ0AVRQhHZHsfWQBiCxVcQlRzJXI4dmR5H1kDZAsiXHJWc1chVE9SfR5ZAyoDFFhHUksSdAhFZHopXw%3d%3d; __jdv=76161171|baidu-pinzhuan|t_288551095_baidupinzhuan|cpc|0f3d30c8dba7459bb52f2eb5eba8ac7d_0_2b9410e4da434c22b478a9f3c0498153|1598497067783; areaId=18; ipLoc-djd=18-1482-48942-49058; PCSYCityID=CN_430000_430100_0; _pst=%E6%9E%AB%E5%8F%B68%E7%96%AF%E8%80%B6; unick=%E6%9E%AB%E5%8F%B68%E7%96%AF%E8%80%B6; pin=%E6%9E%AB%E5%8F%B68%E7%96%AF%E8%80%B6; _tp=BbPVbKtR38igbOb%2B6nMOFcitpL6scJlLuOEa4E7XiG%2BuT9kM0ukhvBamyGyg9b0W; __jdc=122270672; shshshsID=bdf848f07ecf3bdaadb0464a9365fbfd_6_1598509486237; user-key=ee732f60-2886-4e04-be53-d0157916bc02; cn=0; __jdb=122270672.6.1744496012|11.1598508599; 3AB9D23F7A4B3C9B=U3GDXXISZUYHKELVNFI4WVW3PWLZJCTYCKPU7HYZC3XUFR66FDWFKIXLECXK46FPOOCQUFAFPMN67RL3SJLQLWCZIU',
            'upgrade-insecure-requests':'1',
            'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:80.0) Gecko/20100101 Firefox/80.0',
            'Referer':f'{re_url}',
            'Connection':'close'
        }
    ]
    return random.choice(list_header)

def Get_Product_Url(url):
    # Selenium version: open the search page, scroll down so the remaining products load,
    # then collect the detail-page, price and comment URLs for every item on the page.
    driver = webdriver.Firefox()
    driver.get(url)
    js_code = '''window.scrollTo(0,5000)'''
    driver.execute_script(js_code)
    sleep(5)
    soup = BeautifulSoup(driver.page_source,'lxml')

    product_url_list = [f"https://item.jd.com/{tag.attrs['data-sku']}.html" for tag in soup.find_all('li',class_='gl-item')]
    price_url_list = [f"https://p.3.cn/prices/mgets?&skuIds=J_{tag.attrs['data-sku']}" for tag in soup.find_all('li',class_='gl-item')]
    comment_url_list = [f"https://club.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98&score=7&sortType=5&page=0&pageSize=10&isShadowSku=0&productId={tag.attrs['data-sku']}" for tag in soup.find_all('li',class_='gl-item')]

    driver.close()
    return product_url_list,price_url_list,comment_url_list

def Get_Product_Url_R(url):
    # requests version of the same step: no scrolling, one request per half page.
    headers = Get_Header(re_url='https://www.jd.com/')
    response = requests.get(url,headers = headers)
    soup = BeautifulSoup(response.text,'lxml')
    # Lists of the detail-page URL, price URL and comment URL for every product on this page
    product_url_list = [f"https://item.jd.com/{tag.attrs['data-sku']}.html" for tag in soup.find_all('li',class_='gl-item')]
    price_url_list = [f"https://p.3.cn/prices/mgets?&skuIds=J_{tag.attrs['data-sku']}" for tag in soup.find_all('li',class_='gl-item')]
    comment_url_list = [f"https://club.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98&score=7&sortType=5&page=0&pageSize=10&isShadowSku=0&productId={tag.attrs['data-sku']}" for tag in soup.find_all('li',class_='gl-item')]
    sleep(random.randrange(1, 5))
    return product_url_list,price_url_list,comment_url_list

def Get_Data_M(url,i):
    product_url_list,price_url_list,comment_url_list = Get_Product_Url_R(url)
    headers = Get_Header(url)
    for j in range(1,len(product_url_list)):   # note: starts at 1, so the first item on each page is skipped
        Get_Data_S(product_url_list[j],price_url_list[j],comment_url_list[j],headers,i,j)
        # if tag_list:
        #     for tag in tag_list:
        #         try:
        #             if product_url_list[i] != f"https://item.jd.com/{tag.attrs['data-sku']}.html":   # skip the product already scraped
        #                 pro_url = f"https://item.jd.com/{tag.attrs['data-sku']}.html"
        #                 pri_url = f"https://p.3.cn/prices/mgets?&skuIds=J_{tag.attrs['data-sku']}"
        #                 com_url = f"https://club.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98&score=7&sortType=5&page=0&pageSize=10&isShadowSku=0&productId={tag.attrs['data-sku']}"
        #                 Get_Data_S(pro_url,pri_url,com_url,headers,flag=False)
        #         except Exception as e:
        #             # print(e)
        #             pass
    return result

def Get_Data_S(pro_url,pri_url,com_url,headers,i,j,flag = True):
    # Get the product title
    try:
        response = requests.get(pro_url, headers=headers,timeout=3)
        soup = BeautifulSoup(response.text, 'lxml')
        header_packet = soup.find('div', class_='itemInfo-wrap')
        title = header_packet.find('div', class_='sku-name').text.replace(' ', '')
        # Get the URLs of the other variants listed on the detail page
        # if flag:
        #     try:
        #         tag_list = header_packet.find_all('div', attrs = {'class':'item'})
        #     except:
        #         tag_list = 0
        # Switch the Referer in the headers to the product detail page before fetching the price and comments
        headers = Get_Header(pro_url)
        # Get the product price
        response = requests.get(pri_url, headers=headers)
        price = (json.loads(response.text))[0]['p']
        # Get the product comments
        response = requests.get(com_url, headers=headers)
        page = (response.text).replace('fetchJSON_comment98(', '').replace(');', '').replace(r'\n','')
        comment_dict = json.loads(page)
        # Total number of comments
        commentCount = comment_dict['productCommentSummary']['commentCount']
        # Number of positive comments
        goodcommentCount = comment_dict['productCommentSummary']['goodCount']
        # Positive comment rate
        goodcommentRate = comment_dict['productCommentSummary']['goodRate']

        print(f'{dept_name} page {i}, item {j}:',[title,price,commentCount,goodcommentCount,goodcommentRate])
        result.append([title,price,commentCount,goodcommentCount,goodcommentRate])
        sleep(random.randrange(1,3))
    except Exception as e:
        print(f'{dept_name} page {i}, item {j} failed to scrape!')
        print(e)
        pass
    # return tag_list

def Save_Data(data,dept_name,dept):
    sheet = pd.DataFrame(data=data,columns=['商品标题','商品价格','评论数','好评数','好评率'])
    sheet['小类名称'] = dept_name
    sheet['小类'] = dept
    sheet = sheet[['小类','小类名称','商品标题','商品价格','评论数','好评数','好评率']]
    return sheet



def Main():
    global result,dept_name
    table = pd.DataFrame()
    # Excel = pd.ExcelWriter(r'C:\Users\1000138836\Desktop\result.xlsx')      # output file for the results
    dept_xlsx = pd.read_excel(r'C:\Users\1000138836\Desktop\12.xlsx',sheet_name='中类小类分析')
    for dept_name in dept_xlsx['小类名称'].values.tolist():
        result = []
        dept = dept_xlsx['小类'].values.tolist()[dept_xlsx['小类名称'].values.tolist().index(dept_name)]
        url_dept = quote(dept_name)
        for i in range(1,7):   # 3 full pages in total; JD loads half a page first and the other half on scroll, so the top half is page 1, the bottom half page 2, and so on
            url = f"https://search.jd.com/Search?keyword={url_dept}&wq={url_dept}&page={i}"
            result = Get_Data_M(url,i)
        sheet = Save_Data(result,dept_name,dept)
        table = pd.concat([table,sheet])
        sleep(random.randrange(5,10))
        print(f"Finished scraping category {dept_name}")
    table.to_excel(r'C:\Users\1000138836\Desktop\result.xlsx',index=None)

if __name__ == '__main__':
    Main()



Original article: https://www.cnblogs.com/A3535/p/13596366.html
