当前位置:网站首页>爬虫学习知识

爬虫学习知识

2022-06-10 21:49:00 泸州彭于晏

beautifulSoup获取标签属性值

(原文此处为截图,图片在导出时丢失)

beautifulSoup获取标签值

使用.string获取内容

实例

py1.py文件

import requests
from bs4 import BeautifulSoup
import csv
import time

import os

# Target page: the Douban Books homepage; we scrape its carousel section.
url = "https://book.douban.com/"
# Desktop-browser User-Agent so the site does not reject the request.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.5005.63 Safari/537.36 Edg/102.0.1245.33'}
resp = requests.get(url, headers=headers)
resp.raise_for_status()  # fail fast on an HTTP error instead of parsing an error page
result = resp.text

# Parse with the stdlib parser (no lxml dependency required).
body = BeautifulSoup(result, 'html.parser')
carousel = body.find('div', class_='carousel')
slide_list = carousel.find('div', class_='slide-list')
uls = slide_list.find_all('ul')

# Make sure the image output directory exists before downloading into it.
os.makedirs('./images', exist_ok=True)

# Collected [title, url] rows, later written to data.csv.
books = []
for ul in uls:
    for li in ul.find_all('li'):
        a_s = li.find_all('a')  # all anchor tags inside this slide item

        # Anchor holding the book title: .string is the link text,
        # ['href'] the book's detail-page URL.
        title_a = li.find_all('div', class_='title')[0].find('a')
        books.append([title_a.string, title_a['href']])

        # Cover image: 'src' is the download URL, 'alt' the book name.
        img = a_s[0].find('img')
        imgs = img['src']
        name = img['alt']

        # Download the cover as raw bytes.
        img_resp = requests.get(imgs).content

        # 'with' closes the file automatically; no explicit close() needed.
        with open("./images/{}.jpg".format(name), "wb") as f:
            f.write(img_resp)
        time.sleep(0.5)  # throttle: pause 0.5 s between image downloads
        print("{}图片爬取成功!".format(name))

print(books)
print(len(books))

# Persist the scraped rows to CSV.
# newline='' prevents blank lines between rows on Windows (csv module requirement).
with open('data.csv', 'w', encoding='utf-8', newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(['title', 'url'])
    writer.writerows(books)

py2.py文件

import requests
from bs4 import BeautifulSoup
import csv

# Rows read back from the data.csv file produced by py1.py.
books = []

# Open the file and read every CSV row into `books`.
# newline='' is required by the csv module so quoted/embedded newlines
# are handled correctly; 'with' closes the file automatically.
with open('data.csv', 'r', encoding='utf-8', newline='') as csvfile:
    reader = csv.reader(csvfile)
    for row in reader:
        books.append(row)

# NOTE(review): [2:] drops the ['title','url'] header AND the first data
# row — confirm whether [1:] (header only) was intended.
print(books[2:])
# print(len(books))

https://blog.csdn.net/m0_60964321/article/details/122269923?ops_request_misc=%257B%2522request%255Fid%2522%253A%2522165470154616780366517015%2522%252C%2522scm%2522%253A%252220140713.130102334…%2522%257D&request_id=165470154616780366517015&biz_id=0&utm_medium=distribute.pc_search_result.none-task-blog-2alltop_click~default-2-122269923-null-null.142v11pc_search_result_control_group,157v13control&utm_term=python%E7%88%AC%E8%99%AB%E4%BF%9D%E5%AD%98%E5%9B%BE%E7%89%87&spm=1018.2226.3001.4187

原网站

版权声明
本文为[泸州彭于晏]所创,转载请带上原文链接,感谢
https://blog.csdn.net/qq_44242204/article/details/125194178