一个爬虫Demo(经典豆瓣TOP250)

 获取网页源码,拿想要的内容就完事了。

import xlwt
import requests
from bs4 import BeautifulSoup


def get_style():
    """Build the shared xlwt cell style: centered text rendered in FangSong.

    Returns:
        xlwt.XFStyle: style applied to every cell written by this script.
    """
    cell_style = xlwt.XFStyle()

    # Center the cell content both ways (xlwt codes: 0x02 = horz center,
    # 0x01 = vert center).
    align = xlwt.Alignment()
    align.horz = 0x02
    align.vert = 0x01
    cell_style.alignment = align

    # Use the FangSong font for all written cells.
    cell_font = xlwt.Font()
    cell_font.name = '仿宋'
    cell_style.font = cell_font

    return cell_style


def get_excel(style):
    """Create the output workbook with a styled header row and sized columns.

    Args:
        style: xlwt.XFStyle used for the header cells.

    Returns:
        tuple: (workbook, worksheet) ready to be filled by crawler().
    """
    workbook = xlwt.Workbook(encoding='ascii')
    worksheet = workbook.add_sheet("MovieTop250")

    # Header row (rank / title / one-line comment / link).
    for col, header in enumerate(["排名", "影名", "评论", "链接"]):
        worksheet.write(0, col, header, style)

    # Widen each column; values chosen to fit the typical cell content.
    for col, base_width in enumerate((100, 400, 1200, 650)):
        worksheet.col(col).width = base_width * 20

    return workbook, worksheet


def crawler(worksheet, style):
    """Scrape the Douban movie Top250 list and write one row per movie.

    Rows start at 1 (row 0 is the header written by get_excel) and contain:
    rank, 《title》, the one-line comment (or "None" when absent), and the
    detail-page link. Every cell uses *style*.

    Args:
        worksheet: xlwt worksheet to write into.
        style: xlwt.XFStyle applied to every cell.

    Raises:
        requests.HTTPError: if Douban answers with a non-2xx status.
        requests.Timeout: if a page takes longer than the timeout to load.
    """
    headers = {
        'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 '
    }

    count = 1
    # The list is paginated 25 movies per page: start = 0, 25, ..., 225.
    for page_num in range(0, 250, 25):
        # Fix: add a timeout so a stalled connection cannot hang the script
        # forever, and fail loudly on HTTP errors (e.g. 403 anti-bot pages)
        # instead of silently parsing an error page and writing nothing.
        response = requests.get(
            f"https://movie.douban.com/top250?start={page_num}",
            headers=headers,
            timeout=10,
        )
        response.raise_for_status()

        soup = BeautifulSoup(response.text, "html.parser")
        # find_all() is the current BeautifulSoup name; findAll is the
        # deprecated pre-bs4 alias.
        all_movies = soup.find_all("div", attrs={"class": "info"})
        for movie in all_movies:
            movie_name = movie.find("span", attrs={"class": "title"}).string
            # Not every movie has a one-line "inq" comment.
            comment = movie.find("span", attrs={"class": "inq"})
            movie_comment = "None" if comment is None else comment.string
            movie_link = movie.find("a").attrs["href"]

            worksheet.write(count, 0, count, style)
            worksheet.write(count, 1, f"《{movie_name}》", style)
            worksheet.write(count, 2, f"{movie_comment}", style)
            worksheet.write(count, 3, f"{movie_link}", style)
            count += 1


if __name__ == '__main__':
    # Build the shared cell style, set up the workbook, scrape the 250
    # movies into it, then write the result to disk.
    shared_style = get_style()
    book, sheet = get_excel(shared_style)
    crawler(sheet, shared_style)
    book.save("MovieTop250.xls")
wAAACH5BAEKAAAALAAAAAABAAEAAAICRAEAOw==

相关推荐

  1. python爬虫爬取豆瓣TOP250用csv文件

    2024-03-11 11:00:04       6 阅读
  2. 使用爬虫爬取豆瓣电影Top250(方法二)

    2024-03-11 11:00:04       6 阅读
  3. 使用爬虫爬取豆瓣电影Top250(方法一)

    2024-03-11 11:00:04       9 阅读

最近更新

  1. TCP协议是安全的吗?

    2024-03-11 11:00:04       14 阅读
  2. 阿里云服务器执行yum,一直下载docker-ce-stable失败

    2024-03-11 11:00:04       16 阅读
  3. 【Python教程】压缩PDF文件大小

    2024-03-11 11:00:04       15 阅读
  4. 通过文章id递归查询所有评论(xml)

    2024-03-11 11:00:04       18 阅读

热门阅读

  1. 国产开源数据框openGauss安装与jdbc连接

    2024-03-11 11:00:04       19 阅读
  2. 企业微信HOOK接口Dll调用(4.1.16.6002版本)

    2024-03-11 11:00:04       21 阅读
  3. 美国站群服务器租用需要考虑哪些关键点

    2024-03-11 11:00:04       24 阅读
  4. vue如何优化首页加载速度

    2024-03-11 11:00:04       19 阅读
  5. 深入理解与使用go之中间件--实现

    2024-03-11 11:00:04       23 阅读
  6. IOS面试题object-c 71-80

    2024-03-11 11:00:04       18 阅读
  7. ssl域名转发配置

    2024-03-11 11:00:04       22 阅读
  8. git命令

    2024-03-11 11:00:04       21 阅读
  9. 学习Android的第二十四天

    2024-03-11 11:00:04       20 阅读