Python crawler: scraping news and inserting it into your own remote MySQL database

A Python crawler that scrapes news articles and inserts them into your own remote MySQL database! I spent a long time writing this code myself, and I'm sharing it with everyone. Give it a like if you find it useful.


# -*- coding: utf-8 -*-
from xml.etree import ElementTree as ET
import datetime
import random

import pymysql
from selenium import webdriver
from lxml import etree
from time import sleep
from bs4 import BeautifulSoup
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By


def strreplace_v1(old_str, key, value):
    # Replace every occurrence of key in old_str with value
    new_str = old_str.replace(key, value)
    return new_str


def get_page_source_html(driver, urlinfo):
    driver.get(urlinfo)
    page_text = driver.page_source
    tree = etree.HTML(page_text)
    return tree


def get_page_source_etree(driver):
    page_text = driver.page_source
    tree = etree.HTML(page_text)
    return tree


def get_list_a(tree, xpathinfo):
    # Thin XPath wrappers: each returns the node list matching the expression
    return tree.xpath(xpathinfo)


def get_news_title(tree, xpathinfo):
    return tree.xpath(xpathinfo)


def get_news_content(tree, xpathinfo):
    return tree.xpath(xpathinfo)


def get_news_publish(tree, xpathinfo):
    return tree.xpath(xpathinfo)


def getUA():
    uaList = [
        # 360
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
        # chrome
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.94 Safari/537.36",
        # "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.94 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36",

        # firefox
        # "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0",
        "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:36.0) Gecko/20100101 Firefox/36.0",

        # ie11
        # "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko",
        # ie8
        # "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; 4399Box.1357; 4399Box.1253; 4399Box.1357)",

        # 2345 Explorer
        # "Chrome/39.0.2171.99 Safari/537.36 2345Explorer/6.5.0.11018",

        # Sogou
        # "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122 Safari/537.36 SE 2.X MetaSr 1.0",
        # opera
        "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60"

    ]
    headers = random.choice(uaList)
    return headers


def get_desurl_list():
    # Collect every target URL (about 500) from the local sitemap file
    urlinfo_list = []
    tree = ET.parse('sitemap.xml')
    for rank in tree.iter('loc'):
        urlinfo_list.append(rank.text)
    return urlinfo_list
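
# get_desurl_list() expects a file named sitemap.xml in the working directory.
# A minimal example of the structure it can parse (hypothetical URLs):
#
#   <?xml version="1.0" encoding="UTF-8"?>
#   <urlset>
#     <url><loc>http://example.com/page-1.html</loc></url>
#     <url><loc>http://example.com/page-2.html</loc></url>
#   </urlset>
#
# Caveat: ElementTree matches tags by their fully qualified name, so if the
# sitemap declares the standard sitemaps.org namespace, tree.iter('loc') will
# match nothing; the example above omits the namespace for that reason.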


def createwailian(urlwllist, urlzhiru):
    # Replace the 'hrefinfo' placeholder with a randomly chosen backlink URL
    return strreplace_v1(urlzhiru, 'hrefinfo', random.choice(urlwllist))


if __name__ == "__main__":
    allwlurllist = get_desurl_list()
    options = Options()
    options.add_argument("--disable-desktop-notifications")
    # Chrome expects the --user-agent flag for a custom UA string
    options.add_argument('--user-agent=%s' % getUA())
    # options.add_argument('--proxy-server={0}'.format('103.37.141.69:80'))
    # Create the browser instance
    driver = webdriver.Chrome(options=options)
    urlbegin = 'http://www.106ms.com/index.php?list=6-'
    for urlstart in range(1, 10):
        print('Now visiting: {0}'.format(urlbegin + str(urlstart)))
        driver.get(urlbegin + str(urlstart))
        # //*[@id="menu-item-10"]/a
        # Click the "SEO basics" menu link
        # driver.find_element(By.XPATH, value='//*[@id="menu-item-5"]/a').click()
        # Collected <a> hrefs for the current page
        list_a = []
        # xpath returns a list of element objects; to get at an attribute you
        # still have to run another xpath expression against each element.
        # //*[@id="moar"]/section[2]/div/div[1]/div[1]/dl/dd/h3/a
        a_list = get_list_a(get_page_source_etree(driver), '//*[@id="moar"]/section[2]/div/div/div/dl/dd/h3/a')
        sleep(1)
        for a in a_list:
            href = a.xpath('./@href')[0]
            list_a.append(href)
        print("当前页面获取a标签集合长度为{0}".format(len(list_a)))
        sleep(1)
        # 遍历当前list_a
        db = None
        cursor = None
        readnumber = 0
        base_url = 'http://www.106ms.com'
        try:
            db = pymysql.Connect(
                host='8.142.*.*',  # server IP address
                port=3306,  # default MySQL port
                user="106iiaa",  # username
                password="yrdsrootadmi3",  # password
                charset="utf8",  # character set
                db="hbdsa89aa"  # database name
            )
            cursor = db.cursor()
            #xuanyan = "<p>本平台所发布的部分公开信息来源于互联网,转载的目的在于传递更多信息及用于网络分享,并不代表本站赞同其观点,本平台所提供的信息,只供参考之用。不保证信息的准确性、有效性、及时性和完整性。如有侵权请联系:[14878741214]删除,谢谢合作!</p>"
            #urlzhiru = '<p>网站入口1:<a href="hrefinfo" target="_blank">网站入口地址</a></p><p>网站入口2:<a href="http://diyigefan.com/" target="_blank">diyigefan.com</a></p>'
            for newsurl in list_a:
                try:
                    wanquan = base_url + newsurl
                    # print(wanquan)
                    driver.get(wanquan)
                    # title = driver.find_element(By.CLASS_NAME, value='article-title').text
                    # content = driver.find_element(By.CLASS_NAME, value='article-content').text
                    # newscontent = driver.find_element(By.XPATH, value='//div[@class="news_txt"]')
                    #newscontentfordes = driver.find_element(By.CLASS_NAME, value='news_txt').text
                    # Position-based XPath: assumes the description is the 6th <meta> in <head>
                    newscontentfordes = driver.find_element(By.XPATH, value='/html/head/meta[6]')
                    # Wrap the page source with BeautifulSoup, then extract the content
                    # (page_source is already a unicode string, so from_encoding is unnecessary)
                    soup = BeautifulSoup(driver.page_source, features='lxml')
                    for s in soup('img'):
                        s.extract()
                    for s in soup('a'):
                        s.extract()
                    # <img> and <a> tags have already been stripped above
                    allp = soup.find("article", {"class": "content text-left"}).findAll('div')
                    paragraphs = []
                    for x in allp:
                        paragraphs.append(str(x))
                    # Drop the last element
                    content2 = ''.join(paragraphs[0:-1])
                    # print(content2)
                    # Title text
                    title = driver.find_element(By.XPATH, value='//*[@id="moar"]/section[2]/div/div/div/article/header/h2').text
                    keywords = title
                    # content = content2  + createwailian(allwlurllist, urlzhiru)
                    content = content2
                    # content = newscontent + xuanyan
                    des = str(newscontentfordes.get_attribute('content'))[0:120]  # first 120 chars of the meta description
                    sql = 'insert into news (title, keywords, des,content, author,publish,click,state,attr,attrdiy,flag,cate,uid) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'
                    values = (
                        title, keywords, des, content, 'admin',
                        str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')),
                        random.randint(100, 999),
                        0, 0, 0, '画室新闻', 5, 3)
                    cursor.execute(sql, values)
                    db.commit()
                    readnumber = readnumber + 1
                    print("标题:{0}---插入数据库成功".format(title))
                except Exception as ee:
                    print('发生了异常', ee)
                    continue
        except Exception as e:
            print('Exception during this page run:', e)
            if db is not None:
                db.rollback()
        finally:
            if cursor is not None:
                cursor.close()
            if db is not None:
                db.close()
        sleep(5)

        print('This run inserted {0} articles successfully.'.format(readnumber))
        sleep(600)  # wait 10 minutes before moving on to the next listing page
    driver.quit()
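
The INSERT statement assumes a `news` table already exists with matching columns. If you still need to create one, here is a minimal sketch of a compatible schema; the column types and lengths are my assumptions, inferred only from the values the script writes, so adjust them to your actual table:

# -*- coding: utf-8 -*-
# Minimal sketch: create a `news` table compatible with the INSERT above.
# All column types/lengths are assumptions inferred from the inserted values.
import pymysql

db = pymysql.Connect(host='8.142.*.*', port=3306, user="106iiaa",
                     password="yrdsrootadmi3", charset="utf8", db="hbdsa89aa")
cursor = db.cursor()
cursor.execute("""
CREATE TABLE IF NOT EXISTS news (
    id       INT AUTO_INCREMENT PRIMARY KEY,
    title    VARCHAR(255),
    keywords VARCHAR(255),
    des      VARCHAR(255),
    content  MEDIUMTEXT,
    author   VARCHAR(64),
    publish  DATETIME,
    click    INT,
    state    TINYINT,
    attr     TINYINT,
    attrdiy  TINYINT,
    flag     VARCHAR(64),
    cate     INT,
    uid      INT
)
""")
db.commit()
cursor.close()
db.close()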

One prerequisite: you need to install the third-party packages the script uses ahead of time.

Otherwise it will fail with import errors.
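For reference, the imports above map to these pip packages; note that Selenium also needs a chromedriver matching your local Chrome version available on the PATH:

pip install pymysql selenium lxml beautifulsoup4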
