My first Python crawler

    Tech · 2024-03-25
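This was my first attempt at a crawler. The script below walks product pages on https://www.123.top/ by numeric id, parses out the cover image, title, categories, and description with BeautifulSoup, and inserts each record into a MySQL table (newgood); ids that fail to parse are logged to a separate bad table so they can be retried later.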

    from bs4 import BeautifulSoup
    import re
    import urllib.request, urllib.error
    import xlwt
    import random
    import MySQLdb

    # Regular expressions for the cover image and the product title
    findImgSrc = re.compile(r'<img class="img-responsive" src="(.*?)"')
    findName = re.compile(r'<h2 class="c_38485a f18">(.*)</h2>')

    # Connection details (host, user, password, database) are redacted in the
    # original post; substitute your own.
    db = MySQLdb.connect("HOST", "USER", "PASSWORD", "DATABASE", charset='utf8')


    def main():
        baseurl = "https://www.123.top/?type=productinfo&id="
        datalist = getData(baseurl)
        # savepath = ".\\豆瓣.xls"
        # saveData(savepath)
        # Scratch code from early experiments, kept commented out:
        # html = askURL("https://www.c2.top/?type=productinfo&id=106")
        # bs = BeautifulSoup(html, 'html.parser')
        # img = bs.select(".img-responsive")
        # is = re.findall(findImgSrc)
        # print(img.get_text())


    def get_ua():
        user_agents = [
            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60',
            'Opera/8.0 (Windows NT 5.1; U; en)',
            'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 9.50',
            'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',
            'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10',
            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2',
            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
            'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 Safari/534.16',
            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36',
            'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11',
            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER',
            'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)',
            'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 SE 2.X MetaSr 1.0',
            'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; SE 2.X MetaSr 1.0)',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:81.0) Gecko/20100101 Firefox/81.0',
        ]
        # random.choice() picks one user agent at random from the list
        user_agent = random.choice(user_agents)
        return user_agent


    def getData(baseurl):
        datalist = []
        html = askURL(baseurl)
        # Product ids run from 100 through 1429
        for i in range(100, 1430):
            try:
                url = baseurl + str(i)
                html = askURL(url)
                bs = BeautifulSoup(html, 'html.parser')
                img = bs.select(".img-responsive")
                src = 'https://www.123.top/' + img[0].get('src')  # cover image
                text = bs.select(".c_38485a")
                name = re.findall(findName, str(text[0]))[0]  # title
                title = bs.select(".course-info>div>a")
                types = []  # categories
                for item in title:
                    types.append(item.get_text())
                info = bs.select(".main")  # description block
                print('Crawling data for id {0}'.format(i))
                str1 = ','.join(types)
                delpl = bs.select(".shoplist")[0]   # comment section to strip out
                delpl2 = bs.select(".wrapbox")[0]   # second comment block to strip out
                delinfo = str(info[0]).replace(str(delpl), '')
                delinfo2 = delinfo.replace('本商品可参与分享赚佣金计划 【佣金0.5元】', '')
                delinfo3 = delinfo2.replace('点击参与', '')
                delinfo4 = delinfo3.replace(str(delpl2), '')
                # Strip quotes so they don't break the hand-built INSERT below
                str2 = delinfo4.replace("'", '')
                str3 = str2.replace('"', '^')
                cursor = db.cursor()
                sql = "INSERT INTO newgood(cid,face,name,type,info) VALUES (%s,'%s','%s','%s','%s')" % (i, src, name, str1, str3)
                print(sql)
                cursor.execute(sql)
                db.commit()
            except Exception:
                print('Failed to crawl data for id {0}'.format(i))
                # Log the failed id to a separate table so it can be retried
                cursor = db.cursor()
                sql = "INSERT INTO bad(cid) VALUES (%s)" % (i,)
                cursor.execute(sql)
                db.commit()
        return datalist  # note: nothing is appended here; rows go straight to MySQL


    # Fetch the page content
    def askURL(url):
        head = {
            'User-Agent': get_ua()
        }
        request = urllib.request.Request(url, headers=head)
        html = ""
        try:
            # Random timeout between 1 and 60 seconds, as in the original
            response = urllib.request.urlopen(request, timeout=random.randint(1, 60))
            html = response.read().decode("utf-8")
        except urllib.error.URLError as e:
            if hasattr(e, 'code'):
                print(e.code)
            if hasattr(e, 'reason'):
                print(e.reason)
        return html


    def saveData(savepath):
        print('save...')  # still a stub; see the xlwt sketch below


    if __name__ == "__main__":
        main()
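A note on the INSERT: the replace("'", '') and replace('"', '^') calls above exist only so quotes in the scraped text can't break the hand-built SQL string. Letting the driver do the escaping is safer and makes the quote-stripping unnecessary. A minimal sketch, assuming the same newgood table; save_row is a hypothetical helper, not part of the original script:

    import MySQLdb

    # Hypothetical helper: parameterized INSERT into the same
    # newgood(cid, face, name, type, info) table as above.
    def save_row(db, cid, face, name, type_str, info):
        sql = "INSERT INTO newgood(cid, face, name, type, info) VALUES (%s, %s, %s, %s, %s)"
        cursor = db.cursor()
        try:
            # The driver escapes each value, so quotes in scraped text are safe
            cursor.execute(sql, (cid, face, name, type_str, info))
            db.commit()
        except MySQLdb.Error:
            db.rollback()
            raise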
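saveData is still a stub, but xlwt is imported and main sketches a .xls save path, so an Excel branch was clearly planned. Here is a minimal sketch of what it might look like once getData actually appends its rows to datalist; the (datalist, savepath) signature and the column layout are my assumptions, not from the post:

    import xlwt

    # Sketch only: assumes each entry in datalist is a
    # (cid, face, name, type, info) tuple; layout is an assumption.
    def saveData(datalist, savepath):
        book = xlwt.Workbook(encoding="utf-8")
        sheet = book.add_sheet("products")
        for col, label in enumerate(("cid", "face", "name", "type", "info")):
            sheet.write(0, col, label)        # header row
        for row, record in enumerate(datalist, start=1):
            for col, value in enumerate(record):
                sheet.write(row, col, value)  # one product per row
        book.save(savepath)                   # e.g. savepath = ".\\豆瓣.xls"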