个人常用代码

    科技2022-08-17  130

    个人常用功能的代码

    正则

    re.findall(r'src=(.*?).jpg',st,re.S|re.M)#返回列表 #-----------去掉不要的内容------------ import re str = "*ssdjih256456/*我是" str = re.sub("[A-Za-z0-9\?\、\╲\/\*\\\”、<\>\|]", "", str) print(str) 》》我是

    取值分为三段

    str=你是我 head,sep,tail=str.partition('是')

    以“是”为分隔符,将字符串分成前段、分隔符本身、后段三部分

    筛选列表中以指定后缀(.xls/.xlsx)结尾的文件

    [file for file in files if file.endswith('.xls') or file.endswith('.xlsx')]

    随机函数

    import random random.randint(5,50)#随机数字 slice = random.sample(list, 5) #从list中随机获取5个元素,作为一个片断返回

    对浏览器进行截图,然后切图

    在这里插入代码片def paqu(): global yeshu,driver from PIL import Image from selenium import webdriver from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC chromedriver_path = r"C:\Users\Dream\AppData\Local\Programs\Python\Python37\chromedriver.exe" # chromedriver的路径 print(f'爬取{zhuang[zhuanghao]}图片') # 创建chrome参数对象 options = webdriver.ChromeOptions() options.add_argument('--no-sandbox') # 解决DevToolsActivePort文件不存在的报错 options.add_argument('--window-size=1920,1080') # 指定浏览器窗口大小 options.add_argument('--start-maximized') # 浏览器窗口最大化 options.add_argument('--disable-gpu') # 谷歌文档提到需要加上这个属性来规避bug options.add_argument('--hide-scrollbars') # 隐藏滚动条, 应对一些特殊页面 # options.add_argument('--blink-settings=imagesEnabled=false') # 不加载图片,加快访问速度 options.add_argument('--headless') # 浏览器不提供可视化页面. linux下如果系统不支持可视化不加这条会启动失败 options.add_argument('test-type') options.add_experimental_option("excludeSwitches", ["ignore-certificate-errors", "enable-automation"]) # 此步骤很重要,设置为开发者模式,防止被各大网站识别出来使用了Selenium # options.add_experimental_option("prefs", {"profile.managed_default_content_settings.images": 2}) # 不加载图片,加快访问速度 driver = webdriver.Chrome(options=options, executable_path=chromedriver_path) driver.get('https://api.hzfc.cn/hzfcweb_ifs/interaction/'+zhuang_url[zhuanghao]) # print(driver.title)#打印标题 html=etree.HTML(str(driver.page_source)) try: yeshu=html.xpath('//em/@data-total-pages')[0]#页码,图片一共多少页 yeshu=int(yeshu) except: print('页数为1') yeshu=1 for ye in range(int(yeshu)): # time.sleep(1) baidu_img = WebDriverWait(driver, 20).until( EC.presence_of_element_located((By.CSS_SELECTOR, 'div.sxqk > img')) ) # driver.execute_script('window.scrollTo(0, document.body.scrollHeight)')#将画面向下拉动至最下方 # driver.execute_script("window.scrollBy(0,22)") driver.save_screenshot(f".\\输出\\{kaifa}\\{zhuang_name}\\quantu\\quanye-{ye}.png") # 对整个浏览器页面进行截图 left = baidu_img.location['x'] top = baidu_img.location['y'] 
right = baidu_img.location['x'] + baidu_img.size['width'] bottom = baidu_img.location['y'] + baidu_img.size['height'] im = Image.open(f".\\输出\\{kaifa}\\{zhuang_name}\\quantu\\quanye-{ye}.png") im = im.crop((left, top, right, bottom)) # 对浏览器截图进行裁剪 im.save(f".\\输出\\{kaifa}\\{zhuang_name}\\jietu\\jietu-{ye}.png") driver.find_element_by_class_name('next_page').click() info_1 = (f'共{yeshu}页,第{ye+1}页存储成功' print(f"\r{info_1}",end="") print("图片爬取完成") paqu() driver.quit()

    对浏览器进行长截图

    from selenium import webdriver from selenium.webdriver.chrome.options import Options import os import time def get_image(url, pic_name): #chromedriver的路径 chromedriver = r"C:\Users\Dream\AppData\Local\Programs\Python\Python37\chromedriver.exe" os.environ["webdriver.chrome.driver"] = chromedriver #设置chrome开启的模式,headless就是无界面模式 #一定要使用这个模式,不然截不了全页面,只能截到你电脑的高度 chrome_options = Options() chrome_options.add_argument('headless') chrome_options.add_argument('--hide-scrollbars') driver = webdriver.Chrome(chromedriver,chrome_options=chrome_options) #控制浏览器写入并转到链接 driver.get(url) time.sleep(1) #接下来是全屏的关键,用js获取页面的宽高,如果有其他需要用js的部分也可以用这个方法 width = driver.execute_script("return document.documentElement.scrollWidth") height = driver.execute_script("return document.documentElement.scrollHeight") print(width,height) #将浏览器的宽高设置成刚刚获取的宽高 driver.set_window_size(width, height) time.sleep(1) driver.execute_script('window.scrollTo(0, document.body.scrollHeight)')#将画面向下拉动至最下方 time.sleep(1) driver.execute_script('window.scrollTo(0, 0)')#将画面向下拉动至上 time.sleep(1) #截图并关掉浏览器 driver.save_screenshot(pic_name) driver.close() #你输入的参数 url = 'https://www.aes.com/category/community-stories/community-economic-growth/diversity-inclusion/' pic_name = r'image.png' get_image(url, pic_name)

    对浏览器进行截图

    from selenium import webdriver chromedriver_path = r"C:\Users\Dream\AppData\Local\Programs\Python\Python37\chromedriver.exe" # chromedriver的路径 # 创建chrome参数对象 options = webdriver.ChromeOptions() options.add_argument('--no-sandbox') # 解决DevToolsActivePort文件不存在的报错 options.add_argument('--window-size=1920,8080') # 指定浏览器窗口大小 options.add_argument('--start-maximized') # 浏览器窗口最大化 options.add_argument('--disable-gpu') # 谷歌文档提到需要加上这个属性来规避bug options.add_argument('--hide-scrollbars') # 隐藏滚动条, 应对一些特殊页面 # options.add_argument('--blink-settings=imagesEnabled=false') # 不加载图片,加快访问速度 options.add_argument('--headless') # 浏览器不提供可视化页面. linux下如果系统不支持可视化不加这条会启动失败 options.add_argument('test-type') options.add_experimental_option("excludeSwitches", ["ignore-certificate-errors", "enable-automation"]) # 此步骤很重要,设置为开发者模式,防止被各大网站识别出来使用了Selenium # options.add_experimental_option("prefs", {"profile.managed_default_content_settings.images": 2}) # 不加载图片,加快访问速度 driver = webdriver.Chrome(options=options, executable_path=chromedriver_path) driver.get('https://www.baidu.com') driver.save_screenshot(f"quanye.png") # 对整个浏览器页面进行截图 print("图片爬取完成") driver.quit()

    读取excel表前10行和后10行

    import pandas as pd movies_df = pd.read_excel("2018 Shanghai University Computer Rank Examination.xls") shuju=movies_df.head(10) print('前10行') print(shuju) shuju=movies_df.head(-10) print('后10行') print(shuju)

    饼状图显示

    import matplotlib.pyplot as plt plt.rcParams['font.sans-serif']=['SimHei'] #用来正常显示中文标签 labels = ['合格','优秀','不合格'] sizes = [63,16,30] plt.pie(sizes, labels=labels, autopct='%1.1f%%') plt.title("旅游英语专业2019年计算机等级考试统计情况") plt.show()

    QT模块进行数据折线图显示

    from pyqtgraph.Qt import QtGui, QtCore import pyqtgraph as pg # 创建 PlotWidget 对象 pw = pg.plot() # 设置图表标题、颜色、字体大小 pw.setTitle("气温趋势",color='008080',size='12pt') # 背景色改为白色 pw.setBackground('w') # 显示表格线 pw.showGrid(x=True, y=True) # 设置上下左右的label # 第一个参数 只能是 'left', 'bottom', 'right', or 'top' pw.setLabel("left", "气温(摄氏度)") pw.setLabel("bottom", "时间") # 设置Y轴 刻度 范围 pw.setYRange(min=-10, # 最小值 max=50) # 最大值 # 创建 PlotDataItem ,缺省是曲线图 curve = pw.plot( pen=pg.mkPen('b')) # 线条颜色 hour = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] temperature = [30, 32, 34, 32, 33, 31, 29, 32, 35, 45] curve.setData(hour, # x坐标 temperature # y坐标 ) QtGui.QApplication.instance().exec_()

    QT多线程任务,防止界面卡顿

    from PySide2.QtWidgets import QApplication, QMainWindow, QPushButton, QPlainTextEdit,QMessageBox import time from threading import Thread from PySide2.QtCore import Signal,QObject class MySignals(QObject): text_print = Signal(str) # 实例化 global_ms = MySignals() def handleCalc1(): # global button # button.setEnabled(False) info = textEdit.toPlainText() print(info) for i in range(5): global_ms.text_print.emit( f'输出内容{i}') time.sleep(0.5) # int('我') # button.setEnabled(True) def threadFunc(): print('子线程 开始') handleCalc1() print('子线程 结束') def handleCalc(): thread = Thread(target=threadFunc ) thread.start() # 自定义信号的处理函数 def printToGui(text): textEdit.appendPlainText(str(text)) global_ms.text_print.connect(printToGui) app = QApplication([]) window = QMainWindow() window.resize(500, 400) window.move(300, 300) window.setWindowTitle('薪资统计') textEdit = QPlainTextEdit(window) textEdit.setPlaceholderText("请输入薪资表") textEdit.move(10,25) textEdit.resize(300,350) button = QPushButton('统计', window) button.move(380,80) button.clicked.connect(handleCalc) window.show() app.exec_()

    爬取天气预报代码

    import requests,csv from bs4 import BeautifulSoup #网页请求 def qingqiu(diqu): headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36', } response = requests.get(f'http://www.weather.com.cn/textFC/{diqu}.shtml', headers=headers, verify=False) #指定编码 print(response,response.url) response=response.content html = response.decode('utf-8') return html #解析数据 def jiexi(html): #开始解析 soup = BeautifulSoup(html,'html5lib') #找到第一级html soup_fu = soup.find('div',class_="conMidtab") # print(soup_fu) #找到每一项的 tables = soup_fu.find_all("table") # print(tables) for table in tables: #过滤掉前两个tr trs=table.find_all('tr')[2:] # print(trs) for tr in trs: tds=tr.find_all('td') # print(tds) # #城市 city_td=tds[-8] city_cs = list(city_td.stripped_strings)[0] # print(city) #天气气象 city_td=tds[-4] city_tqqx = list(city_td.stripped_strings)[0] # print(city) #风向风力 city_td=tds[-3] city_fxfl = list(city_td.stripped_strings)[0] # print(city) #最低气温 city_td=tds[-2] city_zdqw = list(city_td.stripped_strings)[0] # print(city) #创建数据字典 tianqiyubao = { } tianqiyubao['城市'] = city_cs tianqiyubao['天气现象'] = city_tqqx tianqiyubao['风向风力'] = city_fxfl tianqiyubao['最低气温'] = city_zdqw tainqi_lists.append(tianqiyubao) #存储数据 def writes(tainqi_lists): with open('tianqi.csv','w',newline='',encoding='utf-8-sig')as f: writer = csv.DictWriter(f , fieldnames=['城市','天气现象','风向风力','最低气温']) writer.writeheader() for shuju in tainqi_lists: writer.writerow(shuju) if __name__ == "__main__": #创建数据列表 tainqi_lists=[] diqus=['hb','db','hd','hz','hn','xb','xn','gat'] for diqu in diqus: res=qingqiu(diqu) jiexi(res) writes(tainqi_lists)

    爬取豆瓣 top250代码

    import requests,csv from lxml import etree # 请求页面 def qinqiuyemian(yeshu): headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36', 'Referer': 'https://movie.douban.com/top250', } params = ( ('start', yeshu), ('filter', ''), ) response = requests.get('https://movie.douban.com/top250', headers=headers, params=params) if response.status_code == 200 : return response #爬取电影名字、作者、评分、推荐语 def xuanzeshuju(soures): res=soures.text html = etree.HTML(res) #选择父级 html_top = html.xpath('//div[@class="info"]') #循环出每一个电影信息 for one_dy in html_top: #创建一个字典存储电影 moviedict = { } #电影名字 dy_mane = one_dy.xpath('div[@class="hd"]/a/span/text()')#[0].strip() # print(dy_mane) #电影链接 dy_url = one_dy.xpath('div[@class="hd"]/a/@href')[0].strip() # print(dy_mane) #电影导演 dy_daoyan = one_dy.xpath('div[@class="bd"]/p/text()')[0].strip() # print(dy_daoyan) #电影评分 dy_pingfen = one_dy.xpath('div[@class="bd"]/div[@class="star"]/span[@class="rating_num"]/text()')[0].strip() # print(dy_pingfen) #电影评价 dy_pingjia = one_dy.xpath('div[@class="bd"]/div[@class="star"]/span[4]/text()')[0].strip() # print(dy_pingjia) #电影推荐于 try: dy_tuijianyu = one_dy.xpath('div[@class="bd"]/p[@class="quote"]/span[@class="inq"]/text()')[0].strip() except IndexError: dy_tuijianyu = '无推荐语' # print(dy_tuijianyu) moviedict['名称'] = ''.join(dy_mane) moviedict['url'] = dy_url moviedict['导演'] = dy_daoyan moviedict['评分'] = dy_pingfen moviedict['评价'] = dy_pingjia moviedict['推荐语'] = dy_tuijianyu #加入列表 movielist.append(moviedict) print(movielist) #保存数据 def writedata(movielist): with open('douban.csv','w',newline='',encoding='utf-8-sig')as f: #写入表头 writer = csv.DictWriter(f,fieldnames=['名称','导演','评分','评价','推荐语','url']) writer.writeheader() #从列表当中遍历出数据 for shuju in movielist: writer.writerow(shuju) if __name__ == "__main__": #创建电影列表 movielist = [] for yeshu in range(0,260,25): soures=qinqiuyemian(yeshu) xuanzeshuju(soures) writedata(movielist)
    Processed: 0.048, SQL: 9