Revenue and Forecasting: Building a Linear Regression Model to Predict Income Levels


1. Load the data

Feature meanings: the columns are anonymized as feature1 through feature40, and 收入 (income) is the prediction target.

# Load the data
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
import statsmodels.api as sm  # a powerful Python statistics package: regression, time-series analysis, hypothesis tests, and more
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

data = pd.read_table('C:/Users/lb/Desktop/test/earndata3.txt', sep='\t', engine='python', encoding='utf-8')
data.columns.values
data.head()

# # Rename the columns (optional)
# data.rename(columns={'类型': 'type', '盈利率': 'profit', '付费率': 'pay', '活跃率': 'active',
#                      '收入': 'income', '触达比例': 'touch', '转化比例': 'conves', '新增比例': 'new',
#                      '运营费用占比': 'operate', '服务费用占比': 'servicce'}, inplace=True)
# data

# plt.rcParams controls figure details such as size, line style, and width
# Scatter plot of two dimensions
plt.rcParams['font.sans-serif'] = ['SimHei']   # display Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False     # display minus signs correctly
plt.scatter(data['feature35'], data['收入'])
plt.xlabel('feature35')
plt.ylabel('收入')
plt.show()

2. Check for missing values and ways to fill them

# Count missing values
na_num = pd.isna(data).sum()
print(na_num)

# Fill missing values with fillna()
# df['taixin'] = df['taixin'].fillna(df['taixin'].mean())     # mean
# df['taixin'] = df['taixin'].fillna(df['taixin'].mode()[0])  # mode (mode() returns a Series, so take the first entry)
# df['taixin'] = df['taixin'].interpolate()                   # interpolation

Outliers are usually inspected with a box plot.

# Check for outliers
plt.boxplot(data['feature1'])
plt.show()
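The box plot only visualizes the outliers; a common follow-up, not part of the original, is to derive the same 1.5×IQR whisker bounds numerically. A minimal sketch, reusing feature1:

# Flag values outside the 1.5*IQR whiskers shown by the box plot
q1, q3 = data['feature1'].quantile([0.25, 0.75])
iqr = q3 - q1
mask = (data['feature1'] < q1 - 1.5 * iqr) | (data['feature1'] > q3 + 1.5 * iqr)
print(mask.sum(), 'potential outliers in feature1')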

Seaborn is an extension of matplotlib: a data-visualization library that wraps it with a higher-level API, making it more convenient and flexible in practice. It provides: 1. histograms and density plots 2. bar charts and heatmaps 3. plot style settings 4. color palette utilities
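For items 1 and 2, a quick sketch (the column choice is illustrative; histplot requires seaborn 0.11 or later):

import seaborn as sns
# Histogram plus kernel-density estimate for a single feature
sns.histplot(data['feature1'], kde=True)
plt.show()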

A Pearson correlation coefficient below 0.4 indicates a weak relationship between variables; a coefficient between 0.4 and 0.6 indicates a moderate relationship; a coefficient above 0.6 indicates a strong relationship.
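The export step below writes a data_cor object that this excerpt never defines; presumably it is the Pearson correlation matrix. A sketch of how it could be computed and, per item 2 above, drawn as a heatmap:

# Assumed definition of data_cor: pairwise Pearson correlations
data_cor = data.corr(method='pearson')
sns.heatmap(data_cor, cmap='RdBu_r', vmin=-1, vmax=1)  # heatmap of the correlation matrix
plt.show()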

Export

# Export to a file
outputpath = 'C:/Users/dell/Desktop/cor2.csv'
data_cor.to_csv(outputpath, index=True, header=True)

# Pairwise scatter plots of all variables
sns.pairplot(data)
plt.show()

# Split into training and test sets
from sklearn.model_selection import train_test_split
data_x = data.drop(['收入'], axis=1)
data_y = data['收入']
train_x, test_x, train_y, test_y = train_test_split(data_x, data_y, test_size=0.3, random_state=6)
train_x.head()

3. Reset the indices so they start from 0 again

# Reset the indices
for i in [train_x, test_x]:
    i.index = range(i.shape[0])
train_x.head()

Import the linear_model module, then create a linear model with linear_model.LinearRegression. The constructor takes several parameters (viewable via help(linear_model.LinearRegression)):

    LinearRegression(copy_X=True, fit_intercept=True, n_jobs=1, normalize=False)

1. fit_intercept: bool, whether to compute the intercept; default True. If the data is already centered you can set it to False.

2. normalize: bool, whether to standardize (center) the inputs; default False. When True the inputs are standardized before fitting; this is rarely used, since you can apply StandardScaler yourself.

3. copy_X: bool, whether to copy the X data; default True. If False, X may be overwritten by the centering step.

4. n_jobs: int, number of cores to use; default 1, with -1 meaning all cores. (See the construction sketch below.)
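As an illustration of the list above (values are just the defaults plus all-core parallelism; normalize is omitted because it was removed in scikit-learn 1.2):

from sklearn.linear_model import LinearRegression
# Explicit construction; n_jobs=-1 uses every core
model = LinearRegression(fit_intercept=True, copy_X=True, n_jobs=-1)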

# Build the linear regression model
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(train_x, train_y)
display(model.intercept_)  # model intercept
display(model.coef_)       # model coefficients

[*zip(train_x.columns, model.coef_)]

[('feature1', -1.306876920532666), ('feature2', -2.135359164945528), ('feature3', 0.3922366623031011),
 ('feature4', -0.4006529864411556), ('feature5', -0.800071692310333), ('feature6', -0.04600005945256574),
 ('feature7', -0.11265174384663018), ('feature8', 0.10045483109131019), ('feature9', -0.32704175764925286),
 ('feature10', 1.2434839400292559), ('feature11', 1.5832422299388336), ('feature12', -0.09226719574100156),
 ('feature13', -2.459151124321635), ('feature14', 1.9779923876600112), ('feature15', 0.024525225949885543),
 ('feature16', 0.00918442799938769), ('feature17', 0.006352020349345027), ('feature18', 1.6985207539315572),
 ('feature19', -4.9467995989630396e-05), ('feature20', -0.022589825867085835), ('feature21', 0.09947572093574145),
 ('feature22', -1.0843768519460424), ('feature23', -0.000538610562770904), ('feature24', 0.007229716791249371),
 ('feature25', 0.0011119599539866497), ('feature26', 0.23842187221124864), ('feature27', 0.026069170729882317),
 ('feature28', 0.00691494578802669), ('feature29', -0.0449676591248237), ('feature30', 0.0011027808324655089),
 ('feature31', -1.151930096574248), ('feature32', 0.001446787798073345), ('feature33', 0.012505109738047488),
 ('feature34', 0.3162910511343061), ('feature35', -0.4609002081574919), ('feature36', -0.03493518878291976),
 ('feature37', 0.000816129764761191), ('feature38', 1.4467629087041338), ('feature39', 0.038077869864662946),
 ('feature40', 2.4660343230998505e-05)]
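With 40 coefficients the raw list is hard to scan; one way (not in the original) to surface the strongest effects is to rank by absolute value:

# Hedged sketch: rank features by absolute coefficient size
coef_s = pd.Series(model.coef_, index=train_x.columns)
coef_s.reindex(coef_s.abs().sort_values(ascending=False).index).head(10)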

4. Plot predicted vs. actual values

# Predicted vs. actual values (training set)
pre_train = model.predict(train_x)
plt.plot(range(len(pre_train)), pre_train, label='predicted')
plt.plot(range(len(train_y)), train_y, label='actual')
plt.legend()
plt.show()

5. The unsorted plot is hard to read; sorting the predicted and actual values before plotting makes the comparison clearer

# Predicted vs. actual values, sorted
pre_train = model.predict(train_x)
plt.plot(range(len(pre_train)), sorted(pre_train), label='predicted')
plt.plot(range(len(train_y)), sorted(train_y), label='actual')
plt.legend()
plt.show()

6. Model evaluation

from sklearn.metrics import mean_squared_error as MSE
MSE(train_y, pre_train)

from sklearn.model_selection import cross_val_score
# Negative values mean the scorer is a loss
cross_val_score(model, train_x, train_y, cv=10, scoring='r2').mean()

Cross-validation

# Compute the MSE with sklearn
from sklearn.metrics import mean_squared_error
mean_squared_error(train_y, pre_train)

# R-squared on the test set
pre_y = model.predict(test_x)
from sklearn.metrics import r2_score
score = r2_score(test_y, pre_y)  # first argument: true values, second: predictions
score

Cross-validated MSE

    cross_val_score(model,train_x,train_y,cv=10,scoring = "neg_mean_squared_error").mean()
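Because scikit-learn scorers follow a "greater is better" convention, the value above is the negative MSE; flip the sign to recover the usual MSE, and take a square root for RMSE:

# Negate the scorer output to get a positive MSE
neg_mse = cross_val_score(model, train_x, train_y, cv=10, scoring='neg_mean_squared_error').mean()
print(-neg_mse, np.sqrt(-neg_mse))  # MSE (squared units) and RMSE (in the units of 收入)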

# Predicted vs. actual values on the test set
pre_test = model.predict(test_x)
plt.plot(range(len(pre_test)), sorted(pre_test), label='predicted')
plt.plot(range(len(test_y)), sorted(test_y), label='actual')
plt.legend()
plt.show()

7. Multicollinearity

A multicollinearity check looks for linear relationships among the independent variables themselves. When multicollinearity is present, significance tests of the coefficients lose their power and the OLS estimates become unreliable. It is usually detected with the variance inflation factor, VIF_i = 1 / (1 - R_i^2), where R_i^2 comes from regressing feature i on all the other features; VIF > 10 is taken as evidence of collinearity. If multicollinearity is found, either drop variables or switch to a different model (e.g. LASSO). dmatrices below assembles the features into design matrices for this check.

# Multicollinearity: linear dependence among the independent variables
from patsy.highlevel import dmatrices
from statsmodels.stats.outliers_influence import variance_inflation_factor

Y, X = dmatrices(
    '收入 ~ feature1 + feature2 + feature3 + feature4 + feature5 + feature6 + feature7 + feature8'
    ' + feature9 + feature10 + feature11 + feature12 + feature13 + feature14 + feature15 + feature16'
    ' + feature17 + feature18 + feature19 + feature20 + feature21 + feature22 + feature23 + feature24'
    ' + feature25 + feature26 + feature27 + feature28 + feature29 + feature30 + feature31 + feature32'
    ' + feature33 + feature34 + feature35 + feature36 + feature37 + feature38 + feature39 + feature40',
    data=data, return_type='dataframe')

vif = pd.DataFrame()
vif["VIF Factor"] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
vif["features"] = X.columns
vif

    VIF Factor   features
0     8.244820   Intercept
1     1.159042   feature1
2     1.833402   feature2
3     1.062635   feature3
4     1.171333   feature4
5     1.024516   feature5
6     4.241079   feature6
7     9.112658   feature7
8     4.629342   feature8
9     4.295007   feature9
10    3.085991   feature10
11    1.106177   feature11
12    1.083720   feature12
13    1.073559   feature13
14    1.013818   feature14
15    1.070490   feature15
16    1.498616   feature16
17    1.248611   feature17
18    1.007976   feature18
19    1.095409   feature19
20    3.652704   feature20
21    3.985798   feature21
22    6.668252   feature22
23    3.120996   feature23
24    1.020012   feature24
25    2.170860   feature25
26    2.018375   feature26
27    1.926179   feature27
28    1.982204   feature28
29    1.492437   feature29
30    1.275029   feature30
31    5.298781   feature31
32    2.267253   feature32
33    2.655571   feature33
34    3.786994   feature34
35    6.317910   feature35
36    1.039711   feature36
37    3.315587   feature37
38    3.098461   feature38
39    1.313444   feature39
40    1.276520   feature40
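A programmatic version of the VIF > 10 rule from above (a sketch reusing the vif frame; here no feature crosses the threshold, feature7 being closest at 9.11):

# List features whose VIF exceeds 10, ignoring the intercept row
high_vif = vif.loc[(vif['VIF Factor'] > 10) & (vif['features'] != 'Intercept'), 'features']
print(list(high_vif))  # empty for this data, so nothing needs to be dropped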

# Influential points: |studentized residual| > 3 marks a highly influential point;
# deleting such a point would noticeably change the model
from statsmodels.stats.outliers_influence import OLSInfluence
model1 = sm.OLS(data_y, data_x).fit()
OLSInfluence(model1).summary_frame().head()

# Outlier detection: points far from the bulk of the data, flagged by large standardized residuals
model2 = sm.OLS(data_y, data_x).fit()
outliers = model2.get_influence()
outliers

# High-leverage points via the hat matrix: hat value > 2(p+1)/n
# (p = number of predictors, n = number of observations)
leverage = outliers.hat_matrix_diag

# DFFITS distance: |DFFITS| > 2*sqrt((p+1)/n) suggests a possibly anomalous sample
dffits = outliers.dffits[0]

# Externally studentized residuals
resid_stu = outliers.resid_studentized_external

# Cook's distance: the larger the value, the more likely the point is an outlier
cook = outliers.cooks_distance[0]

# COVRATIO: the further a sample's covratio is from 1, the more likely it is an outlier
covratio = outliers.cov_ratio

# Combine the diagnostics
contat1 = pd.concat([pd.Series(leverage, name='leverage'),
                     pd.Series(dffits, name='dffits'),
                     pd.Series(resid_stu, name='resid_stu'),
                     pd.Series(cook, name='cook'),
                     pd.Series(covratio, name='covratio')], axis=1)
data_outliers = pd.concat([data, contat1], axis=1)
data_outliers.head()

Using a studentized residual of 2 as the cutoff, compute the proportion of outliers.

outliers_ratio = sum(np.where(np.abs(data_outliers.resid_stu) > 2, 1, 0)) / data_outliers.shape[0]
outliers_ratio

Drop the outliers

data_outliers1 = data_outliers.loc[np.abs(data_outliers.resid_stu) <= 2, ]
data_outliers1

# Rebuild the model after removing outliers and checking collinearity
from sklearn.linear_model import LinearRegression
# Drop the target and the diagnostic columns added above, which must not be used as features
data_x1 = data_outliers1.drop(['收入', 'leverage', 'dffits', 'resid_stu', 'cook', 'covratio'], axis=1)
data_y1 = data_outliers1['收入']
from sklearn.model_selection import train_test_split
train_x1, test_x1, train_y1, test_y1 = train_test_split(data_x1, data_y1)

# Fit the new linear regression model
model4 = LinearRegression()
model4.fit(train_x1, train_y1)
pre_y1 = model4.predict(test_x1)
score = r2_score(test_y1, pre_y1)
score

sklearn.preprocessing.PolynomialFeatures constructs new polynomial features; degree controls the degree of the polynomial.

# Model interpretability: construct polynomial features
from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures(degree=2).fit(train_x)
poly.get_feature_names()  # generic names like 'x0 x1'; replaced by get_feature_names_out() in scikit-learn 1.0+

poly.get_feature_names(train_x.columns)

Passing in the data's column names makes the generated feature names readable.

X_ = poly.transform(train_x)
reg = LinearRegression().fit(X_, train_y)
reg.score(X_, train_y)

[*zip(poly.get_feature_names(train_x.columns), reg.coef_)]

The names and coefficients come out as two rows, so transpose first.

coeframe = pd.DataFrame([poly.get_feature_names(train_x.columns), reg.coef_]).T
coeframe.columns = ['feature', 'coef']
coeframe.sort_values(by='coef').head(10)
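Sorting by raw coef only surfaces the most negative terms; a variant (not in the original) that ranks by absolute size shows the strongest effects in either direction:

# Rank polynomial terms by |coefficient|
coeframe['abs_coef'] = coeframe['coef'].astype(float).abs()
coeframe.sort_values(by='abs_coef', ascending=False).head(10)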

8. LASSO

The most important parameter of Lasso is alpha (float, optional, default 1.0). When alpha is 0 the algorithm is equivalent to ordinary least squares, which LinearRegression already implements, so setting alpha to 0 is not recommended.

from sklearn.linear_model import Lasso
model5 = Lasso(alpha=1.0)
model5.fit(train_x, train_y)
pre_y5 = model5.predict(test_x)
score = r2_score(test_y, pre_y5)
score
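A useful side effect of LASSO, not checked in the original, is that it shrinks some coefficients exactly to zero, effectively selecting features:

# Features the LASSO kept (non-zero coefficients)
kept = train_x.columns[model5.coef_ != 0]
print(len(kept), 'of', train_x.shape[1], 'features kept:', list(kept))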

class sklearn.linear_model.Ridge(alpha=1.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=None, tol=0.001, solver='auto', random_state=None)

alpha: regularization strength; larger values mean stronger regularization; default 1.0; this is the main parameter to tune
fit_intercept: whether to compute the model intercept; default True
normalize: when an intercept is fitted and normalize=True, x is normalized before the regression; leave it False if you standardize the data yourself
copy_X: default True, so X is copied; otherwise X may be overwritten during computation
max_iter: maximum number of iterations for the conjugate-gradient solver
tol: float, precision of the solution
solver: which solver to use, e.g. 'svd' (singular value decomposition) or 'lsqr' (least squares); for ridge regression it can usually be left untuned at 'auto'

9. Ridge regression

# Ridge regression
from sklearn.linear_model import Ridge
model6 = Ridge(alpha=1.0)
model6.fit(train_x, train_y)
pre_y6 = model6.predict(test_x)
score = r2_score(test_y, pre_y6)
score

10. LassoCV

# LassoCV: choose alpha by cross-validation
from sklearn.linear_model import LassoCV
alpha = np.logspace(-10, -2, 200, base=10)
lasso_ = LassoCV(alphas=alpha, cv=10).fit(train_x, train_y)
lasso_.mse_path_  # MSE along the regularization path

Best parameter

lasso_.alpha_  # best alpha
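LassoCV refits on the training data with the selected alpha, so it can be scored directly; equivalently one can rebuild a plain Lasso at that alpha (model7 is a name introduced for this sketch):

print(lasso_.score(test_x, test_y))  # test-set R^2 at the chosen alpha
from sklearn.linear_model import Lasso
model7 = Lasso(alpha=lasso_.alpha_).fit(train_x, train_y)  # refit at the chosen alpha
print(r2_score(test_y, model7.predict(test_x)))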

Linear regression on the built-in Boston housing dataset

The Boston housing dataset comes from a 1978 US economics journal. It contains the prices and attributes of a number of Boston houses; each record has 14 fields, such as the crime rate, whether the house is by the river, and the average number of rooms, with the last field being the median house price.

import numpy as np
import pandas as pd
from pandas import Series, DataFrame
import matplotlib.pyplot as plt
%matplotlib inline
import sklearn.datasets as datasets

# Load the data
boston_dataset = datasets.load_boston()
X_full = boston_dataset.data
Y_full = boston_dataset.target
boston = pd.DataFrame(X_full)
boston.columns = boston_dataset.feature_names
boston['PRICE'] = Y_full
print(boston.head())  # first few rows

# Data distribution
plt.scatter(boston.CHAS, boston.PRICE)
plt.xlabel('CHAS')
plt.ylabel('PRICE')
plt.show()

import seaborn as sns
sns.set()
sns.pairplot(boston)

# Split into training and test sets
from sklearn.model_selection import train_test_split
# The original passed undefined names (train, target); use the feature matrix and prices instead
X_train, x_test, y_train, y_true = train_test_split(X_full, Y_full, test_size=0.2)

## Build the models
from sklearn.linear_model import LinearRegression  # linear regression
from sklearn.linear_model import Ridge             # ridge regression
from sklearn.linear_model import Lasso             # LASSO regression
from sklearn.linear_model import ElasticNet
linear = LinearRegression()
ridge = Ridge()
lasso = Lasso()
elasticnet = ElasticNet()

# Train the models
linear.fit(X_train, y_train)
ridge.fit(X_train, y_train)
lasso.fit(X_train, y_train)
elasticnet.fit(X_train, y_train)

## Predict
y_pre_linear = linear.predict(x_test)
y_pre_ridge = ridge.predict(x_test)
y_pre_lasso = lasso.predict(x_test)
y_pre_elasticnet = elasticnet.predict(x_test)

## Score
from sklearn.metrics import r2_score
linear_score = r2_score(y_true, y_pre_linear)
ridge_score = r2_score(y_true, y_pre_ridge)
lasso_score = r2_score(y_true, y_pre_lasso)
elasticnet_score = r2_score(y_true, y_pre_elasticnet)
display(linear_score, ridge_score, lasso_score, elasticnet_score)

## Compare predictions against the true values
# Linear
plt.plot(y_true, label='true')
plt.plot(y_pre_linear, label='linear')
plt.legend()
# Ridge
plt.plot(y_true, label='true')
plt.plot(y_pre_ridge, label='ridge')
plt.legend()
# LASSO
plt.plot(y_true, label='true')
plt.plot(y_pre_lasso, label='lasso')
plt.legend()
# ElasticNet
plt.plot(y_true, label='true')
plt.plot(y_pre_elasticnet, label='elasticnet')
plt.legend()

if __name__ == "__main__":
    pass
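One caveat: load_boston was deprecated in scikit-learn 1.0 and removed in 1.2. On newer versions the same data can, as an assumption, be fetched from OpenML instead (requires network access; column names may differ in case):

# Alternative loader where load_boston no longer exists (assumes the OpenML mirror of the dataset)
from sklearn.datasets import fetch_openml
boston_raw = fetch_openml(name='boston', version=1, as_frame=True)
X_full = boston_raw.data
Y_full = boston_raw.target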