Implement the sigmoid function

import numpy as np

def sigmoid(z):
    # Maps any real z to the open interval (0, 1)
    return 1 / (1 + np.exp(-z))
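A quick sanity check (the printed values below are illustrative approximations):

print(sigmoid(0))                        # 0.5
print(sigmoid(np.array([-10.0, 10.0])))  # ~[4.54e-05, 9.9995e-01]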
Implement the hypothesis function

def hypothesis(X, theta):
    # Linear combination of the features, squashed through the sigmoid
    z = np.dot(X, theta)
    return sigmoid(z)
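For instance, with all-zero parameters every prediction is 0.5 (the sample matrix below is illustrative and already carries a bias column):

X_demo = np.array([[1.0, 2.0], [1.0, -1.0]])  # 2 samples: bias column + 1 feature
theta_demo = np.zeros((2, 1))
print(hypothesis(X_demo, theta_demo))         # [[0.5] [0.5]]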
Loss function

def loss(X, y, theta):
    m = X.shape[0]
    y_ = hypothesis(X, theta)
    # Cross-entropy loss, averaged over the m samples
    loss_f = -y * np.log(y_) - (1 - y) * np.log(1 - y_)
    return np.sum(loss_f) / m
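In formula form, this is the average cross-entropy loss:

$$J(\theta) = -\frac{1}{m}\sum_{i=1}^{m}\Big[\,y^{(i)}\log\hat{y}^{(i)} + \big(1-y^{(i)}\big)\log\big(1-\hat{y}^{(i)}\big)\Big],\qquad \hat{y}^{(i)} = \sigma\big(x^{(i)}\theta\big)$$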
Solve with gradient descent

def gradientDescent(X, y, theta, iterations, alpha):
    # Number of training samples
    m = X.shape[0]
    # Prepend a column of ones to X for the bias term
    X = np.hstack((np.ones((m, 1)), X))
    for i in range(iterations):
        # Compute the prediction error once per iteration so that every
        # theta[j] is updated from the same (simultaneous) gradient,
        # not from a theta partially updated within this iteration
        error = hypothesis(X, theta) - y
        for j in range(len(theta)):
            theta[j] = theta[j] - (alpha / m) * np.sum(error * X[:, j].reshape(-1, 1))
        # Print the loss every 10,000 iterations
        if i % 10000 == 0:
            print('iteration', i, 'loss:', loss(X, y, theta), 'theta =', theta.ravel())
    return theta
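As an aside, the inner j-loop can be collapsed into a single matrix product, since the gradient of the cross-entropy loss is X^T(y_hat - y)/m. A minimal equivalent sketch (the name gradient_descent_vectorized is ours, not the original author's):

def gradient_descent_vectorized(X, y, theta, iterations, alpha):
    # Same algorithm as gradientDescent above; the per-component update
    # is replaced by one matrix product per iteration
    m = X.shape[0]
    X = np.hstack((np.ones((m, 1)), X))
    for i in range(iterations):
        error = hypothesis(X, theta) - y   # (m, 1) prediction error
        theta = theta - (alpha / m) * X.T.dot(error)
    return theta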
n = X.shape[1]  # number of features
theta = np.zeros(n + 1).reshape(n + 1, 1)  # theta is a column vector; the +1 matches the all-ones bias column added inside gradientDescent
iterations = 250000
alpha = 0.009
theta = gradientDescent(X, y, theta, iterations, alpha)
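The post assumes X and y were loaded earlier; to run this section standalone, the whole pipeline can be exercised on synthetic data, as in the sketch below (the seed, sizes, and true_theta values are made up for illustration, not from the original):

# Illustrative synthetic data; the original post loads its own X and y
np.random.seed(0)
m_demo = 100
X = np.random.randn(m_demo, 2)                 # 100 samples, 2 features
true_theta = np.array([[0.5], [2.0], [-1.0]])  # bias + 2 weights (made up)
probs = sigmoid(np.hstack((np.ones((m_demo, 1)), X)).dot(true_theta))
y = (probs > np.random.rand(m_demo, 1)).astype(float)  # Bernoulli labels, shape (100, 1)

n = X.shape[1]
theta = np.zeros((n + 1, 1))
theta = gradientDescent(X, y, theta, 50000, 0.1)
print('learned theta:', theta.ravel())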