Gradient Descent: In-Class Code
import numpy as np
import matplotlib.pyplot as plt

x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]
w = 1.0

def forward(x):
    return x * w

def cost(xs, ys):
    cost = 0
    for x, y in zip(xs, ys):
        y_pred = forward(x)
        cost += (y_pred - y) ** 2
    return cost / len(xs)  # mean squared error (MSE)

def gradient(xs, ys):
    grad = 0
    for x, y in zip(xs, ys):
        grad += 2 * x * (x * w - y)  # derivative of the squared error w.r.t. w
    return grad / len(xs)

mse_list = []
print('Predict (before training)', 4, forward(4))
for epoch in range(100):
    cost_val = cost(x_data, y_data)  # recorded for plotting
    grad_val = gradient(x_data, y_data)
    w -= 0.01 * grad_val  # learning rate 0.01
    mse_list.append(cost_val)
    print('Epoch:', epoch, 'w=', w, 'loss=', cost_val)
print('Predict (after training)', 4, forward(4))

epoch_list = np.arange(0, 100, 1)  # x-axis is the epoch index, not w
plt.plot(epoch_list, mse_list)
plt.ylabel('cost')
plt.xlabel('epoch')
plt.show()
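For reference, spelling out the derivative that gradient(xs, ys) implements: differentiating the MSE cost with respect to w gives

$$
\text{cost}(w) = \frac{1}{N}\sum_{n=1}^{N}\bigl(x_n w - y_n\bigr)^2,
\qquad
\frac{\partial\,\text{cost}}{\partial w} = \frac{1}{N}\sum_{n=1}^{N} 2\,x_n\bigl(x_n w - y_n\bigr)
$$

which is exactly the sum the code accumulates and then divides by len(xs).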
Stochastic Gradient Descent
With SGD, the weight is updated using the gradient of a single sample's loss at a time, rather than the gradient averaged over the whole dataset (see the update rule below).
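Spelled out, the per-sample loss and the resulting update (with learning rate α, which is 0.01 in the code) are:

$$
\text{loss}_n(w) = \bigl(x_n w - y_n\bigr)^2,
\qquad
w \leftarrow w - \alpha \cdot 2\,x_n\bigl(x_n w - y_n\bigr)
$$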
The code is modified accordingly:
x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]
w = 1.0

def forward(x):
    return x * w

def loss(x, y):
    y_pred = forward(x)
    return (y_pred - y) ** 2  # squared error of a single sample

def gradient(x, y):
    return 2 * x * (x * w - y)  # gradient from a single sample

print('Predict (before training)', 4, forward(4))
for epoch in range(100):
    for x, y in zip(x_data, y_data):
        grad = gradient(x, y)
        w -= 0.01 * grad  # update w after every single sample
        print('\tgrad:', x, y, grad)
        l = loss(x, y)
    print('Epoch:', epoch, 'w=', w, 'loss=', l)
print('Predict (after training)', 4, forward(4))
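Unlike the batch version, the SGD script above does not record the loss for plotting. A minimal self-contained sketch of that extension (the names loss_list and the choice to log the last sample's loss per epoch are mine, not from the lecture code):

import matplotlib.pyplot as plt

x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]
w = 1.0

def forward(x):
    return x * w

def loss(x, y):
    return (forward(x) - y) ** 2

def gradient(x, y):
    return 2 * x * (x * w - y)

loss_list = []
for epoch in range(100):
    for x, y in zip(x_data, y_data):
        w -= 0.01 * gradient(x, y)  # per-sample update
    # log the loss of the last sample seen in this epoch
    loss_list.append(loss(x_data[-1], y_data[-1]))

plt.plot(range(100), loss_list)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()

Since y = 2x holds exactly for this toy data, w should converge toward 2.0 and forward(4) should approach 8.0 in both versions.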