Linear Regression Implemented in Python
This post presents a from-scratch Python implementation of linear regression, shared here as a reference.
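Before reading the code, it helps to state what it computes; the formulas below are transcribed directly from the `cost_function` and `optimize` methods. The model is $\hat{y} = ax + b$, and the loss is the mean squared error with the conventional 1/2 factor,

$$J(a, b) = \frac{1}{2n} \sum_{i=1}^{n} (a x_i + b - y_i)^2.$$

Each call to `optimize` performs one step of batch gradient descent with learning rate $\alpha = 0.1$:

$$a \leftarrow a - \frac{\alpha}{n} \sum_{i=1}^{n} (\hat{y}_i - y_i)\,x_i, \qquad b \leftarrow b - \frac{\alpha}{n} \sum_{i=1}^{n} (\hat{y}_i - y_i).$$

The `iterate` loop repeats these updates until the loss changes by less than $10^{-8}$ between consecutive iterations.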
```python
import numpy as np
import matplotlib.pyplot as plt

plt.rcParams['font.sans-serif'] = ['SimHei']  # font that can render Chinese labels

input_x = [0.50, 0.75, 1.00, 1.25, 1.50, 1.75, 1.75, 2.00, 2.25, 2.50,
           2.75, 3.00, 3.25, 3.50, 4.00, 4.25, 4.50, 4.75, 5.00, 5.50]
input_y = [10, 26, 23, 43, 20, 22, 43, 50, 62, 50,
           55, 75, 62, 78, 87, 76, 64, 85, 90, 98]
input_x = np.array(input_x, dtype=float)
input_y = np.array(input_y, dtype=float)

a = 0
b = 0

class linear_regression:
    '''Simple linear regression fitted by batch gradient descent.'''

    def __init__(self, a_init, b_init, x, y):
        self.a = a_init               # initial value of parameter a (slope)
        self.b = b_init               # initial value of parameter b (intercept)
        self.x = x                    # input data
        self.y = y                    # target labels
        # self.epochs = int(epochs)   # iteration count (unused; loop stops on convergence)
        self.shape = self.x.shape[0]  # number of samples

    def model(self):
        '''Compute the predicted values.'''
        return self.a * self.x + self.b

    def cost_function(self):
        '''Mean squared error loss (with the conventional 1/2 factor).'''
        return 0.5 / self.shape * (np.square(self.y - self.a * self.x - self.b)).sum()

    def optimize(self):
        '''One gradient-descent update of parameters a and b.'''
        alpha = 1e-1  # learning rate
        y_hat = self.model()
        da = (1.0 / self.shape) * ((y_hat - self.y) * self.x).sum()  # gradient w.r.t. a
        db = (1.0 / self.shape) * (y_hat - self.y).sum()             # gradient w.r.t. b
        self.a = self.a - alpha * da  # gradient-descent step
        self.b = self.b - alpha * db
        return self.a, self.b

    def iterate(self):
        '''Run gradient descent until the loss stops changing, then plot the fit.'''
        i = 0
        loss_list = []
        while True:
            self.a, self.b = self.optimize()
            print("a:", self.a, " b:", self.b)
            print("loss:", self.cost_function())
            loss_list.append(self.cost_function())
            # stop once the loss changes by less than 1e-8 between iterations
            if i >= 1 and np.abs(loss_list[i] - loss_list[i - 1]) < 1e-8:
                break
            i = i + 1
        y_hat = self.model()
        print("final loss:", self.cost_function())
        plt.scatter(self.x, self.y, color='red')  # raw data points
        plt.plot(self.x, y_hat, color='blue')     # fitted regression line
        plt.xlabel('x')
        plt.ylabel('y')
        plt.show()

if __name__ == '__main__':
    # print("Enter the number of iterations:")
    # epochs = input()
    result = linear_regression(a, b, input_x, input_y)
    result.iterate()
```
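As a quick sanity check (this snippet is not part of the original post), the gradient-descent result can be compared against the closed-form least-squares fit; `np.polyfit` with degree 1 solves the same problem directly. A minimal sketch, assuming the script above has already run so that `input_x`, `input_y`, and `result` exist:

```python
import numpy as np

# Closed-form least-squares fit of y = a*x + b (degree-1 polynomial).
# np.polyfit returns coefficients highest-degree first: [a, b].
a_ls, b_ls = np.polyfit(input_x, input_y, 1)
print("closed-form:      a =", a_ls, " b =", b_ls)

# Gradient descent should land on (approximately) the same line.
print("gradient descent: a =", result.a, " b =", result.b)
```

With the learning rate of 0.1 and the 1e-8 stopping threshold used above, the two pairs should agree to several decimal places.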
Summary

The above is the complete Python linear-regression walkthrough; hopefully it helps you solve the problem that brought you here.