In the previous post we tried out our stochastic gradient descent in a Jupyter Notebook. In this post we encapsulate that stochastic gradient descent inside the LinearRegression class, as a method named fit_sgd.
```python
# LinearRegression.py
import numpy as np
from metrics import r2_score


class LinearRegression:

    def __init__(self):
        """Initialize the Linear Regression model"""
        self.coef_ = None          # coefficients
        self.interception_ = None  # intercept
        self._theta = None         # θ

    def fit_normal(self, X_train, y_train):
        """Train the model on X_train, y_train using the normal equation"""
        assert X_train.shape[0] == y_train.shape[0], \
            "the size of X_train must be equal to the size of y_train"

        X_b = np.hstack([np.ones((len(X_train), 1)), X_train])  # prepend a column of ones to X_train
        self._theta = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train)

        self.interception_ = self._theta[0]  # intercept
        self.coef_ = self._theta[1:]         # coefficients
        return self

    def fit_gd(self, X_train, y_train, eta=0.01, n_iters=1e4):
        """Train the model on X_train, y_train using batch gradient descent"""
        assert X_train.shape[0] == y_train.shape[0], \
            "the size of X_train must be equal to the size of y_train"

        def J(theta, X_b, y):
            try:
                return np.sum((y - X_b.dot(theta)) ** 2) / len(X_b)
            except:
                return float('inf')

        def dJ(theta, X_b, y):
            return X_b.T.dot(X_b.dot(theta) - y) * 2 / len(X_b)

        def gradient_descent(X_b, y, initial_theta, eta, n_iters=1e4, epsilon=1e-8):
            theta = initial_theta
            i_iter = 0
            while i_iter < n_iters:
                gradient = dJ(theta, X_b, y)
                last_theta = theta
                theta = theta - eta * gradient
                if abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon:
                    break
                i_iter += 1
            return theta

        X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
        initial_theta = np.zeros(X_b.shape[1])
        self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iters)

        self.interception_ = self._theta[0]
        self.coef_ = self._theta[1:]
        return self

    def fit_sgd(self, X_train, y_train, n_iters=5, t0=5, t1=50):
        """Train the model on X_train, y_train using stochastic gradient descent"""
        assert X_train.shape[0] == y_train.shape[0], \
            "the size of X_train must be equal to the size of y_train"
        assert n_iters >= 1  # every sample must be seen at least once

        def dJ_sgd(theta, X_b_i, y_i):
            # gradient estimated from a single sample (X_b_i, y_i)
            return X_b_i.T.dot(X_b_i.dot(theta) - y_i) * 2

        def sgd(X_b, y, initial_theta, n_iters, t0=5, t1=50):

            def learning_rate(t):
                return t0 / (t + t1)

            theta = initial_theta
            m = len(X_b)

            for i_iter in range(n_iters):  # go through all m samples at least n_iters times
                # Shuffle the indices first, then traverse them in order:
                # this keeps the randomness while guaranteeing every sample is visited.
                indexes = np.random.permutation(m)
                X_b_new = X_b[indexes]
                y_new = y[indexes]
                for i in range(m):
                    gradient = dJ_sgd(theta, X_b_new[i], y_new[i])
                    theta = theta - learning_rate(i_iter * m + i) * gradient

            return theta

        X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
        initial_theta = np.zeros(X_b.shape[1])
        self._theta = sgd(X_b, y_train, initial_theta, n_iters, t0, t1)

        self.interception_ = self._theta[0]
        self.coef_ = self._theta[1:]
        return self

    def predict(self, X_predict):
        """Return the prediction vector for the data set X_predict"""
        assert self.interception_ is not None and self.coef_ is not None, \
            "must fit before predict!"
        assert X_predict.shape[1] == len(self.coef_), \
            "the feature number of X_predict must be equal to X_train"

        X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])
        return X_b.dot(self._theta)

    def score(self, X_test, y_test):
        """Compute the R^2 score of the current model on X_test and y_test"""
        y_predict = self.predict(X_test)
        return r2_score(y_test, y_predict)

    def __repr__(self):
        return "LinearRegression()"
```
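One detail worth calling out in fit_sgd is the learning rate. Because each step uses the gradient of a single random sample, a fixed step size would keep the parameters bouncing around the minimum. The code therefore anneals the learning rate as the global step count t grows:

$$\eta(t) = \frac{t_0}{t + t_1}$$

With the defaults t0 = 5 and t1 = 50, the step size starts at 0.1 and shrinks smoothly as more samples are processed, which is what allows SGD to settle near the optimum.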
Now let's put it to use with some concrete code:
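The usage code is not included in this copy of the post, so here is a minimal sketch of how fit_sgd might be called. It assumes the LinearRegression.py above and the metrics.py module (with r2_score) from earlier posts are on the path; the synthetic one-feature data set (y = 4x + 3 plus noise) and the seed are illustrative assumptions, not the original post's data.

```python
import numpy as np
from LinearRegression import LinearRegression

np.random.seed(666)

# Synthetic one-feature data set: y = 4x + 3 plus Gaussian noise (illustrative assumption).
m = 100000
x = np.random.normal(size=m)
X = x.reshape(-1, 1)
y = 4. * x + 3. + np.random.normal(0., 3., size=m)

lin_reg = LinearRegression()
lin_reg.fit_sgd(X, y, n_iters=2)   # each sample is seen twice

print(lin_reg.coef_)           # expected to be close to [4.]
print(lin_reg.interception_)   # expected to be close to 3.
```

For real data whose features live on different scales, standardize first (for example with sklearn's StandardScaler); otherwise the single shared learning rate interacts badly with feature magnitudes.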
This post mainly walked through using stochastic gradient descent in practice to carry out linear regression. In the next post I will summarize the gradient descent methods covered so far and discuss more topics related to gradient descent algorithms.
For the full code, see