COMP7404 Machine Learning——Adaline & Gradient Descent
Published: 2019-04-30


 

 

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Load the Iris data; keep the first 100 samples (setosa vs. versicolor),
# label setosa as -1 and versicolor as 1, and use sepal length and
# petal length (columns 0 and 2) as the two features
df = pd.read_csv('https://www.dropbox.com/s/mqyxvm8z2v1a20g/iris.data?dl=1', header=None)
y = df.iloc[0:100, 4]
y = np.where(y == 'Iris-setosa', -1, 1)
X = df.iloc[0:100, [0, 2]].values

class AdalineGD(object):
    def __init__(self, eta=0.01, n_iter=50, random_state=1):
        self.eta = eta                    # learning rate
        self.n_iter = n_iter              # number of epochs
        self.random_state = random_state

    def fit(self, X, y):
        # initialize weights with small random numbers
        rgen = np.random.RandomState(self.random_state)
        self.w_ = rgen.normal(loc=0.0, scale=0.01, size=1 + X.shape[1])
        self.cost_ = []
        for i in range(self.n_iter):
            net_input = self.net_input(X)
            output = self.activation(net_input)
            errors = (y - output)
            # batch gradient descent: one update from the whole training set
            self.w_[1:] += self.eta * X.T.dot(errors)
            self.w_[0] += self.eta * errors.sum()
            cost = (errors**2).sum() / 2.0   # sum-of-squared-errors cost
            self.cost_.append(cost)
        return self

    def net_input(self, X):
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def activation(self, X):
        # identity activation: Adaline learns on the raw linear output
        return X

    def predict(self, X):
        return np.where(self.activation(self.net_input(X)) >= 0.0, 1, -1)

ada = AdalineGD(n_iter=30, eta=0.0001).fit(X, y)
print(ada.cost_)
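For reference, the math that the fit method implements. Adaline minimizes the sum-of-squared-errors cost by full-batch gradient descent, with the identity activation \phi:

    J(\mathbf{w}) = \frac{1}{2} \sum_i \left( y^{(i)} - \phi(z^{(i)}) \right)^2, \qquad z^{(i)} = \mathbf{w}^\top \mathbf{x}^{(i)} + w_0

    \Delta \mathbf{w} = -\eta \, \nabla J(\mathbf{w}) = \eta \sum_i \left( y^{(i)} - \phi(z^{(i)}) \right) \mathbf{x}^{(i)}

This is exactly the vectorized X.T.dot(errors) update in the code, with the bias w_0 updated by the summed errors.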

 

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

df = pd.read_csv('https://www.dropbox.com/s/mqyxvm8z2v1a20g/iris.data?dl=1', header=None)
y = df.iloc[0:100, 4]
y = np.where(y == 'Iris-setosa', -1, 1)
X = df.iloc[0:100, [0, 2]].values

class AdalineGD(object):
    def __init__(self, eta=0.01, n_iter=50, random_state=1):
        self.eta = eta
        self.n_iter = n_iter
        self.random_state = random_state

    def fit(self, X, y):
        # initialize weights with small random numbers
        rgen = np.random.RandomState(self.random_state)
        self.w_ = rgen.normal(loc=0.0, scale=0.01, size=1 + X.shape[1])
        self.cost_ = []
        for i in range(self.n_iter):
            net_input = self.net_input(X)
            output = self.activation(net_input)
            errors = (y - output)
            self.w_[1:] += self.eta * X.T.dot(errors)
            self.w_[0] += self.eta * errors.sum()
            cost = (errors**2).sum() / 2.0
            self.cost_.append(cost)
        return self

    def net_input(self, X):
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def activation(self, X):
        return X

    def predict(self, X):
        return np.where(self.activation(self.net_input(X)) >= 0.0, 1, -1)

ada = AdalineGD(n_iter=30, eta=0.0001).fit(X, y)
print(ada.cost_)

# Compare two learning rates: eta=0.01 diverges (plotted on a log scale),
# eta=0.0001 converges, but slowly
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))
ada1 = AdalineGD(n_iter=10, eta=0.01).fit(X, y)
ax[0].plot(range(1, len(ada1.cost_) + 1), np.log10(ada1.cost_), marker='o')
ax[0].set_xlabel('Epochs')
ax[0].set_ylabel('log(Sum-squared-error)')
ax[0].set_title('Adaline - Learning rate 0.01')
ada2 = AdalineGD(n_iter=10, eta=0.0001).fit(X, y)
ax[1].plot(range(1, len(ada2.cost_) + 1), ada2.cost_, marker='o')
ax[1].set_xlabel('Epochs')
ax[1].set_ylabel('Sum-squared-error')
ax[1].set_title('Adaline - Learning rate 0.0001')
plt.show()
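The two panels show why the learning rate matters: with eta=0.01 each update overshoots the minimum and the cost grows every epoch, while eta=0.0001 converges, just slowly. A quick numerical check of the same fact (a sketch meant to be run at the end of the script above, reusing its ada1 and ada2):

# Sanity check: a diverging run has a rising cost curve, a converging one a falling curve
print(ada1.cost_[0], '->', ada1.cost_[-1])   # eta=0.01: cost grows each epoch
print(ada2.cost_[0], '->', ada2.cost_[-1])   # eta=0.0001: cost shrinks, slowly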

 

Improving gradient descent through feature scaling

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap

df = pd.read_csv('https://www.dropbox.com/s/mqyxvm8z2v1a20g/iris.data?dl=1', header=None)
y = df.iloc[0:100, 4]
y = np.where(y == 'Iris-setosa', -1, 1)
X = df.iloc[0:100, [0, 2]].values

class AdalineGD(object):
    def __init__(self, eta=0.01, n_iter=50, random_state=1):
        self.eta = eta
        self.n_iter = n_iter
        self.random_state = random_state

    def fit(self, X, y):
        # initialize weights with small random numbers
        rgen = np.random.RandomState(self.random_state)
        self.w_ = rgen.normal(loc=0.0, scale=0.01, size=1 + X.shape[1])
        self.cost_ = []
        for i in range(self.n_iter):
            net_input = self.net_input(X)
            output = self.activation(net_input)
            errors = (y - output)
            self.w_[1:] += self.eta * X.T.dot(errors)
            self.w_[0] += self.eta * errors.sum()
            cost = (errors**2).sum() / 2.0
            self.cost_.append(cost)
        return self

    def net_input(self, X):
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def activation(self, X):
        return X

    def predict(self, X):
        return np.where(self.activation(self.net_input(X)) >= 0.0, 1, -1)

ada = AdalineGD(n_iter=30, eta=0.0001).fit(X, y)
print(ada.cost_)

fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))
ada1 = AdalineGD(n_iter=10, eta=0.01).fit(X, y)
ax[0].plot(range(1, len(ada1.cost_) + 1), np.log10(ada1.cost_), marker='o')
ax[0].set_xlabel('Epochs')
ax[0].set_ylabel('log(Sum-squared-error)')
ax[0].set_title('Adaline - Learning rate 0.01')
ada2 = AdalineGD(n_iter=10, eta=0.0001).fit(X, y)
ax[1].plot(range(1, len(ada2.cost_) + 1), ada2.cost_, marker='o')
ax[1].set_xlabel('Epochs')
ax[1].set_ylabel('Sum-squared-error')
ax[1].set_title('Adaline - Learning rate 0.0001')
plt.show()

# Standardize each feature to zero mean and unit variance
X_std = np.copy(X)
X_std[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
X_std[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()

def plot_decision_regions(X, y, classifier, resolution=0.01):
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])
    # evaluate the classifier on a grid covering the feature space
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0],
                    y=X[y == cl, 1],
                    alpha=0.8,
                    c=colors[idx],
                    marker=markers[idx],
                    label=cl,
                    edgecolor='black')

# With standardized features, the larger learning rate now works
ada = AdalineGD(n_iter=15, eta=0.01)
ada.fit(X_std, y)
plot_decision_regions(X_std, y, classifier=ada)
plt.title('Adaline - Gradient Descent')
plt.xlabel('sepal length [standardized]')
plt.ylabel('petal length [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()
plt.plot(range(1, len(ada.cost_) + 1), ada.cost_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Sum-squared-error')
plt.tight_layout()
plt.show()
print(ada.cost_)

Comparing with the earlier plots, gradient descent now converges noticeably faster.
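The speed-up comes from the standardization applied above: each feature is rescaled to zero mean and unit variance,

    x_j' = \frac{x_j - \mu_j}{\sigma_j}

where \mu_j and \sigma_j are the mean and standard deviation of feature j over the training set. With both features on the same scale, gradient descent tolerates a larger learning rate (0.01 here, which diverged on the raw data) without overshooting.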

 

 

 

Stochastic gradient descent
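Batch gradient descent computes one update from the entire training set per epoch. Stochastic gradient descent instead updates the weights incrementally, once per training sample:

    \Delta \mathbf{w} = \eta \left( y^{(i)} - \phi(z^{(i)}) \right) \mathbf{x}^{(i)}

The implementation below also reshuffles the training data every epoch, so the order of the per-sample updates does not bias the result, and it records the average per-sample cost of each epoch rather than the summed cost.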

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap

df = pd.read_csv('https://www.dropbox.com/s/mqyxvm8z2v1a20g/iris.data?dl=1', header=None)
y = df.iloc[0:100, 4]
y = np.where(y == 'Iris-setosa', -1, 1)
X = df.iloc[0:100, [0, 2]].values
X_std = np.copy(X)
X_std[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
X_std[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()

class AdalineSGD(object):
    def __init__(self, eta=0.01, n_iter=10, shuffle=True, random_state=None):
        self.eta = eta
        self.n_iter = n_iter
        self.w_initialized = False
        self.shuffle = shuffle            # reshuffle training data every epoch
        self.random_state = random_state

    def fit(self, X, y):
        self._initialize_weights(X.shape[1])
        self.cost_ = []
        for i in range(self.n_iter):
            if self.shuffle:
                X, y = self._shuffle(X, y)
            cost = []
            for xi, target in zip(X, y):
                # one weight update per training sample
                cost.append(self._update_weights(xi, target))
            avg_cost = sum(cost) / len(y)   # average per-sample cost this epoch
            self.cost_.append(avg_cost)
        return self

    def partial_fit(self, X, y):
        # update the model without reinitializing the weights (online learning)
        if not self.w_initialized:
            self._initialize_weights(X.shape[1])
        if y.ravel().shape[0] > 1:
            for xi, target in zip(X, y):
                self._update_weights(xi, target)
        else:
            self._update_weights(X, y)
        return self

    def _shuffle(self, X, y):
        r = self.rgen.permutation(len(y))
        return X[r], y[r]

    def _initialize_weights(self, m):
        self.rgen = np.random.RandomState(self.random_state)
        self.w_ = self.rgen.normal(loc=0.0, scale=0.01, size=1 + m)
        self.w_initialized = True

    def _update_weights(self, xi, target):
        output = self.activation(self.net_input(xi))
        error = (target - output)
        self.w_[1:] += self.eta * xi.dot(error)
        self.w_[0] += self.eta * error
        cost = 0.5 * error**2
        return cost

    def net_input(self, X):
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def activation(self, X):
        return X

    def predict(self, X):
        return np.where(self.activation(self.net_input(X)) >= 0.0, 1, -1)

def plot_decision_regions(X, y, classifier, resolution=0.01):
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0],
                    y=X[y == cl, 1],
                    alpha=0.8,
                    c=colors[idx],
                    marker=markers[idx],
                    label=cl,
                    edgecolor='black')

ada = AdalineSGD(n_iter=15, eta=0.01, random_state=1)
ada.fit(X_std, y)
plot_decision_regions(X_std, y, classifier=ada)
plt.title('Adaline - Stochastic Gradient Descent')
plt.xlabel('sepal length [standardized]')
plt.ylabel('petal length [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()
plt.plot(range(1, len(ada.cost_) + 1), ada.cost_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Average Cost')
plt.tight_layout()
plt.show()
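Because AdalineSGD keeps its weights between calls, partial_fit supports online learning: newly arriving samples can refine an already-trained model without a full refit. A minimal sketch, assuming the fitted ada, X_std, and y from the script above:

# Online-learning sketch: feed individual samples to the trained model.
# Passing a single 1-D sample and a scalar label hits the single-sample
# branch of partial_fit (y.ravel() has length 1).
for xi, target in zip(X_std[:5], y[:5]):
    ada.partial_fit(xi, target)
print(ada.w_)   # weights nudged in place by the five extra updates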

 

 

 

Reposted from: http://pmygf.baihongyu.com/
