{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from google.colab import drive\n", "\n", "drive.mount('/content/drive', force_remount=True)\n", "\n", "# 输入daseCV所在的路径\n", "# 'daseCV' 文件夹包括 '.py', 'classifiers' 和'datasets'文件夹\n", "# 例如 'CV/assignments/assignment1/daseCV/'\n", "FOLDERNAME = None\n", "\n", "assert FOLDERNAME is not None, \"[!] Enter the foldername.\"\n", "\n", "%cd drive/My\\ Drive\n", "%cp -r $FOLDERNAME ../../\n", "%cd ../../\n", "%cd daseCV/datasets/\n", "!bash get_datasets.sh\n", "%cd ../../" ] }, { "cell_type": "markdown", "metadata": { "tags": [ "pdf-title" ] }, "source": [ "# Softmax 练习\n", "\n", "*补充并完成本练习。*\n", "\n", "本练习类似于SVM练习,你要完成的事情包括:\n", "\n", "- 为Softmax分类器实现完全矢量化的**损失函数**\n", "- 实现其**解析梯度(analytic gradient)**的完全矢量化表达式\n", "- 用数值梯度**检查你的代码**\n", "- 使用验证集**调整学习率和正则化强度**\n", "- 使用**SGD优化**损失函数\n", "- **可视化**最终学习的权重\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "tags": [ "pdf-ignore" ] }, "outputs": [], "source": [ "import random\n", "import numpy as np\n", "from daseCV.data_utils import load_CIFAR10\n", "import matplotlib.pyplot as plt\n", "\n", "%matplotlib inline\n", "plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots\n", "plt.rcParams['image.interpolation'] = 'nearest'\n", "plt.rcParams['image.cmap'] = 'gray'\n", "\n", "# for auto-reloading extenrnal modules\n", "# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython\n", "%load_ext autoreload\n", "%autoreload 2" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "tags": [ "pdf-ignore" ] }, "outputs": [], "source": [ "def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000, num_dev=500):\n", " \"\"\"\n", " Load the CIFAR-10 dataset from disk and perform preprocessing to prepare\n", " it for the linear classifier. These are the same steps as we used for the\n", " SVM, but condensed to a single function. \n", " \"\"\"\n", " # Load the raw CIFAR-10 data\n", " cifar10_dir = 'daseCV/datasets/cifar-10-batches-py'\n", " \n", " # Cleaning up variables to prevent loading data multiple times (which may cause memory issue)\n", " try:\n", " del X_train, y_train\n", " del X_test, y_test\n", " print('Clear previously loaded data.')\n", " except:\n", " pass\n", "\n", " X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\n", " \n", " # subsample the data\n", " mask = list(range(num_training, num_training + num_validation))\n", " X_val = X_train[mask]\n", " y_val = y_train[mask]\n", " mask = list(range(num_training))\n", " X_train = X_train[mask]\n", " y_train = y_train[mask]\n", " mask = list(range(num_test))\n", " X_test = X_test[mask]\n", " y_test = y_test[mask]\n", " mask = np.random.choice(num_training, num_dev, replace=False)\n", " X_dev = X_train[mask]\n", " y_dev = y_train[mask]\n", " \n", " # Preprocessing: reshape the image data into rows\n", " X_train = np.reshape(X_train, (X_train.shape[0], -1))\n", " X_val = np.reshape(X_val, (X_val.shape[0], -1))\n", " X_test = np.reshape(X_test, (X_test.shape[0], -1))\n", " X_dev = np.reshape(X_dev, (X_dev.shape[0], -1))\n", " \n", " # Normalize the data: subtract the mean image\n", " mean_image = np.mean(X_train, axis = 0)\n", " X_train -= mean_image\n", " X_val -= mean_image\n", " X_test -= mean_image\n", " X_dev -= mean_image\n", " \n", " # add bias dimension and transform into columns\n", " X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])\n", " X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])\n", " X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])\n", " X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])\n", " \n", " return X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev\n", "\n", "\n", "# Invoke the above function to get our data.\n", "X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev = get_CIFAR10_data()\n", "print('Train data shape: ', X_train.shape)\n", "print('Train labels shape: ', y_train.shape)\n", "print('Validation data shape: ', X_val.shape)\n", "print('Validation labels shape: ', y_val.shape)\n", "print('Test data shape: ', X_test.shape)\n", "print('Test labels shape: ', y_test.shape)\n", "print('dev data shape: ', X_dev.shape)\n", "print('dev labels shape: ', y_dev.shape)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Softmax 分类器\n", "\n", "请在**daseCV/classifiers/softmax.py**中完成本节的代码。" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# 首先使用嵌套循环实现简单的softmax损失函数。\n", "# 打开文件 daseCV/classifiers/softmax.py 并补充完成\n", "# softmax_loss_naive 函数.\n", "\n", "from daseCV.classifiers.softmax import softmax_loss_naive\n", "import time\n", "\n", "# 生成一个随机的softmax权重矩阵,并使用它来计算损失。\n", "W = np.random.randn(3073, 10) * 0.0001\n", "loss, grad = softmax_loss_naive(W, X_dev, y_dev, 0.0)\n", "\n", "# As a rough sanity check, our loss should be something close to -log(0.1).\n", "print('loss: %f' % loss)\n", "print('sanity check: %f' % (-np.log(0.1)))" ] }, { "cell_type": "markdown", "metadata": { "tags": [ "pdf-inline" ] }, "source": [ "**问题 1**\n", "\n", "\n", "为什么我们期望损失接近-log(0.1)?简要说明。\n", "\n", "$\\color{blue}{\\textit 答:}$ *在这里写上你的答案* \n", "\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# 完成softmax_loss_naive,并实现使用嵌套循环的梯度的版本(naive)。\n", "loss, grad = softmax_loss_naive(W, X_dev, y_dev, 0.0)\n", "\n", "# 就像SVM那样,请使用数值梯度检查作为调试工具。\n", "# 数值梯度应接近分析梯度。\n", "from daseCV.gradient_check import grad_check_sparse\n", "f = lambda w: softmax_loss_naive(w, X_dev, y_dev, 0.0)[0]\n", "grad_numerical = grad_check_sparse(f, W, grad, 10)\n", "\n", "# 与SVM情况类似,使用正则化进行另一个梯度检查\n", "loss, grad = softmax_loss_naive(W, X_dev, y_dev, 5e1)\n", "f = lambda w: softmax_loss_naive(w, X_dev, y_dev, 5e1)[0]\n", "grad_numerical = grad_check_sparse(f, W, grad, 10)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# 现在,我们有了softmax损失函数及其梯度的简单实现,\n", "# 接下来要在 softmax_loss_vectorized 中完成一个向量化版本.\n", "# 这两个版本应计算出相同的结果,但矢量化版本应更快。\n", "tic = time.time()\n", "loss_naive, grad_naive = softmax_loss_naive(W, X_dev, y_dev, 0.000005)\n", "toc = time.time()\n", "print('naive loss: %e computed in %fs' % (loss_naive, toc - tic))\n", "\n", "from daseCV.classifiers.softmax import softmax_loss_vectorized\n", "tic = time.time()\n", "loss_vectorized, grad_vectorized = softmax_loss_vectorized(W, X_dev, y_dev, 0.000005)\n", "toc = time.time()\n", "print('vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic))\n", "\n", "# 正如前面在SVM练习中所做的一样,我们使用Frobenius范数比较两个版本梯度。\n", "grad_difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro')\n", "print('Loss difference: %f' % np.abs(loss_naive - loss_vectorized))\n", "print('Gradient difference: %f' % grad_difference)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "tags": [ "code" ] }, "outputs": [], "source": [ "# 使用验证集调整超参数(正则化强度和学习率)。您应该尝试不同的学习率和正则化强度范围; \n", "# 如果您小心的话,您应该能够在验证集上获得超过0.35的精度。\n", "from daseCV.classifiers import Softmax\n", "results = {}\n", "best_val = -1\n", "best_softmax = None\n", "learning_rates = [1e-7, 5e-7]\n", "regularization_strengths = [2.5e4, 5e4]\n", "\n", "################################################################################\n", "# 需要完成的事: \n", "# 对验证集设置学习率和正则化强度。\n", "# 这与之前SVM中做的类似;\n", "# 保存训练效果最好的softmax分类器到best_softmax中。\n", "################################################################################\n", "# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n", "\n", "pass\n", "\n", "# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n", " \n", "# Print out results.\n", "for lr, reg in sorted(results):\n", " train_accuracy, val_accuracy = results[(lr, reg)]\n", " print('lr %e reg %e train accuracy: %f val accuracy: %f' % (\n", " lr, reg, train_accuracy, val_accuracy))\n", " \n", "print('best validation accuracy achieved during cross-validation: %f' % best_val)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# 在测试集上评估\n", "# 在测试集上评估最好的softmax\n", "y_test_pred = best_softmax.predict(X_test)\n", "test_accuracy = np.mean(y_test == y_test_pred)\n", "print('softmax on raw pixels final test set accuracy: %f' % (test_accuracy, ))" ] }, { "cell_type": "markdown", "metadata": { "tags": [ "pdf-inline" ] }, "source": [ "**问题 2** - *对或错*\n", "\n", "假设总训练损失定义为所有训练样本中每个数据点损失的总和。可能会有新的数据点添加到训练集中,同时SVM损失保持不变,但是对于Softmax分类器的损失而言,情况并非如此。\n", "\n", "$\\color{blue}{\\textit 你的回答:}$\n", "\n", "\n", "$\\color{blue}{\\textit 你的解释:}$\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# 可视化每个类别的学习到的权重\n", "w = best_softmax.W[:-1,:] # strip out the bias\n", "w = w.reshape(32, 32, 3, 10)\n", "\n", "w_min, w_max = np.min(w), np.max(w)\n", "\n", "classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n", "for i in range(10):\n", " plt.subplot(2, 5, i + 1)\n", " \n", " # Rescale the weights to be between 0 and 255\n", " wimg = 255.0 * (w[:, :, :, i].squeeze() - w_min) / (w_max - w_min)\n", " plt.imshow(wimg.astype('uint8'))\n", " plt.axis('off')\n", " plt.title(classes[i])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "---\n", "# 重要\n", "\n", "这里是作业的结尾处,请执行以下步骤:\n", "\n", "1. 点击`File -> Save`或者用`control+s`组合键,确保你最新的的notebook的作业已经保存到谷歌云。\n", "2. 执行以下代码确保 `.py` 文件保存回你的谷歌云。" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import os\n", "\n", "FOLDER_TO_SAVE = os.path.join('drive/My Drive/', FOLDERNAME)\n", "FILES_TO_SAVE = ['daseCV/classifiers/softmax.py']\n", "\n", "for files in FILES_TO_SAVE:\n", " with open(os.path.join(FOLDER_TO_SAVE, '/'.join(files.split('/')[1:])), 'w') as f:\n", " f.write(''.join(open(files).readlines()))" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.0" } }, "nbformat": 4, "nbformat_minor": 1 }