DaSE-Computer-Vision-2021

from __future__ import print_function

from builtins import range
from builtins import object
import numpy as np
import matplotlib.pyplot as plt
from past.builtins import xrange


class TwoLayerNet(object):
    """
    A two-layer fully-connected neural network. The net has an input dimension of
    N, a hidden layer dimension of H, and performs classification over C classes.
    We train the network with a softmax loss function and L2 regularization on the
    weight matrices. The network uses a ReLU nonlinearity after the first fully
    connected layer.

    In other words, the network has the following architecture:

    input - fully connected layer - ReLU - fully connected layer - softmax

    The outputs of the second fully-connected layer are the scores for each class.
    """

    def __init__(self, input_size, hidden_size, output_size, std=1e-4):
        """
        Initialize the model. Weights are initialized to small random values and
        biases are initialized to zero. Weights and biases are stored in the
        variable self.params, which is a dictionary with the following keys:

        W1: First layer weights; has shape (D, H)
        b1: First layer biases; has shape (H,)
        W2: Second layer weights; has shape (H, C)
        b2: Second layer biases; has shape (C,)

        Inputs:
        - input_size: The dimension D of the input data.
        - hidden_size: The number of neurons H in the hidden layer.
        - output_size: The number of classes C.
        """
        self.params = {}
        self.params['W1'] = std * np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = std * np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)

    def loss(self, X, y=None, reg=0.0):
        """
        Compute the loss and gradients for a two-layer fully connected neural
        network.

        Inputs:
        - X: Input data of shape (N, D). Each X[i] is a training sample.
        - y: Vector of training labels. y[i] is the label for X[i], and each y[i] is
          an integer in the range 0 <= y[i] < C. This parameter is optional; if it
          is not passed then we only return scores, and if it is passed then we
          instead return the loss and gradients.
        - reg: Regularization strength.

        Returns:
        If y is None, return a matrix scores of shape (N, C) where scores[i, c] is
        the score for class c on input X[i].

        If y is not None, instead return a tuple of:
        - loss: Loss (data loss and regularization loss) for this batch of training
          samples.
        - grads: Dictionary mapping parameter names to gradients of those parameters
          with respect to the loss function; has the same keys as self.params.
        """
        # Unpack variables from the params dictionary
        W1, b1 = self.params['W1'], self.params['b1']
        W2, b2 = self.params['W2'], self.params['b2']
        N, D = X.shape

        # Compute the forward pass
        scores = None
        #############################################################################
        # TODO: Perform the forward pass, computing the class scores for the input  #
        # data. Store the result in the scores variable, which should be an array   #
        # of shape (N, C).                                                           #
        #############################################################################
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

        pass
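        # One possible sketch (illustrative, not necessarily the intended
        # solution): affine -> ReLU -> affine, matching the architecture in the
        # class docstring. `hidden` is a name introduced here for illustration.
        hidden = np.maximum(0, X.dot(W1) + b1)   # (N, H) ReLU activations
        scores = hidden.dot(W2) + b2             # (N, C) class scores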
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

        # If the targets are not given then jump out, we're done
        if y is None:
            return scores

        # Compute the loss
        loss = None
        #############################################################################
        # TODO: Finish the forward pass and compute the loss. This should include   #
        # both the data loss and the L2 regularization terms for W1 and W2. Store   #
        # the result in the variable loss, which should be a scalar. Use the        #
        # softmax loss function.                                                    #
        #############################################################################
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

        pass
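        # One possible sketch: numerically stabilized softmax cross-entropy plus
        # L2 regularization on W1 and W2 (biases left unregularized). The helper
        # names below (shifted, probs, ...) are introduced here for illustration.
        shifted = scores - np.max(scores, axis=1, keepdims=True)  # avoid overflow in exp
        exp_scores = np.exp(shifted)
        probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)  # (N, C)
        data_loss = -np.mean(np.log(probs[np.arange(N), y]))
        reg_loss = reg * (np.sum(W1 * W1) + np.sum(W2 * W2))
        loss = data_loss + reg_loss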
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

        # Backward pass: compute gradients
        grads = {}
        #############################################################################
        # TODO: Compute the backward pass, computing the gradients of the weights   #
        # and biases. Store the results in the grads dictionary. For example,       #
        # grads['W1'] should store the gradient on W1, and be a matrix of the same  #
        # size.                                                                      #
        #############################################################################
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

        pass
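        # One possible sketch, consistent with the forward-pass and loss sketches
        # above: backpropagate through the second affine layer, the ReLU gate, and
        # the first affine layer.
        dscores = probs.copy()                       # (N, C)
        dscores[np.arange(N), y] -= 1
        dscores /= N
        grads['W2'] = hidden.T.dot(dscores) + 2 * reg * W2
        grads['b2'] = np.sum(dscores, axis=0)
        dhidden = dscores.dot(W2.T)                  # (N, H)
        dhidden[hidden <= 0] = 0                     # ReLU gate
        grads['W1'] = X.T.dot(dhidden) + 2 * reg * W1
        grads['b1'] = np.sum(dhidden, axis=0)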
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

        return loss, grads

    def train(self, X, y, X_val, y_val,
              learning_rate=1e-3, learning_rate_decay=0.95,
              reg=5e-6, num_iters=100,
              batch_size=200, verbose=False):
        """
        Train this neural network using stochastic gradient descent.

        Inputs:
        - X: A numpy array of shape (N, D) giving training data.
        - y: A numpy array of shape (N,) giving training labels; y[i] = c means that
          X[i] has label c, where 0 <= c < C.
        - X_val: A numpy array of shape (N_val, D) giving validation data.
        - y_val: A numpy array of shape (N_val,) giving validation labels.
        - learning_rate: Scalar giving learning rate for optimization.
        - learning_rate_decay: Scalar giving factor used to decay the learning rate
          after each epoch.
        - reg: Scalar giving regularization strength.
        - num_iters: Number of steps to take when optimizing.
        - batch_size: Number of training examples to use per step.
        - verbose: boolean; if true print progress during optimization.
        """
        num_train = X.shape[0]
        iterations_per_epoch = max(num_train // batch_size, 1)

        # Use SGD to optimize the parameters in self.model
        loss_history = []
        train_acc_history = []
        val_acc_history = []

        for it in range(num_iters):
            X_batch = None
            y_batch = None

            #########################################################################
            # TODO: Create a random minibatch of training data and labels, storing  #
            # them in X_batch and y_batch respectively.                              #
            #########################################################################
            # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

            pass
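            # One possible sketch: sample batch_size indices with replacement
            # (sampling with replacement keeps the code simple and works well
            # in practice). `batch_indices` is a name introduced for illustration.
            batch_indices = np.random.choice(num_train, batch_size)
            X_batch = X[batch_indices]
            y_batch = y[batch_indices]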
            # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

            # Compute loss and gradients using the current minibatch
            loss, grads = self.loss(X_batch, y=y_batch, reg=reg)
            loss_history.append(loss)

            #########################################################################
            # TODO: Use the gradients in the grads dictionary to update the         #
            # parameters of the network (stored in the dictionary self.params)      #
            # using stochastic gradient descent.                                     #
            #########################################################################
            # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

            pass
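            # One possible sketch: a vanilla SGD step on every parameter.
            for param_name in self.params:
                self.params[param_name] -= learning_rate * grads[param_name]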
            # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

            if verbose and it % 100 == 0:
                print('iteration %d / %d: loss %f' % (it, num_iters, loss))

            # Every epoch, check train and val accuracy and decay learning rate.
            if it % iterations_per_epoch == 0:
                # Check accuracy
                train_acc = (self.predict(X_batch) == y_batch).mean()
                val_acc = (self.predict(X_val) == y_val).mean()
                train_acc_history.append(train_acc)
                val_acc_history.append(val_acc)

                # Decay learning rate
                learning_rate *= learning_rate_decay

        return {
            'loss_history': loss_history,
            'train_acc_history': train_acc_history,
            'val_acc_history': val_acc_history,
        }

    def predict(self, X):
        """
        Use the trained weights of this two-layer network to predict labels for
        data points. For each data point we predict scores for each of the C
        classes, and assign each data point to the class with the highest score.

        Inputs:
        - X: A numpy array of shape (N, D) giving N D-dimensional data points to
          classify.

        Returns:
        - y_pred: A numpy array of shape (N,) giving predicted labels for each of
          the elements of X. For all i, y_pred[i] = c means that X[i] is predicted
          to have class c, where 0 <= c < C.
        """
        y_pred = None

        ###########################################################################
        # TODO: Implement this function; it should be VERY simple!               #
        ###########################################################################
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

        pass
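        # One possible sketch: run the forward pass (calling loss() with y=None
        # returns the scores) and take the argmax over classes for each point.
        scores = self.loss(X)
        y_pred = np.argmax(scores, axis=1)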
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

        return y_pred
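
# A minimal smoke test, assuming the TODO regions above are filled in. It is
# illustrative only: the toy shapes and hyperparameters below are arbitrary
# assumptions, not part of the assignment.
if __name__ == '__main__':
    np.random.seed(0)
    net = TwoLayerNet(input_size=4, hidden_size=10, output_size=3, std=1e-1)
    X_toy = np.random.randn(20, 4)
    y_toy = np.random.randint(3, size=20)
    stats = net.train(X_toy, y_toy, X_toy, y_toy,
                      learning_rate=1e-1, reg=5e-6,
                      num_iters=200, batch_size=10, verbose=False)
    print('final training loss: %f' % stats['loss_history'][-1])
    print('toy accuracy: %f' % ((net.predict(X_toy) == y_toy).mean()))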