DaSE-Computer-Vision-2021

from __future__ import print_function

from builtins import range
from builtins import object
import numpy as np
from daseCV.classifiers.linear_svm import *
from daseCV.classifiers.softmax import *
from past.builtins import xrange


class LinearClassifier(object):

    def __init__(self):
        self.W = None

    def train(self, X, y, learning_rate=1e-3, reg=1e-5, num_iters=100,
              batch_size=200, verbose=False):
        """
        Train this linear classifier using stochastic gradient descent.

        Inputs:
        - X: A numpy array of shape (N, D) containing training data; there are N
          training samples each of dimension D.
        - y: A numpy array of shape (N,) containing training labels; y[i] = c
          means that X[i] has label 0 <= c < C for C classes.
        - learning_rate: (float) learning rate for optimization.
        - reg: (float) regularization strength.
        - num_iters: (integer) number of steps to take when optimizing.
        - batch_size: (integer) number of training examples to use at each step.
        - verbose: (boolean) If true, print progress during optimization.

        Outputs:
        A list containing the value of the loss function at each training iteration.
        """
        num_train, dim = X.shape
        num_classes = np.max(y) + 1  # assume y takes values 0...K-1 where K is number of classes
        if self.W is None:
            # lazily initialize W
            self.W = 0.001 * np.random.randn(dim, num_classes)

        # Run stochastic gradient descent to optimize W
        loss_history = []
        for it in range(num_iters):
            X_batch = None
            y_batch = None

            #########################################################################
            # TODO:
            # Sample batch_size elements from the training data and their
            # corresponding labels, to use in this round of gradient descent.
            # Store the data in X_batch and the corresponding labels in y_batch;
            # after sampling, X_batch should have shape (batch_size, dim) and
            # y_batch should have shape (batch_size,).
            #
            # Hint: Use np.random.choice to generate indices. Sampling with
            # replacement is a bit faster than sampling without replacement.
            #########################################################################
            # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

            pass
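
            # A minimal sketch of one possible answer (an assumption, not the
            # official solution), following the hint above: sample indices
            # with replacement and slice out the minibatch.
            batch_indices = np.random.choice(num_train, batch_size, replace=True)
            X_batch = X[batch_indices]
            y_batch = y[batch_indices]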

            # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

            # evaluate loss and gradient
            loss, grad = self.loss(X_batch, y_batch, reg)
            loss_history.append(loss)

            # perform parameter update
            #########################################################################
            # TODO:
            # Update the weights using the gradient and the learning rate.
            #########################################################################
            # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

            pass
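
            # A minimal sketch of one possible answer (an assumption, not the
            # official solution): a plain SGD step against the gradient.
            self.W -= learning_rate * grad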

            # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

            if verbose and it % 100 == 0:
                print('iteration %d / %d: loss %f' % (it, num_iters, loss))

        return loss_history

    def predict(self, X):
        """
        Use the trained weights of this linear classifier to predict labels for
        data points.

        Inputs:
        - X: A numpy array of shape (N, D) containing data points; there are N
          samples each of dimension D.

        Returns:
        - y_pred: Predicted labels for the data in X. y_pred is a 1-dimensional
          array of length N, and each element is an integer giving the predicted
          class.
        """
        y_pred = np.zeros(X.shape[0])
        ###########################################################################
        # TODO:
        # Implement this method. Store the predicted labels in y_pred.
        ###########################################################################
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

        pass
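
        # A minimal sketch of one possible answer (an assumption, not the
        # official solution): compute class scores and take the argmax per row.
        scores = X.dot(self.W)  # shape (N, C)
        y_pred = np.argmax(scores, axis=1)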

        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        return y_pred

    def loss(self, X_batch, y_batch, reg):
        """
        Compute the loss function and its derivative.
        Subclasses will override this.

        Inputs:
        - X_batch: A numpy array of shape (N, D) containing a minibatch of N
          data points; each point has dimension D.
        - y_batch: A numpy array of shape (N,) containing labels for the minibatch.
        - reg: (float) regularization strength.

        Returns: A tuple containing:
        - loss as a single float
        - gradient with respect to self.W; an array of the same shape as W
        """
        pass


class LinearSVM(LinearClassifier):
    """ A subclass that uses the Multiclass SVM loss function """

    def loss(self, X_batch, y_batch, reg):
        return svm_loss_vectorized(self.W, X_batch, y_batch, reg)


class Softmax(LinearClassifier):
    """ A subclass that uses the Softmax + Cross-entropy loss function """

    def loss(self, X_batch, y_batch, reg):
        return softmax_loss_vectorized(self.W, X_batch, y_batch, reg)
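

For reference, a minimal usage sketch on synthetic data. It assumes the TODO
blocks above have been filled in and that svm_loss_vectorized is importable;
the toy blobs and hyperparameters below are illustrative assumptions, not part
of the assignment.

import numpy as np

# Two well-separated Gaussian blobs in 5-D as toy training data.
X = np.vstack([np.random.randn(100, 5) + 2, np.random.randn(100, 5) - 2])
y = np.hstack([np.zeros(100, dtype=int), np.ones(100, dtype=int)])

svm = LinearSVM()
loss_history = svm.train(X, y, learning_rate=1e-3, reg=1e-5,
                         num_iters=500, batch_size=64, verbose=True)
print('final loss: %f' % loss_history[-1])

y_pred = svm.predict(X)
print('training accuracy: %f' % np.mean(y_pred == y))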