DaSE-Computer-Vision-2021

from builtins import range
import numpy as np
from random import shuffle
from past.builtins import xrange


def svm_loss_naive(W, X, y, reg):
    """
    Structured SVM loss function, naive implementation (with loops).

    Inputs have dimension D, there are C classes, and we operate on minibatches
    of N examples.

    Inputs:
    - W: A numpy array of shape (D, C) containing weights.
    - X: A numpy array of shape (N, D) containing a minibatch of data.
    - y: A numpy array of shape (N,) containing training labels; y[i] = c means
      that X[i] has label c, where 0 <= c < C.
    - reg: (float) regularization strength

    Returns a tuple of:
    - loss as single float
    - gradient with respect to weights W; an array of same shape as W
    """
    dW = np.zeros(W.shape)  # initialize the gradient as zero

    # compute the loss and the gradient
    num_classes = W.shape[1]
    num_train = X.shape[0]
    loss = 0.0
    for i in range(num_train):
        scores = X[i].dot(W)
        correct_class_score = scores[y[i]]
        for j in range(num_classes):
            if j == y[i]:
                continue
            margin = scores[j] - correct_class_score + 1  # note delta = 1
            if margin > 0:
                loss += margin
                dW[:, j] += X[i]      # accumulate gradient for the incorrect class j
                dW[:, y[i]] += -X[i]  # accumulate gradient for the correct class

    # Right now the loss is a sum over all training examples, but we want it
    # to be an average instead so we divide by num_train.
    loss /= num_train

    # Add regularization to the loss.
    loss += reg * np.sum(W * W)

    #############################################################################
    # TODO:
    # Compute the gradient of the loss function and store it in dW.
    # Rather than first computing the loss and then the gradient, it is simpler
    # to compute the gradient at the same time as the loss, so you may need to
    # modify some of the code above to compute the gradient.
    #############################################################################
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

    pass
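    # A possible completion (sketch): the per-example gradient is already
    # accumulated inside the loop above, so all that remains is to average it
    # over the batch and add the gradient of the regularization term,
    # d(reg * sum(W*W))/dW = 2 * reg * W.
    dW /= num_train
    dW += 2 * reg * W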
    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

    return loss, dW


def svm_loss_vectorized(W, X, y, reg):
    """
    Structured SVM loss function, vectorized implementation.

    Inputs and outputs are the same as svm_loss_naive.
    """
    loss = 0.0
    dW = np.zeros(W.shape)  # initialize the gradient as zero

    #############################################################################
    # TODO:
    # Implement a vectorized version of the structured SVM loss, storing the
    # result in loss.
    #############################################################################
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

    pass
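    # A possible vectorized loss (sketch): compute all N x C scores with one
    # matrix multiply, subtract each row's correct-class score, apply the hinge
    # max(0, .) with delta = 1, zero out the correct-class entries, then average
    # over the batch and add L2 regularization.
    num_train = X.shape[0]
    scores = X.dot(W)                                       # shape (N, C)
    correct_class_scores = scores[np.arange(num_train), y]  # shape (N,)
    margins = np.maximum(0, scores - correct_class_scores[:, np.newaxis] + 1)
    margins[np.arange(num_train), y] = 0
    loss = np.sum(margins) / num_train + reg * np.sum(W * W)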
    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

    #############################################################################
    # TODO:
    # Implement a vectorized version of the gradient for the structured SVM
    # loss, storing the result in dW.
    #
    # Hint: Instead of computing the gradient from scratch, it may be easier
    # to reuse some of the intermediate values used to compute the loss.
    #############################################################################
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

    pass
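    # A possible vectorized gradient (sketch), reusing margins and num_train
    # from the loss sketch above: each positive margin contributes X[i] to
    # column j and -X[i] to column y[i], which can be written as X.T times an
    # indicator matrix.
    binary = (margins > 0).astype(float)                     # shape (N, C)
    binary[np.arange(num_train), y] = -np.sum(binary, axis=1)
    dW = X.T.dot(binary) / num_train + 2 * reg * W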
    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

    return loss, dW
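

# A quick, illustrative sanity check (not part of the assignment scaffold):
# with both implementations filled in, the naive and vectorized versions
# should agree on random data to within numerical precision.
if __name__ == "__main__":
    np.random.seed(0)
    W = np.random.randn(3073, 10) * 0.0001
    X = np.random.randn(5, 3073)
    y = np.random.randint(10, size=5)
    loss_naive, grad_naive = svm_loss_naive(W, X, y, 0.5)
    loss_vec, grad_vec = svm_loss_vectorized(W, X, y, 0.5)
    print("loss difference:", abs(loss_naive - loss_vec))
    print("grad difference:", np.linalg.norm(grad_naive - grad_vec))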