from __future__ import print_function

from builtins import range
from builtins import object
import numpy as np
from daseCV.classifiers.linear_svm import *
from daseCV.classifiers.softmax import *
from past.builtins import xrange


class LinearClassifier(object):

    def __init__(self):
        self.W = None

    def train(self, X, y, learning_rate=1e-3, reg=1e-5, num_iters=100,
              batch_size=200, verbose=False):
        """
        Train this linear classifier using stochastic gradient descent.

        Inputs:
        - X: A numpy array of shape (N, D) containing training data; there are N
          training samples each of dimension D.
        - y: A numpy array of shape (N,) containing training labels; y[i] = c
          means that X[i] has label 0 <= c < C for C classes.
        - learning_rate: (float) learning rate for optimization.
        - reg: (float) regularization strength.
        - num_iters: (integer) number of steps to take when optimizing.
        - batch_size: (integer) number of training examples to use at each step.
        - verbose: (boolean) If true, print progress during optimization.

        Outputs:
        A list containing the value of the loss function at each training iteration.
        """
        num_train, dim = X.shape
        num_classes = np.max(y) + 1  # assume y takes values 0...K-1 where K is number of classes
        if self.W is None:
            # lazily initialize W
            self.W = 0.001 * np.random.randn(dim, num_classes)

        # Run stochastic gradient descent to optimize W
        loss_history = []
        for it in range(num_iters):
            X_batch = None
            y_batch = None

            #########################################################################
            # TODO:
            # Sample batch_size elements from the training data and their
            # corresponding labels to use in this round of gradient descent.
            # Store the data in X_batch and its corresponding labels in y_batch;
            # after sampling, X_batch has shape (batch_size, dim) and y_batch has
            # shape (batch_size,).
            #
            # Hint: Use np.random.choice to generate indices. Sampling with
            # replacement is slightly faster than sampling without replacement.
            # (A commented example sketch follows the END marker below.)
            #########################################################################
            # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

            pass

            # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
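            # A minimal sketch of one way to do this (an illustration, not the
            # required solution); it assumes sampling with replacement as suggested
            # by the hint above:
            #
            #   batch_indices = np.random.choice(num_train, batch_size, replace=True)
            #   X_batch = X[batch_indices]   # shape (batch_size, dim)
            #   y_batch = y[batch_indices]   # shape (batch_size,)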

            # evaluate loss and gradient
            loss, grad = self.loss(X_batch, y_batch, reg)
            loss_history.append(loss)

            # perform parameter update
            #########################################################################
            # TODO:
            # Update the weights using the gradient and the learning rate.
            # (A commented example sketch follows the END marker below.)
            #########################################################################
            # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

            pass

            # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
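            # A minimal sketch of a vanilla SGD step (an illustration, not the
            # required solution); it assumes the gradient returned by self.loss
            # above already includes the regularization term:
            #
            #   self.W -= learning_rate * grad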

            if verbose and it % 100 == 0:
                print('iteration %d / %d: loss %f' % (it, num_iters, loss))

        return loss_history

    def predict(self, X):
        """
        Use the trained weights of this linear classifier to predict labels for
        data points.

        Inputs:
        - X: A numpy array of shape (N, D) containing data; there are N
          samples each of dimension D.

        Returns:
        - y_pred: Predicted labels for the data in X. y_pred is a 1-dimensional
          array of length N, and each element is an integer giving the predicted
          class.
        """
        y_pred = np.zeros(X.shape[0])
        ###########################################################################
        # TODO:
        # Implement this method. Store the predicted labels in y_pred.
        # (A commented example sketch follows the END marker below.)
        ###########################################################################
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

        pass

        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
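        # A minimal sketch of one way to do this (an illustration, not the
        # required solution): score every class with X.dot(self.W) and take the
        # argmax over classes for each row:
        #
        #   scores = X.dot(self.W)              # shape (N, C)
        #   y_pred = np.argmax(scores, axis=1)  # shape (N,)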
        return y_pred

    def loss(self, X_batch, y_batch, reg):
        """
        Compute the loss function and its derivative.
        Subclasses will override this.

        Inputs:
        - X_batch: A numpy array of shape (N, D) containing a minibatch of N
          data points; each point has dimension D.
        - y_batch: A numpy array of shape (N,) containing labels for the minibatch.
        - reg: (float) regularization strength.

        Returns: A tuple containing:
        - loss as a single float
        - gradient with respect to self.W; an array of the same shape as W
        """
        pass


class LinearSVM(LinearClassifier):
    """ A subclass that uses the Multiclass SVM loss function """

    def loss(self, X_batch, y_batch, reg):
        return svm_loss_vectorized(self.W, X_batch, y_batch, reg)


class Softmax(LinearClassifier):
    """ A subclass that uses the Softmax + Cross-entropy loss function """

    def loss(self, X_batch, y_batch, reg):
        return softmax_loss_vectorized(self.W, X_batch, y_batch, reg)
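

# A minimal smoke-test sketch of how these classes are meant to be used. This is
# an illustration only: it assumes the TODO blocks above and the loss functions in
# linear_svm.py / softmax.py have been completed, and it uses random data with a
# hypothetical feature dimension of 3073 (e.g. flattened CIFAR-10 images plus bias).
if __name__ == "__main__":
    np.random.seed(0)
    X_train = np.random.randn(500, 3073)
    y_train = np.random.randint(10, size=500)

    svm = LinearSVM()
    loss_history = svm.train(X_train, y_train, learning_rate=1e-7, reg=2.5e4,
                             num_iters=200, verbose=True)
    y_pred = svm.predict(X_train)
    print('final loss: %f' % loss_history[-1])
    print('train accuracy: %f' % np.mean(y_pred == y_train))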