云计算课程实验
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

56 lines
1.9 KiB

  1. import os
  2. import tensorflow as tf
  3. import matplotlib.pyplot as plt
  4. import numpy as np
  5. mnist = tf.keras.datasets.mnist
  6. (x_train, y_train),(x_test, y_test) = mnist.load_data(path="/mnist.npz") #加载mnist数据集
  7. #验证mnist数据集大小。x为数据,y为标签。mnist每张图的像素为28*28
  8. print(x_train.shape)
  9. print(y_train.shape)
  10. print(x_test.shape)
  11. print(y_test.shape)
  12. #打印训练集中前9张,看看是什么数字
  13. for i in range(9):
  14. plt.subplot(3,3,1+i)
  15. plt.imshow(x_train[i], cmap='gray')
  16. plt.show()
  17. plt.savefig('./mnist/output/1.jpg')
  18. #打印相应的标签
  19. print(y_train[:9])
  20. #基操:将像素标准化一下
  21. x_train, x_test = x_train / 255.0, x_test / 255.0
  22. #搭建一个两层神经网络
  23. model = tf.keras.models.Sequential([
  24. tf.keras.layers.Flatten(input_shape=(28, 28)), #拉伸图像成一维向量
  25. tf.keras.layers.Dense(128, activation='relu'), #第一层全连接+ReLU激活
  26. tf.keras.layers.Dropout(0.2), #dropout层
  27. tf.keras.layers.Dense(10, activation='softmax') #第二层全连接+softmax激活,输出预测标签
  28. ])
  29. #设置训练超参,优化器为sgd,损失函数为交叉熵,训练衡量指标为accuracy
  30. model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
  31. #开始训练,训练5个epoch,一个epoch代表所有图像计算一遍。每一个epoch能观察到训练精度的提升
  32. model.fit(x_train, y_train, epochs=5)
  33. #计算训练了5个epoch的模型在测试集上的表现
  34. model.evaluate(x_test, y_test)
  35. #直观看一下模型预测结果,打印测试集中的前9张图像
  36. for i in range(9):
  37. plt.subplot(3,3,1+i)
  38. plt.imshow(x_test[i], cmap='gray')
  39. plt.show()
  40. plt.savefig('./mnist/output/2.jpg')
  41. #打印模型识别的数字,是否正确?
  42. # np.argmax(model(x_test[:9]).numpy(), axis=1)
  43. #保存训练好的模型
  44. model.save("./mnist/output/model_epoch_5")