In [1]:
import numpy as np
%matplotlib inline
In [2]:
from keras.datasets import mnist
import matplotlib.pyplot as plt
# load (downloaded if needed) the MNIST dataset
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# plot 4 images as gray scale
plt.subplot(221)
plt.imshow(X_train[0], cmap=plt.get_cmap('gray'))
plt.subplot(222)
plt.imshow(X_train[1], cmap=plt.get_cmap('gray'))
plt.subplot(223)
plt.imshow(X_train[2], cmap=plt.get_cmap('gray'))
plt.subplot(224)
plt.imshow(X_train[3], cmap=plt.get_cmap('gray'))
# show the plot
plt.show()

Using TensorFlow backend.

[figure: the first four MNIST training digits shown in a 2x2 grid]
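For reference, mnist.load_data() returns uint8 arrays of grayscale pixel values; a quick shape check (a minimal sketch, not a cell from the original run) confirms the train/test split:

print(X_train.shape, y_train.shape)   # (60000, 28, 28) (60000,)
print(X_test.shape, y_test.shape)     # (10000, 28, 28) (10000,)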
In [3]:
# flatten 28*28 images to a 784 vector for each image
num_pixels = X_train.shape[1] * X_train.shape[2]
X_train = X_train.reshape(X_train.shape[0], num_pixels)
X_test = X_test.reshape(X_test.shape[0], num_pixels)
In [4]:
# normalize inputs from 0-255 to 0-1
X_train = X_train / 255
X_test = X_test / 255
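Because the images load as uint8 arrays, NumPy's true division by 255 already promotes them to float64. A common variant (an equivalent sketch, not what this notebook ran) casts to float32 first to halve the memory footprint:

X_train = X_train.astype('float32') / 255   # explicit cast before scaling
X_test = X_test.astype('float32') / 255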
In [5]:
from keras.utils import np_utils
# one hot encode outputs
Y_test = y_test  # keep the integer test labels for the error analysis at the end
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
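np_utils.to_categorical turns each integer label into a one-hot vector whose length is the number of classes; a minimal illustration:

print(np_utils.to_categorical([3], num_classes=10))
# [[0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]]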
In [6]:
from keras.models import Sequential
from keras.layers import Dense
from keras import optimizers
hidden_layer_nodes = 50
def baseline_model():
    # create model
    model = Sequential()
    model.add(Dense(hidden_layer_nodes, input_dim=num_pixels, kernel_initializer='normal', activation='sigmoid'))
    model.add(Dense(hidden_layer_nodes, kernel_initializer='normal', activation='sigmoid'))
    model.add(Dense(num_classes, kernel_initializer='normal', activation='sigmoid'))
    # Compile model
    #sgd = optimizers.SGD(lr=0.0001, momentum=0.0, decay=0.0, nesterov=False)
    #model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
    model.compile(loss='mean_squared_error', optimizer='sgd', metrics=['accuracy'])

    return model
In [7]:
model = baseline_model()
# Fit the model
nn_simple = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200)
# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=2)
print("Baseline Error: %.2f%%" % (100-scores[1]*100))
print("Baseline Accuracy: %.2f%%" % (scores[1]*100))

Train on 60000 samples, validate on 10000 samples
Epoch 1/10
60000/60000 [==============================] - 1s 15us/step - loss: 0.1832 - acc: 0.0986 - val_loss: 0.1461 - val_acc: 0.0958
Epoch 2/10
60000/60000 [==============================] - 1s 12us/step - loss: 0.1286 - acc: 0.0986 - val_loss: 0.1160 - val_acc: 0.0958
Epoch 3/10
60000/60000 [==============================] - 1s 13us/step - loss: 0.1093 - acc: 0.0986 - val_loss: 0.1041 - val_acc: 0.0958
Epoch 4/10
60000/60000 [==============================] - 1s 12us/step - loss: 0.1010 - acc: 0.1059 - val_loss: 0.0985 - val_acc: 0.1135
Epoch 5/10
60000/60000 [==============================] - 1s 12us/step - loss: 0.0969 - acc: 0.1124 - val_loss: 0.0955 - val_acc: 0.1135
Epoch 6/10
60000/60000 [==============================] - 1s 13us/step - loss: 0.0945 - acc: 0.1124 - val_loss: 0.0937 - val_acc: 0.1135
Epoch 7/10
60000/60000 [==============================] - 1s 13us/step - loss: 0.0931 - acc: 0.1124 - val_loss: 0.0926 - val_acc: 0.1135
Epoch 8/10
60000/60000 [==============================] - 1s 13us/step - loss: 0.0922 - acc: 0.1124 - val_loss: 0.0919 - val_acc: 0.1135
Epoch 9/10
60000/60000 [==============================] - 1s 13us/step - loss: 0.0916 - acc: 0.1124 - val_loss: 0.0914 - val_acc: 0.1135
Epoch 10/10
60000/60000 [==============================] - 1s 13us/step - loss: 0.0912 - acc: 0.1124 - val_loss: 0.0910 - val_acc: 0.1135
Baseline Error: 88.65%
Baseline Accuracy: 11.35%

Task 1
In [8]:
def task1_model(hidden_layer_nodes=65):
    # create model
    model = Sequential()
    model.add(Dense(hidden_layer_nodes, input_dim=num_pixels, kernel_initializer='normal', activation='sigmoid'))
    model.add(Dense(hidden_layer_nodes, kernel_initializer='normal', activation='sigmoid'))
    model.add(Dense(num_classes, kernel_initializer='normal', activation='sigmoid'))
    # Compile model
    #sgd = optimizers.SGD(lr=0.0001, momentum=0.0, decay=0.0, nesterov=False)
    #model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
    model.compile(loss='mean_squared_error', optimizer='sgd', metrics=['accuracy'])

    return model

model = task1_model()
# Fit the model
nn_simple = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200)
# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=2)
print("task1 model Error: %.2f%%" % (100-scores[1]*100))
print("task1 model Accuracy: %.2f%%" % (scores[1]*100))

Train on 60000 samples, validate on 10000 samples
Epoch 1/10
60000/60000 [==============================] - 1s 16us/step - loss: 0.1874 - acc: 0.0974 - val_loss: 0.1406 - val_acc: 0.0982
Epoch 2/10
60000/60000 [==============================] - 1s 14us/step - loss: 0.1226 - acc: 0.0974 - val_loss: 0.1104 - val_acc: 0.0982
Epoch 3/10
60000/60000 [==============================] - 1s 13us/step - loss: 0.1046 - acc: 0.0998 - val_loss: 0.1002 - val_acc: 0.1013
Epoch 4/10
60000/60000 [==============================] - 1s 12us/step - loss: 0.0978 - acc: 0.1185 - val_loss: 0.0958 - val_acc: 0.1135
Epoch 5/10
60000/60000 [==============================] - 1s 13us/step - loss: 0.0946 - acc: 0.1124 - val_loss: 0.0936 - val_acc: 0.1135
Epoch 6/10
60000/60000 [==============================] - 1s 14us/step - loss: 0.0929 - acc: 0.1124 - val_loss: 0.0924 - val_acc: 0.1135
Epoch 7/10
60000/60000 [==============================] - 1s 14us/step - loss: 0.0920 - acc: 0.1124 - val_loss: 0.0916 - val_acc: 0.1135
Epoch 8/10
60000/60000 [==============================] - 1s 14us/step - loss: 0.0913 - acc: 0.1124 - val_loss: 0.0911 - val_acc: 0.1135
Epoch 9/10
60000/60000 [==============================] - 1s 14us/step - loss: 0.0909 - acc: 0.1124 - val_loss: 0.0908 - val_acc: 0.1135
Epoch 10/10
60000/60000 [==============================] - 1s 14us/step - loss: 0.0907 - acc: 0.1124 - val_loss: 0.0906 - val_acc: 0.1135
task1 model Error: 88.65%
task1 model Accuracy: 11.35%

Comparison With Baseline
Widening the hidden layers from 50 to 65 nodes makes no difference: the model ends at the same 11.35% accuracy as the baseline. That figure equals the frequency of the digit 1 in the test set (1,135 of the 10,000 images), which suggests the network has collapsed to predicting a single class; with sigmoid activations, mean squared error loss, and plain SGD, the gradients are too small for learning to take off within 10 epochs.
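A quick diagnostic sketch (not part of the assignment; it assumes the fitted model from the cell above is still in scope) makes the collapse visible by counting how often each digit is predicted:

preds = model.predict_classes(X_test)      # integer predictions (Keras 2.x Sequential)
print(np.bincount(preds, minlength=10))    # nearly all mass lands on a single digit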

Task 2
In [9]:
def task2_model(hidden_layer_nodes=50):
    # create model
    model = Sequential()
    model.add(Dense(hidden_layer_nodes, input_dim=num_pixels, kernel_initializer='normal', activation='sigmoid'))
    model.add(Dense(hidden_layer_nodes, kernel_initializer='normal', activation='sigmoid'))
    model.add(Dense(num_classes, kernel_initializer='normal', activation='sigmoid'))
    # Compile model
    #sgd = optimizers.SGD(lr=0.0001, momentum=0.0, decay=0.0, nesterov=False)
    #model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
    model.compile(loss='mean_squared_error', optimizer='sgd', metrics=['accuracy'])

    return model

model = task2_model()
# Fit the model
nn_simple = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=30, batch_size=200)
# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=2)
print("task2 model Error: %.2f%%" % (100-scores[1]*100))
print("task2 model Accuracy: %.2f%%" % (scores[1]*100))

Train on 60000 samples, validate on 10000 samples
Epoch 1/30
60000/60000 [==============================] - 1s 15us/step - loss: 0.1966 - acc: 0.0747 - val_loss: 0.1539 - val_acc: 0.0847
Epoch 2/30
60000/60000 [==============================] - 1s 13us/step - loss: 0.1337 - acc: 0.0777 - val_loss: 0.1193 - val_acc: 0.1092
Epoch 3/30
60000/60000 [==============================] - 1s 13us/step - loss: 0.1116 - acc: 0.1123 - val_loss: 0.1057 - val_acc: 0.1135
Epoch 4/30
60000/60000 [==============================] - 1s 13us/step - loss: 0.1022 - acc: 0.1124 - val_loss: 0.0994 - val_acc: 0.1135
Epoch 5/30
60000/60000 [==============================] - 1s 12us/step - loss: 0.0976 - acc: 0.1124 - val_loss: 0.0960 - val_acc: 0.1135
Epoch 6/30
60000/60000 [==============================] - 1s 12us/step - loss: 0.0950 - acc: 0.1124 - val_loss: 0.0941 - val_acc: 0.1135
Epoch 7/30
60000/60000 [==============================] - 1s 12us/step - loss: 0.0934 - acc: 0.1124 - val_loss: 0.0928 - val_acc: 0.1135
Epoch 8/30
60000/60000 [==============================] - 1s 12us/step - loss: 0.0924 - acc: 0.1124 - val_loss: 0.0921 - val_acc: 0.1135
Epoch 9/30
60000/60000 [==============================] - 1s 12us/step - loss: 0.0918 - acc: 0.1124 - val_loss: 0.0915 - val_acc: 0.1135
Epoch 10/30
60000/60000 [==============================] - 1s 12us/step - loss: 0.0913 - acc: 0.1124 - val_loss: 0.0911 - val_acc: 0.1135
Epoch 11/30
60000/60000 [==============================] - 1s 12us/step - loss: 0.0910 - acc: 0.1124 - val_loss: 0.0909 - val_acc: 0.1135
Epoch 12/30
60000/60000 [==============================] - 1s 12us/step - loss: 0.0907 - acc: 0.1124 - val_loss: 0.0907 - val_acc: 0.1135
Epoch 13/30
60000/60000 [==============================] - 1s 13us/step - loss: 0.0906 - acc: 0.1124 - val_loss: 0.0905 - val_acc: 0.1135
Epoch 14/30
60000/60000 [==============================] - 1s 13us/step - loss: 0.0904 - acc: 0.1124 - val_loss: 0.0904 - val_acc: 0.1135
Epoch 15/30
60000/60000 [==============================] - 1s 13us/step - loss: 0.0903 - acc: 0.1124 - val_loss: 0.0903 - val_acc: 0.1135
Epoch 16/30
60000/60000 [==============================] - 1s 13us/step - loss: 0.0903 - acc: 0.1124 - val_loss: 0.0902 - val_acc: 0.1135
Epoch 17/30
60000/60000 [==============================] - 1s 14us/step - loss: 0.0902 - acc: 0.1124 - val_loss: 0.0902 - val_acc: 0.1135
Epoch 18/30
60000/60000 [==============================] - 1s 13us/step - loss: 0.0902 - acc: 0.1124 - val_loss: 0.0901 - val_acc: 0.1135
Epoch 19/30
60000/60000 [==============================] - 1s 13us/step - loss: 0.0901 - acc: 0.1124 - val_loss: 0.0901 - val_acc: 0.1135
Epoch 20/30
60000/60000 [==============================] - 1s 13us/step - loss: 0.0901 - acc: 0.1124 - val_loss: 0.0901 - val_acc: 0.1135
Epoch 21/30
60000/60000 [==============================] - 1s 13us/step - loss: 0.0901 - acc: 0.1124 - val_loss: 0.0901 - val_acc: 0.1135
Epoch 22/30
60000/60000 [==============================] - 1s 13us/step - loss: 0.0901 - acc: 0.1124 - val_loss: 0.0900 - val_acc: 0.1135
Epoch 23/30
60000/60000 [==============================] - 1s 13us/step - loss: 0.0900 - acc: 0.1124 - val_loss: 0.0900 - val_acc: 0.1135
Epoch 24/30
60000/60000 [==============================] - 1s 13us/step - loss: 0.0900 - acc: 0.1124 - val_loss: 0.0900 - val_acc: 0.1135
Epoch 25/30
60000/60000 [==============================] - 1s 13us/step - loss: 0.0900 - acc: 0.1124 - val_loss: 0.0900 - val_acc: 0.1135
Epoch 26/30
60000/60000 [==============================] - 1s 13us/step - loss: 0.0900 - acc: 0.1124 - val_loss: 0.0900 - val_acc: 0.1135
Epoch 27/30
60000/60000 [==============================] - 1s 13us/step - loss: 0.0900 - acc: 0.1124 - val_loss: 0.0900 - val_acc: 0.1135
Epoch 28/30
60000/60000 [==============================] - 1s 13us/step - loss: 0.0900 - acc: 0.1124 - val_loss: 0.0900 - val_acc: 0.1135
Epoch 29/30
60000/60000 [==============================] - 1s 12us/step - loss: 0.0900 - acc: 0.1124 - val_loss: 0.0900 - val_acc: 0.1135
Epoch 30/30
60000/60000 [==============================] - 1s 12us/step - loss: 0.0900 - acc: 0.1124 - val_loss: 0.0900 - val_acc: 0.1135
task2 model Error: 88.65%
task2 model Accuracy: 11.35%

Comparison With Baseline
Training for 30 epochs instead of 10 still has no effect: the model stays at the baseline accuracy of 11.35%. The loss plateaus at 0.0900 from roughly epoch 12 onward, so extra epochs alone cannot escape the stall.
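For reference, the standard remedy for this kind of stall is a softmax output trained with categorical cross-entropy, which yields much stronger gradients than MSE on sigmoid outputs. A sketch of that alternative final layer and compile step (not the configuration used in this notebook; task 3 instead adds a relu layer and switches to adam):

model.add(Dense(num_classes, kernel_initializer='normal', activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])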

Task 3
In [13]:
def task3_model(hidden_layer_nodes=50):
    # create model
    model = Sequential()
    model.add(Dense(hidden_layer_nodes, input_dim=num_pixels, kernel_initializer='normal', activation='sigmoid'))
    model.add(Dense(hidden_layer_nodes, kernel_initializer='normal', activation='sigmoid'))
    model.add(Dense(20, kernel_initializer='normal', activation='relu'))
    model.add(Dense(num_classes, kernel_initializer='normal', activation='sigmoid'))
    # Compile model
    #sgd = optimizers.SGD(lr=0.0001, momentum=0.0, decay=0.0, nesterov=False)
    model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
    #model.compile(loss='mean_squared_error', optimizer='sgd', metrics=['accuracy'])

    return model

model3 = task3_model()
# Fit the model
nn_simple = model3.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=20, batch_size=200)
# Final evaluation of the model
scores = model3.evaluate(X_test, y_test, verbose=2)
print("task3 model Error: %.2f%%" % (100-scores[1]*100))
print("task3 model Accuracy: %.2f%%" % (scores[1]*100))

Train on 60000 samples, validate on 10000 samples
Epoch 1/20
60000/60000 [==============================] - 1s 22us/step - loss: 0.1062 - acc: 0.1068 - val_loss: 0.0900 - val_acc: 0.1135
Epoch 2/20
60000/60000 [==============================] - 1s 14us/step - loss: 0.0900 - acc: 0.1124 - val_loss: 0.0900 - val_acc: 0.1135
Epoch 3/20
60000/60000 [==============================] - 1s 14us/step - loss: 0.0900 - acc: 0.1115 - val_loss: 0.0900 - val_acc: 0.1135
Epoch 4/20
60000/60000 [==============================] - 1s 14us/step - loss: 0.0900 - acc: 0.1102 - val_loss: 0.0900 - val_acc: 0.1135
Epoch 5/20
60000/60000 [==============================] - 1s 14us/step - loss: 0.0899 - acc: 0.1126 - val_loss: 0.0899 - val_acc: 0.1135
Epoch 6/20
60000/60000 [==============================] - 1s 14us/step - loss: 0.0867 - acc: 0.1595 - val_loss: 0.0801 - val_acc: 0.2450
Epoch 7/20
60000/60000 [==============================] - 1s 14us/step - loss: 0.0767 - acc: 0.3018 - val_loss: 0.0727 - val_acc: 0.3954
Epoch 8/20
60000/60000 [==============================] - 1s 14us/step - loss: 0.0705 - acc: 0.4274 - val_loss: 0.0674 - val_acc: 0.4563
Epoch 9/20
60000/60000 [==============================] - 1s 15us/step - loss: 0.0615 - acc: 0.5197 - val_loss: 0.0537 - val_acc: 0.6122
Epoch 10/20
60000/60000 [==============================] - 1s 15us/step - loss: 0.0419 - acc: 0.7739 - val_loss: 0.0305 - val_acc: 0.8187
Epoch 11/20
60000/60000 [==============================] - 1s 14us/step - loss: 0.0250 - acc: 0.8673 - val_loss: 0.0195 - val_acc: 0.9007
Epoch 12/20
60000/60000 [==============================] - 1s 14us/step - loss: 0.0163 - acc: 0.9095 - val_loss: 0.0139 - val_acc: 0.9173
Epoch 13/20
60000/60000 [==============================] - 1s 16us/step - loss: 0.0130 - acc: 0.9217 - val_loss: 0.0123 - val_acc: 0.9231
Epoch 14/20
60000/60000 [==============================] - 1s 15us/step - loss: 0.0115 - acc: 0.9303 - val_loss: 0.0114 - val_acc: 0.9279
Epoch 15/20
60000/60000 [==============================] - 1s 15us/step - loss: 0.0105 - acc: 0.9358 - val_loss: 0.0107 - val_acc: 0.9328
Epoch 16/20
60000/60000 [==============================] - 1s 15us/step - loss: 0.0097 - acc: 0.9405 - val_loss: 0.0101 - val_acc: 0.9352
Epoch 17/20
60000/60000 [==============================] - 1s 14us/step - loss: 0.0091 - acc: 0.9434 - val_loss: 0.0096 - val_acc: 0.9387
Epoch 18/20
60000/60000 [==============================] - 1s 14us/step - loss: 0.0086 - acc: 0.9463 - val_loss: 0.0093 - val_acc: 0.9406
Epoch 19/20
60000/60000 [==============================] - 1s 13us/step - loss: 0.0081 - acc: 0.9487 - val_loss: 0.0090 - val_acc: 0.9430
Epoch 20/20
60000/60000 [==============================] - 1s 13us/step - loss: 0.0078 - acc: 0.9511 - val_loss: 0.0088 - val_acc: 0.9444
task3 model Error: 5.56%
task3 model Accuracy: 94.44%

Compare Accuracy
The task 3 model reaches 94.44% test accuracy, far higher than the baseline and the task 1 and task 2 models, which all remain at 11.35%. Adding a relu hidden layer and switching to the adam optimizer lets training escape the plateau after about five epochs. The accuracy and loss curves for the task 3 model are plotted in the cells below.
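As an extra check alongside the curves, a confusion matrix shows which digits the model mixes up (a sketch; it assumes scikit-learn is available and reuses the integer labels saved in Y_test):

from sklearn.metrics import confusion_matrix
preds = model3.predict_classes(X_test)
print(confusion_matrix(Y_test, preds))   # rows: true digit, columns: predicted digit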
In [14]:
plt.subplot(2,1,1)
plt.plot(nn_simple.history['acc'])
plt.plot(nn_simple.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='lower right')

plt.subplot(2,1,2)
plt.plot(nn_simple.history['loss'])
plt.plot(nn_simple.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')

plt.show()

[figure: training and validation accuracy (top) and loss (bottom) over the 20 training epochs]
In [15]:
import numpy as np
predicted_classes = model3.predict_classes(X_test)

# see which we predicted correctly and which not
correct_indices = np.nonzero(predicted_classes == Y_test)[0]
incorrect_indices = np.nonzero(predicted_classes != Y_test)[0]
print()
print(len(correct_indices), " classified correctly")
print(len(incorrect_indices), " classified incorrectly")

# adapt figure size to accommodate 18 subplots
plt.rcParams['figure.figsize'] = (7,14)

figure_evaluation = plt.figure()

# plot 9 correct predictions
for i, correct in enumerate(correct_indices[:9]):
    plt.subplot(6,3,i+1)
    plt.imshow(X_test[correct].reshape(28,28), cmap='gray', interpolation='none')
    plt.title(
        "Predicted: {}, Truth: {}".format(predicted_classes[correct],
                                          Y_test[correct]))
    plt.xticks([])
    plt.yticks([])

# plot 9 incorrect predictions
for i, incorrect in enumerate(incorrect_indices[:9]):
    plt.subplot(6,3,i+10)
    plt.imshow(X_test[incorrect].reshape(28,28), cmap='gray', interpolation='none')
    plt.title(
        "Predicted: {}, Truth: {}".format(predicted_classes[incorrect],
                                          Y_test[incorrect]))
    plt.xticks([])
    plt.yticks([])

figure_evaluation

9444 classified correctly
556 classified incorrectly
Out[15]:
[figure: nine correctly classified and nine misclassified test digits, each titled with the predicted and true label]
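A portability note: Sequential.predict_classes was removed in TensorFlow 2.6, so on newer Keras versions the equivalent of the prediction step above (a sketch) is:

predicted_classes = np.argmax(model3.predict(X_test), axis=1)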