
In [1]:
import numpy as np
In [7]:
from keras.datasets import mnist
import matplotlib.pyplot as plt
# load (downloaded if needed) the MNIST dataset
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# plot 4 images as gray scale
plt.subplot(221)
plt.imshow(X_train[0], cmap=plt.get_cmap('gray'))
plt.subplot(222)
plt.imshow(X_train[1], cmap=plt.get_cmap('gray'))
plt.subplot(223)
plt.imshow(X_train[2], cmap=plt.get_cmap('gray'))
plt.subplot(224)
plt.imshow(X_train[3], cmap=plt.get_cmap('gray'))
# show the plot
plt.show()
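Before flattening, it is worth confirming the raw array shapes; a quick check (a sketch, relying only on the standard mnist.load_data() split) looks like this:

# sanity check: 60,000 training and 10,000 test images, each 28x28 grayscale
print(X_train.shape, y_train.shape)   # (60000, 28, 28) (60000,)
print(X_test.shape, y_test.shape)     # (10000, 28, 28) (10000,)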


In [10]:
# flatten 28*28 images to a 784 vector for each image
num_pixels = X_train.shape[1] * X_train.shape[2]
X_train = X_train.reshape(X_train.shape[0], num_pixels)
X_test = X_test.reshape(X_test.shape[0], num_pixels)
In [11]:
# normalize inputs from 0-255 to 0-1
X_train = X_train / 255
X_test = X_test / 255
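A quick sanity check (a sketch, not part of the original notebook) that flattening and scaling did what we expect:

# each image is now a 784-dimensional vector with values in [0, 1]
print(X_train.shape)                  # (60000, 784)
print(X_train.min(), X_train.max())   # 0.0 1.0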
In [12]:
from keras.utils import np_utils
# one hot encode outputs (keep the original integer test labels in Y_test for later comparison)
Y_test = y_test
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
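To make the encoding concrete, a small illustration (a sketch with a hypothetical label of 3 and 10 classes):

# a label of 3 becomes a 10-dimensional indicator vector
print(np_utils.to_categorical([3], num_classes=10))
# [[0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]]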
In [14]:
from keras.models import Sequential
from keras.layers import Dense
from keras import optimizers
hidden_layers = 16  # number of units in the single hidden layer

def baseline_model():
    # create model: 784 -> 784 -> 16 -> 10, sigmoid activations throughout
    model = Sequential()
    model.add(Dense(num_pixels, input_dim=num_pixels, kernel_initializer='normal', activation='sigmoid'))
    model.add(Dense(hidden_layers, kernel_initializer='normal', activation='sigmoid'))
    model.add(Dense(num_classes, kernel_initializer='normal', activation='sigmoid'))
    # Compile model with mean squared error loss and plain SGD
    #sgd = optimizers.SGD(lr=0.0001, momentum=0.0, decay=0.0, nesterov=False)
    #model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
    model.compile(loss='mean_squared_error', optimizer='sgd', metrics=['accuracy'])
    return model
In [15]:
model = baseline_model()
# Fit the model
nn_simple = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200)
# Final evaluation of the model
scores = model.evaluate(X_test, y_test)
print("Baseline Error: %.2f%%" % (100 - scores[1]*100))
print("Baseline Accuracy: %.2f%%" % (scores[1]*100))

Train on 60000 samples, validate on 10000 samples
Epoch 1/10
60000/60000 [==============================] - 10s 173us/step - loss: 0.2220 - acc: 0.0904 - val_loss: 0.1947 - val_acc: 0.0892
Epoch 2/10
60000/60000 [==============================] - 13s 211us/step - loss: 0.1696 - acc: 0.0774 - val_loss: 0.1471 - val_acc: 0.0553
Epoch 3/10
60000/60000 [==============================] - 12s 195us/step - loss: 0.1317 - acc: 0.0582 - val_loss: 0.1195 - val_acc: 0.0843
Epoch 4/10
60000/60000 [==============================] - 11s 178us/step - loss: 0.1121 - acc: 0.1008 - val_loss: 0.1063 - val_acc: 0.1028
Epoch 5/10
60000/60000 [==============================] - 10s 163us/step - loss: 0.1027 - acc: 0.1044 - val_loss: 0.0998 - val_acc: 0.1028
Epoch 6/10
60000/60000 [==============================] - 9s 157us/step - loss: 0.0979 - acc: 0.1044 - val_loss: 0.0964 - val_acc: 0.1028
Epoch 7/10
60000/60000 [==============================] - 9s 150us/step - loss: 0.0953 - acc: 0.1045 - val_loss: 0.0943 - val_acc: 0.1028
Epoch 8/10
60000/60000 [==============================] - 10s 160us/step - loss: 0.0936 - acc: 0.1045 - val_loss: 0.0930 - val_acc: 0.1030
Epoch 9/10
60000/60000 [==============================] - 12s 198us/step - loss: 0.0926 - acc: 0.0922 - val_loss: 0.0922 - val_acc: 0.0634
Epoch 10/10
60000/60000 [==============================] - 11s 190us/step - loss: 0.0919 - acc: 0.0498 - val_loss: 0.0916 - val_acc: 0.1073
10000/10000 [==============================] - 2s 177us/step
Baseline Error: 89.27%
Baseline Accuracy: 10.73%
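The final accuracy of about 10.7% is close to chance for ten classes: with sigmoid units in every layer, a mean-squared-error loss, and plain SGD at its default learning rate, the gradients are small and ten epochs are far too few for the network to move away from its initialization. For comparison only, a more conventional multi-class setup uses a softmax output with categorical cross-entropy; the sketch below (a hypothetical softmax_model helper, not part of the original exercise) keeps the same layer sizes and changes only the output activation, loss, and optimizer.

def softmax_model():
    # same 784 -> 784 -> 16 -> 10 layout, but softmax output and cross-entropy loss (sketch)
    model = Sequential()
    model.add(Dense(num_pixels, input_dim=num_pixels, kernel_initializer='normal', activation='sigmoid'))
    model.add(Dense(hidden_layers, kernel_initializer='normal', activation='sigmoid'))
    model.add(Dense(num_classes, kernel_initializer='normal', activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model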
In [16]:
plt.subplot(2,1,1)
plt.plot(nn_simple.history['acc'])
plt.plot(nn_simple.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='lower right')

plt.subplot(2,1,2)
plt.plot(nn_simple.history['loss'])
plt.plot(nn_simple.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')

plt.show()
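One portability note: the standalone Keras version used here records the history under the keys 'acc' and 'val_acc'; newer tf.keras releases use 'accuracy' and 'val_accuracy', so the plotting cell would need the key names swapped there. For example (an assumption about the newer API, not needed for the version used here):

# plt.plot(nn_simple.history['accuracy'])
# plt.plot(nn_simple.history['val_accuracy'])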


In [17]:
import numpy as np
predicted_classes = model.predict_classes(X_test)

# see which we predicted correctly and which not
correct_indices = np.nonzero(predicted_classes == Y_test)[0]
incorrect_indices = np.nonzero(predicted_classes != Y_test)[0]
print()
print(len(correct_indices), " classified correctly")
print(len(incorrect_indices), " classified incorrectly")

# adapt figure size to accommodate 18 subplots
plt.rcParams['figure.figsize'] = (7, 14)

figure_evaluation = plt.figure()

# plot 9 correct predictions
for i, correct in enumerate(correct_indices[:9]):
    plt.subplot(6, 3, i + 1)
    plt.imshow(X_test[correct].reshape(28, 28), cmap='gray', interpolation='none')
    plt.title(
        "Predicted: {}, Truth: {}".format(predicted_classes[correct],
                                          Y_test[correct]))
    plt.xticks([])
    plt.yticks([])

# plot 9 incorrect predictions
for i, incorrect in enumerate(incorrect_indices[:9]):
    plt.subplot(6, 3, i + 10)
    plt.imshow(X_test[incorrect].reshape(28, 28), cmap='gray', interpolation='none')
    plt.title(
        "Predicted: {}, Truth: {}".format(predicted_classes[incorrect],
                                          Y_test[incorrect]))
    plt.xticks([])
    plt.yticks([])

figure_evaluation

1073 classified correctly
8927 classified incorrectly
Out[17]:
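model.predict_classes works with the standalone Keras used here but has been removed from recent tf.keras releases; if the cell above fails on a newer install, an equivalent that relies only on model.predict is to take the argmax over the ten output units:

# predicted_classes = np.argmax(model.predict(X_test), axis=1)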

In [23]:
for i, correct in enumerate(correct_indices):
    print(Y_test[correct])

1
1
1
...
[output truncated: the value 1 is printed 1073 times; every correctly classified test image has true label 1]
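Every correctly classified test image turns out to be a 1, which suggests the network answers 1 for nearly every input: the MNIST test set contains 1,135 ones, so predicting 1 everywhere would score about 11.4%, close to the 10.73% observed above. A quick way to confirm this (a sketch, assuming predicted_classes from the earlier cell) is to count how many test images the model assigns to each digit:

# count the model's predictions per digit; a collapsed model concentrates on one class
values, counts = np.unique(predicted_classes, return_counts=True)
print(dict(zip(values, counts)))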