In [1]:
# read the MNIST dataset
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
import matplotlib.pyplot as plt
from keras.utils import np_utils
from keras import optimizers
from keras import backend as K

# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()

Using TensorFlow backend.
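Before reshaping anything, it can help to confirm what load_data actually returned. A minimal sketch of that sanity check (plain NumPy attributes, nothing beyond what Keras already provides):

# quick look at the raw arrays: shapes, label shapes, and pixel range
print(X_train.shape, X_test.shape)    # expected: (60000, 28, 28) and (10000, 28, 28)
print(y_train.shape, y_test.shape)    # expected: (60000,) and (10000,)
print(X_train.dtype, X_train.min(), X_train.max())   # uint8 pixels in [0, 255]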
In [6]:
# plot 4 images as gray scale
plt.subplot(221)
plt.imshow(X_train[0], cmap=plt.get_cmap('gray'))
plt.subplot(222)
plt.imshow(X_train[1], cmap=plt.get_cmap('gray'))
plt.subplot(223)
plt.imshow(X_train[2], cmap=plt.get_cmap('gray'))
plt.subplot(224)
plt.imshow(X_train[3], cmap=plt.get_cmap('gray'))
# show the plot
plt.show()


In [34]:
#preprocessing
# input image dimensions
img_rows, img_cols = 28, 28

if K.image_data_format() == 'channels_first':
    X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
    X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
    X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
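A quick check (a minimal sketch) that the reshape added the channel axis Conv2D expects:

# with the default channels_last backend this prints (60000, 28, 28, 1) (10000, 28, 28, 1) (28, 28, 1)
print(X_train.shape, X_test.shape, input_shape)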
In [37]:
# preprocessing
# number of pixels per image (28*28 = 784); kept for reference, not used by the CNN below
num_pixels = X_train.shape[1] * X_train.shape[2]

# normalize inputs from 0-255 to 0-1
X_train = X_train/255
X_test = X_test/ 255

# one hot encode outputs (keep the original integer test labels in Y_test for later comparison)
Y_test = y_test
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
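to_categorical turns each integer label into a one-hot row. A small illustration on made-up labels (not part of the dataset):

# labels [3, 0, 9] become 10-dimensional one-hot rows
example = np_utils.to_categorical([3, 0, 9], num_classes=10)
print(example)
# prints something like:
# [[0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]
#  [1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
#  [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]]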
In [22]:
model = Sequential()

model.add(Conv2D(32, (3, 3), input_shape=(28, 28, 1)))
model.add(Activation('relu'))
model.add(BatchNormalization(axis=-1))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(BatchNormalization(axis=-1))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(BatchNormalization(axis=-1))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
# Fully connected layer

model.add(BatchNormalization())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Dense(10))

model.add(Activation('softmax'))
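Before compiling, model.summary() is a cheap way to confirm the layer stack, output shapes, and parameter counts:

# print the architecture layer by layer, with output shapes and parameter counts
model.summary()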
In [ ]:
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=128, epochs=10, validation_data=(X_test, y_test))
score = model.evaluate(X_test, y_test)

print('Test loss:', score[0])
print('Test accuracy:', score[1])
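Because the targets are one-hot encoded, recovering an actual digit prediction takes an argmax over the 10-way softmax output. A minimal sketch (NumPy import assumed), using the integer labels kept in Y_test earlier:

import numpy as np

# softmax gives a 10-element probability vector per image; argmax recovers the digit
probs = model.predict(X_test[:5])
print(np.argmax(probs, axis=1))   # predicted digits for the first five test images
print(Y_test[:5])                 # original integer labels, for comparison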
In [2]:
from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)

training_set = train_datagen.flow_from_directory('C:/My Courses/Latefall2018/Lecture09/data/train',
                                                 target_size=(50, 50), batch_size=32, class_mode='binary')
test_set = test_datagen.flow_from_directory('C:/My Courses/Latefall2018/Lecture09/data/test',
                                            target_size=(50, 50), batch_size=32, class_mode='binary')

Found 2000 images belonging to 2 classes.
Found 0 images belonging to 0 classes.
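flow_from_directory infers classes from sub-folders, which is why the test folder above reports "0 images belonging to 0 classes": the test images most likely sit directly in data/test rather than in one sub-folder per class. The expected layout is roughly the following (paths are illustrative):

# data/train/cats/xxx.jpg   <- one sub-folder per class under the train directory
# data/train/dogs/xxx.jpg
# data/test/cats/yyy.jpg    <- same per-class structure for the test split
# data/test/dogs/yyy.jpg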
In [3]:
# Model design
# Initialising the CNN
model = Sequential()

# Convolution
model.add(Conv2D(32, (3, 3), input_shape=(50, 50, 3), activation='relu'))

# Pooling
model.add(MaxPooling2D(pool_size = (2, 2)))

# Second convolutional layer
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size = (2, 2)))

# Flattening
model.add(Flatten())

# Full connection
model.add(Dense(units=128, activation='relu'))
model.add(Dense(units=1, activation='sigmoid'))

# Compiling the CNN
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
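A note on the steps_per_epoch value used in the training cell below: a common convention is steps_per_epoch = number of training images / batch size, so each image is drawn roughly once per epoch. With the 2,000 images found above and a batch size of 32 that is about 63 steps; 8,000 steps re-draws the (augmented) images many times per epoch, which is why each epoch takes so long. A sketch of that calculation:

import math

batch_size = 32
num_train_images = 2000                      # as reported by flow_from_directory above
steps = math.ceil(num_train_images / batch_size)
print(steps)                                 # ~63 steps to cover the training set once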
In [7]:
# Train
model.fit_generator(training_set, steps_per_epoch = 8000, epochs = 25, validation_steps = 2000)

Epoch 1/25
8000/8000 [==============================] - 1692s 212ms/step - loss: 0.0076 - acc: 0.9977
Epoch 2/25
8000/8000 [==============================] - 1748s 219ms/step - loss: 0.0073 - acc: 0.9979
Epoch 3/25
8000/8000 [==============================] - 1889s 236ms/step - loss: 0.0057 - acc: 0.9984
Epoch 4/25
8000/8000 [==============================] - 1989s 249ms/step - loss: 0.0059 - acc: 0.9984
Epoch 5/25
8000/8000 [==============================] - 12367s 2s/step - loss: 0.0058 - acc: 0.9985
Epoch 6/25
8000/8000 [==============================] - 1596s 199ms/step - loss: 0.0047 - acc: 0.9988
Epoch 7/25
8000/8000 [==============================] - 1535s 192ms/step - loss: 0.0049 - acc: 0.9988
Epoch 8/25
8000/8000 [==============================] - 1595s 199ms/step - loss: 0.0047 - acc: 0.9989
Epoch 9/25
8000/8000 [==============================] - 1753s 219ms/step - loss: 0.0045 - acc: 0.9990
Epoch 10/25
8000/8000 [==============================] - 1812s 226ms/step - loss: 0.0040 - acc: 0.9990
Epoch 11/25
8000/8000 [==============================] - 1806s 226ms/step - loss: 0.0042 - acc: 0.9990
Epoch 12/25
8000/8000 [==============================] - 1730s 216ms/step - loss: 0.0045 - acc: 0.9990
Epoch 13/25
8000/8000 [==============================] - 1944s 243ms/step - loss: 0.0043 - acc: 0.9990
Epoch 14/25
8000/8000 [==============================] - 2034s 254ms/step - loss: 0.0039 - acc: 0.9991
Epoch 15/25
8000/8000 [==============================] - 2002s 250ms/step - loss: 0.0042 - acc: 0.9991
Epoch 16/25
8000/8000 [==============================] - 32549s 4s/step - loss: 0.0039 - acc: 0.9992
Epoch 17/25
8000/8000 [==============================] - 1629s 204ms/step - loss: 0.0036 - acc: 0.9992
Epoch 18/25
8000/8000 [==============================] - 2017s 252ms/step - loss: 0.0033 - acc: 0.9993
Epoch 19/25
8000/8000 [==============================] - 1675s 209ms/step - loss: 0.0042 - acc: 0.9992
Epoch 20/25
8000/8000 [==============================] - 1791s 224ms/step - loss: 0.0031 - acc: 0.9994
Epoch 21/25
8000/8000 [==============================] - 1559s 195ms/step - loss: 0.0038 - acc: 0.9993
Epoch 22/25
8000/8000 [==============================] - 1876s 234ms/step - loss: 0.0041 - acc: 0.9993
Epoch 23/25
8000/8000 [==============================] - 1939s 242ms/step - loss: 0.0040 - acc: 0.9993
Epoch 24/25
8000/8000 [==============================] - 1976s 247ms/step - loss: 0.0030 - acc: 0.9994
Epoch 25/25
8000/8000 [==============================] - 2198s 275ms/step - loss: 0.0034 - acc: 0.9994
Out[7]:

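Each epoch above takes on the order of half an hour, so persisting the trained model is worthwhile before experimenting further. A minimal sketch using the standard Keras save/load API (the file name here is illustrative):

from keras.models import load_model

model.save('cats_vs_dogs_cnn.h5')            # saves architecture, weights and optimizer state
# later, to reuse it without retraining:
# model = load_model('cats_vs_dogs_cnn.h5')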
In [11]:
import cv2
# 1 is dog and 0 is cat
img = cv2.imread("C:/My Courses/Latefall2018/Lecture09/data/test/190.jpg")
plt.imshow(img)
plt.show()  # display it
img = cv2.resize(img, (50, 50))
img = img.reshape(1, 50, 50, 3)
img = img.astype('float32') / 255.0      # match the 1./255 rescaling used during training
if model.predict(img).item() > 0.5:      # sigmoid output above 0.5 means class 1 (dog)
    print("This is a dog")
else:
    print("This is a cat")

This is a dog
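One detail when mixing OpenCV and Matplotlib: cv2.imread returns images in BGR channel order, so plt.imshow displays the colours swapped unless the channels are reordered first. A minimal sketch, reusing the same test image:

bgr = cv2.imread("C:/My Courses/Latefall2018/Lecture09/data/test/190.jpg")
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)   # reorder channels so colours display correctly
plt.imshow(rgb)
plt.show()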
In [42]:
fig = plt.figure(figsize=(16, 12))
num = 0

for i in range(201, 209):
    y = fig.add_subplot(4, 4, num + 1)
    img = cv2.imread("C:/My Courses/Latefall2018/Lecture09/data/test/" + str(i) + ".jpg")
    y.imshow(img)
    #plt.show() # display it
    img = cv2.resize(img, (50, 50))
    img = img.reshape(1, 50, 50, 3)
    img = img.astype('float32') / 255.0      # match the training rescale
    if model.predict(img).item() > 0.5:      # sigmoid > 0.5 -> dog, otherwise cat
        pred = "Dog"
    else:
        pred = "Cat"

    plt.title(pred)
    y.axes.get_xaxis().set_visible(False)
    y.axes.get_yaxis().set_visible(False)
    num = num + 1

plt.show()
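Instead of calling predict once per image inside the loop, the resized images could also be stacked into a single batch and classified in one call. A minimal sketch under the same assumptions (same test paths, same 50x50 input size):

import numpy as np

# stack the eight test images into a single (8, 50, 50, 3) batch
imgs = []
for i in range(201, 209):
    im = cv2.imread("C:/My Courses/Latefall2018/Lecture09/data/test/" + str(i) + ".jpg")
    imgs.append(cv2.resize(im, (50, 50)))
batch = np.stack(imgs).astype('float32') / 255.0   # match the training rescale
preds = model.predict(batch)                       # one sigmoid score per image
labels = ["Dog" if p > 0.5 else "Cat" for p in preds.ravel()]
print(labels)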