In [0]:
from tensorflow.python.client import device_lib
print(“Show System RAM Memory:\n\n”)
!cat /proc/meminfo | egrep “MemTotal*”
print(“\n\nShow Devices:\n\n”+str(device_lib.list_local_devices()))
Show System RAM Memory:
MemTotal: 13335188 kB
Show Devices:
[name: “/device:CPU:0”
device_type: “CPU”
memory_limit: 268435456
locality {
}
incarnation: 8700158686858789265
, name: “/device:XLA_CPU:0”
device_type: “XLA_CPU”
memory_limit: 17179869184
locality {
}
incarnation: 8342104309289264332
physical_device_desc: “device: XLA_CPU device”
, name: “/device:XLA_GPU:0”
device_type: “XLA_GPU”
memory_limit: 17179869184
locality {
}
incarnation: 16107158158178871312
physical_device_desc: “device: XLA_GPU device”
, name: “/device:GPU:0”
device_type: “GPU”
memory_limit: 11330115994
locality {
bus_id: 1
links {
}
}
incarnation: 5997946455289902644
physical_device_desc: “device: 0, name: Tesla K80, pci bus id: 0000:00:04.0, compute capability: 3.7”
]
In [2]:
!pip install q tensorflow-gpu==2.0.0
Collecting q
Downloading https://files.pythonhosted.org/packages/53/bc/51619d89e0bd855567e7652fa16d06f1ed36a85f108a7fe71f6629bf719d/q-2.6-py2.py3-none-any.whl
Collecting tensorflow-gpu==2.0.0
Downloading https://files.pythonhosted.org/packages/25/44/47f0722aea081697143fbcf5d2aa60d1aee4aaacb5869aee2b568974777b/tensorflow_gpu-2.0.0-cp36-cp36m-manylinux2010_x86_64.whl (380.8MB)
|████████████████████████████████| 380.8MB 45kB/s
Requirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0) (1.1.0)
Requirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0) (1.1.0)
Requirement already satisfied: wrapt>=1.11.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0) (1.11.2)
Requirement already satisfied: keras-applications>=1.0.8 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0) (1.0.8)
Requirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0) (1.12.0)
Requirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0) (1.27.1)
Requirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0) (3.1.0)
Requirement already satisfied: numpy<2.0,>=1.16.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0) (1.17.5)
Requirement already satisfied: absl-py>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0) (0.9.0)
Requirement already satisfied: astor>=0.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0) (0.8.1)
Requirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0) (0.34.2)
Collecting tensorboard<2.1.0,>=2.0.0
Downloading https://files.pythonhosted.org/packages/76/54/99b9d5d52d5cb732f099baaaf7740403e83fe6b0cedde940fabd2b13d75a/tensorboard-2.0.2-py3-none-any.whl (3.8MB)
|████████████████████████████████| 3.8MB 44.0MB/s
Requirement already satisfied: protobuf>=3.6.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0) (3.10.0)
Collecting tensorflow-estimator<2.1.0,>=2.0.0
Downloading https://files.pythonhosted.org/packages/fc/08/8b927337b7019c374719145d1dceba21a8bb909b93b1ad6f8fb7d22c1ca1/tensorflow_estimator-2.0.1-py2.py3-none-any.whl (449kB)
|████████████████████████████████| 450kB 54.9MB/s
Requirement already satisfied: gast==0.2.2 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0) (0.2.2)
Requirement already satisfied: google-pasta>=0.1.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0) (0.1.8)
Requirement already satisfied: h5py in /usr/local/lib/python3.6/dist-packages (from keras-applications>=1.0.8->tensorflow-gpu==2.0.0) (2.8.0)
Requirement already satisfied: requests<3,>=2.21.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.1.0,>=2.0.0->tensorflow-gpu==2.0.0) (2.21.0)
Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.1.0,>=2.0.0->tensorflow-gpu==2.0.0) (0.4.1)
Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.1.0,>=2.0.0->tensorflow-gpu==2.0.0) (45.2.0)
Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.1.0,>=2.0.0->tensorflow-gpu==2.0.0) (3.2.1)
Requirement already satisfied: google-auth<2,>=1.6.3 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.1.0,>=2.0.0->tensorflow-gpu==2.0.0) (1.7.2)
Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.1.0,>=2.0.0->tensorflow-gpu==2.0.0) (1.0.0)
Requirement already satisfied: idna<2.9,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard<2.1.0,>=2.0.0->tensorflow-gpu==2.0.0) (2.8)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard<2.1.0,>=2.0.0->tensorflow-gpu==2.0.0) (2019.11.28)
Requirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard<2.1.0,>=2.0.0->tensorflow-gpu==2.0.0) (3.0.4)
Requirement already satisfied: urllib3<1.25,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard<2.1.0,>=2.0.0->tensorflow-gpu==2.0.0) (1.24.3)
Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.1.0,>=2.0.0->tensorflow-gpu==2.0.0) (1.3.0)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.1.0,>=2.0.0->tensorflow-gpu==2.0.0) (0.2.8)
Requirement already satisfied: rsa<4.1,>=3.1.4 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.1.0,>=2.0.0->tensorflow-gpu==2.0.0) (4.0)
Requirement already satisfied: cachetools<3.2,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.1.0,>=2.0.0->tensorflow-gpu==2.0.0) (3.1.1)
Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.6/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.1.0,>=2.0.0->tensorflow-gpu==2.0.0) (3.1.0)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.6/dist-packages (from pyasn1-modules>=0.2.1->google-auth<2,>=1.6.3->tensorboard<2.1.0,>=2.0.0->tensorflow-gpu==2.0.0) (0.4.8)
ERROR: tensorflow 1.15.0 has requirement tensorboard<1.16.0,>=1.15.0, but you’ll have tensorboard 2.0.2 which is incompatible.
ERROR: tensorflow 1.15.0 has requirement tensorflow-estimator==1.15.1, but you’ll have tensorflow-estimator 2.0.1 which is incompatible.
Installing collected packages: q, tensorboard, tensorflow-estimator, tensorflow-gpu
Found existing installation: tensorboard 1.15.0
Uninstalling tensorboard-1.15.0:
Successfully uninstalled tensorboard-1.15.0
Found existing installation: tensorflow-estimator 1.15.1
Uninstalling tensorflow-estimator-1.15.1:
Successfully uninstalled tensorflow-estimator-1.15.1
Successfully installed q-2.6 tensorboard-2.0.2 tensorflow-estimator-2.0.1 tensorflow-gpu-2.0.0
In [1]:
import tensorflow as tf
tf.__version__
Out[1]:
‘2.0.0’
In [4]:
from google.colab import drive
drive.mount(‘/content/gdrive’)
Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly
Enter your authorization code:
··········
Mounted at /content/gdrive
In [5]:
import time
start = time.time()
!python3 “/content/gdrive/My Drive/mnist_cnn.py”
end = time.time()
print(end – start)
Using TensorFlow backend.
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11493376/11490434 [==============================] – 0s 0us/step
x_train shape: (60000, 28, 28, 1)
60000 train samples
10000 test samples
2020-03-15 04:07:23.403698: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcuda.so.1
2020-03-15 04:07:23.460364: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:1006] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-03-15 04:07:23.460956: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1618] Found device 0 with properties:
name: Tesla P4 major: 6 minor: 1 memoryClockRate(GHz): 1.1135
pciBusID: 0000:00:04.0
2020-03-15 04:07:23.473097: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.0
2020-03-15 04:07:23.677780: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcublas.so.10.0
2020-03-15 04:07:23.782451: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcufft.so.10.0
2020-03-15 04:07:23.806920: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcurand.so.10.0
2020-03-15 04:07:24.041933: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusolver.so.10.0
2020-03-15 04:07:24.155093: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusparse.so.10.0
2020-03-15 04:07:24.659566: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudnn.so.7
2020-03-15 04:07:24.659779: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:1006] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-03-15 04:07:24.660571: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:1006] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-03-15 04:07:24.661155: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1746] Adding visible gpu devices: 0
2020-03-15 04:07:24.661527: I tensorflow/core/platform/cpu_feature_guard.cc:142] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
2020-03-15 04:07:24.665759: I tensorflow/core/platform/profile_utils/cpu_utils.cc:94] CPU Frequency: 2200000000 Hz
2020-03-15 04:07:24.666182: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x1d94bc0 executing computations on platform Host. Devices:
2020-03-15 04:07:24.666215: I tensorflow/compiler/xla/service/service.cc:175] StreamExecutor device (0): Host, Default Version
2020-03-15 04:07:24.789097: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:1006] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-03-15 04:07:24.789676: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x1d94d80 executing computations on platform CUDA. Devices:
2020-03-15 04:07:24.789706: I tensorflow/compiler/xla/service/service.cc:175] StreamExecutor device (0): Tesla P4, Compute Capability 6.1
2020-03-15 04:07:24.790963: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:1006] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-03-15 04:07:24.791332: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1618] Found device 0 with properties:
name: Tesla P4 major: 6 minor: 1 memoryClockRate(GHz): 1.1135
pciBusID: 0000:00:04.0
2020-03-15 04:07:24.791393: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.0
2020-03-15 04:07:24.791414: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcublas.so.10.0
2020-03-15 04:07:24.791434: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcufft.so.10.0
2020-03-15 04:07:24.791453: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcurand.so.10.0
2020-03-15 04:07:24.791473: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusolver.so.10.0
2020-03-15 04:07:24.791490: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusparse.so.10.0
2020-03-15 04:07:24.791509: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudnn.so.7
2020-03-15 04:07:24.791567: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:1006] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-03-15 04:07:24.791954: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:1006] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-03-15 04:07:24.792262: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1746] Adding visible gpu devices: 0
2020-03-15 04:07:24.792393: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.0
2020-03-15 04:07:24.793463: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1159] Device interconnect StreamExecutor with strength 1 edge matrix:
2020-03-15 04:07:24.793493: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1165] 0
2020-03-15 04:07:24.793504: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1178] 0: N
2020-03-15 04:07:24.793602: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:1006] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-03-15 04:07:24.793970: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:1006] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-03-15 04:07:24.794312: W tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.cc:39] Overriding allow_growth setting because the TF_FORCE_GPU_ALLOW_GROWTH environment variable is set. Original config value was 0.
2020-03-15 04:07:24.794354: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1304] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 7123 MB memory) -> physical GPU (device: 0, name: Tesla P4, pci bus id: 0000:00:04.0, compute capability: 6.1)
Train on 48000 samples, validate on 12000 samples
Epoch 1/12
2020-03-15 04:07:26.224788: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcublas.so.10.0
2020-03-15 04:07:27.472301: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudnn.so.7
48000/48000 [==============================] – 10s 215us/sample – loss: 0.2758 – accuracy: 0.9161 – val_loss: 0.0644 – val_accuracy: 0.9807
Epoch 2/12
48000/48000 [==============================] – 4s 80us/sample – loss: 0.0907 – accuracy: 0.9735 – val_loss: 0.0487 – val_accuracy: 0.9857
Epoch 3/12
48000/48000 [==============================] – 4s 80us/sample – loss: 0.0676 – accuracy: 0.9793 – val_loss: 0.0429 – val_accuracy: 0.9877
Epoch 4/12
48000/48000 [==============================] – 4s 80us/sample – loss: 0.0567 – accuracy: 0.9821 – val_loss: 0.0412 – val_accuracy: 0.9889
Epoch 5/12
48000/48000 [==============================] – 4s 81us/sample – loss: 0.0462 – accuracy: 0.9851 – val_loss: 0.0388 – val_accuracy: 0.9893
Epoch 6/12
48000/48000 [==============================] – 4s 80us/sample – loss: 0.0394 – accuracy: 0.9877 – val_loss: 0.0395 – val_accuracy: 0.9879
Epoch 7/12
48000/48000 [==============================] – 4s 80us/sample – loss: 0.0334 – accuracy: 0.9889 – val_loss: 0.0381 – val_accuracy: 0.9896
Epoch 8/12
48000/48000 [==============================] – 4s 80us/sample – loss: 0.0328 – accuracy: 0.9891 – val_loss: 0.0356 – val_accuracy: 0.9905
Epoch 9/12
48000/48000 [==============================] – 4s 80us/sample – loss: 0.0304 – accuracy: 0.9902 – val_loss: 0.0385 – val_accuracy: 0.9903
Epoch 10/12
48000/48000 [==============================] – 4s 80us/sample – loss: 0.0260 – accuracy: 0.9913 – val_loss: 0.0407 – val_accuracy: 0.9896
Epoch 11/12
48000/48000 [==============================] – 4s 79us/sample – loss: 0.0242 – accuracy: 0.9918 – val_loss: 0.0392 – val_accuracy: 0.9908
Epoch 12/12
48000/48000 [==============================] – 4s 80us/sample – loss: 0.0219 – accuracy: 0.9926 – val_loss: 0.0362 – val_accuracy: 0.9899
Test loss: 0.03045261118811104
Test accuracy: 0.9911
62.37748980522156
In [0]:
#Q1
#Run time on my computer:31.655904531478882 seconds
#Run time on GPU on Colab:82.71783566474915 seconds
#Interesting fact…I’m sure I did select runtime type “GPU”
In [0]:
#Step2. Implement handwritten recognition in Tensorflow using CNN
In [0]:
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras import backend as K
#Conv2D:build convolutional layer
#Maxpooling2D:build maxpooling layer
#Flatten:create fully connected layer
In [0]:
batch_size = 128
num_classes = 10
epochs = 12
In [0]:
# input image dimensions
img_rows, img_cols = 28, 28
In [0]:
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
In [0]:
if K.image_data_format() == ‘channels_first’:
x_train= x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test= x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
#Conv2D shows shape of the inputs (vector shape we put Conv1D, matrix shape we put Conv2D, 3Dimage we put Conv3D or Maxpooling3D)
#We fit matrix into convolutional layer in CNN
#in addtion to the dimension of the matrix (28*28), we need to add an additional layer: number of channels which shows the image is black&white or colorful.
#1 means B&W, 3 means RGB. Ex:(60000,28,28,1)
In [12]:
x_train = x_train.astype(‘float32’)
x_test = x_test.astype(‘float32’) #fit matrix into convolutional layer
x_train /= 255
x_test /= 255
print(‘x_train shape:’, x_train.shape)
print(x_train.shape[0], ‘train samples’)
print(x_test.shape[0], ‘test samples’)
x_train shape: (60000, 28, 28, 1)
60000 train samples
10000 test samples
In [0]:
import keras
In [0]:
import tensorflow as tf
In [0]:
# convert class vectors to binary class matrices
y_train= keras.utils.to_categorical(y_train, num_classes)
y_test= keras.utils.to_categorical(y_test, num_classes)
In [0]:
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),activation=’relu’,input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation=’relu’))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation=’relu’))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation=’softmax’))
In [0]:
#Q2- Explain the way that this model is designed. Talk about all the layers and their functionality.
#1.model.add(Conv2D(32, kernel_size=(3, 3),activation=’relu’,input_shape=input_shape))
#means add my first convolutional layer, with 32 filters, filter size is 3 by 3. (normal standard number for convolutional layer)
#first hiddenlayer has to define how many inputs are there that is the “input_shape”.
#2.model.add(Conv2D(64, (3, 3), activation=’relu’))
#then we add another convolutional layer with 64 filters
#3.model.add(MaxPooling2D(pool_size=(2, 2)))
#then add a Maxpooling layer with maxpooling size 2 by 2. (should be smaller than convolutional layer)
#4.model.add(Dropout(0.25))
#0.25 dropout for overfitting
#5.model.add(Flatten())
#fully connected layer (Flatten layer)
#from now is similar to nn
#add a layer with 128 nodes, activation is relu, dropout 0.5
#output layer num_classes, activation softmax
In [17]:
model.compile(loss=’categorical_crossentropy’, optimizer=’adam’, metrics=[‘accuracy’])
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_split=0.2) #20% of training data is used for validation
score = model.evaluate(x_test, y_test, verbose=0)
print(‘Test loss:’, score[0])
print(‘Test accuracy:’, score[1])
Train on 48000 samples, validate on 12000 samples
Epoch 1/12
48000/48000 [==============================] – 6s 126us/sample – loss: 0.2687 – accuracy: 0.9164 – val_loss: 0.0610 – val_accuracy: 0.9817
Epoch 2/12
48000/48000 [==============================] – 4s 84us/sample – loss: 0.0932 – accuracy: 0.9723 – val_loss: 0.0491 – val_accuracy: 0.9858
Epoch 3/12
48000/48000 [==============================] – 4s 84us/sample – loss: 0.0709 – accuracy: 0.9780 – val_loss: 0.0418 – val_accuracy: 0.9876
Epoch 4/12
48000/48000 [==============================] – 4s 85us/sample – loss: 0.0565 – accuracy: 0.9821 – val_loss: 0.0444 – val_accuracy: 0.9878
Epoch 5/12
48000/48000 [==============================] – 4s 83us/sample – loss: 0.0480 – accuracy: 0.9851 – val_loss: 0.0359 – val_accuracy: 0.9908
Epoch 6/12
48000/48000 [==============================] – 4s 84us/sample – loss: 0.0403 – accuracy: 0.9875 – val_loss: 0.0389 – val_accuracy: 0.9892
Epoch 7/12
48000/48000 [==============================] – 4s 84us/sample – loss: 0.0371 – accuracy: 0.9885 – val_loss: 0.0456 – val_accuracy: 0.9866
Epoch 8/12
48000/48000 [==============================] – 4s 84us/sample – loss: 0.0332 – accuracy: 0.9891 – val_loss: 0.0368 – val_accuracy: 0.9901
Epoch 9/12
48000/48000 [==============================] – 4s 83us/sample – loss: 0.0301 – accuracy: 0.9899 – val_loss: 0.0379 – val_accuracy: 0.9895
Epoch 10/12
48000/48000 [==============================] – 4s 83us/sample – loss: 0.0264 – accuracy: 0.9913 – val_loss: 0.0349 – val_accuracy: 0.9912
Epoch 11/12
48000/48000 [==============================] – 4s 84us/sample – loss: 0.0232 – accuracy: 0.9928 – val_loss: 0.0412 – val_accuracy: 0.9905
Epoch 12/12
48000/48000 [==============================] – 4s 85us/sample – loss: 0.0219 – accuracy: 0.9926 – val_loss: 0.0376 – val_accuracy: 0.9904
Test loss: 0.03435139556707076
Test accuracy: 0.9903
In [18]:
model.compile(loss=’categorical_crossentropy’, optimizer=’adam’, metrics=[‘accuracy’])
hist = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_split=0.2)
loss, accuracy = model.evaluate(x_test, y_test, verbose=0)
print(‘Test loss:’, score[0])
print(‘Test accuracy:’, score[1])
Train on 48000 samples, validate on 12000 samples
Epoch 1/12
48000/48000 [==============================] – 5s 95us/sample – loss: 0.0220 – accuracy: 0.9923 – val_loss: 0.0370 – val_accuracy: 0.9911
Epoch 2/12
48000/48000 [==============================] – 4s 84us/sample – loss: 0.0197 – accuracy: 0.9937 – val_loss: 0.0358 – val_accuracy: 0.9909
Epoch 3/12
48000/48000 [==============================] – 4s 85us/sample – loss: 0.0185 – accuracy: 0.9935 – val_loss: 0.0393 – val_accuracy: 0.9908
Epoch 4/12
48000/48000 [==============================] – 4s 84us/sample – loss: 0.0167 – accuracy: 0.9942 – val_loss: 0.0429 – val_accuracy: 0.9898
Epoch 5/12
48000/48000 [==============================] – 4s 83us/sample – loss: 0.0182 – accuracy: 0.9933 – val_loss: 0.0450 – val_accuracy: 0.9905
Epoch 6/12
48000/48000 [==============================] – 4s 84us/sample – loss: 0.0160 – accuracy: 0.9947 – val_loss: 0.0400 – val_accuracy: 0.9910
Epoch 7/12
48000/48000 [==============================] – 4s 84us/sample – loss: 0.0148 – accuracy: 0.9952 – val_loss: 0.0369 – val_accuracy: 0.9912
Epoch 8/12
48000/48000 [==============================] – 4s 83us/sample – loss: 0.0142 – accuracy: 0.9952 – val_loss: 0.0444 – val_accuracy: 0.9919
Epoch 9/12
48000/48000 [==============================] – 4s 85us/sample – loss: 0.0137 – accuracy: 0.9951 – val_loss: 0.0404 – val_accuracy: 0.9911
Epoch 10/12
48000/48000 [==============================] – 4s 84us/sample – loss: 0.0136 – accuracy: 0.9955 – val_loss: 0.0449 – val_accuracy: 0.9912
Epoch 11/12
48000/48000 [==============================] – 4s 85us/sample – loss: 0.0127 – accuracy: 0.9956 – val_loss: 0.0402 – val_accuracy: 0.9918
Epoch 12/12
48000/48000 [==============================] – 4s 83us/sample – loss: 0.0110 – accuracy: 0.9962 – val_loss: 0.0442 – val_accuracy: 0.9908
Test loss: 0.03435139556707076
Test accuracy: 0.9903
In [0]:
#Q3- Design the learning curve and talk about what you see.
#Please see below
#we have a very nice accuracy here
In [0]:
import matplotlib.pyplot as plt
plt.subplot(2,1,1)
plt.plot(hist.history[‘accuracy’])
plt.plot(hist.history[‘val_accuracy’])
plt.title(‘model accuracy’)
plt.ylabel(‘accuracy’)
plt.xlabel(‘epoch’)
plt.legend([‘train’, ‘test’], loc=’lower right’)
plt.subplot(2,1,2)
plt.plot(hist.history[‘loss’])
plt.plot(hist.history[‘val_loss’])
plt.title(‘model loss’)
plt.ylabel(‘loss’)
plt.xlabel(‘epoch’)
plt.legend([‘train’, ‘test’], loc=’upper right’)
plt.show()

In [0]:
#Step3. Text mining using CNN
In [0]:
import pandas as pd
In [0]:
df = pd.read_csv(‘/content/gdrive/My Drive//amazon_cells_labelled.txt’, names=[‘sentence’, ‘label’], sep=’\t’)
In [21]:
print(df.iloc[0]) #print first review
sentence So there is no way for me to plug it in here i…
label 0
Name: 0, dtype: object
In [0]:
sentences = [‘John likes ice cream’, ‘John hates chocolate.’]
In [0]:
from sklearn.feature_extraction.text import CountVectorizer
In [24]:
vectorizer = CountVectorizer(min_df=0, lowercase=False)
vectorizer.fit(sentences)
vectorizer.vocabulary_
Out[24]:
{‘John’: 0, ‘chocolate’: 1, ‘cream’: 2, ‘hates’: 3, ‘ice’: 4, ‘likes’: 5}
In [26]:
vectorizer.transform(sentences).toarray()
Out[26]:
array([[1, 0, 1, 0, 1, 1],
[1, 1, 0, 1, 0, 0]])
In [0]:
from sklearn.model_selection import train_test_split
sentences = df[‘sentence’].values
y = df[‘label’].values
In [0]:
sentences_train, sentences_test, y_train, y_test = train_test_split(sentences, y, test_size=0.25, random_state=1000) #25% for test 75% for train
In [0]:
from sklearn.feature_extraction.text import CountVectorizer
In [30]:
vectorizer = CountVectorizer()
vectorizer.fit(sentences_train)
Out[30]:
CountVectorizer(analyzer=’word’, binary=False, decode_error=’strict’,
dtype=
lowercase=True, max_df=1.0, max_features=None, min_df=1,
ngram_range=(1, 1), preprocessor=None, stop_words=None,
strip_accents=None, token_pattern='(?u)\\b\\w\\w+\\b’,
tokenizer=None, vocabulary=None)
In [31]:
X_train = vectorizer.transform(sentences_train)
X_test = vectorizer.transform(sentences_test)
X_train
#It shows 750 samples which are the number of training samples. Each sample has 1546 dimensions,which is the size of the vocabulary
Out[31]:
<750x1546 sparse matrix of type '
with 6817 stored elements in Compressed Sparse Row format>
In [0]:
#we really don’t need to always use fancy algorithms. For example here even using a logistic regression model, gives us a reasonable result:
In [33]:
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression()
classifier.fit(X_train, y_train)
score = classifier.score(X_test, y_test)
print(“Accuracy:”, score)
Accuracy: 0.796
In [0]:
#Now, we can implement a normal DNN:
In [35]:
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
input_dim = X_train.shape[1] # Number of features
model = Sequential()
model.add(layers.Dense(10, input_dim=input_dim, activation=’relu’))
model.add(layers.Dense(1, activation=’sigmoid’))
model.compile(loss=’binary_crossentropy’, optimizer=’adam’,
metrics=[‘accuracy’])
hist = model.fit(X_train, y_train, epochs=100, validation_split=0.2 ,
batch_size=10)
loss, accuracy = model.evaluate(X_test, y_test, verbose=False)
print(“Test Accuracy: “,accuracy*100)
WARNING:tensorflow:Falling back from v2 loop because of error: Failed to find data adapter that can handle input:
Train on 600 samples, validate on 150 samples
Epoch 1/100
600/600 [==============================] – 0s 503us/sample – loss: 0.6924 – accuracy: 0.5150 – val_loss: 0.6733 – val_accuracy: 0.6667
Epoch 2/100
600/600 [==============================] – 0s 281us/sample – loss: 0.6359 – accuracy: 0.8000 – val_loss: 0.6395 – val_accuracy: 0.7667
Epoch 3/100
600/600 [==============================] – 0s 287us/sample – loss: 0.5610 – accuracy: 0.8917 – val_loss: 0.5887 – val_accuracy: 0.8067
Epoch 4/100
600/600 [==============================] – 0s 301us/sample – loss: 0.4719 – accuracy: 0.9500 – val_loss: 0.5372 – val_accuracy: 0.8333
Epoch 5/100
600/600 [==============================] – 0s 319us/sample – loss: 0.3835 – accuracy: 0.9700 – val_loss: 0.4957 – val_accuracy: 0.8333
Epoch 6/100
600/600 [==============================] – 0s 302us/sample – loss: 0.3104 – accuracy: 0.9783 – val_loss: 0.4615 – val_accuracy: 0.8400
Epoch 7/100
600/600 [==============================] – 0s 304us/sample – loss: 0.2511 – accuracy: 0.9850 – val_loss: 0.4335 – val_accuracy: 0.8533
Epoch 8/100
600/600 [==============================] – 0s 290us/sample – loss: 0.2031 – accuracy: 0.9933 – val_loss: 0.4130 – val_accuracy: 0.8600
Epoch 9/100
600/600 [==============================] – 0s 289us/sample – loss: 0.1668 – accuracy: 0.9967 – val_loss: 0.3974 – val_accuracy: 0.8600
Epoch 10/100
600/600 [==============================] – 0s 294us/sample – loss: 0.1383 – accuracy: 0.9983 – val_loss: 0.3884 – val_accuracy: 0.8467
Epoch 11/100
600/600 [==============================] – 0s 287us/sample – loss: 0.1154 – accuracy: 1.0000 – val_loss: 0.3795 – val_accuracy: 0.8400
Epoch 12/100
600/600 [==============================] – 0s 300us/sample – loss: 0.0975 – accuracy: 1.0000 – val_loss: 0.3745 – val_accuracy: 0.8600
Epoch 13/100
600/600 [==============================] – 0s 323us/sample – loss: 0.0831 – accuracy: 1.0000 – val_loss: 0.3703 – val_accuracy: 0.8600
Epoch 14/100
600/600 [==============================] – 0s 339us/sample – loss: 0.0713 – accuracy: 1.0000 – val_loss: 0.3691 – val_accuracy: 0.8533
Epoch 15/100
600/600 [==============================] – 0s 284us/sample – loss: 0.0617 – accuracy: 1.0000 – val_loss: 0.3676 – val_accuracy: 0.8600
Epoch 16/100
600/600 [==============================] – 0s 295us/sample – loss: 0.0538 – accuracy: 1.0000 – val_loss: 0.3657 – val_accuracy: 0.8600
Epoch 17/100
600/600 [==============================] – 0s 280us/sample – loss: 0.0470 – accuracy: 1.0000 – val_loss: 0.3663 – val_accuracy: 0.8600
Epoch 18/100
600/600 [==============================] – 0s 321us/sample – loss: 0.0414 – accuracy: 1.0000 – val_loss: 0.3658 – val_accuracy: 0.8533
Epoch 19/100
600/600 [==============================] – 0s 292us/sample – loss: 0.0367 – accuracy: 1.0000 – val_loss: 0.3675 – val_accuracy: 0.8533
Epoch 20/100
600/600 [==============================] – 0s 290us/sample – loss: 0.0327 – accuracy: 1.0000 – val_loss: 0.3673 – val_accuracy: 0.8467
Epoch 21/100
600/600 [==============================] – 0s 300us/sample – loss: 0.0291 – accuracy: 1.0000 – val_loss: 0.3686 – val_accuracy: 0.8533
Epoch 22/100
600/600 [==============================] – 0s 288us/sample – loss: 0.0262 – accuracy: 1.0000 – val_loss: 0.3702 – val_accuracy: 0.8467
Epoch 23/100
600/600 [==============================] – 0s 273us/sample – loss: 0.0236 – accuracy: 1.0000 – val_loss: 0.3726 – val_accuracy: 0.8467
Epoch 24/100
600/600 [==============================] – 0s 312us/sample – loss: 0.0213 – accuracy: 1.0000 – val_loss: 0.3741 – val_accuracy: 0.8467
Epoch 25/100
600/600 [==============================] – 0s 307us/sample – loss: 0.0194 – accuracy: 1.0000 – val_loss: 0.3756 – val_accuracy: 0.8467
Epoch 26/100
600/600 [==============================] – 0s 291us/sample – loss: 0.0177 – accuracy: 1.0000 – val_loss: 0.3792 – val_accuracy: 0.8333
Epoch 27/100
600/600 [==============================] – 0s 297us/sample – loss: 0.0161 – accuracy: 1.0000 – val_loss: 0.3801 – val_accuracy: 0.8400
Epoch 28/100
600/600 [==============================] – 0s 290us/sample – loss: 0.0147 – accuracy: 1.0000 – val_loss: 0.3826 – val_accuracy: 0.8333
Epoch 29/100
600/600 [==============================] – 0s 303us/sample – loss: 0.0136 – accuracy: 1.0000 – val_loss: 0.3859 – val_accuracy: 0.8333
Epoch 30/100
600/600 [==============================] – 0s 290us/sample – loss: 0.0125 – accuracy: 1.0000 – val_loss: 0.3873 – val_accuracy: 0.8467
Epoch 31/100
600/600 [==============================] – 0s 307us/sample – loss: 0.0115 – accuracy: 1.0000 – val_loss: 0.3902 – val_accuracy: 0.8400
Epoch 32/100
600/600 [==============================] – 0s 288us/sample – loss: 0.0106 – accuracy: 1.0000 – val_loss: 0.3924 – val_accuracy: 0.8400
Epoch 33/100
600/600 [==============================] – 0s 297us/sample – loss: 0.0098 – accuracy: 1.0000 – val_loss: 0.3947 – val_accuracy: 0.8467
Epoch 34/100
600/600 [==============================] – 0s 287us/sample – loss: 0.0091 – accuracy: 1.0000 – val_loss: 0.3968 – val_accuracy: 0.8400
Epoch 35/100
600/600 [==============================] – 0s 319us/sample – loss: 0.0085 – accuracy: 1.0000 – val_loss: 0.3997 – val_accuracy: 0.8467
Epoch 36/100
600/600 [==============================] – 0s 306us/sample – loss: 0.0079 – accuracy: 1.0000 – val_loss: 0.4019 – val_accuracy: 0.8400
Epoch 37/100
600/600 [==============================] – 0s 303us/sample – loss: 0.0074 – accuracy: 1.0000 – val_loss: 0.4045 – val_accuracy: 0.8467
Epoch 38/100
600/600 [==============================] – 0s 295us/sample – loss: 0.0069 – accuracy: 1.0000 – val_loss: 0.4062 – val_accuracy: 0.8467
Epoch 39/100
600/600 [==============================] – 0s 276us/sample – loss: 0.0065 – accuracy: 1.0000 – val_loss: 0.4086 – val_accuracy: 0.8533
Epoch 40/100
600/600 [==============================] – 0s 296us/sample – loss: 0.0060 – accuracy: 1.0000 – val_loss: 0.4104 – val_accuracy: 0.8533
Epoch 41/100
600/600 [==============================] – 0s 330us/sample – loss: 0.0057 – accuracy: 1.0000 – val_loss: 0.4138 – val_accuracy: 0.8533
Epoch 42/100
600/600 [==============================] – 0s 317us/sample – loss: 0.0053 – accuracy: 1.0000 – val_loss: 0.4161 – val_accuracy: 0.8533
Epoch 43/100
600/600 [==============================] – 0s 308us/sample – loss: 0.0050 – accuracy: 1.0000 – val_loss: 0.4196 – val_accuracy: 0.8533
Epoch 44/100
600/600 [==============================] – 0s 304us/sample – loss: 0.0047 – accuracy: 1.0000 – val_loss: 0.4200 – val_accuracy: 0.8533
Epoch 45/100
600/600 [==============================] – 0s 292us/sample – loss: 0.0044 – accuracy: 1.0000 – val_loss: 0.4235 – val_accuracy: 0.8533
Epoch 46/100
600/600 [==============================] – 0s 300us/sample – loss: 0.0042 – accuracy: 1.0000 – val_loss: 0.4254 – val_accuracy: 0.8533
Epoch 47/100
600/600 [==============================] – 0s 286us/sample – loss: 0.0039 – accuracy: 1.0000 – val_loss: 0.4269 – val_accuracy: 0.8533
Epoch 48/100
600/600 [==============================] – 0s 311us/sample – loss: 0.0037 – accuracy: 1.0000 – val_loss: 0.4297 – val_accuracy: 0.8533
Epoch 49/100
600/600 [==============================] – 0s 277us/sample – loss: 0.0035 – accuracy: 1.0000 – val_loss: 0.4313 – val_accuracy: 0.8533
Epoch 50/100
600/600 [==============================] – 0s 273us/sample – loss: 0.0033 – accuracy: 1.0000 – val_loss: 0.4338 – val_accuracy: 0.8533
Epoch 51/100
600/600 [==============================] – 0s 290us/sample – loss: 0.0032 – accuracy: 1.0000 – val_loss: 0.4360 – val_accuracy: 0.8533
Epoch 52/100
600/600 [==============================] – 0s 313us/sample – loss: 0.0030 – accuracy: 1.0000 – val_loss: 0.4386 – val_accuracy: 0.8533
Epoch 53/100
600/600 [==============================] – 0s 292us/sample – loss: 0.0028 – accuracy: 1.0000 – val_loss: 0.4415 – val_accuracy: 0.8533
Epoch 54/100
600/600 [==============================] – 0s 292us/sample – loss: 0.0027 – accuracy: 1.0000 – val_loss: 0.4428 – val_accuracy: 0.8533
Epoch 55/100
600/600 [==============================] – 0s 293us/sample – loss: 0.0026 – accuracy: 1.0000 – val_loss: 0.4455 – val_accuracy: 0.8600
Epoch 56/100
600/600 [==============================] – 0s 339us/sample – loss: 0.0024 – accuracy: 1.0000 – val_loss: 0.4473 – val_accuracy: 0.8533
Epoch 57/100
600/600 [==============================] – 0s 305us/sample – loss: 0.0023 – accuracy: 1.0000 – val_loss: 0.4493 – val_accuracy: 0.8533
Epoch 58/100
600/600 [==============================] – 0s 300us/sample – loss: 0.0022 – accuracy: 1.0000 – val_loss: 0.4519 – val_accuracy: 0.8533
Epoch 59/100
600/600 [==============================] – 0s 291us/sample – loss: 0.0021 – accuracy: 1.0000 – val_loss: 0.4541 – val_accuracy: 0.8600
Epoch 60/100
600/600 [==============================] – 0s 296us/sample – loss: 0.0020 – accuracy: 1.0000 – val_loss: 0.4560 – val_accuracy: 0.8533
Epoch 61/100
600/600 [==============================] – 0s 288us/sample – loss: 0.0019 – accuracy: 1.0000 – val_loss: 0.4575 – val_accuracy: 0.8533
Epoch 62/100
600/600 [==============================] – 0s 296us/sample – loss: 0.0018 – accuracy: 1.0000 – val_loss: 0.4602 – val_accuracy: 0.8533
Epoch 63/100
600/600 [==============================] – 0s 302us/sample – loss: 0.0017 – accuracy: 1.0000 – val_loss: 0.4621 – val_accuracy: 0.8600
Epoch 64/100
600/600 [==============================] – 0s 287us/sample – loss: 0.0016 – accuracy: 1.0000 – val_loss: 0.4643 – val_accuracy: 0.8600
Epoch 65/100
600/600 [==============================] – 0s 317us/sample – loss: 0.0016 – accuracy: 1.0000 – val_loss: 0.4668 – val_accuracy: 0.8533
Epoch 66/100
600/600 [==============================] – 0s 284us/sample – loss: 0.0015 – accuracy: 1.0000 – val_loss: 0.4686 – val_accuracy: 0.8533
Epoch 67/100
600/600 [==============================] – 0s 295us/sample – loss: 0.0014 – accuracy: 1.0000 – val_loss: 0.4702 – val_accuracy: 0.8600
Epoch 68/100
600/600 [==============================] – 0s 285us/sample – loss: 0.0014 – accuracy: 1.0000 – val_loss: 0.4726 – val_accuracy: 0.8600
Epoch 69/100
600/600 [==============================] – 0s 314us/sample – loss: 0.0013 – accuracy: 1.0000 – val_loss: 0.4745 – val_accuracy: 0.8600
Epoch 70/100
600/600 [==============================] – 0s 279us/sample – loss: 0.0013 – accuracy: 1.0000 – val_loss: 0.4764 – val_accuracy: 0.8600
Epoch 71/100
600/600 [==============================] – 0s 293us/sample – loss: 0.0012 – accuracy: 1.0000 – val_loss: 0.4787 – val_accuracy: 0.8467
Epoch 72/100
600/600 [==============================] – 0s 310us/sample – loss: 0.0011 – accuracy: 1.0000 – val_loss: 0.4808 – val_accuracy: 0.8600
Epoch 73/100
600/600 [==============================] – 0s 303us/sample – loss: 0.0011 – accuracy: 1.0000 – val_loss: 0.4831 – val_accuracy: 0.8467
Epoch 74/100
600/600 [==============================] – 0s 300us/sample – loss: 0.0010 – accuracy: 1.0000 – val_loss: 0.4851 – val_accuracy: 0.8400
Epoch 75/100
600/600 [==============================] – 0s 286us/sample – loss: 0.0010 – accuracy: 1.0000 – val_loss: 0.4865 – val_accuracy: 0.8400
Epoch 76/100
600/600 [==============================] – 0s 309us/sample – loss: 9.6262e-04 – accuracy: 1.0000 – val_loss: 0.4888 – val_accuracy: 0.8467
Epoch 77/100
600/600 [==============================] – 0s 285us/sample – loss: 9.2137e-04 – accuracy: 1.0000 – val_loss: 0.4909 – val_accuracy: 0.8467
Epoch 78/100
600/600 [==============================] – 0s 274us/sample – loss: 8.8467e-04 – accuracy: 1.0000 – val_loss: 0.4930 – val_accuracy: 0.8400
Epoch 79/100
600/600 [==============================] – 0s 292us/sample – loss: 8.4739e-04 – accuracy: 1.0000 – val_loss: 0.4948 – val_accuracy: 0.8533
Epoch 80/100
600/600 [==============================] – 0s 310us/sample – loss: 8.1277e-04 – accuracy: 1.0000 – val_loss: 0.4964 – val_accuracy: 0.8467
Epoch 81/100
600/600 [==============================] – 0s 322us/sample – loss: 7.8026e-04 – accuracy: 1.0000 – val_loss: 0.4988 – val_accuracy: 0.8533
Epoch 82/100
600/600 [==============================] – 0s 297us/sample – loss: 7.4824e-04 – accuracy: 1.0000 – val_loss: 0.5013 – val_accuracy: 0.8467
Epoch 83/100
600/600 [==============================] – 0s 273us/sample – loss: 7.1872e-04 – accuracy: 1.0000 – val_loss: 0.5022 – val_accuracy: 0.8533
Epoch 84/100
600/600 [==============================] – 0s 288us/sample – loss: 6.8920e-04 – accuracy: 1.0000 – val_loss: 0.5046 – val_accuracy: 0.8533
Epoch 85/100
600/600 [==============================] – 0s 288us/sample – loss: 6.6169e-04 – accuracy: 1.0000 – val_loss: 0.5061 – val_accuracy: 0.8533
Epoch 86/100
600/600 [==============================] – 0s 312us/sample – loss: 6.3518e-04 – accuracy: 1.0000 – val_loss: 0.5083 – val_accuracy: 0.8600
Epoch 87/100
600/600 [==============================] – 0s 296us/sample – loss: 6.1022e-04 – accuracy: 1.0000 – val_loss: 0.5106 – val_accuracy: 0.8533
Epoch 88/100
600/600 [==============================] – 0s 319us/sample – loss: 5.8642e-04 – accuracy: 1.0000 – val_loss: 0.5119 – val_accuracy: 0.8533
Epoch 89/100
600/600 [==============================] – 0s 289us/sample – loss: 5.6434e-04 – accuracy: 1.0000 – val_loss: 0.5143 – val_accuracy: 0.8533
Epoch 90/100
600/600 [==============================] – 0s 284us/sample – loss: 5.4141e-04 – accuracy: 1.0000 – val_loss: 0.5158 – val_accuracy: 0.8533
Epoch 91/100
600/600 [==============================] – 0s 304us/sample – loss: 5.1981e-04 – accuracy: 1.0000 – val_loss: 0.5177 – val_accuracy: 0.8533
Epoch 92/100
600/600 [==============================] – 0s 290us/sample – loss: 5.0032e-04 – accuracy: 1.0000 – val_loss: 0.5194 – val_accuracy: 0.8533
Epoch 93/100
600/600 [==============================] – 0s 301us/sample – loss: 4.8108e-04 – accuracy: 1.0000 – val_loss: 0.5213 – val_accuracy: 0.8533
Epoch 94/100
600/600 [==============================] – 0s 295us/sample – loss: 4.6245e-04 – accuracy: 1.0000 – val_loss: 0.5234 – val_accuracy: 0.8533
Epoch 95/100
600/600 [==============================] – 0s 297us/sample – loss: 4.4451e-04 – accuracy: 1.0000 – val_loss: 0.5251 – val_accuracy: 0.8533
Epoch 96/100
600/600 [==============================] – 0s 293us/sample – loss: 4.2783e-04 – accuracy: 1.0000 – val_loss: 0.5276 – val_accuracy: 0.8533
Epoch 97/100
600/600 [==============================] – 0s 345us/sample – loss: 4.1180e-04 – accuracy: 1.0000 – val_loss: 0.5290 – val_accuracy: 0.8533
Epoch 98/100
600/600 [==============================] – 0s 306us/sample – loss: 3.9622e-04 – accuracy: 1.0000 – val_loss: 0.5310 – val_accuracy: 0.8533
Epoch 99/100
600/600 [==============================] – 0s 280us/sample – loss: 3.8106e-04 – accuracy: 1.0000 – val_loss: 0.5333 – val_accuracy: 0.8533
Epoch 100/100
600/600 [==============================] – 0s 314us/sample – loss: 3.6665e-04 – accuracy: 1.0000 – val_loss: 0.5350 – val_accuracy: 0.8533
WARNING:tensorflow:Falling back from v2 loop because of error: Failed to find data adapter that can handle input:
Test Accuracy: 78.79999876022339
In [36]:
import matplotlib.pyplot as plt
plt.subplot(2,1,1)
plt.plot(hist.history[‘accuracy’])
plt.plot(hist.history[‘val_accuracy’])
plt.title(‘model accuracy’)
plt.ylabel(‘accuracy’)
plt.xlabel(‘epoch’)
plt.legend([‘train’, ‘test’], loc=’lower right’)
plt.subplot(2,1,2)
plt.plot(hist.history[‘loss’])
plt.plot(hist.history[‘val_loss’])
plt.title(‘model loss’)
plt.ylabel(‘loss’)
plt.xlabel(‘epoch’)
plt.legend([‘train’, ‘test’], loc=’upper right’)
plt.show()

In [0]:
#Q4- Explain these graphs. If you see any issue, suggest a solution to resolve it. Make the model by creating 3 hidden layers
#(first one 200 nodes, second one 100 nodes and last one 50 nodes and after each step, add dropout of 0.2 and report the accuracy.
#If you don’t see a huge improvement, don’t worry we are not done with the model yet.
#Answer: Overfitting problem
#Please see below
#Now the accuracy is 46.00000083446503
#Conclusion: we don’t see a huge different. The problem is not because of the model that we used, is because of the natural language processing technic we used.
#So we will improve the accuracy in embedding word section.
In [38]:
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
input_dim = X_train.shape[1] # Number of features
model = Sequential()
model.add(layers.Dense(10, input_dim=input_dim, activation=’relu’))
model.add(Dense(200, activation=’relu’))
model.add(Dropout(0.2))
model.add(Dense(100, activation=’relu’))
model.add(Dropout(0.2))
model.add(Dense(50, activation=’relu’))
model.add(Dropout(0.2))
model.add(layers.Dense(1, activation=’sigmoid’))
model.compile(loss=’binary_crossentropy’, optimizer=’adam’,
metrics=[‘accuracy’])
hist = model.fit(X_train, y_train, epochs=100, validation_split=0.2 ,
batch_size=10)
loss, accuracy = model.evaluate(X_test, y_test, verbose=False)
print(“Test Accuracy: “,accuracy*100)
WARNING:tensorflow:Falling back from v2 loop because of error: Failed to find data adapter that can handle input:
Train on 600 samples, validate on 150 samples
Epoch 1/100
600/600 [==============================] – 0s 758us/sample – loss: 0.6920 – accuracy: 0.4950 – val_loss: 0.6833 – val_accuracy: 0.6000
Epoch 2/100
600/600 [==============================] – 0s 364us/sample – loss: 0.6034 – accuracy: 0.7417 – val_loss: 0.5430 – val_accuracy: 0.7267
Epoch 3/100
600/600 [==============================] – 0s 383us/sample – loss: 0.2259 – accuracy: 0.9350 – val_loss: 0.5269 – val_accuracy: 0.7667
Epoch 4/100
600/600 [==============================] – 0s 382us/sample – loss: 0.0509 – accuracy: 0.9917 – val_loss: 0.6231 – val_accuracy: 0.8000
Epoch 5/100
600/600 [==============================] – 0s 379us/sample – loss: 0.0137 – accuracy: 0.9967 – val_loss: 0.7352 – val_accuracy: 0.8133
Epoch 6/100
600/600 [==============================] – 0s 362us/sample – loss: 0.0045 – accuracy: 0.9983 – val_loss: 0.8153 – val_accuracy: 0.8067
Epoch 7/100
600/600 [==============================] – 0s 358us/sample – loss: 0.0017 – accuracy: 1.0000 – val_loss: 0.8621 – val_accuracy: 0.8000
Epoch 8/100
600/600 [==============================] – 0s 374us/sample – loss: 0.0011 – accuracy: 1.0000 – val_loss: 0.9322 – val_accuracy: 0.8000
Epoch 9/100
600/600 [==============================] – 0s 354us/sample – loss: 7.7469e-04 – accuracy: 1.0000 – val_loss: 0.9477 – val_accuracy: 0.7800
Epoch 10/100
600/600 [==============================] – 0s 364us/sample – loss: 7.6807e-04 – accuracy: 1.0000 – val_loss: 0.9584 – val_accuracy: 0.7800
Epoch 11/100
600/600 [==============================] – 0s 357us/sample – loss: 4.0414e-04 – accuracy: 1.0000 – val_loss: 0.9776 – val_accuracy: 0.8000
Epoch 12/100
600/600 [==============================] – 0s 350us/sample – loss: 3.9808e-04 – accuracy: 1.0000 – val_loss: 1.0156 – val_accuracy: 0.7933
Epoch 13/100
600/600 [==============================] – 0s 370us/sample – loss: 2.6204e-04 – accuracy: 1.0000 – val_loss: 1.0538 – val_accuracy: 0.8000
Epoch 14/100
600/600 [==============================] – 0s 381us/sample – loss: 1.7639e-04 – accuracy: 1.0000 – val_loss: 1.1036 – val_accuracy: 0.8000
Epoch 15/100
600/600 [==============================] – 0s 348us/sample – loss: 1.5331e-04 – accuracy: 1.0000 – val_loss: 1.1588 – val_accuracy: 0.8000
Epoch 16/100
600/600 [==============================] – 0s 357us/sample – loss: 1.3282e-04 – accuracy: 1.0000 – val_loss: 1.1443 – val_accuracy: 0.8067
Epoch 17/100
600/600 [==============================] – 0s 365us/sample – loss: 9.1947e-05 – accuracy: 1.0000 – val_loss: 1.1476 – val_accuracy: 0.8000
Epoch 18/100
600/600 [==============================] – 0s 340us/sample – loss: 8.0089e-05 – accuracy: 1.0000 – val_loss: 1.1667 – val_accuracy: 0.8000
Epoch 19/100
600/600 [==============================] – 0s 349us/sample – loss: 9.7134e-05 – accuracy: 1.0000 – val_loss: 1.1636 – val_accuracy: 0.8000
Epoch 20/100
600/600 [==============================] – 0s 344us/sample – loss: 6.4377e-05 – accuracy: 1.0000 – val_loss: 1.1786 – val_accuracy: 0.8000
Epoch 21/100
600/600 [==============================] – 0s 347us/sample – loss: 6.8406e-05 – accuracy: 1.0000 – val_loss: 1.1910 – val_accuracy: 0.8000
Epoch 22/100
600/600 [==============================] – 0s 385us/sample – loss: 5.9579e-05 – accuracy: 1.0000 – val_loss: 1.2198 – val_accuracy: 0.8000
Epoch 23/100
600/600 [==============================] – 0s 355us/sample – loss: 3.8790e-05 – accuracy: 1.0000 – val_loss: 1.2267 – val_accuracy: 0.8000
Epoch 24/100
600/600 [==============================] – 0s 340us/sample – loss: 5.0714e-05 – accuracy: 1.0000 – val_loss: 1.2392 – val_accuracy: 0.8000
Epoch 25/100
600/600 [==============================] – 0s 349us/sample – loss: 6.7547e-05 – accuracy: 1.0000 – val_loss: 1.2513 – val_accuracy: 0.8000
Epoch 26/100
600/600 [==============================] – 0s 350us/sample – loss: 3.9578e-05 – accuracy: 1.0000 – val_loss: 1.2568 – val_accuracy: 0.8000
Epoch 27/100
600/600 [==============================] – 0s 389us/sample – loss: 8.4186e-05 – accuracy: 1.0000 – val_loss: 1.2320 – val_accuracy: 0.7933
Epoch 28/100
600/600 [==============================] – 0s 359us/sample – loss: 3.6451e-05 – accuracy: 1.0000 – val_loss: 1.2392 – val_accuracy: 0.7933
Epoch 29/100
600/600 [==============================] – 0s 359us/sample – loss: 3.9309e-05 – accuracy: 1.0000 – val_loss: 1.2534 – val_accuracy: 0.7933
Epoch 30/100
600/600 [==============================] – 0s 346us/sample – loss: 3.4274e-05 – accuracy: 1.0000 – val_loss: 1.2731 – val_accuracy: 0.8000
Epoch 31/100
600/600 [==============================] – 0s 411us/sample – loss: 2.6533e-05 – accuracy: 1.0000 – val_loss: 1.2843 – val_accuracy: 0.8000
Epoch 32/100
600/600 [==============================] – 0s 370us/sample – loss: 2.9755e-05 – accuracy: 1.0000 – val_loss: 1.2893 – val_accuracy: 0.8000
Epoch 33/100
600/600 [==============================] – 0s 346us/sample – loss: 1.8291e-05 – accuracy: 1.0000 – val_loss: 1.2989 – val_accuracy: 0.8000
Epoch 34/100
600/600 [==============================] – 0s 379us/sample – loss: 4.3805e-05 – accuracy: 1.0000 – val_loss: 1.3238 – val_accuracy: 0.8000
Epoch 35/100
600/600 [==============================] – 0s 364us/sample – loss: 3.5778e-05 – accuracy: 1.0000 – val_loss: 1.3520 – val_accuracy: 0.8000
Epoch 36/100
600/600 [==============================] – 0s 369us/sample – loss: 2.3998e-05 – accuracy: 1.0000 – val_loss: 1.3575 – val_accuracy: 0.8000
Epoch 37/100
600/600 [==============================] – 0s 350us/sample – loss: 3.2399e-05 – accuracy: 1.0000 – val_loss: 1.3455 – val_accuracy: 0.8000
Epoch 38/100
600/600 [==============================] – 0s 371us/sample – loss: 2.1492e-05 – accuracy: 1.0000 – val_loss: 1.3670 – val_accuracy: 0.8000
Epoch 39/100
600/600 [==============================] – 0s 352us/sample – loss: 1.5074e-05 – accuracy: 1.0000 – val_loss: 1.3767 – val_accuracy: 0.8000
Epoch 40/100
600/600 [==============================] – 0s 353us/sample – loss: 1.2205e-05 – accuracy: 1.0000 – val_loss: 1.3846 – val_accuracy: 0.8000
Epoch 41/100
600/600 [==============================] – 0s 354us/sample – loss: 1.4152e-05 – accuracy: 1.0000 – val_loss: 1.3889 – val_accuracy: 0.8000
Epoch 42/100
600/600 [==============================] – 0s 361us/sample – loss: 1.6817e-05 – accuracy: 1.0000 – val_loss: 1.3990 – val_accuracy: 0.8000
Epoch 43/100
600/600 [==============================] – 0s 362us/sample – loss: 3.4315e-05 – accuracy: 1.0000 – val_loss: 1.4202 – val_accuracy: 0.8067
Epoch 44/100
600/600 [==============================] – 0s 367us/sample – loss: 7.2254e-06 – accuracy: 1.0000 – val_loss: 1.4325 – val_accuracy: 0.8067
Epoch 45/100
600/600 [==============================] – 0s 356us/sample – loss: 9.0852e-06 – accuracy: 1.0000 – val_loss: 1.4323 – val_accuracy: 0.8067
Epoch 46/100
600/600 [==============================] – 0s 352us/sample – loss: 2.1387e-05 – accuracy: 1.0000 – val_loss: 1.4103 – val_accuracy: 0.8000
Epoch 47/100
600/600 [==============================] – 0s 360us/sample – loss: 7.7287e-06 – accuracy: 1.0000 – val_loss: 1.4179 – val_accuracy: 0.8000
Epoch 48/100
600/600 [==============================] – 0s 359us/sample – loss: 7.6327e-06 – accuracy: 1.0000 – val_loss: 1.4250 – val_accuracy: 0.8000
Epoch 49/100
600/600 [==============================] – 0s 339us/sample – loss: 4.7941e-06 – accuracy: 1.0000 – val_loss: 1.4305 – val_accuracy: 0.8000
Epoch 50/100
600/600 [==============================] – 0s 364us/sample – loss: 9.0609e-06 – accuracy: 1.0000 – val_loss: 1.4411 – val_accuracy: 0.8000
Epoch 51/100
600/600 [==============================] – 0s 365us/sample – loss: 1.2289e-05 – accuracy: 1.0000 – val_loss: 1.4510 – val_accuracy: 0.8000
Epoch 52/100
600/600 [==============================] – 0s 347us/sample – loss: 6.2717e-06 – accuracy: 1.0000 – val_loss: 1.4522 – val_accuracy: 0.8000
Epoch 53/100
600/600 [==============================] – 0s 342us/sample – loss: 3.3539e-06 – accuracy: 1.0000 – val_loss: 1.4567 – val_accuracy: 0.8000
Epoch 54/100
600/600 [==============================] – 0s 348us/sample – loss: 1.6332e-05 – accuracy: 1.0000 – val_loss: 1.4631 – val_accuracy: 0.8000
Epoch 55/100
600/600 [==============================] – 0s 387us/sample – loss: 4.1592e-06 – accuracy: 1.0000 – val_loss: 1.4633 – val_accuracy: 0.8000
Epoch 56/100
600/600 [==============================] – 0s 346us/sample – loss: 1.4147e-05 – accuracy: 1.0000 – val_loss: 1.5028 – val_accuracy: 0.8000
Epoch 57/100
600/600 [==============================] – 0s 366us/sample – loss: 9.3105e-06 – accuracy: 1.0000 – val_loss: 1.5140 – val_accuracy: 0.8000
Epoch 58/100
600/600 [==============================] – 0s 347us/sample – loss: 8.1316e-06 – accuracy: 1.0000 – val_loss: 1.5061 – val_accuracy: 0.8000
Epoch 59/100
600/600 [==============================] – 0s 347us/sample – loss: 6.4121e-06 – accuracy: 1.0000 – val_loss: 1.5190 – val_accuracy: 0.8000
Epoch 60/100
600/600 [==============================] – 0s 365us/sample – loss: 9.1253e-06 – accuracy: 1.0000 – val_loss: 1.5092 – val_accuracy: 0.8000
Epoch 61/100
600/600 [==============================] – 0s 360us/sample – loss: 8.9344e-06 – accuracy: 1.0000 – val_loss: 1.5261 – val_accuracy: 0.8000
Epoch 62/100
600/600 [==============================] – 0s 340us/sample – loss: 3.0213e-06 – accuracy: 1.0000 – val_loss: 1.5305 – val_accuracy: 0.8000
Epoch 63/100
600/600 [==============================] – 0s 348us/sample – loss: 4.6653e-06 – accuracy: 1.0000 – val_loss: 1.5357 – val_accuracy: 0.8000
Epoch 64/100
600/600 [==============================] – 0s 364us/sample – loss: 1.4668e-06 – accuracy: 1.0000 – val_loss: 1.5405 – val_accuracy: 0.8000
Epoch 65/100
600/600 [==============================] – 0s 353us/sample – loss: 4.4788e-06 – accuracy: 1.0000 – val_loss: 1.5405 – val_accuracy: 0.8000
Epoch 66/100
600/600 [==============================] – 0s 353us/sample – loss: 1.9542e-05 – accuracy: 1.0000 – val_loss: 1.6462 – val_accuracy: 0.8067
Epoch 67/100
600/600 [==============================] – 0s 362us/sample – loss: 1.2576e-05 – accuracy: 1.0000 – val_loss: 1.5656 – val_accuracy: 0.8000
Epoch 68/100
600/600 [==============================] – 0s 354us/sample – loss: 4.0968e-06 – accuracy: 1.0000 – val_loss: 1.5915 – val_accuracy: 0.8000
Epoch 69/100
600/600 [==============================] – 0s 368us/sample – loss: 5.4746e-06 – accuracy: 1.0000 – val_loss: 1.5989 – val_accuracy: 0.8000
Epoch 70/100
600/600 [==============================] – 0s 353us/sample – loss: 2.0669e-06 – accuracy: 1.0000 – val_loss: 1.6054 – val_accuracy: 0.8000
Epoch 71/100
600/600 [==============================] – 0s 352us/sample – loss: 3.3052e-06 – accuracy: 1.0000 – val_loss: 1.6062 – val_accuracy: 0.8000
Epoch 72/100
600/600 [==============================] – 0s 342us/sample – loss: 1.3255e-06 – accuracy: 1.0000 – val_loss: 1.6133 – val_accuracy: 0.8000
Epoch 73/100
600/600 [==============================] – 0s 373us/sample – loss: 3.5655e-06 – accuracy: 1.0000 – val_loss: 1.6251 – val_accuracy: 0.8000
Epoch 74/100
600/600 [==============================] – 0s 395us/sample – loss: 2.0590e-06 – accuracy: 1.0000 – val_loss: 1.6348 – val_accuracy: 0.8000
Epoch 75/100
600/600 [==============================] – 0s 350us/sample – loss: 2.6948e-06 – accuracy: 1.0000 – val_loss: 1.6437 – val_accuracy: 0.8133
Epoch 76/100
600/600 [==============================] – 0s 349us/sample – loss: 2.0972e-06 – accuracy: 1.0000 – val_loss: 1.6477 – val_accuracy: 0.8133
Epoch 77/100
600/600 [==============================] – 0s 345us/sample – loss: 1.5779e-06 – accuracy: 1.0000 – val_loss: 1.6604 – val_accuracy: 0.8133
Epoch 78/100
600/600 [==============================] – 0s 394us/sample – loss: 3.0688e-06 – accuracy: 1.0000 – val_loss: 1.6567 – val_accuracy: 0.8000
Epoch 79/100
600/600 [==============================] – 0s 357us/sample – loss: 2.2465e-06 – accuracy: 1.0000 – val_loss: 1.6589 – val_accuracy: 0.8000
Epoch 80/100
600/600 [==============================] – 0s 361us/sample – loss: 1.2367e-06 – accuracy: 1.0000 – val_loss: 1.6630 – val_accuracy: 0.8000
Epoch 81/100
600/600 [==============================] – 0s 368us/sample – loss: 2.8340e-06 – accuracy: 1.0000 – val_loss: 1.6608 – val_accuracy: 0.8000
Epoch 82/100
600/600 [==============================] – 0s 357us/sample – loss: 1.9189e-06 – accuracy: 1.0000 – val_loss: 1.6654 – val_accuracy: 0.8000
Epoch 83/100
600/600 [==============================] – 0s 352us/sample – loss: 1.5252e-06 – accuracy: 1.0000 – val_loss: 1.6672 – val_accuracy: 0.8000
Epoch 84/100
600/600 [==============================] – 0s 359us/sample – loss: 8.1988e-07 – accuracy: 1.0000 – val_loss: 1.6738 – val_accuracy: 0.8000
Epoch 85/100
600/600 [==============================] – 0s 371us/sample – loss: 1.0839e-06 – accuracy: 1.0000 – val_loss: 1.6794 – val_accuracy: 0.8067
Epoch 86/100
600/600 [==============================] – 0s 362us/sample – loss: 1.4705e-06 – accuracy: 1.0000 – val_loss: 1.6800 – val_accuracy: 0.8067
Epoch 87/100
600/600 [==============================] – 0s 396us/sample – loss: 1.8604e-06 – accuracy: 1.0000 – val_loss: 1.6900 – val_accuracy: 0.8067
Epoch 88/100
600/600 [==============================] – 0s 347us/sample – loss: 2.3301e-06 – accuracy: 1.0000 – val_loss: 1.6963 – val_accuracy: 0.8067
Epoch 89/100
600/600 [==============================] – 0s 351us/sample – loss: 1.5431e-06 – accuracy: 1.0000 – val_loss: 1.7086 – val_accuracy: 0.8133
Epoch 90/100
600/600 [==============================] – 0s 348us/sample – loss: 1.3459e-06 – accuracy: 1.0000 – val_loss: 1.6933 – val_accuracy: 0.8067
Epoch 91/100
600/600 [==============================] – 0s 357us/sample – loss: 2.2468e-06 – accuracy: 1.0000 – val_loss: 1.6983 – val_accuracy: 0.8067
Epoch 92/100
600/600 [==============================] – 0s 365us/sample – loss: 8.9002e-07 – accuracy: 1.0000 – val_loss: 1.6993 – val_accuracy: 0.8067
Epoch 93/100
600/600 [==============================] – 0s 354us/sample – loss: 1.3009e-06 – accuracy: 1.0000 – val_loss: 1.7010 – val_accuracy: 0.8067
Epoch 94/100
600/600 [==============================] – 0s 360us/sample – loss: 1.8493e-06 – accuracy: 1.0000 – val_loss: 1.7072 – val_accuracy: 0.8067
Epoch 95/100
600/600 [==============================] – 0s 367us/sample – loss: 1.9571e-06 – accuracy: 1.0000 – val_loss: 1.7152 – val_accuracy: 0.8067
Epoch 96/100
600/600 [==============================] – 0s 375us/sample – loss: 2.8436e-06 – accuracy: 1.0000 – val_loss: 1.6796 – val_accuracy: 0.8000
Epoch 97/100
600/600 [==============================] – 0s 380us/sample – loss: 1.3394e-06 – accuracy: 1.0000 – val_loss: 1.6827 – val_accuracy: 0.8000
Epoch 98/100
600/600 [==============================] – 0s 361us/sample – loss: 1.3019e-06 – accuracy: 1.0000 – val_loss: 1.6967 – val_accuracy: 0.8000
Epoch 99/100
600/600 [==============================] – 0s 360us/sample – loss: 1.1421e-06 – accuracy: 1.0000 – val_loss: 1.7074 – val_accuracy: 0.8000
Epoch 100/100
600/600 [==============================] – 0s 344us/sample – loss: 3.1129e-06 – accuracy: 1.0000 – val_loss: 1.7144 – val_accuracy: 0.8000
WARNING:tensorflow:Falling back from v2 loop because of error: Failed to find data adapter that can handle input:
Test Accuracy: 74.40000176429749
In [39]:
from keras.preprocessing.text import Tokenizer
tokenizer = Tokenizer(num_words=5000)
tokenizer.fit_on_texts(sentences_train)
X_train = tokenizer.texts_to_sequences(sentences_train)
X_test = tokenizer.texts_to_sequences(sentences_test)
vocab_size = len(tokenizer.word_index) + 1 # Adding 1 because of reserved 0 index
print(sentences_train[3])
This is the phone to get for 2005…. I just bought my S710a and all I can say is WOW!
In [40]:
print(X_train[3])
[7, 5, 1, 9, 8, 92, 11, 676, 2, 59, 101, 10, 677, 3, 32, 2, 71, 225, 5, 449]
In [41]:
for word in [‘the’, ‘all’, ‘happy’]:
print(‘{}: {}’.format(word, tokenizer.word_index[word]))
the: 1
all: 32
happy: 86
In [42]:
from keras.preprocessing.sequence import pad_sequences
maxlen = 100
# Pad variables with zeros
X_train = pad_sequences(X_train, padding=’post’, maxlen=maxlen)
X_test = pad_sequences(X_test, padding=’post’, maxlen=maxlen)
print(X_train[0, :])
[ 7 24 5 16 4 137 148 6 223 315 2 71 224 8 1 673 111 444
18 316 11 445 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0]
In [43]:
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
embedding_dim = 50
model = Sequential()
model.add(layers.Embedding(input_dim=vocab_size,
output_dim=embedding_dim,
input_length=maxlen))
model.add(layers.GlobalMaxPool1D())
model.add(layers.Dense(10, activation=’relu’))
model.add(layers.Dense(1, activation=’sigmoid’))
model.compile(optimizer=’adam’, loss=’binary_crossentropy’,
metrics=[‘accuracy’])
model.summary()
Model: “sequential_3”
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding (Embedding) (None, 100, 50) 78700
_________________________________________________________________
global_max_pooling1d (Global (None, 50) 0
_________________________________________________________________
dense_9 (Dense) (None, 10) 510
_________________________________________________________________
dense_10 (Dense) (None, 1) 11
=================================================================
Total params: 79,221
Trainable params: 79,221
Non-trainable params: 0
_________________________________________________________________
In [44]:
hist = model.fit(X_train, y_train,epochs=50,validation_split=0.2,batch_size=10)
loss, accuracy = model.evaluate(X_test, y_test, verbose=False)
print(“Accuracy: “,accuracy)
Train on 600 samples, validate on 150 samples
Epoch 1/50
600/600 [==============================] – 1s 1ms/sample – loss: 0.6889 – accuracy: 0.5200 – val_loss: 0.6835 – val_accuracy: 0.6067
Epoch 2/50
600/600 [==============================] – 0s 471us/sample – loss: 0.6607 – accuracy: 0.7133 – val_loss: 0.6543 – val_accuracy: 0.7067
Epoch 3/50
600/600 [==============================] – 0s 474us/sample – loss: 0.5932 – accuracy: 0.8533 – val_loss: 0.5994 – val_accuracy: 0.7800
Epoch 4/50
600/600 [==============================] – 0s 461us/sample – loss: 0.4755 – accuracy: 0.9483 – val_loss: 0.5238 – val_accuracy: 0.8000
Epoch 5/50
600/600 [==============================] – 0s 479us/sample – loss: 0.3332 – accuracy: 0.9567 – val_loss: 0.4553 – val_accuracy: 0.8133
Epoch 6/50
600/600 [==============================] – 0s 490us/sample – loss: 0.2113 – accuracy: 0.9667 – val_loss: 0.4163 – val_accuracy: 0.8067
Epoch 7/50
600/600 [==============================] – 0s 475us/sample – loss: 0.1337 – accuracy: 0.9767 – val_loss: 0.4008 – val_accuracy: 0.8200
Epoch 8/50
600/600 [==============================] – 0s 479us/sample – loss: 0.0864 – accuracy: 0.9900 – val_loss: 0.3900 – val_accuracy: 0.8200
Epoch 9/50
600/600 [==============================] – 0s 465us/sample – loss: 0.0579 – accuracy: 0.9950 – val_loss: 0.3878 – val_accuracy: 0.8267
Epoch 10/50
600/600 [==============================] – 0s 470us/sample – loss: 0.0397 – accuracy: 0.9983 – val_loss: 0.3873 – val_accuracy: 0.8267
Epoch 11/50
600/600 [==============================] – 0s 467us/sample – loss: 0.0280 – accuracy: 0.9983 – val_loss: 0.3905 – val_accuracy: 0.8467
Epoch 12/50
600/600 [==============================] – 0s 473us/sample – loss: 0.0199 – accuracy: 1.0000 – val_loss: 0.3914 – val_accuracy: 0.8333
Epoch 13/50
600/600 [==============================] – 0s 477us/sample – loss: 0.0146 – accuracy: 1.0000 – val_loss: 0.3966 – val_accuracy: 0.8333
Epoch 14/50
600/600 [==============================] – 0s 469us/sample – loss: 0.0109 – accuracy: 1.0000 – val_loss: 0.4018 – val_accuracy: 0.8400
Epoch 15/50
600/600 [==============================] – 0s 477us/sample – loss: 0.0085 – accuracy: 1.0000 – val_loss: 0.4045 – val_accuracy: 0.8400
Epoch 16/50
600/600 [==============================] – 0s 471us/sample – loss: 0.0067 – accuracy: 1.0000 – val_loss: 0.4100 – val_accuracy: 0.8400
Epoch 17/50
600/600 [==============================] – 0s 466us/sample – loss: 0.0054 – accuracy: 1.0000 – val_loss: 0.4140 – val_accuracy: 0.8333
Epoch 18/50
600/600 [==============================] – 0s 490us/sample – loss: 0.0045 – accuracy: 1.0000 – val_loss: 0.4185 – val_accuracy: 0.8400
Epoch 19/50
600/600 [==============================] – 0s 458us/sample – loss: 0.0037 – accuracy: 1.0000 – val_loss: 0.4212 – val_accuracy: 0.8400
Epoch 20/50
600/600 [==============================] – 0s 482us/sample – loss: 0.0032 – accuracy: 1.0000 – val_loss: 0.4231 – val_accuracy: 0.8400
Epoch 21/50
600/600 [==============================] – 0s 467us/sample – loss: 0.0027 – accuracy: 1.0000 – val_loss: 0.4264 – val_accuracy: 0.8400
Epoch 22/50
600/600 [==============================] – 0s 460us/sample – loss: 0.0023 – accuracy: 1.0000 – val_loss: 0.4297 – val_accuracy: 0.8400
Epoch 23/50
600/600 [==============================] – 0s 473us/sample – loss: 0.0020 – accuracy: 1.0000 – val_loss: 0.4325 – val_accuracy: 0.8400
Epoch 24/50
600/600 [==============================] – 0s 445us/sample – loss: 0.0018 – accuracy: 1.0000 – val_loss: 0.4359 – val_accuracy: 0.8333
Epoch 25/50
600/600 [==============================] – 0s 477us/sample – loss: 0.0016 – accuracy: 1.0000 – val_loss: 0.4389 – val_accuracy: 0.8333
Epoch 26/50
600/600 [==============================] – 0s 468us/sample – loss: 0.0014 – accuracy: 1.0000 – val_loss: 0.4418 – val_accuracy: 0.8333
Epoch 27/50
600/600 [==============================] – 0s 480us/sample – loss: 0.0013 – accuracy: 1.0000 – val_loss: 0.4454 – val_accuracy: 0.8333
Epoch 28/50
600/600 [==============================] – 0s 512us/sample – loss: 0.0011 – accuracy: 1.0000 – val_loss: 0.4489 – val_accuracy: 0.8333
Epoch 29/50
600/600 [==============================] – 0s 476us/sample – loss: 0.0010 – accuracy: 1.0000 – val_loss: 0.4527 – val_accuracy: 0.8333
Epoch 30/50
600/600 [==============================] – 0s 467us/sample – loss: 9.2528e-04 – accuracy: 1.0000 – val_loss: 0.4570 – val_accuracy: 0.8333
Epoch 31/50
600/600 [==============================] – 0s 461us/sample – loss: 8.3642e-04 – accuracy: 1.0000 – val_loss: 0.4606 – val_accuracy: 0.8333
Epoch 32/50
600/600 [==============================] – 0s 482us/sample – loss: 7.6131e-04 – accuracy: 1.0000 – val_loss: 0.4650 – val_accuracy: 0.8333
Epoch 33/50
600/600 [==============================] – 0s 484us/sample – loss: 6.9509e-04 – accuracy: 1.0000 – val_loss: 0.4687 – val_accuracy: 0.8333
Epoch 34/50
600/600 [==============================] – 0s 472us/sample – loss: 6.3733e-04 – accuracy: 1.0000 – val_loss: 0.4724 – val_accuracy: 0.8267
Epoch 35/50
600/600 [==============================] – 0s 497us/sample – loss: 5.8526e-04 – accuracy: 1.0000 – val_loss: 0.4765 – val_accuracy: 0.8267
Epoch 36/50
600/600 [==============================] – 0s 472us/sample – loss: 5.3997e-04 – accuracy: 1.0000 – val_loss: 0.4795 – val_accuracy: 0.8267
Epoch 37/50
600/600 [==============================] – 0s 449us/sample – loss: 4.9680e-04 – accuracy: 1.0000 – val_loss: 0.4829 – val_accuracy: 0.8267
Epoch 38/50
600/600 [==============================] – 0s 477us/sample – loss: 4.5889e-04 – accuracy: 1.0000 – val_loss: 0.4867 – val_accuracy: 0.8267
Epoch 39/50
600/600 [==============================] – 0s 479us/sample – loss: 4.2506e-04 – accuracy: 1.0000 – val_loss: 0.4906 – val_accuracy: 0.8267
Epoch 40/50
600/600 [==============================] – 0s 475us/sample – loss: 3.9444e-04 – accuracy: 1.0000 – val_loss: 0.4948 – val_accuracy: 0.8267
Epoch 41/50
600/600 [==============================] – 0s 476us/sample – loss: 3.6595e-04 – accuracy: 1.0000 – val_loss: 0.4976 – val_accuracy: 0.8267
Epoch 42/50
600/600 [==============================] – 0s 470us/sample – loss: 3.4147e-04 – accuracy: 1.0000 – val_loss: 0.5003 – val_accuracy: 0.8267
Epoch 43/50
600/600 [==============================] – 0s 482us/sample – loss: 3.1771e-04 – accuracy: 1.0000 – val_loss: 0.5041 – val_accuracy: 0.8267
Epoch 44/50
600/600 [==============================] – 0s 467us/sample – loss: 2.9621e-04 – accuracy: 1.0000 – val_loss: 0.5073 – val_accuracy: 0.8267
Epoch 45/50
600/600 [==============================] – 0s 467us/sample – loss: 2.7774e-04 – accuracy: 1.0000 – val_loss: 0.5099 – val_accuracy: 0.8267
Epoch 46/50
600/600 [==============================] – 0s 478us/sample – loss: 2.5974e-04 – accuracy: 1.0000 – val_loss: 0.5126 – val_accuracy: 0.8267
Epoch 47/50
600/600 [==============================] – 0s 472us/sample – loss: 2.4393e-04 – accuracy: 1.0000 – val_loss: 0.5158 – val_accuracy: 0.8267
Epoch 48/50
600/600 [==============================] – 0s 465us/sample – loss: 2.2871e-04 – accuracy: 1.0000 – val_loss: 0.5184 – val_accuracy: 0.8267
Epoch 49/50
600/600 [==============================] – 0s 495us/sample – loss: 2.1497e-04 – accuracy: 1.0000 – val_loss: 0.5213 – val_accuracy: 0.8267
Epoch 50/50
600/600 [==============================] – 0s 489us/sample – loss: 2.0252e-04 – accuracy: 1.0000 – val_loss: 0.5238 – val_accuracy: 0.8333
Accuracy: 0.812
In [46]:
import matplotlib.pyplot as plt
plt.subplot(2,1,1)
plt.plot(hist.history[‘accuracy’])
plt.plot(hist.history[‘val_accuracy’])
plt.title(‘model accuracy’)
plt.ylabel(‘accuracy’)
plt.xlabel(‘epoch’)
plt.legend([‘train’, ‘test’], loc=’lower right’)
plt.subplot(2,1,2)
plt.plot(hist.history[‘loss’])
plt.plot(hist.history[‘val_loss’])
plt.title(‘model loss’)
plt.ylabel(‘loss’)
plt.xlabel(‘epoch’)
plt.legend([‘train’, ‘test’], loc=’upper right’)
plt.show()

In [0]:
#Q5- How do you interpret these results?
#Overfitting problem
#Q6- What is your recommendation to improve the accuracy? Implement your idea.
#Add more layers and dropout 0.2 for each layer.
In [81]:
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
embedding_dim = 50
model = Sequential()
model.add(layers.Embedding(input_dim=vocab_size,
output_dim=embedding_dim,
input_length=maxlen))
model.add(layers.GlobalMaxPool1D())
model.add(layers.Dense(10, activation=’relu’))
model.add(layers.Dense(1, activation=’sigmoid’))
model.compile(optimizer=’SGD’, loss=’binary_crossentropy’,
metrics=[‘accuracy’])
model.summary()
Model: “sequential_14”
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding_11 (Embedding) (None, 100, 50) 78700
_________________________________________________________________
global_max_pooling1d_11 (Glo (None, 50) 0
_________________________________________________________________
dense_33 (Dense) (None, 10) 510
_________________________________________________________________
dense_34 (Dense) (None, 1) 11
=================================================================
Total params: 79,221
Trainable params: 79,221
Non-trainable params: 0
_________________________________________________________________
In [82]:
hist = model.fit(X_train, y_train, epochs=50, validation_split=0.2, batch_size=5)
loss, accuracy = model.evaluate(X_test, y_test, verbose=False)
print(“Accuracy: “,accuracy)
Train on 600 samples, validate on 150 samples
Epoch 1/50
600/600 [==============================] – 1s 1ms/sample – loss: 0.6930 – accuracy: 0.5283 – val_loss: 0.6905 – val_accuracy: 0.6000
Epoch 2/50
600/600 [==============================] – 0s 728us/sample – loss: 0.6905 – accuracy: 0.5533 – val_loss: 0.6889 – val_accuracy: 0.6333
Epoch 3/50
600/600 [==============================] – 0s 727us/sample – loss: 0.6880 – accuracy: 0.6100 – val_loss: 0.6871 – val_accuracy: 0.6333
Epoch 4/50
600/600 [==============================] – 0s 696us/sample – loss: 0.6846 – accuracy: 0.6817 – val_loss: 0.6852 – val_accuracy: 0.6333
Epoch 5/50
600/600 [==============================] – 0s 724us/sample – loss: 0.6811 – accuracy: 0.6300 – val_loss: 0.6825 – val_accuracy: 0.6467
Epoch 6/50
600/600 [==============================] – 0s 718us/sample – loss: 0.6768 – accuracy: 0.6400 – val_loss: 0.6796 – val_accuracy: 0.7000
Epoch 7/50
600/600 [==============================] – 0s 735us/sample – loss: 0.6726 – accuracy: 0.7633 – val_loss: 0.6771 – val_accuracy: 0.6533
Epoch 8/50
600/600 [==============================] – 0s 733us/sample – loss: 0.6681 – accuracy: 0.6500 – val_loss: 0.6734 – val_accuracy: 0.7000
Epoch 9/50
600/600 [==============================] – 0s 712us/sample – loss: 0.6630 – accuracy: 0.7517 – val_loss: 0.6698 – val_accuracy: 0.7000
Epoch 10/50
600/600 [==============================] – 0s 758us/sample – loss: 0.6574 – accuracy: 0.7350 – val_loss: 0.6657 – val_accuracy: 0.7133
Epoch 11/50
600/600 [==============================] – 0s 704us/sample – loss: 0.6512 – accuracy: 0.7867 – val_loss: 0.6610 – val_accuracy: 0.7067
Epoch 12/50
600/600 [==============================] – 0s 744us/sample – loss: 0.6440 – accuracy: 0.7950 – val_loss: 0.6557 – val_accuracy: 0.7200
Epoch 13/50
600/600 [==============================] – 0s 743us/sample – loss: 0.6358 – accuracy: 0.8100 – val_loss: 0.6495 – val_accuracy: 0.7333
Epoch 14/50
600/600 [==============================] – 0s 715us/sample – loss: 0.6266 – accuracy: 0.8000 – val_loss: 0.6426 – val_accuracy: 0.7400
Epoch 15/50
600/600 [==============================] – 0s 710us/sample – loss: 0.6161 – accuracy: 0.8183 – val_loss: 0.6351 – val_accuracy: 0.7267
Epoch 16/50
600/600 [==============================] – 0s 731us/sample – loss: 0.6047 – accuracy: 0.8383 – val_loss: 0.6274 – val_accuracy: 0.7267
Epoch 17/50
600/600 [==============================] – 0s 735us/sample – loss: 0.5927 – accuracy: 0.8733 – val_loss: 0.6190 – val_accuracy: 0.7133
Epoch 18/50
600/600 [==============================] – 0s 695us/sample – loss: 0.5792 – accuracy: 0.8650 – val_loss: 0.6109 – val_accuracy: 0.6933
Epoch 19/50
600/600 [==============================] – 0s 718us/sample – loss: 0.5657 – accuracy: 0.8883 – val_loss: 0.6011 – val_accuracy: 0.7467
Epoch 20/50
600/600 [==============================] – 0s 688us/sample – loss: 0.5515 – accuracy: 0.8750 – val_loss: 0.5922 – val_accuracy: 0.7400
Epoch 21/50
600/600 [==============================] – 0s 711us/sample – loss: 0.5367 – accuracy: 0.8917 – val_loss: 0.5825 – val_accuracy: 0.7667
Epoch 22/50
600/600 [==============================] – 0s 720us/sample – loss: 0.5217 – accuracy: 0.9050 – val_loss: 0.5732 – val_accuracy: 0.7600
Epoch 23/50
600/600 [==============================] – 0s 690us/sample – loss: 0.5057 – accuracy: 0.9033 – val_loss: 0.5644 – val_accuracy: 0.7467
Epoch 24/50
600/600 [==============================] – 0s 716us/sample – loss: 0.4895 – accuracy: 0.9167 – val_loss: 0.5548 – val_accuracy: 0.7667
Epoch 25/50
600/600 [==============================] – 0s 714us/sample – loss: 0.4727 – accuracy: 0.9167 – val_loss: 0.5461 – val_accuracy: 0.7733
Epoch 26/50
600/600 [==============================] – 0s 728us/sample – loss: 0.4554 – accuracy: 0.9250 – val_loss: 0.5368 – val_accuracy: 0.7667
Epoch 27/50
600/600 [==============================] – 0s 707us/sample – loss: 0.4383 – accuracy: 0.9267 – val_loss: 0.5289 – val_accuracy: 0.7800
Epoch 28/50
600/600 [==============================] – 0s 727us/sample – loss: 0.4194 – accuracy: 0.9267 – val_loss: 0.5231 – val_accuracy: 0.7467
Epoch 29/50
600/600 [==============================] – 0s 736us/sample – loss: 0.4013 – accuracy: 0.9433 – val_loss: 0.5143 – val_accuracy: 0.7667
Epoch 30/50
600/600 [==============================] – 0s 703us/sample – loss: 0.3829 – accuracy: 0.9383 – val_loss: 0.5083 – val_accuracy: 0.7733
Epoch 31/50
600/600 [==============================] – 0s 735us/sample – loss: 0.3643 – accuracy: 0.9483 – val_loss: 0.5000 – val_accuracy: 0.7667
Epoch 32/50
600/600 [==============================] – 0s 712us/sample – loss: 0.3455 – accuracy: 0.9467 – val_loss: 0.4928 – val_accuracy: 0.7800
Epoch 33/50
600/600 [==============================] – 0s 695us/sample – loss: 0.3278 – accuracy: 0.9550 – val_loss: 0.4881 – val_accuracy: 0.7733
Epoch 34/50
600/600 [==============================] – 0s 709us/sample – loss: 0.3104 – accuracy: 0.9550 – val_loss: 0.4837 – val_accuracy: 0.7800
Epoch 35/50
600/600 [==============================] – 0s 716us/sample – loss: 0.2929 – accuracy: 0.9567 – val_loss: 0.4842 – val_accuracy: 0.7733
Epoch 36/50
600/600 [==============================] – 0s 731us/sample – loss: 0.2761 – accuracy: 0.9600 – val_loss: 0.4810 – val_accuracy: 0.7733
Epoch 37/50
600/600 [==============================] – 0s 748us/sample – loss: 0.2601 – accuracy: 0.9633 – val_loss: 0.4736 – val_accuracy: 0.7867
Epoch 38/50
600/600 [==============================] – 0s 723us/sample – loss: 0.2438 – accuracy: 0.9633 – val_loss: 0.4726 – val_accuracy: 0.7867
Epoch 39/50
600/600 [==============================] – 0s 706us/sample – loss: 0.2288 – accuracy: 0.9667 – val_loss: 0.4693 – val_accuracy: 0.7867
Epoch 40/50
600/600 [==============================] – 0s 717us/sample – loss: 0.2140 – accuracy: 0.9717 – val_loss: 0.4657 – val_accuracy: 0.7867
Epoch 41/50
600/600 [==============================] – 0s 717us/sample – loss: 0.2001 – accuracy: 0.9733 – val_loss: 0.4653 – val_accuracy: 0.7933
Epoch 42/50
600/600 [==============================] – 0s 694us/sample – loss: 0.1869 – accuracy: 0.9717 – val_loss: 0.4634 – val_accuracy: 0.8000
Epoch 43/50
600/600 [==============================] – 0s 709us/sample – loss: 0.1739 – accuracy: 0.9783 – val_loss: 0.4594 – val_accuracy: 0.7867
Epoch 44/50
600/600 [==============================] – 0s 726us/sample – loss: 0.1625 – accuracy: 0.9783 – val_loss: 0.4589 – val_accuracy: 0.7933
Epoch 45/50
600/600 [==============================] – 0s 715us/sample – loss: 0.1508 – accuracy: 0.9833 – val_loss: 0.4563 – val_accuracy: 0.7800
Epoch 46/50
600/600 [==============================] – 0s 698us/sample – loss: 0.1404 – accuracy: 0.9817 – val_loss: 0.4582 – val_accuracy: 0.7800
Epoch 47/50
600/600 [==============================] – 0s 713us/sample – loss: 0.1304 – accuracy: 0.9867 – val_loss: 0.4544 – val_accuracy: 0.7933
Epoch 48/50
600/600 [==============================] – 0s 706us/sample – loss: 0.1212 – accuracy: 0.9867 – val_loss: 0.4544 – val_accuracy: 0.7800
Epoch 49/50
600/600 [==============================] – 0s 699us/sample – loss: 0.1123 – accuracy: 0.9867 – val_loss: 0.4574 – val_accuracy: 0.7800
Epoch 50/50
600/600 [==============================] – 0s 715us/sample – loss: 0.1050 – accuracy: 0.9883 – val_loss: 0.4539 – val_accuracy: 0.7800
Accuracy: 0.8
In [83]:
import matplotlib.pyplot as plt
plt.subplot(2,1,1)
plt.plot(hist.history[‘accuracy’])
plt.plot(hist.history[‘val_accuracy’])
plt.title(‘model accuracy’)
plt.ylabel(‘accuracy’)
plt.xlabel(‘epoch’)
plt.legend([‘train’, ‘test’], loc=’lower right’)
plt.subplot(2,1,2)
plt.plot(hist.history[‘loss’])
plt.plot(hist.history[‘val_loss’])
plt.title(‘model loss’)
plt.ylabel(‘loss’)
plt.xlabel(‘epoch’)
plt.legend([‘train’, ‘test’], loc=’upper right’)
plt.show()

In [0]:
In [0]: