In [ ]:
import cv2
import numpy as np

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()  # read() returns a success flag (ret) and the current frame
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert the frame to grayscale
    cv2.imshow('frame', frame)
    cv2.imshow('gray', gray)  # show the grayscale video

    if cv2.waitKey(1) & 0xFF == ord('q'):  # stop when q is pressed
        break

cap.release()
cv2.destroyAllWindows()
In [ ]:
# To train the model:
%run "C:/My Courses/Spring2020/ANLY535/Lab3/mnist_cnn"
model.save('my_model.h5')
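In [ ]:
# Sanity check (a minimal sketch, assuming the mnist_cnn script trained a
# 28x28x1 MNIST CNN and the cell above wrote my_model.h5 to the working
# directory): reload the file and confirm the input shape the camera code
# below will have to match.
from tensorflow.keras.models import load_model

check = load_model('my_model.h5')
print(check.input_shape)  # expected to be (None, 28, 28, 1)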
In [1]:
import cv2
import numpy as np
import tensorflow

new_model = tensorflow.keras.models.load_model('C:/My Courses/Spring2020/ANLY535/Lab3/my_model.h5')

# Handwritten digit recognition using the camera

def get_img_contour_thresh(img):
    x, y, w, h = 0, 0, 300, 300
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (35, 35), 0)
    ret, thresh1 = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    thresh1 = thresh1[y:y + h, x:x + w]
    contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
    return img, contours, thresh1

cap = cv2.VideoCapture(0)

while cap.isOpened():
    ret, img = cap.read()
    img, contours, thresh = get_img_contour_thresh(img)
    ans1 = ''
    if len(contours) > 0:
        contour = max(contours, key=cv2.contourArea)
        if cv2.contourArea(contour) > 2500:
            # print(predict(w_from_model, b_from_model, contour))
            x, y, w, h = cv2.boundingRect(contour)
            # newImage = thresh[y - 15:y + h + 15, x - 15:x + w + 15]
            newImage = thresh[y:y + h, x:x + w]
            newImage = cv2.resize(newImage, (28, 28))
            newImage = np.array(newImage)
            newImage = newImage.flatten()
            newImage = newImage.reshape(newImage.shape[0], 1)
            newImage2 = newImage.flatten().reshape(1, 28, 28, 1)
            newImage2 = newImage2.astype('float32')
            newImage2 /= 255
            result = new_model.predict(newImage2)
            ans1 = np.argmax(result)
            # ans1 = Digit_Recognizer_LR.predict(w_LR, b_LR, newImage)

    x, y, w, h = 0, 0, 300, 300
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.putText(img, "Prediction : " + str(ans1), (10, 320), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

    cv2.imshow("Frame", img)
    cv2.imshow("Contours", thresh)
    k = cv2.waitKey(10)
    if k == 27:  # Esc key stops the loop
        break

cap.release()
cv2.destroyAllWindows()
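In [ ]:
# Optional offline sanity check (a sketch, assuming new_model is the MNIST CNN
# loaded above): push one MNIST test digit through the same reshape/normalise
# steps used in the camera loop and compare the prediction with the true label.
import numpy as np
from tensorflow.keras.datasets import mnist

(_, _), (x_test, y_test) = mnist.load_data()
sample = x_test[0].reshape(1, 28, 28, 1).astype('float32') / 255
print('predicted:', np.argmax(new_model.predict(sample)), '| actual:', y_test[0])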
In [ ]:
# Part 1.3
import cv2

face_cascade = cv2.CascadeClassifier('C:/My Courses/Spring2020/ANLY535/Lab3/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('C:/My Courses/Spring2020/ANLY535/Lab3/haarcascade_eye.xml')

cap = cv2.VideoCapture(0)

while True:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)

    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)

    cv2.imshow('Lab 3 Face recognition', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # Esc key stops the loop
        break

cap.release()
cv2.destroyAllWindows()
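In [ ]:
# Alternative, not part of the lab code: if the cascade XML files are not at the
# hard-coded paths, the opencv-python package ships the same files, and the paths
# can be built from cv2.data.haarcascades instead.
import cv2

face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')
print(face_cascade.empty(), eye_cascade.empty())  # both should print False if the files loaded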
In [ ]:
# Part 2

import numpy as np
from tensorflow.keras.datasets import imdb
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Embedding
from tensorflow.keras.preprocessing import sequence
# fix random seed for reproducibility
np.random.seed(7)
In [ ]:
# Load the IMDB dataset, keeping only the 5,000 most frequent words

top_words = 5000
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)
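In [ ]:
# Optional: decode one encoded review back to words to see what the model receives.
# Indices from imdb.load_data are offset by 3 (0 = padding, 1 = start, 2 = unknown).
word_index = imdb.get_word_index()
reverse_index = {i + 3: w for w, i in word_index.items()}
print(' '.join(reverse_index.get(i, '?') for i in X_train[0][:30]))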
In [ ]:
# truncate and pad input sequences
max_review_length = 500
X_train = sequence.pad_sequences(X_train, maxlen=max_review_length)
X_test = sequence.pad_sequences(X_test, maxlen=max_review_length)
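In [ ]:
# Small illustration of pad_sequences on toy data: shorter sequences are
# left-padded with zeros and longer ones are truncated from the front by default.
demo = sequence.pad_sequences([[1, 2, 3], [4, 5, 6, 7, 8]], maxlen=4)
print(demo)
# [[0 1 2 3]
#  [5 6 7 8]]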
In [ ]:
import tensorflow
from tensorflow.keras.layers import Embedding
In [ ]:
# design model
embedding_vector_length = 32
model = Sequential()
model.add(Embedding(top_words, embedding_vector_length, input_length=max_review_length))
model.add(LSTM(100))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
hist = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=20, batch_size=64)
In [ ]:
# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1] * 100))
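In [ ]:
# A quick look at a few individual predictions (a small sketch, not part of the
# original evaluation): the sigmoid output is read as positive when it exceeds 0.5.
probs = model.predict(X_test[:5])
print(np.round(probs.ravel(), 3))
print(y_test[:5])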
In [ ]:
import matplotlib.pyplot as plt
plt.style.use('ggplot')

def plot_history(history):
    # tf.keras 2.x records the metric as 'accuracy' / 'val_accuracy'
    # (older versions used 'acc' / 'val_acc')
    acc = history.history['accuracy']
    val_acc = history.history['val_accuracy']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    x = range(1, len(acc) + 1)

    plt.figure(figsize=(12, 5))
    plt.subplot(1, 2, 1)
    plt.plot(x, acc, 'b', label='Training acc')
    plt.plot(x, val_acc, 'r', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.legend()
    plt.subplot(1, 2, 2)
    plt.plot(x, loss, 'b', label='Training loss')
    plt.plot(x, val_loss, 'r', label='Validation loss')
    plt.title('Training and validation loss')
    plt.legend()
    plt.show()

plot_history(hist)
In [ ]:
from tensorflow.keras.layers import Conv1D, MaxPooling1D
# fix random seed for reproducibility
np.random.seed(7)
# load the dataset but only keep the top n words, zero the rest
top_words = 5000
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)
# truncate and pad input sequences
max_review_length = 500
X_train = sequence.pad_sequences(X_train, maxlen=max_review_length)
X_test = sequence.pad_sequences(X_test, maxlen=max_review_length)
# create the model
embedding_vector_length = 32
model = Sequential()
model.add(Embedding(top_words, embedding_vector_length, input_length=max_review_length))
model.add(Conv1D(filters=32, kernel_size=3, padding='same', activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(LSTM(100))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
model.fit(X_train, y_train, epochs=3, batch_size=64)
# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1] * 100))
In [ ]:
from __future__ import print_function
import torch
x = torch.rand(5, 3)
print(x)
In [ ]: