machine_learning_hw1 -checkpoint

In [1]:

# import statements
datapath = 'datasets/'
from autograd import numpy as np

Gradient descent and cost function history plotting functions

In [2]:

# import automatic differentiator to compute gradient module
from autograd import grad

# gradient descent function
def gradient_descent(g,alpha,max_its,w):
    # compute gradient module using autograd
    gradient = grad(g)

    # run the gradient descent loop
    weight_history = [w]       # weight history container
    cost_history = [g(w)]      # cost function history container
    for k in range(max_its):
        # evaluate the gradient
        grad_eval = gradient(w)

        # take gradient descent step
        w = w - alpha*grad_eval

        # record weight and cost
        weight_history.append(w)
        cost_history.append(g(w))
    return weight_history,cost_history

In [3]:

# the import statement for matplotlib
import matplotlib.pyplot as plt

# cost function history plotter
def plot_cost_histories(cost_histories,labels):
    # create figure
    plt.figure()

    # loop over cost histories and plot each one
    for j in range(len(cost_histories)):
        history = cost_histories[j]
        label = labels[j]
        plt.plot(history,label = label)
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    plt.show()
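These two helpers are not exercised on their own anywhere below, so as a quick sanity check, here is a minimal usage sketch on a simple quadratic cost; the quadratic `g`, the starting point, and the step lengths are illustrative choices, not part of the assignment.

# illustrative only: minimize g(w) = ||w||^2 with two step lengths and
# compare their cost histories using the plotting helper above
g = lambda w: np.dot(w,w)                     # simple convex cost, minimum at w = 0
w0 = np.array([3.0,-2.0])                     # illustrative starting point

wh_1, ch_1 = gradient_descent(g,0.1,25,w0)    # larger step length
wh_2, ch_2 = gradient_descent(g,0.01,25,w0)   # smaller step length

plot_cost_histories([ch_1,ch_2],labels=['alpha = 0.1','alpha = 0.01'])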

Chapter 5 Exercises

Complete exercises 2, 6, and 7 from Chapter 5 – make sure to download the most recent version of the text. Below we load in each dataset.

Exercise 2

In [4]:

# load in dataset
csvname = datapath + 'kleibers_law_data.csv'
data = np.loadtxt(csvname,delimiter=',')

# get input and output of dataset
x = data[:-1,:]
y = data[-1:,:]

# cost functions
def least_absolute_deviation(w):
    cost = np.sum(np.abs(model(x,w)-y))
    return cost/float(y.size)

def model(x,w):
    a = w[0]+np.dot(x.T,w[1:])
    return a.T

def least_squares(w):
    cost = np.sum((model(x,w)-y)**2)
    return cost/float(y.size)

def least_squares_grad(w):
    delta_w1 = np.sum(x*(model(x,w)-y))
    delta_w1 = 2*delta_w1/float(y.size)
    delta_w0 = np.sum(model(x,w)-y)
    delta_w0 = 2*delta_w0/float(y.size)

    deltaw = np.hstack((delta_w0, delta_w1))
    return deltaw

# hand-written gradient dispatcher; note that this shadows the autograd `grad`
# imported above, so gradient_descent uses these manual gradients from here on
def grad(f):
    if f.__name__ == "least_squares":
        return least_squares_grad
    elif f.__name__ == "least_absolute_deviation":
        return least_absolute_deviation_grad
    elif f.__name__ == "softmax":
        return softmax_grad
    elif f.__name__ == "logsoftmax":
        return logsoftmax_grad
    elif f.__name__ == "Perceptron":
        return Perceptron_grad

# work in log-log space
x = np.log(x)
y = np.log(y)

n = np.size(y)
w = [0.0,1.0]

alpha = 0.1
max_iter = 100
weight_history, cost_history = gradient_descent(least_squares,alpha,max_iter,w)

w = weight_history[-1]
print("y = %fx+%f" %(w[1],w[0]))

print("an animal weighing 10 kg requires %f calories" %((10*w[1]+w[0])/4.18))

y = 13.475128x+4.335793
an animal weighing 10 kg requires 33.274420 calories
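Because this cell replaces the autograd `grad` with the hand-written dispatcher above, a quick comparison of `least_squares_grad` against automatic differentiation is an easy way to catch sign or scaling mistakes. This is only an illustrative check; the alias `autograd_grad` and the test point are introduced here for the comparison and are not part of the assignment.

# illustrative check: the manual gradient should agree with autograd's
from autograd import grad as autograd_grad

w_check = np.array([0.5,2.0])
print(least_squares_grad(w_check))              # hand-written gradient
print(autograd_grad(least_squares)(w_check))    # automatic gradient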

Exercise 6

In [5]:

# cost function
def least_absolute_deviation(w):
    cost = np.sum(np.abs(model(x,w)-y))
    return cost/float(y.size)

def least_absolute_deviation_grad(w):
    res = 0
    res0 = 0
    yp = model(x,w) - y
    for i in range(yp.size):
        if yp[0][i] >= 0:
            res += x[0][i]
            res0 += 1
        else:
            res -= x[0][i]
            res0 -= 1

    delta_w1 = 2*res/float(y.size)
    delta_w0 = 2*res0/float(y.size)
    deltaw = np.hstack((delta_w0, delta_w1))
    return deltaw

# load in dataset
csvname = datapath + 'regression_outliers.csv'
data = np.loadtxt(csvname,delimiter = ',')

# get input and output of dataset
x = data[:-1,:]
y = data[-1:,:]

w = [0,1]
alpha = 0.1
max_iter = 100
weight_history, cost_history = gradient_descent(least_absolute_deviation,alpha,max_iter,w)

w = weight_history[-1]

x1 = np.linspace(-10, 8, 10)
y1 = w[1]*x1+w[0]

plt.figure()
plt.scatter(x,y,color="red",alpha=0.6)
plt.plot(x1, y1, color="green")
plt.show()
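To make the robustness point of this exercise visible in the cost values as well, the least absolute deviation and least squares costs could be run side by side on the same outlier data and compared with `plot_cost_histories`. This is an optional, illustrative comparison; the step lengths below are illustrative choices (a smaller step is used for the squared cost, whose gradients are larger on this data).

# illustrative comparison of the two costs on the outlier data
w_init = [0.0,1.0]
_, cost_history_lad = gradient_descent(least_absolute_deviation,0.1,100,w_init)
_, cost_history_ls  = gradient_descent(least_squares,0.01,100,w_init)
plot_cost_histories([cost_history_lad,cost_history_ls],
                    labels=['least absolute deviation','least squares'])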

Exercise 7

In [29]:

from mpl_toolkits.mplot3d import Axes3D

# load in dataset
csvname = datapath + 'linear_2output_regression.csv'
data = np.loadtxt(csvname,delimiter=',')

# get input and output of dataset
x = data[:2,:]
y = data[2:,:]

w = np.ones([3,2])
alpha = 0.1
max_iter = 100
weight_history, cost_history = gradient_descent(least_squares,alpha,max_iter,w)

w = weight_history[-1]

x1 = np.linspace(-10, 8, 10)
x2 = np.linspace(-10, 8, 10)
X, Y = np.meshgrid(x1, x2)

w1 = w[:,0]
yy1 = w1[0] + X*w1[1] + Y*w1[2]

w2 = w[:,1]
yy2 = w2[0] + X*w2[1] + Y*w2[2]

fig = plt.figure()
#ax = Axes3D(fig)
ax = fig.add_subplot(121, projection='3d')
#ax.view_init(elev=50,azim=-10)
ax.scatter(x[0],x[1],y[0],color="red",alpha=1.0)
ax.plot_surface(X,Y, yy1, color="green",alpha=0.5)

#ax = Axes3D(fig)
ax = fig.add_subplot(122, projection='3d')
#ax.view_init(elev=20,azim=10)
ax.scatter(x[0],x[1],y[1],color="red",alpha=1.0)
ax.plot_surface(X,Y, yy2, color="green",alpha=0.5)

plt.show()
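Note that the dispatcher's `least_squares_grad` returns only a two-component vector, while the weights in this cell form a 3x2 matrix (one column per output), so the update above relies on broadcasting rather than the true gradient. Purely as an illustrative sketch, a gradient matching the two-output model and the same normalization by `y.size` could look like the following; `least_squares_grad_multi` is a hypothetical name and is not used elsewhere in this notebook.

# hedged sketch: a least squares gradient for the (3,2) weight matrix used above
def least_squares_grad_multi(w):
    r = model(x,w) - y                          # residuals, shape (2,n)
    dw0 = 2*np.sum(r,axis=1)/float(y.size)      # bias row, shape (2,)
    dw_rest = 2*np.dot(x,r.T)/float(y.size)     # slope rows, shape (2,2)
    return np.vstack((dw0, dw_rest))            # shape (3,2), same as w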

Chapter 6 Exercises

Complete exercises 7, 8, 9, and 14 from Chapter 6 – make sure to download the most recent version of the text. Below we load in each dataset.

Exercise 7

In [21]:

# load in dataset
csvname = datapath + '2d_classification_data_v1.csv'
data = np.loadtxt(csvname,delimiter = ',')

# get input and output of dataset
x = data[:-1,:]
y = data[-1:,:]

# the convex softmax cost function
def softmax(w):
    #cost = np.sum(1+np.exp(-y*model(x,w)))
    #return cost/float(np.size(y))
    cost = 0
    for i in range(y.size):
        xi = x[0][i]
        yp = model(xi,w)
        if y[0][i] > 0:
            cost += 1/(1+np.exp(-yp))
        else:
            cost += 1 - 1/(1+np.exp(-yp))
    return cost/float(np.size(y))

def softmax_grad(w):
    dw1 = np.sum(np.exp(-model(x,w))/((1+np.exp(-model(x,w)))**2)*y*x)
    dw1 = -dw1/float(np.size(y))
    dw0 = np.sum(np.exp(-y*model(x,w))/((1+np.exp(-y*model(x,w)))**2)*y)
    dw0 = -dw0/float(np.size(y))
    deltaw = np.hstack((dw0, dw1))
    return deltaw

w = [0,1]
alpha = 0.1
max_iter = 300
weight_history, cost_history = gradient_descent(softmax,alpha,max_iter,w)

w = weight_history[-1]
misclass = 0
for i in range(y.size):
    xi = x[0][i]
    yp = model(xi,w)
    if yp >= 0:
        yp = 1
    else:
        yp = -1
    if yp != y[0][i]:
        misclass = misclass + 1

x1 = np.linspace(-5, 5, 40)
y1 = 1/(1+np.exp(-model(np.reshape(x1,[1,x1.size]),w)))
y0 = 1 - 1/(1+np.exp(-model(np.reshape(x1,[1,x1.size]),w)))

for i in range(y1.size):
    if y1[i] >= 0.5:
        ypi = 1
    else:
        ypi = -1
    if ypi != y[0][i]:
        misclass = misclass + 1

print("misclassifications number:%d" %misclass)

misclassifications number:5

Exercise 9

In [115]:

def Perceptron(w):
    cost = 0
    yp = np.array(-y*model(x,w))
    for i in range(y.size):
        if yp[0][i] > 0:
            cost += yp[0][i]
    return cost/float(np.size(y))

def Perceptron_grad(w):
    dw1 = np.zeros(np.shape(w[1:]))
    dw0 = 0
    yp = np.array(-y*model(x,w))

    for i in range(yp.size):
        if yp[0][i] >= 0:
            tmp = -y[0][i]*x[:,i]
            dw1 += np.reshape(tmp,[tmp.size,1])
            dw0 += -y[0][i]
    dw1 = dw1/float(y.size)
    dw0 = dw0/float(y.size)
    deltaw = np.vstack((dw0, dw1))
    return deltaw

# load in dataset
csvname = datapath + '3d_classification_data_v0.csv'
data = np.loadtxt(csvname,delimiter = ',')

# get input and output of dataset
x = data[:-1,:]
y = data[-1:,:]

w = np.float64(np.ones([3,1]))
w[0] = 0
w1 = w

alpha = 0.1
max_iter = 50
weight_history, cost_history = gradient_descent(Perceptron,alpha,max_iter,w)

alpha = 0.01
weight_history1, cost_history1 = gradient_descent(Perceptron,alpha,max_iter,w1)

w = weight_history[-1]
w1 = weight_history1[-1]

misclass = 0
yp = np.array(model(x,w).T)
misclass1 = 0
yp1 = np.array(model(x,w1).T)
for i in range(y.size):
    ypi = 1
    if yp[i] >= 0:
        ypi = 1
    else:
        ypi = -1
    if ypi != y[0][i]:
        misclass = misclass + 1

    ypi1 = 1
    if yp1[i] >= 0:
        ypi1 = 1
    else:
        ypi1 = -1
    if ypi1 != y[0][i]:
        misclass1 = misclass1 + 1

if misclass > misclass1:
    print("steplength = 0.01 achieves perfect classification first")
elif misclass < misclass1:
    print("steplength = 0.1 achieves perfect classification first")
else:
    print("Both achieve perfect classification at the same time")

steplength = 0.1 achieves perfect classification first

Exercise 14

In [99]:

# load in dataset
csvname = datapath + 'breast_cancer_data.csv'
data1 = np.loadtxt(csvname,delimiter = ',')

# get input and output of dataset
x = data1[:-1,:]
y = data1[-1:,:]

w = np.float64(np.ones([9,1]))
w[0] = 0
w1 = np.float64(np.ones([9,1]))
w1[0] = 0

alpha = 0.1
max_iter = 300
weight_history_soft, cost_history_soft = gradient_descent(logsoftmax,alpha,max_iter,w)
weight_history_perceptron, cost_history_perceptron = gradient_descent(Perceptron,alpha,max_iter,w1)

w_soft = weight_history_soft[-1]
w_perceptron = weight_history_perceptron[-1]

misclass_soft = 0
misclass_perceptron = 0
for i in range(y.size):
    xi = x[:,i]
    yp_soft = model(xi,w_soft)
    yp_perceptron = model(xi,w_perceptron)
    if yp_soft >= 0:
        yp_soft = 1
    else:
        yp_soft = -1
    if yp_soft != y[0][i]:
        misclass_soft = misclass_soft + 1

    if yp_perceptron >= 0:
        yp_perceptron = 1
    else:
        yp_perceptron = -1
    if yp_perceptron != y[0][i]:
        misclass_perceptron = misclass_perceptron + 1

if misclass_soft > misclass_perceptron:
    print("the Perceptron achieves better efficacy")
elif misclass_soft < misclass_perceptron:
    print("softmax achieves better efficacy")
else:
    print("both achieve the same efficacy")
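Finally, the `logsoftmax` cost called in this last cell (and routed to `logsoftmax_grad` by the `grad` dispatcher defined in Exercise 2) is never defined in this notebook. A minimal sketch of what the pair could look like, assuming the standard log-sum-exp form of the softmax cost from the text and the same `model(x,w)` and global `x`, `y` conventions used throughout, is given below; treat it as an assumption rather than part of the original submission.

# hedged sketch only: these two definitions are missing from the notebook
def logsoftmax(w):
    # mean of log(1 + exp(-y*model(x,w))) over all points
    cost = np.sum(np.log(1 + np.exp(-y*model(x,w))))
    return cost/float(np.size(y))

def logsoftmax_grad(w):
    # derivative of the cost above, written against the same globals x, y
    r = -y/(1 + np.exp(y*model(x,w)))           # per-point residual, shape (1,n)
    dw0 = np.sum(r)/float(np.size(y))           # bias component
    dw1 = np.dot(x,r.T)/float(np.size(y))       # slope components, shape (8,1) here
    return np.vstack((dw0, dw1))                # matches the (9,1) shape of w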