MyPanorama (Python notebook)

MyPanorama

In [1]:

import numpy as np
import matplotlib
import matplotlib.image as image
import matplotlib.pyplot as plt
from skimage.transform import AffineTransform, ProjectiveTransform, warp
%matplotlib notebook
# NOTE: all “magic” options for backend plotting are: inline, notebook, and “external” (default)
# see http://ipython.readthedocs.io/en/stable/interactive/plotting.html for details

LineBuilder class for entering corresponding points in each image¶
Note: left mouse clicks add polyline intervals, the right mouse click terminates the polyline, the 'DEL' key removes the last click point¶

In [2]:

class LineBuilder:
    """Interactive polyline entry on a matplotlib axes.

    Left mouse clicks append vertices to ``line``, a right click enters the
    last vertex and terminates the polyline, and the 'delete' key removes the
    most recent vertex (re-enabling input if the polyline was terminated).
    """

    def __init__(self, axes, line):
        self.line = line
        self.ax = axes
        self.xs = list(line.get_xdata())
        self.ys = list(line.get_ydata())
        # dashed "rubber band" from the last vertex to the current mouse position
        self.dashed_line = plt.Line2D((0, 0), (0, 0), color=line.get_c(),
                                      ls=':', lw=line.get_linewidth())
        self.terminated = False

        self.connect()

    def connect(self):
        # connect to all the events we need
        self.cidpress = self.ax.figure.canvas.mpl_connect('button_press_event', self.on_click)
        self.cidmotion = self.ax.figure.canvas.mpl_connect('motion_notify_event', self.on_motion)
        self.cidkey = self.ax.figure.canvas.mpl_connect('key_press_event', self.on_key_down)

    def on_click(self, event):
        """Append the clicked point; a right click (button 3) terminates the polyline."""
        if self.terminated or event.inaxes != self.line.axes:
            return
        self.xs.append(event.xdata)
        self.ys.append(event.ydata)
        self.line.set_data(self.xs, self.ys)
        self.line.figure.canvas.draw()
        if event.button == 3:
            self.terminated = True  # right mouse click terminates the polyline

    def on_motion(self, event):
        """Update the dashed rubber-band from the last vertex to the cursor."""
        if self.terminated or event.inaxes != self.line.axes or len(self.xs) == 0:
            return
        self.dashed_line.set_data([self.xs[-1], event.xdata],
                                  [self.ys[-1], event.ydata])
        # FIX: add the artist to the axes only once (the original re-added it on
        # every motion event), and request a redraw so the rubber-band is visible
        if self.dashed_line not in self.ax.lines:
            self.ax.add_line(self.dashed_line)
        self.ax.figure.canvas.draw_idle()

    def on_key_down(self, e):
        """Pressing the 'delete' key removes the last click point."""
        if e.inaxes != self.ax.axes:
            return
        if e.key == 'delete':
            if self.xs:
                self.xs.pop()
            if self.ys:
                self.ys.pop()
            self.line.set_data(self.xs, self.ys)
            self.dashed_line.set_xdata([])
            self.dashed_line.set_ydata([])
            self.line.figure.canvas.draw()
            self.terminated = False  # deleting a point re-enables input

    def disconnect(self):
        # disconnect all the stored connection ids
        self.ax.figure.canvas.mpl_disconnect(self.cidmotion)
        self.ax.figure.canvas.mpl_disconnect(self.cidpress)
        self.ax.figure.canvas.mpl_disconnect(self.cidkey)

CrossDissolver class¶
Note: click on the left side to move back to image A, clicks on the right side to move forward to image B¶

In [3]:

# ASSUMPTION: images A and B should have the same size and the same data type
# ('uint8' {0,1,2,...,255} or 'float' [0,1])
class CrossDissolver:
    """Interactive cross-dissolve between two equally-sized images.

    Clicks on the left half of the axes move the mix toward image A
    (decreasing t), clicks on the right half toward image B (increasing t).
    """

    def __init__(self, axes, imA, imB, dtype='uint8'):
        self.ax = axes
        scaler = 1.0 / 255.0
        if dtype == 'float':
            scaler = 1.0
        self.A = scaler * np.array(imA)  # array A with (RGB) float values in [0, 1]
        self.B = scaler * np.array(imB)  # array B with (RGB) float values in [0, 1]
        self.t = 0.5
        self.width = imA.shape[1]
        self.text_pos = (self.width + 20, 20)
        # FIX: keep single artist handles so repeated clicks update the image/label
        # in place instead of stacking a new AxesImage and Text on every call
        self.img = None
        self.label = None
        self.cidpress = self.ax.figure.canvas.mpl_connect('button_press_event', self.on_click)
        self.showMix()

    def on_click(self, event):
        if event.inaxes != self.ax.axes:
            return
        if event.xdata > (self.width / 2):
            self.t = min(1.0, self.t + 0.1)
        if event.xdata < (self.width / 2):
            self.t = max(0.0, self.t - 0.1)
        self.showMix()

    def showMix(self):
        """Display the blend (1-t)*A + t*B and the current value of t."""
        mix = (1.0 - self.t) * self.A + self.t * self.B
        if self.img is None:
            self.img = self.ax.imshow(mix)
            self.label = self.ax.text(self.text_pos[0], self.text_pos[1], '',
                                      color='y', backgroundcolor='gray')
        else:
            self.img.set_data(mix)
        self.label.set_text('t = {:4.2f}'.format(self.t))
        self.ax.figure.canvas.draw_idle()


# --- Homographies (cell In [4]) ---
imA = image.imread("images/hall.png")
imB = np.zeros((imA.shape[0], imA.shape[1]), dtype=np.int8)

fig = plt.figure(1, figsize=(12, 4))
axA = plt.subplot(121)
polylineA = LineBuilder(axA, plt.plot([], [], 'ro-')[0])  # instantiates LineBuilder object
axA.imshow(imA)
plt.title("image A")

axB = plt.subplot(122)
polylineB = LineBuilder(axB, plt.plot([], [], 'bo-')[0])  # instantiates LineBuilder object
axB.imshow(imB, cmap="gray")
plt.title("frame B")

# (Left) click on the two images to consecutively enter corresponding points
# (right clicks enter the last points).
# Press the "DEL" key to delete the last click point.
# You should enter at least 4 points (to define a proper homography).

Warping image A onto frame B using a homographic transformation (projective map)¶
NOTE: the function "warp" takes an "inverse map" as its (second) argument because it uses "inverse warping" to compute output intensities¶

In [5]:

# extract the sequences of 2D points (x, y) from the two polylines
ptsA = polylineA.line.get_xydata()
ptsB = polylineB.line.get_xydata()

# estimate the projective map (homography) taking points ptsA to points ptsB
ProjAtoB = ProjectiveTransform()
ProjAtoB.estimate(ptsA, ptsB)

# "warp" expects the inverse map (output -> input coordinates)
imB = warp(imA, ProjAtoB.inverse)

axB.imshow(imB)
axB.set_title("Projecting A onto frame B")

Out[5]:

Panoramas (projective and other transformations)¶

In [6]:

imL = image.imread("images/CMU_left.jpg")
imR = image.imread("images/CMU_right.jpg")

fig = plt.figure(2, figsize=(12, 4))

# left image with its own polyline builder (red markers)
axL = plt.subplot(121)
polylineL = LineBuilder(axL, plt.plot([], [], 'ro-')[0])
axL.imshow(imL)
axL.set_title("Left image")

# right image with its own polyline builder (blue markers)
axR = plt.subplot(122)
polylineR = LineBuilder(axR, plt.plot([], [], 'bo-')[0])
axR.imshow(imR)
axR.set_title("Right image")

plt.show()

# (Left) click on the two images to consecutively enter corresponding points
# (right clicks enter the last points).
# Press the "DELETE" key to delete the last click, if necessary.
# You should enter at least 4 points (to define a proper homography).

Estimate affine and projective transformations between left and right images¶

In [7]:

# extract the sequences of 2D points (x, y) from the two polylines
ptsL = polylineL.line.get_xydata()
ptsR = polylineR.line.get_xydata()

# affine transformation estimated from the corresponding points ptsL -> ptsR
AffLtoR = AffineTransform()
AffLtoR.estimate(ptsL, ptsR)

# projective transformation (homography) estimated from the same points
PrjLtoR = ProjectiveTransform()
PrjLtoR.estimate(ptsL, ptsR)

Out[7]:

True

Reprojecting (warping) right image to the left image (reference frame)¶
NOTE: function “warp” uses “inverse map” as a (second) argument as it uses “inverse warping” to compute output intensities¶

In [8]:

# estimate the size of the reference frame (Ref) -- the same image plane as "imL",
# widened to hold both images
shapeRef = (imL.shape[0] + 30, imL.shape[1] + imR.shape[1])

# copy the Left image into the reference frame (Ref);
# the 3x3 identity matrix serves as the (inverse) transformation matrix
LtoRef = warp(imL, np.identity(3), output_shape=shapeRef)

# Warp the Right image (imR) into the reference frame using the two estimated
# transformations:
#   (A) RaffRef -- using AffLtoR (affine map estimated from the corresponding points)
#   (B) RprjRef -- using PrjLtoR (homography estimated from the corresponding points)
# NOTE: "warp" expects an inverse map (output -> input coordinates); AffLtoR and
# PrjLtoR map reference-frame (left) coordinates to right-image coordinates,
# which is exactly the required mapping.
RaffRef = warp(imR, AffLtoR, output_shape=shapeRef)
RprjRef = warp(imR, PrjLtoR, output_shape=shapeRef)

# Compare the registration of features in LtoRef against RaffRef and RprjRef
# by cross-dissolving each pair side by side.
fig = plt.figure(3, figsize=(12, 4))

ax12 = plt.subplot(121)
css12 = CrossDissolver(ax12, LtoRef, RaffRef, 'float')
plt.title("cross-dissolving LtoRef and RaffRef")

ax34 = plt.subplot(122)
css34 = CrossDissolver(ax34, LtoRef, RprjRef, 'float')
plt.title("cross-dissolving LtoRef and RprjRef")

Out[8]:

The homographic (projective) transformation works better. In the affine result, the right side of the image is left black and the railing is not aligned smoothly.

Blending¶

In [9]:

# This function computes a distance map from the boundary of an image.
# It returns a numpy array of the same shape as "image" with, at each pixel,
# the distance to the closest side of the image (float values).
def boundaryDT(image):
    """Distance of every pixel to the closest image border.

    Border pixels get distance 1.0 and each step away from the border
    increases the distance by 1.0 (chessboard-style, per the assignment note
    allowing 0.5 or 1.0 at the edge).  For multi-channel images the per-pixel
    distance is replicated across the trailing dimensions so the result has
    exactly the same shape as ``image``.
    """
    rows = image.shape[0]
    cols = image.shape[1]

    # Per-row distance to the top/bottom border and per-column distance to the
    # left/right border; the 2D map is their pointwise minimum.  This replaces
    # the original O(rows*cols) Python double loop with vectorized numpy ops.
    i = np.arange(rows, dtype=np.float64)
    j = np.arange(cols, dtype=np.float64)
    row_dist = np.minimum(i + 1.0, rows - i)
    col_dist = np.minimum(j + 1.0, cols - j)
    dist = np.minimum(row_dist[:, None], col_dist[None, :])

    # Replicate across any trailing (e.g. color) dimensions to match image.shape.
    # NOTE: np.float was removed in NumPy 1.24; use np.float64 explicitly.
    extra = (1,) * (image.ndim - 2)
    res = np.broadcast_to(dist.reshape((rows, cols) + extra), image.shape)
    return np.array(res, dtype=np.float64)

In [10]:

# compute the boundary distance map/transform (bdt) for the two input images
Ldt = boundaryDT(imL)
Rdt = boundaryDT(imR)

# the left distance map goes into the reference frame unchanged (identity map)
LdtRef = warp(Ldt, np.identity(3), output_shape=shapeRef)

# project/warp the right distance map into the reference frame; PrjLtoR maps
# reference (left) coordinates to right-image coordinates, which is the
# inverse map that "warp" expects
RdtRef = warp(Rdt, PrjLtoR, output_shape=shapeRef)

# when debugging, visualize LdtRef and RdtRef to check the expected results
fig = plt.figure(4)
ax12 = plt.subplot()
css12 = CrossDissolver(ax12, LdtRef, RdtRef, 'float')
# FIX: the original title said "LtoRef and RaffRef" (copy-paste from the
# previous figure), but this figure cross-dissolves the two distance maps
plt.title("cross-dissolving LdtRef and RdtRef")

# ax34 = plt.subplot(122)
# css34 = CrossDissolver(ax34,LtoRef, RprjRef ,’float’)
# plt.title(“cross-dissolving LtoRef and RprjRef”)

Out[10]:

In [11]:

# Generate a mosaic by blending the two registered images LtoRef and RprjRef
# using two different blending alphas, and compare the results.
#
# numpy HINT 1: given an (w,h) array "alpha" of per-pixel scalars for an RGB
#               image of shape (w,h,3), np.dstack((alpha,alpha,alpha)) builds a
#               matching (w,h,3) array (same scaler for R, G and B per pixel)
# numpy HINT 2: A*B performs pointwise multiplication of same-shape arrays
# numpy HINT 3: a+A adds the scalar a to every element of A

# METHOD 1: plain averaging of intensities in the overlap area
imgA = LtoRef + RprjRef
# pixels covered by both images (positive distance in both warped maps)
overlapArea = np.mean(LdtRef, 2) * np.mean(RdtRef, 2) > 0
imgA[overlapArea] = (LtoRef[overlapArea] + RprjRef[overlapArea]) / 2
plt.figure()
plt.imshow(imgA)

# METHOD 2: DT-weighted averaging in the overlap area, using the warped
# boundary-distance maps LdtRef and RdtRef as blending weights
alphaA = LdtRef / (LdtRef + RdtRef + 0.00001)  # small epsilon avoids division by zero
imgB = LtoRef * alphaA + RprjRef * (1 - alphaA)
plt.figure()
plt.imshow(imgB)

plt.show()

Results And The Differences¶
From the two figures above, we can see that DT-weighted averaging is better than plain averaging of the two images' intensities in the overlap area. The result of method 1 has an obvious vertical seam, while the result of DT-weighted averaging is much smoother.

In [ ]: