
pip install mpi4py

Collecting mpi4py
Downloading mpi4py-3.1.3.tar.gz (2.5 MB)
|████████████████████████████████| 2.5 MB 4.7 MB/s
Installing build dependencies … done
Getting requirements to build wheel … done
Preparing wheel metadata … done
Building wheels for collected packages: mpi4py
Building wheel for mpi4py (PEP 517) … done
Created wheel for mpi4py: filename=mpi4py-3.1.3-cp37-cp37m-linux_x86_64.whl size=2185302 sha256=b62fc41a07ece3eee3a80b2d71a8a2f6a31cc006a1e896b660fd23339e64d31e
Stored in directory: /root/.cache/pip/wheels/7a/07/14/6a0c63fa2c6e473c6edc40985b7d89f05c61ff25ee7f0ad9ac
Successfully built mpi4py
Installing collected packages: mpi4py
Successfully installed mpi4py-3.1.3
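
A quick sanity check of the new installation (not part of the original notebook) is to import the module and print which MPI library it was built against:

!python -c "from mpi4py import MPI; print(MPI.Get_library_version())"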

Send Receive

%%writefile sndrec.py

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.rank
size = comm.size

shared = (rank+1)*7

# Pass a value around the ring: send to the right-hand neighbour,
# receive from the left-hand neighbour.
comm.send(shared, dest=(rank+1)%size)
data = comm.recv(source=(rank-1)%size)

print('Rank:', rank)
print('Received:', data, 'which came from rank:', (rank-1)%size)

Overwriting sndrec.py

!mpiexec --allow-run-as-root -n 2 python sndrec.py

Received: 14 which came from rank: 1
Received: 7 which came from rank: 0
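
In this ring exchange every rank issues a blocking send before its receive. It completes here because small pickled messages are buffered by MPI, but the same pattern can deadlock for large payloads. A minimal sketch of a safer variant (hypothetical file name, not from the original notebook) pairs the two operations with sendrecv:

%%writefile sndrec_ring.py

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.rank
size = comm.size

shared = (rank+1)*7

# sendrecv posts the send and the receive together, so neither rank
# has to wait for the other's send to complete first.
data = comm.sendrecv(shared, dest=(rank+1)%size, source=(rank-1)%size)

print('Rank:', rank)
print('Received:', data, 'which came from rank:', (rank-1)%size)

!mpiexec --allow-run-as-root -n 2 python sndrec_ring.py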

Send Receive without tag

%%writefile sndrecwotag.py
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.rank
size = comm.size

if rank == 0:
    shared = {'d1':55, 'd2':42}
    comm.send(shared, dest=1)
    shared2 = {'d3':25, 'd4':22}
    comm.send(shared2, dest=1)
    shared3 = {'d5':35, 'd6':32}
    comm.send(shared3, dest=1)
    shared4 = {'d7':65, 'd8':62}
    comm.send(shared4, dest=1)
if rank == 1:
    receive = comm.recv(source=0)
    print(receive)
    receive2 = comm.recv(source=0)
    print(receive2)
    receive3 = comm.recv(source=0)
    print(receive3)
    receive4 = comm.recv(source=0)
    print(receive4)

Overwriting sndrecwotag.py

!mpiexec --allow-run-as-root -n 2 python sndrecwotag.py

{'d1': 55, 'd2': 42}
{'d3': 25, 'd4': 22}
{'d5': 35, 'd6': 32}
{'d7': 65, 'd8': 62}

Send Receive with tag

%%writefile sndrectag.py
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.rank
size = comm.size

if rank == 0:
    shared = {'d1':55, 'd2':42}
    comm.send(shared, dest=1, tag=1)
    shared2 = {'d3':25, 'd4':22}
    comm.send(shared2, dest=1, tag=2)
    shared3 = {'d5':35, 'd6':32}
    comm.send(shared3, dest=1, tag=3)
    shared4 = {'d7':65, 'd8':62}
    comm.send(shared4, dest=1, tag=4)
if rank == 1:
    receive = comm.recv(source=0, tag=2)
    print(receive)
    receive2 = comm.recv(source=0, tag=1)
    print(receive2)
    receive3 = comm.recv(source=0, tag=4)
    print(receive3)
    receive4 = comm.recv(source=0, tag=3)
    print(receive4)

Writing sndrectag.py

!mpiexec --allow-run-as-root -n 2 python sndrectag.py

{'d3': 25, 'd4': 22}
{'d1': 55, 'd2': 42}
{'d7': 65, 'd8': 62}
{'d5': 35, 'd6': 32}
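
The receives above name specific tags, so MPI matches each recv to the send carrying the same tag rather than to the send that happened first, which is why the dictionaries print out of order. A receiver that does not care which tag arrives can accept any tag and inspect it afterwards; a minimal receiver-side sketch (illustrative only, not from the original notebook):

# Hypothetical receiver-side snippet: accept any tag and read it from a Status object.
status = MPI.Status()
receive = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
print(receive, 'arrived with tag', status.Get_tag())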

Broadcast

%%writefile bcast.py

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.rank
if rank == 0:
    data = {'a':1, 'b':2, 'c':3}
else:
    data = None

data = comm.bcast(data, root=0)

print('rank', rank, data)

Writing bcast.py

!mpiexec --allow-run-as-root -n 4 python bcast.py

rank 0 {'a': 1, 'b': 2, 'c': 3}
rank 1 {'a': 1, 'b': 2, 'c': 3}
rank 3 {'a': 1, 'b': 2, 'c': 3}
rank 2 {'a': 1, 'b': 2, 'c': 3}

%%writefile scatter.py

from mpi4py import MPI

comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()

if rank == 0:
    data = [(x+1)**x for x in range(size)]
    print('we will be scattering:', data)
else:
    data = None

data = comm.scatter(data, root=0)
print('rank', rank, 'has data:', data)

Overwriting scatter.py

!mpiexec --allow-run-as-root -n 5 python scatter.py

we will be scattering: [1, 2, 9, 64, 625]
rank 0 has data: 1
rank 1 has data: 2
rank 4 has data: 625
rank 3 has data: 64
rank 2 has data: 9
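
comm.scatter hands exactly one item from the root's list to each process, so the list length must equal the communicator size (the comprehension over range(size) guarantees that here). When there are more items than ranks, one option (a sketch under that assumption, not from the original notebook) is to pre-chunk the data on the root and scatter the chunks:

# Hypothetical: split a longer data set into `size` chunks, one per rank.
from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()

if rank == 0:
    chunks = np.array_split(np.arange(23), size)  # 23 items spread over `size` ranks
else:
    chunks = None

my_chunk = comm.scatter(chunks, root=0)
print('rank', rank, 'received', len(my_chunk), 'items')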

%%writefile gather.py

from mpi4py import MPI

comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()

if rank == 0:
    data = [(x+1)**x for x in range(size)]
    print('we will be scattering:', data)
else:
    data = None

data = comm.scatter(data, root=0)
print('rank', rank, 'has data:', data)

newData = comm.gather(data, root=0)

if rank == 0:
    print('master collected:', newData)

Writing gather.py

!mpiexec --allow-run-as-root -n 5 python gather.py

we will be scattering: [1, 2, 9, 64, 625]
rank 0 has data: 1
rank 4 has data: 625
rank 1 has data: 2
rank 2 has data: 9
rank 3 has data: 64
master collected: [1, 2, 9, 64, 625]
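
gather collects one value from every rank onto the root only. When every process needs the assembled list, allgather performs the same collection but returns the result on all ranks; a minimal sketch (not part of the original notebook):

# Hypothetical: every rank contributes one value and every rank receives the full list.
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

allData = comm.allgather((rank + 1) ** rank)
print('rank', rank, 'sees', allData)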

Calculate using Scatter and Gather

%%writefile avg.py

from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD

my_rank = comm.Get_rank()
p = comm.Get_size()

n = 100  # total number of values; assumed divisible by the number of processes

if my_rank == 0:
    data = np.arange(n, dtype='f')      # shape: (100,)
    data = data.reshape(p, int(n/p))    # shape: (4, 25), each process gets 25
else:
    data = None

recvbuf = np.empty(int(n/p), dtype='f')

# Scatter the numbers to all processes
comm.Scatter(data, recvbuf, root=0)

# Compute the average of the local subset
sub_avg = np.mean(recvbuf)

print('Process {}, sub array average is {}'.format(my_rank, sub_avg))

sub_avgs = None

if my_rank == 0:
    sub_avgs = np.empty(p, dtype='f')

# Gather the partial averages down to the root process
comm.Gather(sub_avg, sub_avgs, root=0)

# Compute the total average of all numbers
if my_rank == 0:
    avg = np.mean(sub_avgs)
    print('Average is', avg)

Overwriting avg.py

!mpiexec --allow-run-as-root -n 4 python avg.py

Process 0, sub array average is 12.0
Process 1, sub array average is 37.0
Process 2, sub array average is 62.0
Process 3, sub array average is 87.0
Average is 49.5
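
The uppercase Scatter/Gather calls above move raw numpy buffers and assume n divides evenly among the processes. When it does not, Scatterv lets the root send a different count to each rank; a sketch under that assumption (hypothetical values, not from the original notebook):

# Hypothetical: scatter 10 floats across p ranks even when 10 % p != 0.
from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
p = comm.Get_size()

n = 10
counts = np.array([n // p + (1 if r < n % p else 0) for r in range(p)])
displs = np.insert(np.cumsum(counts), 0, 0)[:-1]   # start offset of each chunk

recvbuf = np.empty(counts[rank], dtype='f')

if rank == 0:
    sendbuf = np.arange(n, dtype='f')
    comm.Scatterv([sendbuf, counts, displs, MPI.FLOAT], recvbuf, root=0)
else:
    comm.Scatterv(None, recvbuf, root=0)

print('rank', rank, 'got', recvbuf)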

Broadcasting

Serial version

import numpy as np

# a (lower bound), n (points per chunk) and p (number of chunks) are assumed
# to have been defined in an earlier cell.

# Compute the inner sum: midpoint rule over one chunk of n points
def integral(a_i, h, n):
    integ = 0.0
    for j in range(n):
        a_ij = a_i + (j + 0.5) * h
        integ += np.cos(a_ij) * h
    return integ

b = np.pi / 2.0
h = (b - a) / (n * p)

integral_sum = 0.0
# Compute the outer sum over the p chunks
for i in range(p):
    a_i = a + i * n * h
    integral_sum += integral(a_i, h, n)

print(integral_sum)

0.002366309434786632
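
The code above is the composite midpoint rule: the inner loop approximates the integral of f over one chunk of width n*h with n midpoint rectangles, and the outer loop adds up the p chunks, i.e. (with a, n and p carried over from the earlier cell)

integral of f(x) from a to b  ≈  sum over i = 0..p-1 of  sum over j = 0..n-1 of  f(a + (i*n + j + 0.5)*h) * h,   where h = (b - a) / (n*p).

The serial cell uses f(x) = cos(x); the parallel version below switches to sin(x) and hands each chunk i of the outer sum to a different MPI rank.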

Parallel version

%%writefile integralSum.py

from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

def integral(a_r, h, n):
    integ = 0.0
    for j in range(n):
        t = a_r + (j + 0.5) * h
        integ += np.sin(t) * h
    return integ

a = 0.0            # lower bound of integration
b = np.pi / 2
my_int = np.zeros(1)
integral_sum = np.zeros(1)

# Initialize value of n only if this is rank 0
if rank == 0:
    n = np.full(1, 500, dtype=int)  # default value
else:
    n = np.zeros(1, dtype=int)

# Broadcast n to all processes
print("Process ", rank, " before n =", n[0])
comm.Bcast(n, root=0)
print("Process", rank, " after n =", n[0])

# Compute this rank's partition
h = (b - a) / (n[0] * size)   # calculate h *after* we receive n
a_r = a + rank * h * n[0]
my_int[0] = integral(a_r, h, n[0])

# Send the partial result back to the root process, summing across all partitions
print("Process ", rank, " has the partial integral ", my_int[0])
comm.Reduce(my_int, integral_sum, MPI.SUM, root=0)

Overwriting integralSum.py

!mpiexec --allow-run-as-root -n 4 python integralSum.py

Process 3 before n = 0
Process 0 before n = 500
Process 0 after n = 500
Process 2 before n = 0
Process 2 after n = 500
Process 0 has the partial integral 0.07612046944516872
Process 2 has the partial integral 0.3244233571598174
Process 1 before n = 0
Process 1 after n = 500
Process 3 after n = 500
Process 3 has the partial integral 0.38268344220085593
Process 1 has the partial integral 0.216772756896253
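
Reduce leaves the summed result in integral_sum on the root rank only, and the script never prints it, which is why no total appears above. A short, hypothetical addition to the end of integralSum.py would report it:

# Only rank 0 holds the reduced value after comm.Reduce.
if rank == 0:
    print('The integral sum is', integral_sum[0])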
