#
# numbapig.py
# Neil Gershenfeld 2/9/20
# calculation of pi by a Numba GPU sum
# pi = 3.14159265358979323846
#
from numba import cuda
import numpy as np
import time
#
# problem size
#
block_size = 2**10
grid_size = 2**21
NPTS = grid_size*block_size
#
# kernels and functions
#
@cuda.jit
def init(arr):
    i = 1+cuda.grid(1)
    arr[i-1] = 0.5/((i-0.75)*(i-0.25))
    #arr[i-1] = i # for testing reduction
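# each term telescopes: 0.5/((i-0.75)*(i-0.25)) = 1/(i-0.75)-1/(i-0.25),
# so the sum over i is 4*(1-1/3+1/5-1/7+...), which converges to pi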
#
@cuda.reduce
def Numba_reduce(a,b):
    return a+b
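# @cuda.reduce builds a full GPU reduction from this binary operator;
# calling Numba_reduce(arr) runs it on the device and returns a host scalar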
#
@cuda.jit
def CUDA_sum(arr,len):
    i = cuda.grid(1)
    if (i < len):
        arr[i] += arr[i+len]
#
def CUDA_reduce(arr,NPTS):
    len = NPTS >> 1
    while (1):
        CUDA_sum[grid_size,block_size](arr,len)
        len = len >> 1
        if (len == 0):
            return
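# hand-written tree reduction: each pass folds the upper half of the array
# onto the lower half, halving the active length until the total is left
# in arr[0]; this assumes NPTS is a power of two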
#
@cuda.jit
def CUDA_result(arr,result):
    i = cuda.grid(1)
    if (i == 0):
        result[0] = arr[0]
#
# device array
#
arr = cuda.device_array(NPTS,np.float32)
result = cuda.device_array(1,np.float32)
#arr = cuda.device_array(NPTS,np.int64) # for testing reduction
#result = cuda.device_array(1,np.int64) # for testing reduction
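# both arrays are allocated directly in GPU memory; they are never
# initialized or copied from the host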
#
# compile kernels by calling them
#
init[grid_size,block_size](arr)
pi = Numba_reduce(arr)
CUDA_reduce(arr,NPTS)
CUDA_result[1,1](arr,result) # kernels need a launch configuration; only thread 0 writes, so one thread suffices
cuda.synchronize() # let the warm-up launches finish before any timing starts
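# (the first launch of each kernel triggers JIT compilation, so the timed
# runs below measure execution only, not compilation)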
#
# CUDA kernel array calculation
#
start_time = time.time()
init[grid_size,block_size](arr)
cuda.synchronize() # kernel launches are asynchronous; wait for completion before stopping the clock
end_time = time.time()
mflops = NPTS*4.0/(1.0e6*(end_time-start_time))
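# the factor of 4 counts the flops per term in init (two subtractions, one
# multiply, one divide); the reductions below are counted as one add per term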
print("CUDA kernel array calculation:")
print(" time = %f, estimated MFlops = %f"%(end_time-start_time,mflops))
#
# Numba reduce
#
init[grid_size,block_size](arr)
cuda.synchronize() # make sure init has finished before timing the reduction
start_time = time.time()
pi = Numba_reduce(arr)
end_time = time.time()
mflops = NPTS*1.0/(1.0e6*(end_time-start_time))
print("Numba reduce:")
print(" time = %f, estimated MFlops = %f"%(end_time-start_time,mflops))
#
# both with Numba reduce
#
start_time = time.time()
init[grid_size,block_size](arr)
pi = Numba_reduce(arr)
end_time = time.time()
mflops = NPTS*5.0/(1.0e6*(end_time-start_time))
print("both with Numba reduce:")
print(" NPTS = %d, pi = %f"%(NPTS,pi))
print(" time = %f, estimated MFlops = %f"%(end_time-start_time,mflops))
#
# CUDA kernel reduction
#
init[grid_size,block_size](arr)
cuda.synchronize() # make sure init has finished before timing the reduction
start_time = time.time()
CUDA_reduce(arr,NPTS)
cuda.synchronize() # wait for all reduction passes to complete
end_time = time.time()
mflops = NPTS*1.0/(1.0e6*(end_time-start_time))
print("CUDA kernel reduction:")
print(" time = %f, estimated MFlops = %f"%(end_time-start_time,mflops))
#
# both with CUDA kernel reduction
#
start_time = time.time()
init[grid_size,block_size](arr)
CUDA_reduce(arr,NPTS)
CUDA_result[1,1](arr,result) # kernels need a launch configuration; one thread suffices
cuda.synchronize() # wait for all kernels to complete before stopping the clock
end_time = time.time()
pi = result.copy_to_host()
mflops = NPTS*5.0/(1.0e6*(end_time-start_time))
print("both with CUDA kernel reduction:")
print(" NPTS = %d, pi = %f"%(NPTS,pi[0]))
print(" time = %f, estimated MFlops = %f"%(end_time-start_time,mflops))