From 2c184c51de3b33705daea397658f6bf55be37c96 Mon Sep 17 00:00:00 2001
From: Alexandru Fikl
Date: Fri, 27 Dec 2024 19:30:00 +0200
Subject: [PATCH] examples: reformat

---
 examples/demo.py       |  8 ++--
 examples/demo_plain.py | 16 +++-----
 examples/nvcc-test.py  | 83 +++++++++++++++++++-----------------------
 3 files changed, 46 insertions(+), 61 deletions(-)

diff --git a/examples/demo.py b/examples/demo.py
index 0923b4a..4f4940c 100644
--- a/examples/demo.py
+++ b/examples/demo.py
@@ -1,19 +1,17 @@
 import cgen as c
 
 from codepy.bpl import BoostPythonModule
+from codepy.toolchain import guess_toolchain
 
 
 mod = BoostPythonModule()
-
 mod.add_function(
     c.FunctionBody(
         c.FunctionDeclaration(c.Const(c.Pointer(c.Value("char", "greet"))), []),
         c.Block([c.Statement('return "hello world"')])
         ))
 
-from codepy.toolchain import guess_toolchain
-
-
-cmod = mod.compile(guess_toolchain())
+toolchain = guess_toolchain()
+cmod = mod.compile(toolchain)
 
 print(cmod.greet())
diff --git a/examples/demo_plain.py b/examples/demo_plain.py
index 80f0ce1..950bd85 100644
--- a/examples/demo_plain.py
+++ b/examples/demo_plain.py
@@ -1,3 +1,8 @@
+from codepy.jit import extension_from_string
+from codepy.libraries import add_boost_python
+from codepy.toolchain import guess_toolchain
+
+
 MODULE_CODE = """
 #include <boost/python.hpp>
 
@@ -15,19 +20,8 @@
 }
 """
 
-from codepy.toolchain import guess_toolchain
-
-
 toolchain = guess_toolchain()
-
-from codepy.libraries import add_boost_python
-
-
 add_boost_python(toolchain)
 
-from codepy.jit import extension_from_string
-
-
 cmod = extension_from_string(toolchain, "module", MODULE_CODE)
-
 print(cmod.greet())
diff --git a/examples/nvcc-test.py b/examples/nvcc-test.py
index ba1e891..8f191ae 100644
--- a/examples/nvcc-test.py
+++ b/examples/nvcc-test.py
@@ -1,24 +1,26 @@
+import math
+import sys
+
+import numpy as np
+import pycuda.autoinit
+import pycuda.driver
+import pycuda.gpuarray
+
 import cgen as c
 from cgen.cuda import CudaGlobal
 
 from codepy.bpl import BoostPythonModule
 from codepy.cuda import CudaModule
+from codepy.toolchain import guess_nvcc_toolchain, guess_toolchain
 
 # This file tests the ability to use compile and link CUDA code into the
 # Python interpreter. Running this test requires PyCUDA
 # as well as CUDA 3.0beta (or greater)
 
-
 # The host module should include a function which is callable from Python
 host_mod = BoostPythonModule()
 
-import math
-
-# Are we on a 32 or 64 bit platform?
-import sys
-
-
 bitness = math.log(sys.maxsize) + 1
 ptr_sz_uint_conv = "K" if bitness > 32 else "I"
 
 
@@ -49,15 +51,14 @@
     "PyObject* remoteResult = PyObject_Call(GPUArrayClass, args, kwargs)",
     "return remoteResult"]
 
-
 host_mod.add_function(
     c.FunctionBody(
-        c.FunctionDeclaration(c.Pointer(c.Value("PyObject", "adjacentDifference")),
-            [c.Pointer(c.Value("PyObject", "gpuArray"))]),
+        c.FunctionDeclaration(
+            c.Pointer(c.Value("PyObject", "adjacentDifference")),
+            [c.Pointer(c.Value("PyObject", "gpuArray"))]),
         c.Block([c.Statement(x) for x in statements])))
 host_mod.add_to_preamble([c.Include("boost/python/extract.hpp")])
 
-
 cuda_mod = CudaModule(host_mod)
 
 cuda_mod.add_to_preamble([c.Include("cuda.h")])
@@ -72,55 +73,46 @@
 
 diff = [
     c.Template("typename T",
-        CudaGlobal(c.FunctionDeclaration(c.Value("void", "diffKernel"),
-            [c.Value("T*", "inputPtr"),
-            c.Value("int", "length"),
-            c.Value("T*", "outputPtr")]))),
-    c.Block([c.Statement(global_index),
-        c.If("index == 0",
-            c.Statement("outputPtr[0] = inputPtr[0]"),
-            c.If("index < length",
-                c.Statement(compute_diff),
-                c.Statement("")))]),
+               CudaGlobal(c.FunctionDeclaration(c.Value("void", "diffKernel"),
+                          [c.Value("T*", "inputPtr"),
+                           c.Value("int", "length"),
+                           c.Value("T*", "outputPtr")]))),
+    c.Block([
+        c.Statement(global_index),
+        c.If("index == 0",
+             c.Statement("outputPtr[0] = inputPtr[0]"),
+             c.If("index < length",
+                  c.Statement(compute_diff),
+                  c.Statement("")))]),
     c.Template("typename T",
-        c.FunctionDeclaration(c.Value("CUdeviceptr", "difference"),
-            [c.Value("CUdeviceptr", "inputPtr"),
-            c.Value("int", "length")])),
+               c.FunctionDeclaration(c.Value("CUdeviceptr", "difference"),
+                   [c.Value("CUdeviceptr", "inputPtr"),
+                    c.Value("int", "length")])),
     c.Block([c.Statement(x) for x in launch])]
-
 cuda_mod.add_to_module(diff)
 
+
+# CudaModule.add_function also adds a declaration of this function to the
+# BoostPythonModule which is responsible for the host function.
+
 diff_instance = c.FunctionBody(
     c.FunctionDeclaration(c.Value("CUdeviceptr", "diffInstance"),
-        [c.Value("CUdeviceptr", "inputPtr"),
-        c.Value("int", "length")]),
+                          [c.Value("CUdeviceptr", "inputPtr"),
+                           c.Value("int", "length")]),
    c.Block([c.Statement("return difference(inputPtr, length)")]))
-
-# CudaModule.add_function also adds a declaration of this
-# function to the BoostPythonModule which
-# is responsible for the host function.
 cuda_mod.add_function(diff_instance)
 
-import codepy.jit
-import codepy.toolchain
-
-
-gcc_toolchain = codepy.toolchain.guess_toolchain()
-nvcc_toolchain = codepy.toolchain.guess_nvcc_toolchain()
-
+gcc_toolchain = guess_toolchain()
+nvcc_toolchain = guess_nvcc_toolchain()
 
 module = cuda_mod.compile(gcc_toolchain, nvcc_toolchain, debug=True)
 
-import numpy as np
-import pycuda.autoinit
-import pycuda.driver
-import pycuda.gpuarray
-
-length = 25
-constant_value = 2
 # This is a strange way to create a GPUArray, but is meant to illustrate
 # how to construct a GPUArray if the GPU buffer it owns has been
 # created by something else
+length = 25
+constant_value = 2
+
 pointer = pycuda.driver.mem_alloc(length * 4)
 pycuda.driver.memset_d32(pointer, constant_value, length)
 a = pycuda.gpuarray.GPUArray((length,), np.int32, gpudata=pointer)
@@ -129,6 +121,7 @@
 golden = [constant_value] + [0] * (length - 1)
 difference = [(x-y)*(x-y) for x, y in zip(b, golden, strict=True)]
 error = sum(difference)
+
 if error == 0:
     print("Test passed!")
 else:
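
For reference, the reformatted examples/demo.py can be read off this patch directly, since its single hunk spans the whole file. The sketch below is assembled from the hunk's context and added lines; the blank-line placement is inferred from the hunk and is not authoritative.

    # examples/demo.py as it reads after this patch (reconstructed from the
    # hunk above; blank-line placement is inferred)
    import cgen as c

    from codepy.bpl import BoostPythonModule
    from codepy.toolchain import guess_toolchain

    # Build a Boost.Python extension module with a single greet() function
    mod = BoostPythonModule()
    mod.add_function(
        c.FunctionBody(
            c.FunctionDeclaration(c.Const(c.Pointer(c.Value("char", "greet"))), []),
            c.Block([c.Statement('return "hello world"')])
            ))

    # Compile with the toolchain guessed from the running Python and call into it
    toolchain = guess_toolchain()
    cmod = mod.compile(toolchain)

    print(cmod.greet())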