Skip to content

Commit

Permalink
examples: reformat
Browse files Browse the repository at this point in the history
  • Loading branch information
alexfikl authored and inducer committed Dec 28, 2024
1 parent d583819 commit 2c184c5
Show file tree
Hide file tree
Showing 3 changed files with 46 additions and 61 deletions.
8 changes: 3 additions & 5 deletions examples/demo.py
Original file line number Diff line number Diff line change
@@ -1,19 +1,17 @@
import cgen as c

from codepy.bpl import BoostPythonModule
from codepy.toolchain import guess_toolchain


mod = BoostPythonModule()

mod.add_function(
c.FunctionBody(
c.FunctionDeclaration(c.Const(c.Pointer(c.Value("char", "greet"))), []),
c.Block([c.Statement('return "hello world"')])
))

from codepy.toolchain import guess_toolchain


cmod = mod.compile(guess_toolchain())
toolchain = guess_toolchain()
cmod = mod.compile(toolchain)

print(cmod.greet())
16 changes: 5 additions & 11 deletions examples/demo_plain.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,8 @@
from codepy.jit import extension_from_string
from codepy.libraries import add_boost_python
from codepy.toolchain import guess_toolchain


MODULE_CODE = """
#include <boost/python.hpp>
Expand All @@ -15,19 +20,8 @@
}
"""

from codepy.toolchain import guess_toolchain


toolchain = guess_toolchain()

from codepy.libraries import add_boost_python


add_boost_python(toolchain)

from codepy.jit import extension_from_string


cmod = extension_from_string(toolchain, "module", MODULE_CODE)

print(cmod.greet())
83 changes: 38 additions & 45 deletions examples/nvcc-test.py
Original file line number Diff line number Diff line change
@@ -1,24 +1,26 @@
import math
import sys

import numpy as np
import pycuda.autoinit
import pycuda.driver
import pycuda.gpuarray

import cgen as c
from cgen.cuda import CudaGlobal

from codepy.bpl import BoostPythonModule
from codepy.cuda import CudaModule
from codepy.toolchain import guess_nvcc_toolchain, guess_toolchain


# This file tests the ability to compile and link CUDA code into the
# Python interpreter. Running this test requires PyCUDA
# as well as CUDA 3.0beta (or greater)


# The host module should include a function which is callable from Python
host_mod = BoostPythonModule()

import math

# Are we on a 32 or 64 bit platform?
import sys


bitness = math.log(sys.maxsize) + 1
ptr_sz_uint_conv = "K" if bitness > 32 else "I"

Expand Down Expand Up @@ -49,15 +51,14 @@
"PyObject* remoteResult = PyObject_Call(GPUArrayClass, args, kwargs)",
"return remoteResult"]


host_mod.add_function(
c.FunctionBody(
c.FunctionDeclaration(c.Pointer(c.Value("PyObject", "adjacentDifference")),
[c.Pointer(c.Value("PyObject", "gpuArray"))]),
c.FunctionDeclaration(
c.Pointer(c.Value("PyObject", "adjacentDifference")),
[c.Pointer(c.Value("PyObject", "gpuArray"))]),
c.Block([c.Statement(x) for x in statements])))
host_mod.add_to_preamble([c.Include("boost/python/extract.hpp")])


cuda_mod = CudaModule(host_mod)
cuda_mod.add_to_preamble([c.Include("cuda.h")])

Expand All @@ -72,55 +73,46 @@

diff = [
c.Template("typename T",
CudaGlobal(c.FunctionDeclaration(c.Value("void", "diffKernel"),
[c.Value("T*", "inputPtr"),
c.Value("int", "length"),
c.Value("T*", "outputPtr")]))),
c.Block([c.Statement(global_index),
c.If("index == 0",
c.Statement("outputPtr[0] = inputPtr[0]"),
c.If("index < length",
c.Statement(compute_diff),
c.Statement("")))]),
CudaGlobal(c.FunctionDeclaration(c.Value("void", "diffKernel"),
[c.Value("T*", "inputPtr"),
c.Value("int", "length"),
c.Value("T*", "outputPtr")]))),
c.Block([
c.Statement(global_index),
c.If("index == 0",
c.Statement("outputPtr[0] = inputPtr[0]"),
c.If("index < length",
c.Statement(compute_diff),
c.Statement("")))]),

c.Template("typename T",
c.FunctionDeclaration(c.Value("CUdeviceptr", "difference"),
[c.Value("CUdeviceptr", "inputPtr"),
c.Value("int", "length")])),
c.FunctionDeclaration(c.Value("CUdeviceptr", "difference"),
[c.Value("CUdeviceptr", "inputPtr"),
c.Value("int", "length")])),
c.Block([c.Statement(x) for x in launch])]

cuda_mod.add_to_module(diff)

# CudaModule.add_function also adds a declaration of this function to the
# BoostPythonModule which is responsible for the host function.

diff_instance = c.FunctionBody(
c.FunctionDeclaration(c.Value("CUdeviceptr", "diffInstance"),
[c.Value("CUdeviceptr", "inputPtr"),
c.Value("int", "length")]),
[c.Value("CUdeviceptr", "inputPtr"),
c.Value("int", "length")]),
c.Block([c.Statement("return difference<int>(inputPtr, length)")]))

# CudaModule.add_function also adds a declaration of this
# function to the BoostPythonModule which
# is responsible for the host function.
cuda_mod.add_function(diff_instance)

import codepy.jit
import codepy.toolchain


gcc_toolchain = codepy.toolchain.guess_toolchain()
nvcc_toolchain = codepy.toolchain.guess_nvcc_toolchain()

gcc_toolchain = guess_toolchain()
nvcc_toolchain = guess_nvcc_toolchain()
module = cuda_mod.compile(gcc_toolchain, nvcc_toolchain, debug=True)
import numpy as np
import pycuda.autoinit
import pycuda.driver
import pycuda.gpuarray


length = 25
constant_value = 2
# This is a strange way to create a GPUArray, but is meant to illustrate
# how to construct a GPUArray if the GPU buffer it owns has been
# created by something else

length = 25
constant_value = 2

pointer = pycuda.driver.mem_alloc(length * 4)
pycuda.driver.memset_d32(pointer, constant_value, length)
a = pycuda.gpuarray.GPUArray((length,), np.int32, gpudata=pointer)
Expand All @@ -129,6 +121,7 @@
golden = [constant_value] + [0] * (length - 1)
difference = [(x-y)*(x-y) for x, y in zip(b, golden, strict=True)]
error = sum(difference)

if error == 0:
print("Test passed!")
else:
Expand Down

0 comments on commit 2c184c5

Please sign in to comment.