
Integrate CUDA into buildsystem and add example function

Former-commit-id: 392acb148a
Branch: tempestpy_adaptions
svkurowski committed 10 years ago
Commit: 00ec9a7db6
  1. CMakeLists.txt (45 additions)
  2. cuda/kernels/graph.cu (58 additions)
  3. cuda/kernels/graph.h (6 additions)
  4. storm-config.h.in (3 additions)

CMakeLists.txt (45 additions)

@@ -73,6 +73,12 @@ else()
    set(ENABLE_GUROBI ON)
endif()
if ("${CUDA_ROOT}" STREQUAL "")
    set(ENABLE_CUDA OFF)
else()
    set(ENABLE_CUDA ON)
endif()
if ("${Z3_ROOT}" STREQUAL "")
    set(ENABLE_Z3 OFF)
else()
@@ -195,6 +201,13 @@ else()
    set(STORM_CPP_GUROBI_DEF "undef")
endif()

# CUDA Defines
if (ENABLE_CUDA)
    set(STORM_CPP_CUDA_DEF "define")
else()
    set(STORM_CPP_CUDA_DEF "undef")
endif()

# glpk defines
set(STORM_CPP_GLPK_DEF "define")
@@ -227,6 +240,28 @@ configure_file (
# Add the binary dir include directory for storm-config.h
include_directories("${PROJECT_BINARY_DIR}/include")

#############################################################
##
## CUDA Library generation
##
#############################################################
if(ENABLE_CUDA)
    find_package(CUDA REQUIRED)
    set(STORM_CUDA_LIB_NAME "storm-cuda")
    file(GLOB_RECURSE STORM_CUDA_KERNEL_FILES ${PROJECT_SOURCE_DIR}/cuda/kernels/*.cu)
    set(CUDA_PROPAGATE_HOST_FLAGS OFF)
    set(CUDA_NVCC_FLAGS "-arch=sm_13")
    cuda_add_library(${STORM_CUDA_LIB_NAME}
        ${STORM_CUDA_KERNEL_FILES}
        SHARED
    )
endif()
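
Since the library target above globs every *.cu file under cuda/kernels/, further kernels are picked up on the next CMake run without additional build changes. A minimal sketch of such an extra kernel file follows; the file name, kernel, and host wrapper are hypothetical and only illustrate the layout the GLOB expects, they are not part of this commit.

// cuda/kernels/scale.cu (hypothetical example, not part of this commit)
#include <cuda_runtime.h>

// Multiply each element by a constant factor, one thread per element.
__global__ void scaleKernel(float *data, float factor, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

// Host-callable wrapper so that non-CUDA translation units only need a plain declaration.
void scaleOnDevice(float *hostData, float factor, int n)
{
    float *deviceData;
    const size_t bytes = n * sizeof(float);
    cudaMalloc((void**)&deviceData, bytes);
    cudaMemcpy(deviceData, hostData, bytes, cudaMemcpyHostToDevice);
    scaleKernel<<<(n + 255) / 256, 256>>>(deviceData, factor, n);
    cudaDeviceSynchronize();
    cudaMemcpy(hostData, deviceData, bytes, cudaMemcpyDeviceToHost);
    cudaFree(deviceData);
}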
#############################################################
##
## Source file aggregation and clustering
@@ -387,6 +422,16 @@ if (ENABLE_GUROBI)
    target_link_libraries(storm-performance-tests "gurobi56")
endif(ENABLE_GUROBI)

#############################################################
##
## CUDA (optional)
##
#############################################################
if (ENABLE_CUDA)
    message (STATUS "StoRM - Linking with CUDA")
    target_link_libraries(storm ${STORM_CUDA_LIB_NAME})
endif(ENABLE_CUDA)

#############################################################
##
## glpk

cuda/kernels/graph.cu (58 additions)

@@ -0,0 +1,58 @@
#include "cuda/kernels/graph.h"
#include <stdio.h>
const int N = 16;
const int blocksize = 16;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__
void hello(char *a, int *b)
{
a[threadIdx.x] += b[threadIdx.x];
}
void helloWorldCuda()
{
printf("CUDA TEST START\n");
printf("Should print \"Hello World\"\n");
char a[N] = "Hello \0\0\0\0\0\0";
int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
char c[N] = "YELLO \0\0\0\0\0\0";
char *ad;
int *bd;
const int csize = N*sizeof(char);
const int isize = N*sizeof(int);
printf("%s", a);
cudaMalloc( (void**)&ad, csize );
cudaMalloc( (void**)&bd, isize );
cudaMemcpy( ad, a, csize, cudaMemcpyHostToDevice );
cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
hello<<<dimGrid, dimBlock>>>(ad, bd);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
cudaMemcpy( c, ad, csize, cudaMemcpyDeviceToHost );
cudaFree( ad );
cudaFree( bd );
printf("%s\n", c);
printf("CUDA TEST END\n");
}
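
For orientation, a minimal host-side driver that exercises the example function could look as follows; the standalone main() is hypothetical and not part of this commit, it only shows the expected call and the resulting "Hello World!" output.

// Hypothetical standalone driver (not part of this commit); links against the
// storm-cuda library and should print a line ending in "Hello World!".
#include "cuda/kernels/graph.h"

int main()
{
    helloWorldCuda();
    return 0;
}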

cuda/kernels/graph.h (6 additions)

@@ -0,0 +1,6 @@
#ifndef CUDA_KERNELS_GRAPH_H
#define CUDA_KERNELS_GRAPH_H

void helloWorldCuda();

#endif /* CUDA_KERNELS_GRAPH_H */

storm-config.h.in (3 additions)

@@ -17,6 +17,9 @@
// Whether Gurobi is available and to be used (define/undef)
#@STORM_CPP_GUROBI_DEF@ STORM_HAVE_GUROBI

// Whether CUDA is available (define/undef)
#@STORM_CPP_CUDA_DEF@ STORM_HAVE_CUDA

// Whether GLPK is available and to be used (define/undef)
#@STORM_CPP_GLPK_DEF@ STORM_HAVE_GLPK
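
Host code is expected to test the new STORM_HAVE_CUDA switch before calling into the CUDA library. A minimal sketch of such a guarded call site follows; the helper function is hypothetical, since this commit only adds the define and the example function.

#include "storm-config.h"
#ifdef STORM_HAVE_CUDA
#include "cuda/kernels/graph.h"
#endif

// Hypothetical helper: run the CUDA smoke test only when STORM_HAVE_CUDA was
// defined at configure time, i.e. when CUDA_ROOT was set and storm-cuda was built.
void runCudaSmokeTestIfAvailable()
{
#ifdef STORM_HAVE_CUDA
    helloWorldCuda();
#endif
}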
