
Writing CUDA Programs with Qt

Date: 2026-01-29 09:25:02

This article assumes that Qt, CUDA, and Visual Studio are already installed and working correctly.

1. Create an empty Qt project.

2. Create a .cu file; in this article it is named kernel.cu.

Its contents are as follows:

#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <stdio.h>

cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);

__global__ void addKernel(int *c, const int *a, const int *b)
{
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}

extern "C"
void run()
{
    const int arraySize = 5;
    const int a[arraySize] = { 1, 2, 3, 4, 5 };
    const int b[arraySize] = { 10, 20, 30, 40, 50 };
    int c[arraySize] = { 0 };

    // Add vectors in parallel.
    cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return;
    }

    printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
        c[0], c[1], c[2], c[3], c[4]);

    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return;
    }

    // return 0;
}

// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    cudaError_t cudaStatus;

    // Choose which GPU to run on; change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    // Allocate GPU buffers for three vectors (two input, one output).
    cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

    // Launch a kernel on the GPU with one thread for each element.
    addKernel<<<1, size>>>(dev_c, dev_a, dev_b);

    // Check for any errors launching the kernel.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }

    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

Error:
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);

    return cudaStatus;
}
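kernel.cu exposes run() with C linkage so that the MSVC-compiled Qt side can call into the nvcc-compiled object. The article never shows main.cpp, which the .pro file below adds to SOURCES; a minimal sketch, assuming it does nothing more than declare and call run() from a console main(), might look like this:

// main.cpp -- hypothetical contents; the article does not show this file.
extern "C" void run();   // implemented in kernel.cu, compiled by nvcc

int main()
{
    run();               // runs the CUDA vector addition and prints the result
    return 0;
}

If the project later uses Qt classes, a QCoreApplication (or QApplication) can be constructed before calling run(); for this vector-add example a bare main() is enough.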

3. Write the .pro file:

CONFIG += console

TARGET = test

# Define output directories
DESTDIR = ../bin
CUDA_OBJECTS_DIR = OBJECTS_DIR/../cuda

# This makes the .cu files appear in your project
CUDA_SOURCES += \
    kernel.cu

# MSVCRT link option (static or dynamic, it must be the same with your Qt SDK link option)
MSVCRT_LINK_FLAG_DEBUG   = "/MDd"
MSVCRT_LINK_FLAG_RELEASE = "/MD"

# CUDA settings
CUDA_DIR = $$(CUDA_PATH)      # Path to cuda toolkit install
SYSTEM_NAME = x64             # Depending on your system either 'Win32', 'x64', or 'Win64'
SYSTEM_TYPE = 64              # '32' or '64', depending on your system
CUDA_ARCH = sm_50             # Type of CUDA architecture
NVCC_OPTIONS = --use_fast_math

# include paths
INCLUDEPATH += $$CUDA_DIR/include \
               $$CUDA_DIR/common/inc \
               $$CUDA_DIR/../shared/inc

# library directories
QMAKE_LIBDIR += $$CUDA_DIR/lib/$$SYSTEM_NAME \
                $$CUDA_DIR/common/lib/$$SYSTEM_NAME \
                $$CUDA_DIR/../shared/lib/$$SYSTEM_NAME

# The following makes sure all path names (which often include spaces) are put between quotation marks
CUDA_INC = $$join(INCLUDEPATH,'" -I"','-I"','"')

# Add the necessary libraries
CUDA_LIB_NAMES = cudart_static kernel32 user32 gdi32 winspool comdlg32 \
                 advapi32 shell32 ole32 oleaut32 uuid odbc32 odbccp32 \
                 #freeglut glew32

for(lib, CUDA_LIB_NAMES) {
    CUDA_LIBS += -l$$lib
}
LIBS += $$CUDA_LIBS

# Configuration of the Cuda compiler
CONFIG(debug, debug|release) {
    # Debug mode
    cuda_d.input    = CUDA_SOURCES
    cuda_d.output   = $$CUDA_OBJECTS_DIR/${QMAKE_FILE_BASE}_cuda.obj
    cuda_d.commands = $$CUDA_DIR/bin/nvcc.exe -D_DEBUG $$NVCC_OPTIONS $$CUDA_INC $$LIBS \
                      --machine $$SYSTEM_TYPE -arch=$$CUDA_ARCH \
                      --compile -cudart static -g -DWIN32 -D_MBCS \
                      -Xcompiler "/wd4819,/EHsc,/W3,/nologo,/Od,/Zi,/RTC1" \
                      -Xcompiler $$MSVCRT_LINK_FLAG_DEBUG \
                      -c -o ${QMAKE_FILE_OUT} ${QMAKE_FILE_NAME}
    cuda_d.dependency_type = TYPE_C
    QMAKE_EXTRA_COMPILERS += cuda_d
}
else {
    # Release mode
    cuda.input    = CUDA_SOURCES
    cuda.output   = $$CUDA_OBJECTS_DIR/${QMAKE_FILE_BASE}_cuda.obj
    cuda.commands = $$CUDA_DIR/bin/nvcc.exe $$NVCC_OPTIONS $$CUDA_INC $$LIBS \
                    --machine $$SYSTEM_TYPE -arch=$$CUDA_ARCH \
                    --compile -cudart static -DWIN32 -D_MBCS \
                    -Xcompiler "/wd4819,/EHsc,/W3,/nologo,/O2,/Zi" \
                    -Xcompiler $$MSVCRT_LINK_FLAG_RELEASE \
                    -c -o ${QMAKE_FILE_OUT} ${QMAKE_FILE_NAME}
    cuda.dependency_type = TYPE_C
    QMAKE_EXTRA_COMPILERS += cuda
}

SOURCES += \
    main.cpp

Note that a CUDA_PATH environment variable must be present in your environment, since the .pro file reads it via $$(CUDA_PATH); if it is missing, add it yourself.
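CUDA_PATH only matters at build time, where it is used to locate nvcc.exe and the CUDA headers. If the project builds but fails when run, the driver or GPU is usually the problem; a small helper like the following, which is not part of the article's project, could be added to kernel.cu to confirm that a device is visible at runtime:

// Hypothetical sanity check -- not part of the article's code.
#include "cuda_runtime.h"
#include <stdio.h>

extern "C" bool cudaDeviceAvailable()
{
    int deviceCount = 0;
    cudaError_t status = cudaGetDeviceCount(&deviceCount);
    if (status != cudaSuccess || deviceCount == 0) {
        printf("No usable CUDA device: %s\n", cudaGetErrorString(status));
        return false;
    }
    return true;
}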

4. Build and run.
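With the template values above, the build should first invoke nvcc on kernel.cu and then link the resulting object into the executable, and running it should print something like {1,2,3,4,5} + {10,20,30,40,50} = {11,22,33,44,55}. If nvcc is not found during the build, re-check the CUDA_PATH variable from the previous step.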


