CUDA Streams tutorial with downloadable code. Demonstrates concurrent kernel launches with event management and NVIDIA Visual Profiler analysis. Download the CUDA Streams code here.
//CUDA Streams | A very simple demonstration of cuda streams //Website: cudaeducation.com //Company: CUDA Education | cudaeducation.com | cudaeducation@gmail.com | Please donate at cudaeducation.com //YouTube Channel (please subscribe): Cuda Education | https://www.youtube.com/channel/UCzpwNg0Ai8zCzbsEtozkfFQ //Twitter: @cudaeducation //Slack: https://bit.ly/2NBBG4h | Join the workspace //Have questions? Comment on the YouTube channel https://www.youtube.com/channel/UCzpwNg0Ai8zCzbsEtozkfFQ or message on Twitter //Donate: Visit cudaeducation.com to donate //DISCLAIMER: This code is for teaching purposes only! CUDA Education does not guarantee the accuracy of this code in any way. This code should not be used in a production or commercial environment. Any liabilities or loss resulting from the use of this code, in whole or in part, will not be the responsibility of CUDA Education. //All rights reserved. This code is the property of CUDA Education. Please contact CUDA Education at cudaeducation@gmail.com if you would like to use this code in any way, shape or form. 
//CODING ENVIRONMENT: //CUDA Toolkit 9.0 //Windows environment //Visual Studio 2017 Community Edition //nVidia GeForce 1050 ti Graphics Card //Compute Capability 6.1 //make sure to include all relevant libraries #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <windows.h> #include <stdio.h> //function that will run on the GPU __global__ void cuda_education_function_on_CUDA_GPU() { //some pointless processing just to make sure the function is actually called int gpudummy = 0; for (int i = 0; i < 100000; i++) { gpudummy = gpudummy + 1; gpudummy = gpudummy - 1; } //CUDA EDUCATION //Website: cudaeducation.com //Twitter: @cudaeducation //Email: cudaeducation@gmail.com //YouTube: Cuda Education | Please subscribe //Slack: https://bit.ly/2NBBG4h | Join the workspace //Donate: Visit cudaeducation.com to donate } int main(int argc, char **argv) { //get the nVidia GPU running CUDA ready int cuda_education_device = 0; //set the device to be used for CUDA execution cudaSetDevice(cuda_education_device); //set up the CUDA timer for tracking how long it takes the kernel on the device (GPU) to do its job //for more information on this, check out https://devblogs.nvidia.com/how-implement-performance-metrics-cuda-cc/ cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); //START create variables for the CUDA kernel launch //create THREADS_IN_BLOCK variable and declare the number of threads that should be in each block. //variable type is dim3, which basically means there are 3 dimensions to this variable (no need to discuss further here) dim3 THREADS_IN_BLOCK(64, 1); //create BLOCKS_IN_GRID variable and declare the number of blocks to use. 
//variable type is dim3, which basically means there are 3 dimensions to this variable (no need to discuss further here) dim3 BLOCKS_IN_GRID(1, 1); //print out the values used for block and grid printf("number of blocks %d | number of threads in each block %d\n", BLOCKS_IN_GRID.x, THREADS_IN_BLOCK.x); //END create variables for the CUDA kernel launch //START create streams for cuda kernel launches //declare stream variables cudaStream_t stream1; cudaStream_t stream2; cudaStream_t stream3; cudaStream_t stream4; cudaStream_t stream5; cudaStream_t stream6; cudaStream_t stream7; cudaStream_t stream8; cudaStream_t stream9; cudaStream_t stream10; cudaStream_t stream11; cudaStream_t stream12; cudaStream_t stream13; cudaStream_t stream14; cudaStream_t stream15; cudaStream_t stream16; cudaStream_t stream17; cudaStream_t stream18; cudaStream_t stream19; cudaStream_t stream20; cudaStream_t stream21; cudaStream_t stream22; //create stream cudaStreamCreate(&stream1); cudaStreamCreate(&stream2); cudaStreamCreate(&stream3); cudaStreamCreate(&stream4); cudaStreamCreate(&stream5); cudaStreamCreate(&stream6); cudaStreamCreate(&stream7); cudaStreamCreate(&stream8); cudaStreamCreate(&stream9); cudaStreamCreate(&stream10); cudaStreamCreate(&stream11); cudaStreamCreate(&stream12); cudaStreamCreate(&stream13); cudaStreamCreate(&stream14); cudaStreamCreate(&stream15); cudaStreamCreate(&stream16); cudaStreamCreate(&stream17); cudaStreamCreate(&stream18); cudaStreamCreate(&stream19); cudaStreamCreate(&stream20); cudaStreamCreate(&stream21); cudaStreamCreate(&stream22); //END create streams for cuda kernel launches //declare shared memory variable //not relevant in this example, but have to fill in a value when launching kernel with streams int my_shared_memory = 0; //make sure all preceding tasks on the device (GPU) has been completed before proceeding with new business. wouldn't want to mix ingredients from two completely different recipes now would you?!?! 
cudaDeviceSynchronize(); //"start" event marker. this is simply a marker in the code cudaEventRecord(start); //launch the CUDA kernel //take note of the launch parameters for the kernels //several kernel launches on multiple streams at the same time. this is where kernel concurrency happens cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK, my_shared_memory, stream1 >> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK, my_shared_memory, stream2 >> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK, my_shared_memory, stream3 >> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK, my_shared_memory, stream4 >> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK, my_shared_memory, stream5 >> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK, my_shared_memory, stream6 >> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK, my_shared_memory, stream7 >> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK, my_shared_memory, stream8 >> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK, my_shared_memory, stream9 >> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK, my_shared_memory, stream10 >> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK, my_shared_memory, stream11 >> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK, my_shared_memory, stream12 >> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK, my_shared_memory, stream13 >> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK, my_shared_memory, stream14 >> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK, my_shared_memory, stream15 >> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, 
THREADS_IN_BLOCK, my_shared_memory, stream16 >> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK, my_shared_memory, stream17 >> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK, my_shared_memory, stream18 >> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK, my_shared_memory, stream19 >> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK, my_shared_memory, stream20 >> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK, my_shared_memory, stream21 >> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK, my_shared_memory, stream22 >> > (); //several kernel launches on the null stream (the default stream). sequential processing. kernels are launched and completed one after the other. no kernel concurrency /* cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK>> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK>> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK>> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK>> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK>> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK>> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK>> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK>> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK>> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK>> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK>> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK>> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK>> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, 
THREADS_IN_BLOCK>> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK>> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK>> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK>> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK>> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK>> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK>> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK>> > (); cuda_education_function_on_CUDA_GPU << <BLOCKS_IN_GRID, THREADS_IN_BLOCK>> > (); */ //"stop" event marker. this is a simply a marker in the code. it doesn't not guarantee that everything preceding the marker has finished processing or is concluded. cudaEventRecord(stop); //check to make sure all preceding business before the "stop" marker is complete before proceeding //remember that by default the CPU will launch the kernel on the device (GPU) and immediately continue processing other instructions. //we use cudaEventSynchronize(stop) to tell the CPU to halt further processing until all preceding business before the stop event flag has finished processing or is concluded. 
cudaEventSynchronize(stop); //now that we can guarantee that all preceding business before the "stop" marker is finished, we can calculate and print the amount of time the event took float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("DEVICE elapsed time: %f milliseconds\n", milliseconds); //practice good housekeeping by resetting the device and destroying event variables when you are done cudaEventDestroy(start); cudaEventDestroy(stop); cudaDeviceReset(); //CUDA EDUCATION //Website: cudaeducation.com //Twitter: @cudaeducation //Email: cudaeducation@gmail.com //YouTube: Cuda Education //Slack: https://bit.ly/2NBBG4h | Join the workspace //Donate: Visit cudaeducation.com to donate to the cause } |
Donate