当前位置: 首页>>代码示例>>C#>>正文


C# GThread.atomicAdd方法代码示例

本文整理汇总了C#中GThread.atomicAdd方法的典型用法代码示例。如果您正苦于以下问题：C# GThread.atomicAdd方法的具体用法？C# GThread.atomicAdd怎么用？C# GThread.atomicAdd使用的例子？那么恭喜您，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在GThread的用法示例。


在下文中一共展示了GThread.atomicAdd方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。

示例1: histo_kernel

        public static void histo_kernel(GThread thread, byte[] buffer, int size, uint[] histo)
        {
            // Per-block partial histogram held in shared memory under the key
            // "temp". The kernel is expected to launch with 256 threads per
            // block, so each thread zeroes exactly one shared bin up front.
            uint[] blockHist = thread.AllocateShared<uint>("temp", 256);
            blockHist[thread.threadIdx.x] = 0;
            thread.SyncThreads();

            // Walk the input buffer in strides: each thread starts at its
            // global id and advances by the total number of launched threads
            // until the buffer is exhausted.
            int step = thread.blockDim.x * thread.gridDim.x;
            for (int pos = thread.threadIdx.x + thread.blockIdx.x * thread.blockDim.x; pos < size; pos += step)
            {
                thread.atomicAdd(ref blockHist[buffer[pos]], 1);
            }

            // Wait for every thread's shared-memory updates to land, then
            // fold this block's 256 partial counts into the global histogram
            // — one atomic add per thread, mirroring the clear phase above.
            thread.SyncThreads();
            thread.atomicAdd(ref histo[thread.threadIdx.x], blockHist[thread.threadIdx.x]);
        }
开发者ID:JustasB,项目名称:cudafy,代码行数:28,代码来源:hist_gpu_shmem_atomics.cs

示例2: atomicsTestUInt32

 public static void atomicsTestUInt32(GThread thread, uint[] input, uint[] output)
 {
     // Exercises each uint atomic primitive against input[0] in sequence,
     // storing the value every call reports into the next output slot.
     // The trailing comments give the expected results for input[0]
     // starting at 0.
     output[0]  = thread.atomicAdd(ref input[0], 42);       // 42
     output[1]  = thread.atomicSub(ref input[0], 21);       // 21
     output[2]  = thread.atomicIncEx(ref input[0]);         // 22
     output[3]  = thread.atomicIncEx(ref input[0]);         // 23
     output[4]  = thread.atomicMax(ref input[0], 50);       // 50
     output[5]  = thread.atomicMin(ref input[0], 40);       // 40
     output[6]  = thread.atomicOr(ref input[0], 16);        // 56
     output[7]  = thread.atomicAnd(ref input[0], 15);       // 8
     output[8]  = thread.atomicXor(ref input[0], 15);       // 7
     output[9]  = thread.atomicExch(ref input[0], 88);      // 88
     output[10] = thread.atomicCAS(ref input[0], 88, 123);  // 123
     output[11] = thread.atomicCAS(ref input[0], 321, 222); // 123
     output[12] = thread.atomicDecEx(ref input[0]);         // 122
 }
开发者ID:constructor-igor,项目名称:cudafy,代码行数:18,代码来源:GMathUnitTests.cs

示例3: MultiplySparseGPU2

        public static void MultiplySparseGPU2(GThread thread, int kernelCount, int[] indicesA, float[] valuesA, int nonzeroCountA, int colCountA, float[] B, int colCountB, float[] C, int transposeA)
        {
            // One logical work item per (nonzero of A) x (column of B) pair;
            // the loop strides by the total launched thread count so any
            // launch size covers all kernelCount items.
            int stride = thread.blockDim.x * thread.gridDim.x;
            for (int work = (thread.blockIdx.x * thread.blockDim.x) + thread.threadIdx.x; work < kernelCount; work += stride)
            {
                int bCol = work % colCountB;
                int nz = work / colCountB;

                // Decode the flat index of A's nonzero into (row, col),
                // swapping the pair when A is flagged as transposed.
                int flat = indicesA[nz];
                int aRow = flat / colCountA;
                int aCol = flat % colCountA;
                if (transposeA != 0)
                {
                    int swapped = aRow;
                    aRow = aCol;
                    aCol = swapped;
                }

                // Accumulate valuesA[nz] * B[aCol, bCol] into C[aRow, bCol].
                // atomicAdd is required because several nonzeros of A can
                // target the same output cell concurrently.
                float product = valuesA[nz] * B[aCol * colCountB + bCol];
                thread.atomicAdd(ref C[aRow * colCountB + bCol], product);
            }
        }
开发者ID:zhimingz,项目名称:kaggle_criteo,代码行数:26,代码来源:GPUModule.cs


注:本文中的GThread.atomicAdd方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。