TensorReductionCuda.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_CUDA_H
#define EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_CUDA_H

namespace Eigen {
namespace internal {


#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
// Full reducers for GPU, don't vectorize for now

// Reducer function that enables multiple CUDA threads to safely accumulate at the same
// output address. It basically reads the current value of the output variable, and
// attempts to update it with the new value. If in the meantime another CUDA thread
// updated the content of the output address it will try again.
template <typename T, typename R>
__device__ EIGEN_ALWAYS_INLINE void atomicReduce(T* output, T accum, R& reducer) {
#if __CUDA_ARCH__ >= 300
  if (sizeof(T) == 4)
  {
    unsigned int oldval = *reinterpret_cast<unsigned int*>(output);
    unsigned int newval = oldval;
    reducer.reduce(accum, reinterpret_cast<T*>(&newval));
    if (newval == oldval) {
      return;
    }
    unsigned int readback;
    while ((readback = atomicCAS((unsigned int*)output, oldval, newval)) != oldval) {
      oldval = readback;
      newval = oldval;
      reducer.reduce(accum, reinterpret_cast<T*>(&newval));
      if (newval == oldval) {
        return;
      }
    }
  }
  else if (sizeof(T) == 8) {
    unsigned long long oldval = *reinterpret_cast<unsigned long long*>(output);
    unsigned long long newval = oldval;
    reducer.reduce(accum, reinterpret_cast<T*>(&newval));
    if (newval == oldval) {
      return;
    }
    unsigned long long readback;
    while ((readback = atomicCAS((unsigned long long*)output, oldval, newval)) != oldval) {
      oldval = readback;
      newval = oldval;
      reducer.reduce(accum, reinterpret_cast<T*>(&newval));
      if (newval == oldval) {
        return;
      }
    }
  }
  else {
    assert(0 && "Wordsize not supported");
  }
#else
  assert(0 && "Shouldn't be called on unsupported device");
#endif
}
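
// Illustrative sketch (not part of Eigen, not compiled): the same compare-and-swap
// retry idea as above, written out for a plain float "max" accumulation so the
// protocol is easier to follow in isolation. The helper name atomicMaxFloat is
// hypothetical; NaN handling is ignored for brevity.
#if 0
__device__ void atomicMaxFloat(float* output, float value) {
  int* addr = reinterpret_cast<int*>(output);
  int old = *addr;                              // optimistic read of the current value
  while (true) {
    if (__int_as_float(old) >= value) return;   // another thread already wrote a larger value
    const int assumed = old;
    old = atomicCAS(addr, assumed, __float_as_int(value));
    if (old == assumed) return;                 // our update landed
    // Otherwise another thread raced us between the read and the CAS:
    // retry against the value it wrote, exactly like the loops above.
  }
}
#endif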


#ifdef EIGEN_HAS_CUDA_FP16
template <template <typename T> class R>
__device__ inline void atomicReduce(half2* output, half2 accum, R<half>& reducer) {
#if __CUDA_ARCH__ >= 300
  unsigned int oldval = *reinterpret_cast<unsigned int*>(output);
  unsigned int newval = oldval;
  reducer.reducePacket(accum, reinterpret_cast<half2*>(&newval));
  if (newval == oldval) {
    return;
  }
  unsigned int readback;
  while ((readback = atomicCAS((unsigned int*)output, oldval, newval)) != oldval) {
    oldval = readback;
    newval = oldval;
    reducer.reducePacket(accum, reinterpret_cast<half2*>(&newval));
    if (newval == oldval) {
      return;
    }
  }
#else
  assert(0 && "Shouldn't be called on unsupported device");
#endif
}
#endif

template <>
__device__ inline void atomicReduce(float* output, float accum, SumReducer<float>&) {
#if __CUDA_ARCH__ >= 300
  atomicAdd(output, accum);
#else
  assert(0 && "Shouldn't be called on unsupported device");
#endif
}


template <typename CoeffType, typename Index>
__global__ void ReductionInitKernel(const CoeffType val, Index num_preserved_coeffs, CoeffType* output) {
  const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
  const Index num_threads = blockDim.x * gridDim.x;
  for (Index i = thread_id; i < num_preserved_coeffs; i += num_threads) {
    output[i] = val;
  }
}


template <int BlockSize, int NumPerThread, typename Self,
          typename Reducer, typename Index>
__global__ void FullReductionKernel(Reducer reducer, const Self input, Index num_coeffs,
                                    typename Self::CoeffReturnType* output, unsigned int* semaphore) {
#if __CUDA_ARCH__ >= 300
  // Initialize the output value
  const Index first_index = blockIdx.x * BlockSize * NumPerThread + threadIdx.x;
  if (gridDim.x == 1) {
    if (first_index == 0) {
      *output = reducer.initialize();
    }
  }
  else {
    if (threadIdx.x == 0) {
      unsigned int block = atomicCAS(semaphore, 0u, 1u);
      if (block == 0) {
        // We're the first block to run, initialize the output value
        atomicExch(output, reducer.initialize());
        __threadfence();
        atomicExch(semaphore, 2u);
      }
      else {
        // Wait for the first block to initialize the output value.
        // Use atomicCAS here to ensure that the reads aren't cached
        unsigned int val;
        do {
          val = atomicCAS(semaphore, 2u, 2u);
        }
        while (val < 2u);
      }
    }
  }

  __syncthreads();

  eigen_assert(gridDim.x == 1 || *semaphore >= 2u);

  typename Self::CoeffReturnType accum = reducer.initialize();
  Index max_iter = numext::mini<Index>(num_coeffs - first_index, NumPerThread*BlockSize);
  for (Index i = 0; i < max_iter; i+=BlockSize) {
    const Index index = first_index + i;
    eigen_assert(index < num_coeffs);
    typename Self::CoeffReturnType val = input.m_impl.coeff(index);
    reducer.reduce(val, &accum);
  }

#pragma unroll
  for (int offset = warpSize/2; offset > 0; offset /= 2) {
    reducer.reduce(__shfl_down(accum, offset, warpSize), &accum);
  }

  if ((threadIdx.x & (warpSize - 1)) == 0) {
    atomicReduce(output, accum, reducer);
  }

  if (gridDim.x > 1 && threadIdx.x == 0) {
    // Let the last block reset the semaphore
    atomicInc(semaphore, gridDim.x + 1);
  }
#else
  assert(0 && "Shouldn't be called on unsupported device");
#endif
}
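
// Illustrative sketch (not part of Eigen, not compiled): the warp-level shuffle
// reduction used in the kernel above, specialized to a plain float sum. After
// log2(warpSize) steps, lane 0 of each warp holds the sum contributed by the whole
// warp; only that lane then needs to touch global memory via atomicReduce.
#if 0
__device__ float warpSumFloat(float val) {
  for (int offset = warpSize / 2; offset > 0; offset /= 2) {
    // Each lane adds the value held by the lane `offset` positions above it.
    val += __shfl_down(val, offset, warpSize);
  }
  return val;
}
#endif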


#ifdef EIGEN_HAS_CUDA_FP16
template <typename Self,
          typename Reducer, typename Index>
__global__ void ReductionInitFullReduxKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs, half2* scratch) {
  eigen_assert(blockDim.x == 1);
  eigen_assert(gridDim.x == 1);
  if (num_coeffs % 2 != 0) {
    half last = input.m_impl.coeff(num_coeffs-1);
    *scratch = __halves2half2(last, reducer.initialize());
  } else {
    *scratch = reducer.template initializePacket<half2>();
  }
}

template <typename Self,
          typename Reducer, typename Index>
__global__ void ReductionInitKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs, half* output) {
  const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
  const Index num_threads = blockDim.x * gridDim.x;
  const Index num_packets = num_coeffs / 2;
  for (Index i = thread_id; i < num_packets; i += num_threads) {
    ((half2*)output)[i] = reducer.template initializePacket<half2>();
  }

  if (thread_id == 0 && num_coeffs % 2 != 0) {
    output[num_coeffs-1] = reducer.initialize();
  }
}

template <int BlockSize, int NumPerThread, typename Self,
          typename Reducer, typename Index>
__global__ void FullReductionKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs,
                                             half* output, half2* scratch) {
  eigen_assert(NumPerThread % 2 == 0);

  const Index first_index = blockIdx.x * BlockSize * NumPerThread + 2*threadIdx.x;

  // Initialize the output value if it wasn't initialized by the ReductionInitKernel
  if (gridDim.x == 1 && first_index == 0) {
    if (num_coeffs % 2 != 0) {
      half last = input.m_impl.coeff(num_coeffs-1);
      *scratch = __halves2half2(last, reducer.initialize());
    } else {
      *scratch = reducer.template initializePacket<half2>();
    }
    __syncthreads();
  }

  half2 accum = reducer.template initializePacket<half2>();
  const Index max_iter = numext::mini<Index>((num_coeffs - first_index) / 2, NumPerThread*BlockSize / 2);
  for (Index i = 0; i < max_iter; i += BlockSize) {
    const Index index = first_index + 2*i;
    eigen_assert(index + 1 < num_coeffs);
    half2 val = input.m_impl.template packet<Unaligned>(index);
    reducer.reducePacket(val, &accum);
  }

#pragma unroll
  for (int offset = warpSize/2; offset > 0; offset /= 2) {
    reducer.reducePacket(__shfl_down(accum, offset, warpSize), &accum);
  }

  if ((threadIdx.x & (warpSize - 1)) == 0) {
    atomicReduce(scratch, accum, reducer);
  }

  __syncthreads();

  if (gridDim.x == 1 && first_index == 0) {
    half tmp = __low2half(*scratch);
    reducer.reduce(__high2half(*scratch), &tmp);
    *output = tmp;
  }
}

template <typename Op>
__global__ void ReductionCleanupKernelHalfFloat(Op& reducer, half* output, half2* scratch) {
  // This kernel is launched with a single block of a single thread.
  eigen_assert(threadIdx.x == 0);
  half tmp = __low2half(*scratch);
  reducer.reduce(__high2half(*scratch), &tmp);
  *output = tmp;
}

#endif


template <typename Self, typename Op, typename OutputType, bool PacketAccess>
struct FullReductionLauncher {
  static void run(const Self&, Op&, const GpuDevice&, OutputType*, typename Self::Index) {
    assert(false && "Should only be called on floats and half floats");
  }
};

template <typename Self, typename Op, bool PacketAccess>
struct FullReductionLauncher<Self, Op, float, PacketAccess> {
  static void run(const Self& self, Op& reducer, const GpuDevice& device, float* output, typename Self::Index num_coeffs) {
    typedef typename Self::Index Index;
    typedef typename Self::CoeffReturnType Scalar;
    const int block_size = 256;
    const int num_per_thread = 128;
    const int num_blocks = divup<int>(num_coeffs, block_size * num_per_thread);

    unsigned int* semaphore = NULL;
    if (num_blocks > 1) {
      semaphore = device.semaphore();
    }

    LAUNCH_CUDA_KERNEL((FullReductionKernel<block_size, num_per_thread, Self, Op, Index>),
                       num_blocks, block_size, 0, device, reducer, self, num_coeffs, output, semaphore);
  }
};
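
// Sizing note (illustrative): each block processes block_size * num_per_thread
// coefficients, i.e. 256 * 128 = 32768. A reduction over, say, one million
// coefficients therefore launches divup(1000000, 32768) = 31 blocks and takes the
// semaphore path in FullReductionKernel; smaller inputs fit in a single block and
// skip it.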

#ifdef EIGEN_HAS_CUDA_FP16
template <typename Self, typename Op>
struct FullReductionLauncher<Self, Op, Eigen::half, false> {
  static void run(const Self&, Op&, const GpuDevice&, half*, typename Self::Index) {
    assert(false && "Should not be called since there is no packet accessor");
  }
};

template <typename Self, typename Op>
struct FullReductionLauncher<Self, Op, Eigen::half, true> {
  static void run(const Self& self, Op& reducer, const GpuDevice& device, half* output, typename Self::Index num_coeffs) {
    typedef typename Self::Index Index;

    const int block_size = 256;
    const int num_per_thread = 128;
    const int num_blocks = divup<int>(num_coeffs, block_size * num_per_thread);
    half2* scratch = static_cast<half2*>(device.scratchpad());

    if (num_blocks > 1) {
      // We initialize the output and the scratchpad outside the reduction kernel when we
      // can't be sure that there won't be race conditions between multiple thread blocks.
      LAUNCH_CUDA_KERNEL((ReductionInitFullReduxKernelHalfFloat<Self, Op, Index>),
                         1, 1, 0, device, reducer, self, num_coeffs, scratch);
    }

    LAUNCH_CUDA_KERNEL((FullReductionKernelHalfFloat<block_size, num_per_thread, Self, Op, Index>),
                       num_blocks, block_size, 0, device, reducer, self, num_coeffs, output, scratch);

    if (num_blocks > 1) {
      LAUNCH_CUDA_KERNEL((ReductionCleanupKernelHalfFloat<Op>),
                         1, 1, 0, device, reducer, output, scratch);
    }
  }
};
#endif


template <typename Self, typename Op, bool Vectorizable>
struct FullReducer<Self, Op, GpuDevice, Vectorizable> {
  // Unfortunately NVIDIA GPUs do not handle exotic types such as complex well,
  // so we restrict the optimized code path to the simple case of floats and
  // half floats.
#ifdef EIGEN_HAS_CUDA_FP16
  static const bool HasOptimizedImplementation = !Op::IsStateful &&
      (internal::is_same<typename Self::CoeffReturnType, float>::value ||
       (internal::is_same<typename Self::CoeffReturnType, Eigen::half>::value && reducer_traits<Op, GpuDevice>::PacketAccess));
#elif __CUDA_ARCH__ >= 300
  static const bool HasOptimizedImplementation = !Op::IsStateful &&
      internal::is_same<typename Self::CoeffReturnType, float>::value;
#else
  static const bool HasOptimizedImplementation = false;
#endif

  template <typename OutputType>
  static void run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output) {
    assert(HasOptimizedImplementation && "Should only be called on floats or half floats");
    const Index num_coeffs = array_prod(self.m_impl.dimensions());
    // Don't crash when we're called with an input tensor of size 0.
    if (num_coeffs == 0) {
      return;
    }

    FullReductionLauncher<Self, Op, OutputType, reducer_traits<Op, GpuDevice>::PacketAccess>::run(self, reducer, device, output, num_coeffs);
  }
};
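
// Usage sketch (illustrative, not part of this file, not compiled): a user-level
// full reduction that is expected to be dispatched through FullReducer<..., GpuDevice>
// above. It assumes EIGEN_USE_GPU is defined, <unsupported/Eigen/CXX11/Tensor> is
// included, and gpu_in/gpu_out are hypothetical device pointers; the stream/device
// setup follows the usual Eigen tensor GPU pattern rather than anything in this file.
#if 0
void full_sum_on_gpu(float* gpu_in, float* gpu_out, int n) {
  Eigen::CudaStreamDevice stream;
  Eigen::GpuDevice device(&stream);
  Eigen::TensorMap<Eigen::Tensor<float, 1> > in(gpu_in, n);
  Eigen::TensorMap<Eigen::Tensor<float, 0> > out(gpu_out);
  // Evaluating the sum on a GpuDevice reduces all coefficients to a single scalar.
  out.device(device) = in.sum();
}
#endif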


template <int NumPerThread, typename Self,
          typename Reducer, typename Index>
__global__ void InnerReductionKernel(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs,
                                     typename Self::CoeffReturnType* output) {
#if __CUDA_ARCH__ >= 300
  eigen_assert(blockDim.y == 1);
  eigen_assert(blockDim.z == 1);
  eigen_assert(gridDim.y == 1);
  eigen_assert(gridDim.z == 1);

  const int unroll_times = 16;
  eigen_assert(NumPerThread % unroll_times == 0);

  const Index input_col_blocks = divup<Index>(num_coeffs_to_reduce, blockDim.x * NumPerThread);
  const Index num_input_blocks = input_col_blocks * num_preserved_coeffs;

  const Index num_threads = blockDim.x * gridDim.x;
  const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;

  // Initialize the output values if they weren't initialized by the ReductionInitKernel
  if (gridDim.x == 1) {
    for (Index i = thread_id; i < num_preserved_coeffs; i += num_threads) {
      output[i] = reducer.initialize();
    }
    __syncthreads();
  }

  for (Index i = blockIdx.x; i < num_input_blocks; i += gridDim.x) {
    const Index row = i / input_col_blocks;

    if (row < num_preserved_coeffs) {
      const Index col_block = i % input_col_blocks;
      const Index col_begin = col_block * blockDim.x * NumPerThread + threadIdx.x;

      float reduced_val = reducer.initialize();

      for (Index j = 0; j < NumPerThread; j += unroll_times) {
        const Index last_col = col_begin + blockDim.x * (j + unroll_times - 1);
        if (last_col >= num_coeffs_to_reduce) {
          for (Index col = col_begin + blockDim.x * j; col < num_coeffs_to_reduce; col += blockDim.x) {
            const float val = input.m_impl.coeff(row * num_coeffs_to_reduce + col);
            reducer.reduce(val, &reduced_val);
          }
          break;
        } else {
          // Faster version of the loop with no branches after unrolling.
#pragma unroll
          for (int k = 0; k < unroll_times; ++k) {
            const Index col = col_begin + blockDim.x * (j + k);
            reducer.reduce(input.m_impl.coeff(row * num_coeffs_to_reduce + col), &reduced_val);
          }
        }
      }

#pragma unroll
      for (int offset = warpSize/2; offset > 0; offset /= 2) {
        reducer.reduce(__shfl_down(reduced_val, offset), &reduced_val);
      }

      if ((threadIdx.x & (warpSize - 1)) == 0) {
        atomicReduce(&(output[row]), reduced_val, reducer);
      }
    }
  }
#else
  assert(0 && "Shouldn't be called on unsupported device");
#endif
}

#ifdef EIGEN_HAS_CUDA_FP16

template <int NumPerThread, typename Self,
          typename Reducer, typename Index>
__global__ void InnerReductionKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs,
                                              half* output) {
  eigen_assert(blockDim.y == 1);
  eigen_assert(blockDim.z == 1);
  eigen_assert(gridDim.y == 1);
  eigen_assert(gridDim.z == 1);

  const int unroll_times = 16;
  eigen_assert(NumPerThread % unroll_times == 0);
  eigen_assert(unroll_times % 2 == 0);

  const Index input_col_blocks = divup<Index>(num_coeffs_to_reduce, blockDim.x * NumPerThread * 2);
  const Index num_input_blocks = divup<Index>(input_col_blocks * num_preserved_coeffs, 2);

  const Index num_threads = blockDim.x * gridDim.x;
  const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;

  // Initialize the output values if they weren't initialized by the ReductionInitKernel
  if (gridDim.x == 1) {
    Index i = 2*thread_id;
    for (; i + 1 < num_preserved_coeffs; i += 2*num_threads) {
      half* loc = output + i;
      *((half2*)loc) = reducer.template initializePacket<half2>();
    }
    if (i < num_preserved_coeffs) {
      output[i] = reducer.initialize();
    }
    __syncthreads();
  }

  for (Index i = blockIdx.x; i < num_input_blocks; i += gridDim.x) {
    const Index row = 2 * (i / input_col_blocks);

    if (row + 1 < num_preserved_coeffs) {
      const Index col_block = i % input_col_blocks;
      const Index col_begin = 2 * (col_block * blockDim.x * NumPerThread + threadIdx.x);

      half2 reduced_val1 = reducer.template initializePacket<half2>();
      half2 reduced_val2 = reducer.template initializePacket<half2>();

      for (Index j = 0; j < NumPerThread; j += unroll_times) {
        const Index last_col = col_begin + blockDim.x * (j + unroll_times - 1) * 2;
        if (last_col >= num_coeffs_to_reduce) {
          Index col = col_begin + blockDim.x * j;
          for (; col + 1 < num_coeffs_to_reduce; col += blockDim.x) {
            const half2 val1 = input.m_impl.template packet<Unaligned>(row * num_coeffs_to_reduce + col);
            reducer.reducePacket(val1, &reduced_val1);
            const half2 val2 = input.m_impl.template packet<Unaligned>((row+1) * num_coeffs_to_reduce + col);
            reducer.reducePacket(val2, &reduced_val2);
          }
          if (col < num_coeffs_to_reduce) {
            // Peel the last, odd column.
            const half last1 = input.m_impl.coeff(row * num_coeffs_to_reduce + col);
            const half2 val1 = __halves2half2(last1, reducer.initialize());
            reducer.reducePacket(val1, &reduced_val1);
            const half last2 = input.m_impl.coeff((row+1) * num_coeffs_to_reduce + col);
            const half2 val2 = __halves2half2(last2, reducer.initialize());
            reducer.reducePacket(val2, &reduced_val2);
          }
          break;
        } else {
          // Faster version of the loop with no branches after unrolling.
#pragma unroll
          for (int k = 0; k < unroll_times; ++k) {
            const Index col = col_begin + blockDim.x * (j + k) * 2;
            reducer.reducePacket(input.m_impl.template packet<Unaligned>(row * num_coeffs_to_reduce + col), &reduced_val1);
            reducer.reducePacket(input.m_impl.template packet<Unaligned>((row + 1)* num_coeffs_to_reduce + col), &reduced_val2);
          }
        }
      }

#pragma unroll
      for (int offset = warpSize/2; offset > 0; offset /= 2) {
        reducer.reducePacket(__shfl_down(reduced_val1, offset, warpSize), &reduced_val1);
        reducer.reducePacket(__shfl_down(reduced_val2, offset, warpSize), &reduced_val2);
      }

      half val1 = __low2half(reduced_val1);
      reducer.reduce(__high2half(reduced_val1), &val1);
      half val2 = __low2half(reduced_val2);
      reducer.reduce(__high2half(reduced_val2), &val2);
      half2 val = __halves2half2(val1, val2);

      if ((threadIdx.x & (warpSize - 1)) == 0) {
        half* loc = output + row;
        atomicReduce((half2*)loc, val, reducer);
      }
    }
  }
}

#endif

template <typename Self, typename Op, typename OutputType, bool PacketAccess>
struct InnerReductionLauncher {
  static EIGEN_DEVICE_FUNC bool run(const Self&, Op&, const GpuDevice&, OutputType*, typename Self::Index, typename Self::Index) {
    assert(false && "Should only be called to reduce floats and half floats on a gpu device");
    return true;
  }
};

template <typename Self, typename Op, bool PacketAccess>
struct InnerReductionLauncher<Self, Op, float, PacketAccess> {
  static bool run(const Self& self, Op& reducer, const GpuDevice& device, float* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
    typedef typename Self::Index Index;

    const Index num_coeffs = num_coeffs_to_reduce * num_preserved_vals;
    const int block_size = 256;
    const int num_per_thread = 128;
    const int dyn_blocks = divup<int>(num_coeffs, block_size * num_per_thread);
    const int max_blocks = device.getNumCudaMultiProcessors() *
                           device.maxCudaThreadsPerMultiProcessor() / block_size;
    const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);

    if (num_blocks > 1) {
      // We initialize the outputs outside the reduction kernel when we can't be sure that
      // there won't be race conditions between multiple thread blocks.
      const int dyn_blocks = divup<int>(num_preserved_vals, 1024);
      const int max_blocks = device.getNumCudaMultiProcessors() *
                             device.maxCudaThreadsPerMultiProcessor() / 1024;
      const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);
      LAUNCH_CUDA_KERNEL((ReductionInitKernel<float, Index>),
                         num_blocks, 1024, 0, device, reducer.initialize(),
                         num_preserved_vals, output);
    }

    LAUNCH_CUDA_KERNEL((InnerReductionKernel<num_per_thread, Self, Op, Index>),
                       num_blocks, block_size, 0, device, reducer, self, num_coeffs_to_reduce, num_preserved_vals, output);

    return false;
  }
};

#ifdef EIGEN_HAS_CUDA_FP16
template <typename Self, typename Op>
struct InnerReductionLauncher<Self, Op, Eigen::half, false> {
  static bool run(const Self&, Op&, const GpuDevice&, half*, typename Self::Index, typename Self::Index) {
    assert(false && "Should not be called since there is no packet accessor");
    return true;
  }
};

template <typename Self, typename Op>
struct InnerReductionLauncher<Self, Op, Eigen::half, true> {
  static bool run(const Self& self, Op& reducer, const GpuDevice& device, half* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
    typedef typename Self::Index Index;

    if (num_preserved_vals % 2 != 0) {
      // Not supported yet, revert to the slower code path
      return true;
    }

    const Index num_coeffs = num_coeffs_to_reduce * num_preserved_vals;
    const int block_size = /*256*/128;
    const int num_per_thread = /*128*/64;
    const int dyn_blocks = divup<int>(num_coeffs, block_size * num_per_thread);
    const int max_blocks = device.getNumCudaMultiProcessors() *
                           device.maxCudaThreadsPerMultiProcessor() / block_size;
    const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);

    if (num_blocks > 1) {
      // We initialize the outputs outside the reduction kernel when we can't be sure that
      // there won't be race conditions between multiple thread blocks.
      const int dyn_blocks = divup<int>(num_preserved_vals, 1024);
      const int max_blocks = device.getNumCudaMultiProcessors() *
                             device.maxCudaThreadsPerMultiProcessor() / 1024;
      const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);
      LAUNCH_CUDA_KERNEL((ReductionInitKernelHalfFloat<Self, Op, Index>),
                         1, 1, 0, device, reducer, self, num_preserved_vals, output);
    }

    LAUNCH_CUDA_KERNEL((InnerReductionKernelHalfFloat<num_per_thread, Self, Op, Index>),
                       num_blocks, block_size, 0, device, reducer, self, num_coeffs_to_reduce, num_preserved_vals, output);

    return false;
  }
};
#endif


template <typename Self, typename Op>
struct InnerReducer<Self, Op, GpuDevice> {
  // Unfortunately NVIDIA GPUs do not handle exotic types such as complex well,
  // so we restrict the optimized code path to the simple case of floats and
  // half floats.
#ifdef EIGEN_HAS_CUDA_FP16
  static const bool HasOptimizedImplementation = !Op::IsStateful &&
      (internal::is_same<typename Self::CoeffReturnType, float>::value ||
       (internal::is_same<typename Self::CoeffReturnType, Eigen::half>::value && reducer_traits<Op, GpuDevice>::PacketAccess));
#elif __CUDA_ARCH__ >= 300
  static const bool HasOptimizedImplementation = !Op::IsStateful &&
      internal::is_same<typename Self::CoeffReturnType, float>::value;
#else
  static const bool HasOptimizedImplementation = false;
#endif

  template <typename OutputType>
  static bool run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
    assert(HasOptimizedImplementation && "Should only be called on floats or half floats");
    const Index num_coeffs = array_prod(self.m_impl.dimensions());
    // Don't crash when we're called with an input tensor of size 0.
    if (num_coeffs == 0) {
      return true;
    }
    // It's faster to use the generic code path when there are only a few coefficients to reduce.
    if (num_coeffs_to_reduce <= 128) {
      return true;
    }

    return InnerReductionLauncher<Self, Op, OutputType, reducer_traits<Op, GpuDevice>::PacketAccess>::run(self, reducer, device, output, num_coeffs_to_reduce, num_preserved_vals);
  }
};
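
// Usage sketch (illustrative, not part of this file, not compiled): reducing along the
// innermost (column-major) dimension of a rank-2 tensor, which is the case
// InnerReducer above is meant to accelerate once the reduced dimension holds more
// than 128 coefficients. Device setup and pointer names are assumptions, as in the
// earlier sketch.
#if 0
void sum_over_inner_dim_on_gpu(float* gpu_in, float* gpu_out,
                               int reduce_size, int preserved_size) {
  Eigen::CudaStreamDevice stream;
  Eigen::GpuDevice device(&stream);
  Eigen::TensorMap<Eigen::Tensor<float, 2> > in(gpu_in, reduce_size, preserved_size);
  Eigen::TensorMap<Eigen::Tensor<float, 1> > out(gpu_out, preserved_size);
  Eigen::array<int, 1> dims;
  dims[0] = 0;  // dimension 0 is the innermost one for the default column-major layout
  out.device(device) = in.sum(dims);  // one result per preserved coefficient
}
#endif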

template <int NumPerThread, typename Self,
          typename Reducer, typename Index>
__global__ void OuterReductionKernel(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs,
                                     typename Self::CoeffReturnType* output) {
  const Index num_threads = blockDim.x * gridDim.x;
  const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
  // Initialize the output values if they weren't initialized by the ReductionInitKernel
  if (gridDim.x == 1) {
    for (Index i = thread_id; i < num_preserved_coeffs; i += num_threads) {
      output[i] = reducer.initialize();
    }
    __syncthreads();
  }

  // Do the reduction.
  const Index max_iter = num_preserved_coeffs * divup<Index>(num_coeffs_to_reduce, NumPerThread);
  for (Index i = thread_id; i < max_iter; i += num_threads) {
    const Index input_col = i % num_preserved_coeffs;
    const Index input_row = (i / num_preserved_coeffs) * NumPerThread;
    typename Self::CoeffReturnType reduced_val = reducer.initialize();
    const Index max_row = numext::mini(input_row + NumPerThread, num_coeffs_to_reduce);
    for (Index j = input_row; j < max_row; j++) {
      typename Self::CoeffReturnType val = input.m_impl.coeff(j * num_preserved_coeffs + input_col);
      reducer.reduce(val, &reduced_val);
    }
    atomicReduce(&(output[input_col]), reduced_val, reducer);
  }
}


template <typename Self, typename Op>
struct OuterReducer<Self, Op, GpuDevice> {
  // Unfortunately NVIDIA GPUs do not handle exotic types such as complex well,
  // so we restrict the optimized code path to the simple case of floats.
#if __CUDA_ARCH__ >= 300
  static const bool HasOptimizedImplementation = !Op::IsStateful &&
      internal::is_same<typename Self::CoeffReturnType, float>::value;
#else
  static const bool HasOptimizedImplementation = false;
#endif

  template <typename Device, typename OutputType>
  static EIGEN_DEVICE_FUNC bool run(const Self&, Op&, const Device&, OutputType*, typename Self::Index, typename Self::Index) {
    assert(false && "Should only be called to reduce floats on a gpu device");
    return true;
  }

  static bool run(const Self& self, Op& reducer, const GpuDevice& device, float* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
    typedef typename Self::Index Index;

    // It's faster to use the generic code path when there are only a few coefficients to reduce.
    if (num_coeffs_to_reduce <= 32) {
      return true;
    }

    const Index num_coeffs = num_coeffs_to_reduce * num_preserved_vals;
    const int block_size = 256;
    const int num_per_thread = 16;
    const int dyn_blocks = divup<int>(num_coeffs, block_size * num_per_thread);
    const int max_blocks = device.getNumCudaMultiProcessors() *
                           device.maxCudaThreadsPerMultiProcessor() / block_size;
    const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);

    if (num_blocks > 1) {
      // We initialize the outputs outside the reduction kernel when we can't be sure that
      // there won't be race conditions between multiple thread blocks.
      const int dyn_blocks = divup<int>(num_preserved_vals, 1024);
      const int max_blocks = device.getNumCudaMultiProcessors() *
                             device.maxCudaThreadsPerMultiProcessor() / 1024;
      const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);
      LAUNCH_CUDA_KERNEL((ReductionInitKernel<float, Index>),
                         num_blocks, 1024, 0, device, reducer.initialize(),
                         num_preserved_vals, output);
    }

    LAUNCH_CUDA_KERNEL((OuterReductionKernel<num_per_thread, Self, Op, Index>),
                       num_blocks, block_size, 0, device, reducer, self, num_coeffs_to_reduce, num_preserved_vals, output);

    return false;
  }
};
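
// Usage sketch (illustrative, not part of this file, not compiled): reducing along the
// outermost (column-major) dimension, which OuterReducer above handles when the
// reduced dimension holds more than 32 coefficients; smaller reductions fall back to
// the generic path by returning true from run(). Setup is assumed as before.
#if 0
void sum_over_outer_dim_on_gpu(float* gpu_in, float* gpu_out,
                               int preserved_size, int reduce_size) {
  Eigen::CudaStreamDevice stream;
  Eigen::GpuDevice device(&stream);
  Eigen::TensorMap<Eigen::Tensor<float, 2> > in(gpu_in, preserved_size, reduce_size);
  Eigen::TensorMap<Eigen::Tensor<float, 1> > out(gpu_out, preserved_size);
  Eigen::array<int, 1> dims;
  dims[0] = 1;  // dimension 1 is the outermost one for a rank-2 column-major tensor
  out.device(device) = in.sum(dims);
}
#endif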

#endif


} // end namespace internal
} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_CUDA_H