/* SpatialOps — NeboRhs.h */
1 /* This file was generated by fulmar version 0.9.2. */
2 
3 /*
4  * Copyright (c) 2014-2017 The University of Utah
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22  * IN THE SOFTWARE.
23  */
24 
25 #ifndef NEBO_RHS_H
26  #define NEBO_RHS_H
27 
28  namespace SpatialOps {
29  template<typename CurrentMode, typename AtomicType>
30  struct NeboScalar;
31  template<typename AtomicType>
32  struct NeboScalar<Initial, AtomicType> {
33  public:
34  AtomicType typedef value_type;
35 
36  NeboScalar<SeqWalk, AtomicType> typedef SeqWalkType;
37 
38  #ifdef ENABLE_THREADS
39  NeboScalar<Resize, AtomicType> typedef ResizeType;
40  #endif
41  /* ENABLE_THREADS */
42 
43  #ifdef __CUDACC__
44  NeboScalar<GPUWalk, AtomicType> typedef GPUWalkType;
45  #endif
46  /* __CUDACC__ */
47 
48  NeboScalar(value_type const v)
49  : value_(v)
50  {}
51 
52  inline GhostData ghosts_with_bc(void) const {
53  return GhostData(GHOST_MAX);
54  }
55 
56  inline GhostData ghosts_without_bc(void) const {
57  return GhostData(GHOST_MAX);
58  }
59 
60  inline bool has_extents(void) const { return false; }
61 
62  inline IntVec extents(void) const { return IntVec(0, 0, 0); }
63 
64  inline IntVec has_bc(BCSide const bcSide) const {
65  return IntVec(0, 0, 0);
66  }
67 
68  inline SeqWalkType init(IntVec const & extents,
69  GhostData const & ghosts,
70  IntVec const & hasBC) const {
71  return SeqWalkType(value_);
72  }
73 
74  #ifdef ENABLE_THREADS
75  inline ResizeType resize(void) const { return ResizeType(value_); }
76  #endif
77  /* ENABLE_THREADS */
78 
79  #ifdef __CUDACC__
80  inline bool cpu_ready(void) const { return true; }
81 
82  inline bool gpu_ready(int const deviceIndex) const { return true; }
83 
84  inline GPUWalkType gpu_init(IntVec const & extents,
85  GhostData const & ghosts,
86  IntVec const & hasBC,
87  int const deviceIndex,
88  cudaStream_t const & lhsStream) const {
89  return GPUWalkType(value_);
90  }
91 
92  inline void stream_wait_event(cudaEvent_t const & event) const {}
93 
94  #ifdef NEBO_GPU_TEST
95  inline void gpu_prep(int const deviceIndex) const {}
96  #endif
97  /* NEBO_GPU_TEST */
98  #endif
99  /* __CUDACC__ */
100 
101  private:
102  value_type const value_;
103  };
#ifdef ENABLE_THREADS
/* Resize mode: thread-partitioning stage; simply forwards the constant
 * into each per-thread sequential walk. */
template<typename AtomicType>
struct NeboScalar<Resize, AtomicType> {
  public:
    typedef AtomicType value_type;

    typedef NeboScalar<SeqWalk, AtomicType> SeqWalkType;

    NeboScalar(value_type const value)
    : value_(value)
    {}

    inline SeqWalkType init(IntVec const & extents,
                            GhostData const & ghosts,
                            IntVec const & hasBC) const {
        return SeqWalkType(value_);
    }

   private:
    value_type const value_;
};
#endif /* ENABLE_THREADS */
127  template<typename AtomicType>
128  struct NeboScalar<SeqWalk, AtomicType> {
129  public:
130  AtomicType typedef value_type;
131 
132  NeboScalar(value_type const value)
133  : value_(value)
134  {}
135 
136  template<typename OptionalArgT>
137  inline value_type eval(int const x, int const y, int const z) const {
138  return value_;
139  }
140 
141  private:
142  value_type const value_;
143  };
#ifdef __CUDACC__
/* GPUWalk mode: device-side evaluation of the constant. */
template<typename AtomicType>
struct NeboScalar<GPUWalk, AtomicType> {
  public:
    typedef AtomicType value_type;

    NeboScalar(value_type const value)
    : value_(value)
    {}

    /* Position-independent, same as the CPU walk. */
    template<typename OptionalArgT>
    __device__ inline value_type eval(int const x,
                                      int const y,
                                      int const z) const {
        return value_;
    }

   private:
    value_type const value_;
};
#endif /* __CUDACC__ */
166 
167  template<typename CurrentMode, typename FieldType>
169  template<typename FieldType>
170  struct NeboConstField<Initial, FieldType> {
171  public:
172  FieldType typedef field_type;
173 
174  NeboConstField<SeqWalk, FieldType> typedef SeqWalkType;
175 
176  #ifdef ENABLE_THREADS
177  NeboConstField<Resize, FieldType> typedef ResizeType;
178  #endif
179  /* ENABLE_THREADS */
180 
181  #ifdef __CUDACC__
182  NeboConstField<GPUWalk, FieldType> typedef GPUWalkType;
183  #endif
184  /* __CUDACC__ */
185 
186  NeboConstField(FieldType const & f)
187  : field_(f)
188  {}
189 
190  inline GhostData ghosts_with_bc(void) const {
191  return field_.get_valid_ghost_data() + point_to_ghost(field_.boundary_info().has_extra());
192  }
193 
194  inline GhostData ghosts_without_bc(void) const {
195  return field_.get_valid_ghost_data();
196  }
197 
198  inline bool has_extents(void) const { return true; }
199 
200  inline IntVec extents(void) const {
201  return field_.window_with_ghost().extent() - field_.get_valid_ghost_data().get_minus()
202  - field_.get_valid_ghost_data().get_plus();
203  }
204 
205  inline IntVec has_bc(BCSide const bcSide) const {
206  return field_.boundary_info().has_bc(bcSide);
207  }
208 
209  inline SeqWalkType init(IntVec const & extents,
210  GhostData const & ghosts,
211  IntVec const & hasBC) const {
212  return SeqWalkType(field_);
213  }
214 
215  #ifdef ENABLE_THREADS
216  inline ResizeType resize(void) const { return ResizeType(field_); }
217  #endif
218  /* ENABLE_THREADS */
219 
220  #ifdef __CUDACC__
221  inline bool cpu_ready(void) const {
222  return field_.is_valid(CPU_INDEX);
223  }
224 
225  inline bool gpu_ready(int const deviceIndex) const {
226  return field_.is_valid(deviceIndex);
227  }
228 
229  inline GPUWalkType gpu_init(IntVec const & extents,
230  GhostData const & ghosts,
231  IntVec const & hasBC,
232  int const deviceIndex,
233  cudaStream_t const & lhsStream) const {
234  return GPUWalkType(lhsStream, deviceIndex, field_);
235  }
236 
237  inline void stream_wait_event(cudaEvent_t const & event) const {
238  cudaStreamWaitEvent(field_.get_stream(), event, 0);
239  }
240 
241  #ifdef NEBO_GPU_TEST
242  inline void gpu_prep(int const deviceIndex) const {
243  const_cast<FieldType *>(&field_)->add_device(deviceIndex);
244  }
245  #endif
246  /* NEBO_GPU_TEST */
247  #endif
248  /* __CUDACC__ */
249 
250  private:
251  FieldType const field_;
252  };
#ifdef ENABLE_THREADS
/* Resize mode: thread-partitioning stage; forwards the field into each
 * per-thread sequential walk. */
template<typename FieldType>
struct NeboConstField<Resize, FieldType> {
  public:
    typedef FieldType field_type;

    typedef NeboConstField<SeqWalk, FieldType> SeqWalkType;

    NeboConstField(FieldType const & f)
    : field_(f)
    {}

    inline SeqWalkType init(IntVec const & extents,
                            GhostData const & ghosts,
                            IntVec const & hasBC) const {
        return SeqWalkType(field_);
    }

   private:
    FieldType const field_;
};
#endif /* ENABLE_THREADS */
276  template<typename FieldType>
277  struct NeboConstField<SeqWalk, FieldType> {
278  public:
279  FieldType typedef field_type;
280 
281  typename field_type::value_type typedef value_type;
282 
283  NeboConstField(FieldType const & f)
284  : xGlob_(f.window_with_ghost().glob_dim(0)),
285  yGlob_(f.window_with_ghost().glob_dim(1)),
286  base_(f.field_values(CPU_INDEX) + (f.window_with_ghost().offset(0) +
287  f.get_valid_ghost_data().get_minus(0))
288  + (f.window_with_ghost().glob_dim(0) * ((f.window_with_ghost().offset(1)
289  + f.get_valid_ghost_data().get_minus(1))
290  + (f.window_with_ghost().glob_dim(1)
291  * (f.window_with_ghost().offset(2)
292  + f.get_valid_ghost_data().get_minus(2))))))
293  {}
294 
295  template<typename OptionalArgT>
296  inline value_type eval(int const x, int const y, int const z) const {
297  return base_[x + xGlob_ * (y + (yGlob_ * z))];
298  }
299 
300  private:
301  int const xGlob_;
302 
303  int const yGlob_;
304 
305  value_type const * base_;
306  };
#ifdef __CUDACC__
/* GPUWalk mode: device-side evaluation by direct indexing into the
 * field's device memory.  Construction makes the left-hand side's
 * stream wait on this field's last recorded event so reads cannot race
 * a pending write to the field. */
template<typename FieldType>
struct NeboConstField<GPUWalk, FieldType> {
  public:
    typedef FieldType field_type;

    typedef typename field_type::value_type value_type;

    /* Base pointer: device values plus the (offset + minus-ghost) shift
     * in each direction, x fastest-varying. */
    NeboConstField(cudaStream_t const & lhsStream,
                   int const deviceIndex,
                   FieldType const & f)
    : base_(f.field_values(deviceIndex)
            + (f.window_with_ghost().offset(0)
               + f.get_valid_ghost_data().get_minus(0))
            + (f.window_with_ghost().glob_dim(0)
               * ((f.window_with_ghost().offset(1)
                   + f.get_valid_ghost_data().get_minus(1))
                  + (f.window_with_ghost().glob_dim(1)
                     * (f.window_with_ghost().offset(2)
                        + f.get_valid_ghost_data().get_minus(2)))))),
      xGlob_(f.window_with_ghost().glob_dim(0)),
      yGlob_(f.window_with_ghost().glob_dim(1))
    { cudaStreamWaitEvent(lhsStream, f.get_last_event(), 0); }

    /* Row-major (x fastest) flat lookup on the device. */
    template<typename OptionalArgT>
    __device__ inline value_type eval(int const x,
                                      int const y,
                                      int const z) const {
        return base_[x + xGlob_ * (y + yGlob_ * z)];
    }

   private:
    value_type const * base_;

    int const xGlob_;

    int const yGlob_;
};
#endif /* __CUDACC__ */
345 
346  template<typename CurrentMode, typename T>
348  template<typename T>
349  struct NeboConstSingleValueField<Initial, T> {
350  public:
352  field_type;
353 
355  SingleValueFieldType;
356 
357  NeboConstSingleValueField<SeqWalk, T> typedef SeqWalkType;
358 
359  #ifdef ENABLE_THREADS
360  NeboConstSingleValueField<Resize, T> typedef ResizeType;
361  #endif
362  /* ENABLE_THREADS */
363 
364  #ifdef __CUDACC__
365  NeboConstSingleValueField<GPUWalk, T> typedef GPUWalkType;
366  #endif
367  /* __CUDACC__ */
368 
369  NeboConstSingleValueField(SingleValueFieldType const & f)
370  : field_(f)
371  {}
372 
373  inline GhostData ghosts_with_bc(void) const {
374  return GhostData(GHOST_MAX);
375  }
376 
377  inline GhostData ghosts_without_bc(void) const {
378  return GhostData(GHOST_MAX);
379  }
380 
381  inline bool has_extents(void) const { return false; }
382 
383  inline IntVec extents(void) const { return IntVec(0, 0, 0); }
384 
385  inline IntVec has_bc(BCSide const bcSide) const {
386  return IntVec(0, 0, 0);
387  }
388 
389  inline SeqWalkType init(IntVec const & extents,
390  GhostData const & ghosts,
391  IntVec const & hasBC) const {
392  return SeqWalkType(* field_.field_values(CPU_INDEX));
393  }
394 
395  #ifdef ENABLE_THREADS
396  inline ResizeType resize(void) const {
397  return ResizeType(* field_.field_values(CPU_INDEX));
398  }
399  #endif
400  /* ENABLE_THREADS */
401 
402  #ifdef __CUDACC__
403  inline bool cpu_ready(void) const {
404  return field_.is_valid(CPU_INDEX);
405  }
406 
407  inline bool gpu_ready(int const deviceIndex) const {
408  return field_.is_valid(deviceIndex);
409  }
410 
411  inline GPUWalkType gpu_init(IntVec const & extents,
412  GhostData const & ghosts,
413  IntVec const & hasBC,
414  int const deviceIndex,
415  cudaStream_t const & lhsStream) const {
416  return GPUWalkType(lhsStream, deviceIndex, field_);
417  }
418 
419  inline void stream_wait_event(cudaEvent_t const & event) const {
420  cudaStreamWaitEvent(field_.get_stream(), event, 0);
421  }
422 
423  #ifdef NEBO_GPU_TEST
424  inline void gpu_prep(int const deviceIndex) const {
425  const_cast<SingleValueFieldType *>(&field_)->add_device(deviceIndex);
426  }
427  #endif
428  /* NEBO_GPU_TEST */
429  #endif
430  /* __CUDACC__ */
431 
432  private:
433  SingleValueFieldType const field_;
434  };
#ifdef ENABLE_THREADS
/* Resize mode: thread-split stage; the field's single value was already
 * read on the host, so only the double is carried forward. */
template<typename T>
struct NeboConstSingleValueField<Resize, T> {
  public:
    /* NOTE(review): typedef truncated in the extracted source (original
     * lines 439-440) — reconstructed; confirm against upstream NeboRhs.h. */
    typedef SpatialOps::SpatialField<SpatialOps::SingleValue, T> field_type;

    typedef NeboConstSingleValueField<SeqWalk, T> SeqWalkType;

    NeboConstSingleValueField(double const & v)
    : value_(v)
    {}

    inline SeqWalkType init(IntVec const & extents,
                            GhostData const & ghosts,
                            IntVec const & hasBC) const {
        return SeqWalkType(value_);
    }

   private:
    double const value_;
};
#endif /* ENABLE_THREADS */
459  template<typename T>
460  struct NeboConstSingleValueField<SeqWalk, T> {
461  public:
463  field_type;
464 
465  typename field_type::value_type typedef value_type;
466 
467  NeboConstSingleValueField(double const & v)
468  : value_(v)
469  {}
470 
471  template<typename OptionalArgT>
472  inline value_type eval(int const x, int const y, int const z) const {
473  return value_;
474  }
475 
476  private:
477  double value_;
478  };
#ifdef __CUDACC__
/* GPUWalk mode: device-side evaluation; dereferences the field's
 * single-value device pointer at every point.  Construction makes the
 * left-hand side's stream wait on this field's last recorded event. */
template<typename T>
struct NeboConstSingleValueField<GPUWalk, T> {
  public:
    /* NOTE(review): the two typedefs below were truncated in the
     * extracted source (original lines 483-489) — reconstructed;
     * confirm against upstream NeboRhs.h. */
    typedef SpatialOps::SpatialField<SpatialOps::SingleValue, T> field_type;

    typedef typename field_type::value_type value_type;

    typedef SpatialOps::SpatialField<SpatialOps::SingleValue, T>
    SingleValueFieldType;

    NeboConstSingleValueField(cudaStream_t const & lhsStream,
                              int const deviceIndex,
                              SingleValueFieldType const & f)
    : pointer_(f.field_values(deviceIndex))
    { cudaStreamWaitEvent(lhsStream, f.get_last_event(), 0); }

    /* Position-independent read through the device pointer. */
    template<typename OptionalArgT>
    __device__ inline value_type eval(int const x,
                                      int const y,
                                      int const z) const {
        return *pointer_;
    }

   private:
    value_type const * pointer_;
};
#endif /* __CUDACC__ */
509  } /* SpatialOps */
510 
511 #endif
512 /* NEBO_RHS_H */
/* Extraction residue (Doxygen tooltips), kept as a comment:
 * GhostData — holds information about the number of ghost cells on each
 * side of the domain (definition: GhostData.h:54).
 * BCSide — allows identification of whether we are setting the BC on the
 * right or left side when using an operator. */