/* SpatialOps — NeboRhs.h */
1 /* This file was generated by fulmar version 0.9.2. */
2 
3 /*
4  * Copyright (c) 2014 The University of Utah
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22  * IN THE SOFTWARE.
23  */
24 
25 #ifndef NEBO_RHS_H
26  #define NEBO_RHS_H
27 
28  namespace SpatialOps {
29  template<typename CurrentMode, typename AtomicType>
30  struct NeboScalar;
31  template<typename AtomicType>
32  struct NeboScalar<Initial, AtomicType> {
33  public:
34  AtomicType typedef value_type;
35 
36  NeboScalar<SeqWalk, AtomicType> typedef SeqWalkType;
37 
38  #ifdef ENABLE_THREADS
39  NeboScalar<Resize, AtomicType> typedef ResizeType;
40  #endif
41  /* ENABLE_THREADS */
42 
43  #ifdef __CUDACC__
44  NeboScalar<GPUWalk, AtomicType> typedef GPUWalkType;
45  #endif
46  /* __CUDACC__ */
47 
48  NeboScalar(value_type const v)
49  : value_(v)
50  {}
51 
52  inline GhostData ghosts_with_bc(void) const {
53  return GhostData(GHOST_MAX);
54  }
55 
56  inline GhostData ghosts_without_bc(void) const {
57  return GhostData(GHOST_MAX);
58  }
59 
60  inline bool has_extents(void) const { return false; }
61 
62  inline IntVec extents(void) const { return IntVec(0, 0, 0); }
63 
64  inline IntVec has_bc(void) const { return IntVec(0, 0, 0); }
65 
66  inline SeqWalkType init(IntVec const & extents,
67  GhostData const & ghosts,
68  IntVec const & hasBC) const {
69  return SeqWalkType(value_);
70  }
71 
72  #ifdef ENABLE_THREADS
73  inline ResizeType resize(void) const { return ResizeType(value_); }
74  #endif
75  /* ENABLE_THREADS */
76 
77  #ifdef __CUDACC__
78  inline bool cpu_ready(void) const { return true; }
79 
80  inline bool gpu_ready(int const deviceIndex) const { return true; }
81 
82  inline GPUWalkType gpu_init(IntVec const & extents,
83  GhostData const & ghosts,
84  IntVec const & hasBC,
85  int const deviceIndex,
86  cudaStream_t const & lhsStream) const {
87  return GPUWalkType(value_);
88  }
89 
90  inline void stream_wait_event(cudaEvent_t const & event) const {}
91 
92  #ifdef NEBO_GPU_TEST
93  inline void gpu_prep(int const deviceIndex) const {}
94  #endif
95  /* NEBO_GPU_TEST */
96  #endif
97  /* __CUDACC__ */
98 
99  private:
100  value_type const value_;
101  };
#ifdef ENABLE_THREADS
/* Resize mode: the thread-partitioning stage for a scalar.  Simply forwards
 * the stored value when each thread builds its sequential evaluator. */
template<typename AtomicType>
struct NeboScalar<Resize, AtomicType> {
  public:
   typedef AtomicType value_type;

   typedef NeboScalar<SeqWalk, AtomicType> SeqWalkType;

   NeboScalar(value_type const value) : value_(value) {}

   /* Hand the value to a per-thread sequential evaluator; the iteration
    * shape arguments are irrelevant for a constant. */
   inline SeqWalkType init(IntVec const & extents,
                           GhostData const & ghosts,
                           IntVec const & hasBC) const {
      return SeqWalkType(value_);
   }

  private:
   value_type const value_;
};
#endif /* ENABLE_THREADS */
125  template<typename AtomicType>
126  struct NeboScalar<SeqWalk, AtomicType> {
127  public:
128  AtomicType typedef value_type;
129 
130  NeboScalar(value_type const value)
131  : value_(value)
132  {}
133 
134  inline value_type eval(int const x, int const y, int const z) const {
135  return value_;
136  }
137 
138  private:
139  value_type const value_;
140  };
#ifdef __CUDACC__
/* GPUWalk mode: the device-side evaluator for a scalar.  The value is
 * captured by copy at kernel-argument time; every thread reads it. */
template<typename AtomicType>
struct NeboScalar<GPUWalk, AtomicType> {
  public:
   typedef AtomicType value_type;

   NeboScalar(value_type const value) : value_(value) {}

   /* Position-independent: the coordinates are ignored. */
   __device__ inline value_type eval(int const x,
                                     int const y,
                                     int const z) const {
      return value_;
   }

  private:
   value_type const value_;
};
#endif /* __CUDACC__ */
162 
163  template<typename CurrentMode, typename FieldType>
165  template<typename FieldType>
166  struct NeboConstField<Initial, FieldType> {
167  public:
168  FieldType typedef field_type;
169 
170  NeboConstField<SeqWalk, FieldType> typedef SeqWalkType;
171 
172  #ifdef ENABLE_THREADS
173  NeboConstField<Resize, FieldType> typedef ResizeType;
174  #endif
175  /* ENABLE_THREADS */
176 
177  #ifdef __CUDACC__
178  NeboConstField<GPUWalk, FieldType> typedef GPUWalkType;
179  #endif
180  /* __CUDACC__ */
181 
182  NeboConstField(FieldType const & f)
183  : field_(f)
184  {}
185 
186  inline GhostData ghosts_with_bc(void) const {
187  return field_.get_valid_ghost_data() + point_to_ghost(field_.boundary_info().has_extra());
188  }
189 
190  inline GhostData ghosts_without_bc(void) const {
191  return field_.get_valid_ghost_data();
192  }
193 
194  inline bool has_extents(void) const { return true; }
195 
196  inline IntVec extents(void) const {
197  return field_.window_with_ghost().extent() - field_.get_valid_ghost_data().get_minus()
198  - field_.get_valid_ghost_data().get_plus();
199  }
200 
201  inline IntVec has_bc(void) const {
202  return field_.boundary_info().has_bc();
203  }
204 
205  inline SeqWalkType init(IntVec const & extents,
206  GhostData const & ghosts,
207  IntVec const & hasBC) const {
208  return SeqWalkType(field_);
209  }
210 
211  #ifdef ENABLE_THREADS
212  inline ResizeType resize(void) const { return ResizeType(field_); }
213  #endif
214  /* ENABLE_THREADS */
215 
216  #ifdef __CUDACC__
217  inline bool cpu_ready(void) const {
218  return field_.is_valid(CPU_INDEX);
219  }
220 
221  inline bool gpu_ready(int const deviceIndex) const {
222  return field_.is_valid(deviceIndex);
223  }
224 
225  inline GPUWalkType gpu_init(IntVec const & extents,
226  GhostData const & ghosts,
227  IntVec const & hasBC,
228  int const deviceIndex,
229  cudaStream_t const & lhsStream) const {
230  return GPUWalkType(lhsStream, deviceIndex, field_);
231  }
232 
233  inline void stream_wait_event(cudaEvent_t const & event) const {
234  cudaStreamWaitEvent(field_.get_stream(), event, 0);
235  }
236 
237  #ifdef NEBO_GPU_TEST
238  inline void gpu_prep(int const deviceIndex) const {
239  const_cast<FieldType *>(&field_)->add_device(deviceIndex);
240  }
241  #endif
242  /* NEBO_GPU_TEST */
243  #endif
244  /* __CUDACC__ */
245 
246  private:
247  FieldType const field_;
248  };
#ifdef ENABLE_THREADS
/* Resize mode: thread-partitioning stage for a read-only field; every thread
 * builds its sequential evaluator over the same underlying field. */
template<typename FieldType>
struct NeboConstField<Resize, FieldType> {
  public:
   typedef FieldType field_type;

   typedef NeboConstField<SeqWalk, FieldType> SeqWalkType;

   NeboConstField(FieldType const & f) : field_(f) {}

   /* Produce the per-thread sequential evaluator. */
   inline SeqWalkType init(IntVec const & extents,
                           GhostData const & ghosts,
                           IntVec const & hasBC) const {
      return SeqWalkType(field_);
   }

  private:
   FieldType const field_;
};
#endif /* ENABLE_THREADS */
/* SeqWalk mode: CPU evaluator for a read-only field.  The constructor
 * precomputes a base pointer shifted to the field's interior origin so that
 * eval() is a single flat row-major index. */
template<typename FieldType>
struct NeboConstField<SeqWalk, FieldType> {
  public:
   FieldType typedef field_type;

   typename field_type::value_type typedef value_type;

   /* base_ = CPU data pointer advanced, per dimension, by the window offset
    * plus the valid ghost count, flattened using the global (with-ghost)
    * x/y dimensions.  xGlob_/yGlob_ cache those dimensions for eval(). */
   NeboConstField(FieldType const & f)
   : xGlob_(f.window_with_ghost().glob_dim(0)),
     yGlob_(f.window_with_ghost().glob_dim(1)),
     base_(f.field_values(CPU_INDEX) + (f.window_with_ghost().offset(0) +
           f.get_valid_ghost_data().get_minus(0))
           + (f.window_with_ghost().glob_dim(0) * ((f.window_with_ghost().offset(1)
                                                    + f.get_valid_ghost_data().get_minus(1))
                                                   + (f.window_with_ghost().glob_dim(1)
                                                      * (f.window_with_ghost().offset(2)
                                                         + f.get_valid_ghost_data().get_minus(2))))))
   {}

   /* Row-major lookup (x fastest) relative to the precomputed base. */
   inline value_type eval(int const x, int const y, int const z) const {
      return base_[x + xGlob_ * (y + (yGlob_ * z))];
   }

  private:
   int const xGlob_;   /* global (with-ghost) x dimension */

   int const yGlob_;   /* global (with-ghost) y dimension */

   value_type const * base_;   /* pointer to the interior origin */
};
#ifdef __CUDACC__
/* GPUWalk mode: device-side evaluator for a read-only field.  Mirrors the
 * SeqWalk pointer arithmetic but uses the device copy of the data, and
 * orders the left-hand side's stream after this field's last event. */
template<typename FieldType>
struct NeboConstField<GPUWalk, FieldType> {
  public:
   FieldType typedef field_type;

   typename field_type::value_type typedef value_type;

   /* base_ = device data pointer advanced, per dimension, by the window
    * offset plus the valid ghost count (same flattening as SeqWalk).
    * Note: members are initialized in declaration order (base_, then
    * xGlob_, yGlob_), matching the initializer list below. */
   NeboConstField(cudaStream_t const & lhsStream,
                  int const deviceIndex,
                  FieldType const & f)
   : base_(f.field_values(deviceIndex) + (f.window_with_ghost().offset(0)
                                          + f.get_valid_ghost_data().get_minus(0))
           + (f.window_with_ghost().glob_dim(0) * ((f.window_with_ghost().offset(1)
                                                    + f.get_valid_ghost_data().get_minus(1))
                                                   + (f.window_with_ghost().glob_dim(1)
                                                      * (f.window_with_ghost().offset(2)
                                                         + f.get_valid_ghost_data().get_minus(2)))))),
     xGlob_(f.window_with_ghost().glob_dim(0)),
     yGlob_(f.window_with_ghost().glob_dim(1))
   /* Do not start reading this field until its producer has finished. */
   { cudaStreamWaitEvent(lhsStream, f.get_last_event(), 0); }

   /* Row-major lookup (x fastest) relative to the precomputed base. */
   __device__ inline value_type eval(int const x,
                                     int const y,
                                     int const z) const {
      return base_[x + xGlob_ * (y + (yGlob_ * z))];
   }

  private:
   value_type const * base_;   /* device pointer to the interior origin */

   int const xGlob_;   /* global (with-ghost) x dimension */

   int const yGlob_;   /* global (with-ghost) y dimension */
}
#endif
/* __CUDACC__ */;
339 
340  template<typename CurrentMode, typename T>
342  template<typename T>
343  struct NeboConstSingleValueField<Initial, T> {
344  public:
346  field_type;
347 
349  SingleValueFieldType;
350 
351  NeboConstSingleValueField<SeqWalk, T> typedef SeqWalkType;
352 
353  #ifdef ENABLE_THREADS
354  NeboConstSingleValueField<Resize, T> typedef ResizeType;
355  #endif
356  /* ENABLE_THREADS */
357 
358  #ifdef __CUDACC__
359  NeboConstSingleValueField<GPUWalk, T> typedef GPUWalkType;
360  #endif
361  /* __CUDACC__ */
362 
363  NeboConstSingleValueField(SingleValueFieldType const & f)
364  : field_(f)
365  {}
366 
367  inline GhostData ghosts_with_bc(void) const {
368  return GhostData(GHOST_MAX);
369  }
370 
371  inline GhostData ghosts_without_bc(void) const {
372  return GhostData(GHOST_MAX);
373  }
374 
375  inline bool has_extents(void) const { return false; }
376 
377  inline IntVec extents(void) const { return IntVec(0, 0, 0); }
378 
379  inline IntVec has_bc(void) const { return IntVec(0, 0, 0); }
380 
381  inline SeqWalkType init(IntVec const & extents,
382  GhostData const & ghosts,
383  IntVec const & hasBC) const {
384  return SeqWalkType(* field_.field_values(CPU_INDEX));
385  }
386 
387  #ifdef ENABLE_THREADS
388  inline ResizeType resize(void) const {
389  return ResizeType(* field_.field_values(CPU_INDEX));
390  }
391  #endif
392  /* ENABLE_THREADS */
393 
394  #ifdef __CUDACC__
395  inline bool cpu_ready(void) const {
396  return field_.is_valid(CPU_INDEX);
397  }
398 
399  inline bool gpu_ready(int const deviceIndex) const {
400  return field_.is_valid(deviceIndex);
401  }
402 
403  inline GPUWalkType gpu_init(IntVec const & extents,
404  GhostData const & ghosts,
405  IntVec const & hasBC,
406  int const deviceIndex,
407  cudaStream_t const & lhsStream) const {
408  return GPUWalkType(lhsStream, deviceIndex, field_);
409  }
410 
411  inline void stream_wait_event(cudaEvent_t const & event) const {
412  cudaStreamWaitEvent(field_.get_stream(), event, 0);
413  }
414 
415  #ifdef NEBO_GPU_TEST
416  inline void gpu_prep(int const deviceIndex) const {
417  const_cast<SingleValueFieldType *>(&field_)->add_device(deviceIndex);
418  }
419  #endif
420  /* NEBO_GPU_TEST */
421  #endif
422  /* __CUDACC__ */
423 
424  private:
425  SingleValueFieldType const field_;
426  };
#ifdef ENABLE_THREADS
/* Resize mode: thread-partitioning stage for a single-value field; the
 * value was already read out on the CPU, so only a double is carried.
 * NOTE(review): the field_type typedef was garbled in the extracted source;
 * reconstructed as the SingleValue SpatialField type — confirm against the
 * generator output. */
template<typename T>
struct NeboConstSingleValueField<Resize, T> {
  public:
   SpatialOps::SpatialField<SpatialOps::SingleValue, T> typedef field_type;

   NeboConstSingleValueField<SeqWalk, T> typedef SeqWalkType;

   NeboConstSingleValueField(double const & v) : value_(v) {}

   /* Hand the captured value to a per-thread sequential evaluator. */
   inline SeqWalkType init(IntVec const & extents,
                           GhostData const & ghosts,
                           IntVec const & hasBC) const {
      return SeqWalkType(value_);
   }

  private:
   double const value_;
};
#endif /* ENABLE_THREADS */
451  template<typename T>
452  struct NeboConstSingleValueField<SeqWalk, T> {
453  public:
455  field_type;
456 
457  typename field_type::value_type typedef value_type;
458 
459  NeboConstSingleValueField(double const & v)
460  : value_(v)
461  {}
462 
463  inline value_type eval(int const x, int const y, int const z) const {
464  return value_;
465  }
466 
467  private:
468  double value_;
469  };
#ifdef __CUDACC__
/* GPUWalk mode: device-side evaluator for a single-value field.  Holds a
 * device pointer to the single datum; every thread dereferences it.
 * NOTE(review): the two typedefs were garbled in the extracted source;
 * reconstructed as the SingleValue SpatialField type — confirm against the
 * generator output. */
template<typename T>
struct NeboConstSingleValueField<GPUWalk, T> {
  public:
   SpatialOps::SpatialField<SpatialOps::SingleValue, T> typedef field_type;

   typename field_type::value_type typedef value_type;

   SpatialOps::SpatialField<SpatialOps::SingleValue, T> typedef SingleValueFieldType;

   /* Capture the device-side pointer and order this expression after the
    * field's last event on the destination stream. */
   NeboConstSingleValueField(cudaStream_t const & lhsStream,
                             int const deviceIndex,
                             SingleValueFieldType const & f)
   : pointer_(f.field_values(deviceIndex))
   { cudaStreamWaitEvent(lhsStream, f.get_last_event(), 0); }

   /* Every grid point reads the same single device value. */
   __device__ inline value_type eval(int const x,
                                     int const y,
                                     int const z) const {
      return *pointer_;
   }

  private:
   value_type const * pointer_;   /* device pointer to the single datum */
};
#endif /* __CUDACC__ */
499  } /* SpatialOps */
500 
501 #endif
502 /* NEBO_RHS_H */
/* GhostData: holds information about the number of ghost cells on each side
   of the domain.  Definition: GhostData.h:54 */