OpenLB 1.7
olb::gpu::cuda Namespace Reference

Implementations of Nvidia CUDA specifics. More...

Namespaces

namespace  device
 Basic wrappers of common CUDA functions.
 
namespace  kernel
 CUDA kernels to execute collisions and post processors.
 

Classes

struct  AnyDeviceFieldArrayD
 Type-erased pointer to FieldArrayD device data. More...
 
class  Cell
 Device-side implementation of the Cell concept for post processors. More...
 
class  Column
 Plain column for CUDA GPU targets. More...
 
class  ConcreteDynamics
 Implementation of gpu::cuda::Dynamics for concrete DYNAMICS. More...
 
class  CyclicColumn
 Virtual memory based cyclic column for usage in ColumnVector. More...
 
class  DataOnlyCell
 Device-side implementation of the data-only Cell concept for collision steps. More...
 
class  DeviceBlockLattice
 Device-side view of a block lattice. More...
 
class  DeviceContext
 Structure for passing pointers to on-device data into CUDA kernels. More...
 
struct  DynamicDispatchCollision
 Last node in a MaskedDynamics chain in kernel::call_operators. More...
 
struct  Dynamics
 Virtual interface for device-side dynamically-dispatched dynamics access. More...
 
struct  DYNAMICS
 On-device field mirroring BlockDynamicsMap. More...
 
struct  FieldArrayPointer
 Host-side version of gpu::cuda::AnyDeviceFieldArrayD. More...
 
class  FieldPtr
 Pointer to row of a D-dimensional field. More...
 
class  ListedCollision
 List-based application of DYNAMICS::apply for use in kernel::call_list_operators. More...
 
struct  ListedPostProcessor
 List-based application of OPERATOR::apply. More...
 
class  ListedPostProcessorWithParameters
 List-based application of OPERATOR::apply with parameters. More...
 
class  MaskedCollision
 Masked application of DYNAMICS::apply for use in kernel::call_operators. More...
 
class  MaskedPostProcessor
 Masked application of OPERATOR::apply. More...
 
struct  maximum_and_plus
 Function object for simultaneously computing maximum and sum in a single thrust::reduce. More...
 
struct  pair
 Plain pair type with single-value constructor for use in gpu::cuda::maximum_and_plus. More...
 
struct  UnmaskedCoupling
 Unrestricted application of COUPLING::apply. More...
 
class  UnmaskedCouplingWithParameters
 Unrestricted application of COUPLING::apply with parameters. More...
 

Functions

template<typename FIELD , typename CONTEXT >
void gather_field (CONTEXT &lattice, const thrust::device_vector< CellID > &indices, std::uint8_t *buffer)
 Blocking gather of FIELD at given indices into buffer.
 
template<typename FIELD , typename CONTEXT >
void async_gather_field (cudaStream_t stream, CONTEXT &lattice, const thrust::device_vector< CellID > &indices, std::uint8_t *buffer)
 Non-blocking gather of FIELD at given indices into buffer.
 
template<typename FIELD , typename SOURCE , typename TARGET >
void async_copy_field (cudaStream_t stream, SOURCE &sourceLattice, TARGET &targetLattice, const thrust::device_vector< CellID > &sourceIndices, const thrust::device_vector< CellID > &targetIndices)
 Non-blocking copy of FIELD at given indices from sourceLattice to targetLattice.
 
void gather_any_fields (thrust::device_vector< AnyDeviceFieldArrayD > &fields, const thrust::device_vector< CellID > &indices, std::uint8_t *buffer)
 Blocking gather of fields at given indices into buffer.
 
void async_gather_any_fields (cudaStream_t stream, thrust::device_vector< AnyDeviceFieldArrayD > &fields, const thrust::device_vector< CellID > &indices, std::uint8_t *buffer)
 Non-blocking gather of fields at given indices into buffer.
 
void async_copy_any_fields (cudaStream_t stream, thrust::device_vector< AnyDeviceFieldArrayD > &sourceFields, thrust::device_vector< AnyDeviceFieldArrayD > &targetFields, const thrust::device_vector< CellID > &sourceIndices, const thrust::device_vector< CellID > &targetIndices)
 Non-blocking copy of fields from sourceIndices in sourceFields to targetIndices in targetFields.
 
template<typename FIELD , typename CONTEXT >
void scatter_field (CONTEXT &lattice, const thrust::device_vector< CellID > &indices, std::uint8_t *buffer)
 Blocking scatter of FIELD data in buffer to given indices.
 
template<typename FIELD , typename CONTEXT >
void async_scatter_field (cudaStream_t stream, CONTEXT &lattice, const thrust::device_vector< CellID > &indices, std::uint8_t *buffer)
 Non-blocking scatter of FIELD data in buffer to given indices.
 
void scatter_any_fields (thrust::device_vector< AnyDeviceFieldArrayD > &fields, const thrust::device_vector< CellID > &indices, std::uint8_t *buffer)
 Blocking scatter of fields data in buffer to given indices.
 
void async_scatter_any_fields (cudaStream_t stream, thrust::device_vector< AnyDeviceFieldArrayD > &fields, const thrust::device_vector< CellID > &indices, std::uint8_t *buffer)
 Non-blocking scatter of fields data in buffer to given indices.
 
template<typename CONTEXT , typename... ARGS>
void call_operators (CONTEXT &lattice, bool *subdomain, ARGS &&... args)
 Apply masked collision operators to lattice.
 
template<typename CONTEXT , typename... ARGS>
void async_call_operators (cudaStream_t stream, CONTEXT &lattice, bool *subdomain, ARGS &&... args)
 Apply masked collision operators to lattice (async)
 
template<typename CONTEXT , typename... ARGS>
void call_operators_with_statistics (CONTEXT &lattice, bool *subdomain, ARGS &&... args)
 Apply masked collision operators to lattice while tracking statistics.
 
template<typename CONTEXT , typename... ARGS>
void async_call_operators_with_statistics (cudaStream_t stream, CONTEXT &lattice, bool *subdomain, ARGS &&... args)
 Apply masked collision operators to lattice while tracking statistics (async)
 
template<typename CONTEXT , typename... ARGS>
void call_list_operators (CONTEXT &lattice, const gpu::cuda::Column< CellID > &cells, ARGS &&... args)
 Apply operators to listed cell indices.
 
template<typename CONTEXT , typename... ARGS>
void async_call_list_operators (cudaStream_t stream, CONTEXT &lattice, const gpu::cuda::Column< CellID > &cells, ARGS &&... args)
 Apply operators to listed cell indices (async version)
 
template<typename CONTEXT , typename... ARGS>
void async_call_list_operators_with_statistics (cudaStream_t stream, CONTEXT &lattice, const gpu::cuda::Column< CellID > &cells, ARGS &&... args)
 Apply ListedCollision with statistics (async version)
 
template<typename CONTEXT , typename... ARGS>
void call_coupling_operators (CONTEXT &lattices, bool *subdomain, ARGS &&... args)
 Apply coupling on subdomain.
 

Variables

template<typename T , typename DESCRIPTOR , typename... DYNAMICS>
std::function< void(ConcreteBlockLattice< T, DESCRIPTOR, Platform::GPU_CUDA > &)> getFusedCollisionO ()
 Helper for constructing fused collision operators.
 
template<typename CONTEXT , typename TYPE >
__constant__ std::size_t field_type_index
 Mapping of TYPE in CONTEXT to runtime-fixed index.
 

Detailed Description

Implementations of Nvidia CUDA specifics.
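
All launchers documented below share the same kernel configuration: a fixed block size of 32 threads and a block count obtained by ceiling division over the number of work items, so that each cell or index is handled by exactly one thread. A minimal sketch of this recurring pattern (some_kernel is a placeholder, not an actual kernel name):

const auto block_size  = 32;
const auto block_count = (n + block_size - 1) / block_size; // ceil(n / block_size)
kernel::some_kernel<<<block_count,block_size>>>(/* arguments */);
device::check(); // surface any launch failure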

Function Documentation

◆ async_call_list_operators()

template<typename CONTEXT , typename... ARGS>
void olb::gpu::cuda::async_call_list_operators ( cudaStream_t stream,
CONTEXT & lattice,
const gpu::cuda::Column< CellID > & cells,
ARGS &&... args )

Apply operators to listed cell indices (async version)

Definition at line 423 of file operator.hh.

{
  const auto block_size = 32;
  const auto block_count = (cells.size() + block_size - 1) / block_size;
  kernel::call_list_operators<<<block_count,block_size,0,stream>>>(
    lattice,
    cells.deviceData(), cells.size(),
    std::forward<decltype(args)>(args)...);
  device::check();
}

References olb::gpu::cuda::device::check(), olb::gpu::cuda::Column< T >::deviceData(), and olb::gpu::cuda::Column< T >::size().


◆ async_call_list_operators_with_statistics()

template<typename CONTEXT , typename... ARGS>
void olb::gpu::cuda::async_call_list_operators_with_statistics ( cudaStream_t stream,
CONTEXT & lattice,
const gpu::cuda::Column< CellID > & cells,
ARGS &&... args )

Apply ListedCollision with statistics (async version)

Definition at line 438 of file operator.hh.

{
  const auto block_size = 32;
  const auto block_count = (cells.size() + block_size - 1) / block_size;
  kernel::call_list_operators_with_statistics<<<block_count,block_size,0,stream>>>(
    lattice,
    cells.deviceData(), cells.size(),
    std::forward<decltype(args)>(args)...);
  device::check();
}

References olb::gpu::cuda::device::check(), olb::gpu::cuda::Column< T >::deviceData(), and olb::gpu::cuda::Column< T >::size().


◆ async_call_operators()

template<typename CONTEXT , typename... ARGS>
void olb::gpu::cuda::async_call_operators ( cudaStream_t stream,
CONTEXT & lattice,
bool * subdomain,
ARGS &&... args )

Apply masked collision operators to lattice (async)

Definition at line 373 of file operator.hh.

{
  const auto block_size = 32;
  const auto block_count = (lattice.getNcells() + block_size - 1) / block_size;
  kernel::call_operators<CONTEXT,ARGS...><<<block_count,block_size,0,stream>>>(
    lattice, subdomain, std::forward<decltype(args)>(args)...);
  device::check();
}

References olb::gpu::cuda::kernel::call_operators(), and olb::gpu::cuda::device::check().


◆ async_call_operators_with_statistics()

template<typename CONTEXT , typename... ARGS>
void olb::gpu::cuda::async_call_operators_with_statistics ( cudaStream_t stream,
CONTEXT & lattice,
bool * subdomain,
ARGS &&... args )

Apply masked collision operators to lattice while tracking statistics (async)

Definition at line 396 of file operator.hh.

{
  const auto block_size = 32;
  const auto block_count = (lattice.getNcells() + block_size - 1) / block_size;
  kernel::call_operators_with_statistics<CONTEXT,ARGS...><<<block_count,block_size,0,stream>>>(
    lattice, subdomain, std::forward<decltype(args)>(args)...);
  device::check();
}

References olb::gpu::cuda::kernel::call_operators_with_statistics(), and olb::gpu::cuda::device::check().


◆ async_copy_any_fields()

void olb::gpu::cuda::async_copy_any_fields ( cudaStream_t stream,
thrust::device_vector< AnyDeviceFieldArrayD > & sourceFields,
thrust::device_vector< AnyDeviceFieldArrayD > & targetFields,
const thrust::device_vector< CellID > & sourceIndices,
const thrust::device_vector< CellID > & targetIndices )

Non-blocking copy of fields from sourceIndices in sourceFields to targetIndices in targetFields.

Definition at line 276 of file communicator.hh.

{
  const auto block_size = 32;
  const auto block_count = (sourceIndices.size() + block_size - 1) / block_size;
  kernel::copy_any_fields<<<block_count,block_size,0,stream>>>(
    sourceFields.data().get(), targetFields.data().get(), sourceFields.size(),
    sourceIndices.data().get(), targetIndices.data().get(), sourceIndices.size());
  device::check();
}

References olb::gpu::cuda::device::check().


◆ async_copy_field()

template<typename FIELD , typename SOURCE , typename TARGET >
void olb::gpu::cuda::async_copy_field ( cudaStream_t stream,
SOURCE & sourceLattice,
TARGET & targetLattice,
const thrust::device_vector< CellID > & sourceIndices,
const thrust::device_vector< CellID > & targetIndices )

Non-blocking copy of FIELD at given indices from sourceLattice to targetLattice.

Definition at line 231 of file communicator.hh.

{
  const auto block_size = 32;
  const auto block_count = (sourceIndices.size() + block_size - 1) / block_size;
  kernel::copy_field<SOURCE,TARGET,FIELD><<<block_count,block_size,0,stream>>>(
    sourceLattice,
    targetLattice,
    sourceIndices.data().get(),
    targetIndices.data().get(),
    sourceIndices.size());
  device::check();
}

References olb::gpu::cuda::device::check().
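
A hedged usage sketch: FIELD, both lattice views, the index vectors and the stream are assumed to be set up by the surrounding code; the kernel pairs sourceIndices[i] with targetIndices[i], so both vectors must have equal length.

async_copy_field<FIELD>(stream, sourceLattice, targetLattice,
                        sourceIndices, targetIndices);
cudaStreamSynchronize(stream); // the copy is complete only after synchronization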


◆ async_gather_any_fields()

void olb::gpu::cuda::async_gather_any_fields ( cudaStream_t stream,
thrust::device_vector< AnyDeviceFieldArrayD > & fields,
const thrust::device_vector< CellID > & indices,
std::uint8_t * buffer )

Non-blocking gather of fields at given indices into buffer.

Definition at line 262 of file communicator.hh.

{
  const auto block_size = 32;
  const auto block_count = (indices.size() + block_size - 1) / block_size;
  kernel::gather_any_fields<<<block_count,block_size,0,stream>>>(
    fields.data().get(), fields.size(),
    indices.data().get(), indices.size(),
    buffer);
  device::check();
}

References olb::gpu::cuda::device::check().


◆ async_gather_field()

template<typename FIELD , typename CONTEXT >
void olb::gpu::cuda::async_gather_field ( cudaStream_t stream,
CONTEXT & lattice,
const thrust::device_vector< CellID > & indices,
std::uint8_t * buffer )

Non-blocking gather of FIELD at given indices into buffer.

Definition at line 216 of file communicator.hh.

{
  const auto block_size = 32;
  const auto block_count = (indices.size() + block_size - 1) / block_size;
  kernel::gather_field<CONTEXT,FIELD><<<block_count,block_size,0,stream>>>(
    lattice,
    indices.data().get(), indices.size(),
    reinterpret_cast<typename FIELD::template value_type<typename CONTEXT::value_t>*>(buffer));
  device::check();
}

References olb::gpu::cuda::device::check().
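
A hedged sketch of a stream-ordered gather; lattice, indices and a sufficiently sized device buffer are assumed, and buffer must remain valid and untouched until the stream synchronizes:

cudaStream_t stream;
cudaStreamCreate(&stream);
async_gather_field<FIELD>(stream, lattice, indices, buffer);
// ... enqueue further independent work on the stream ...
cudaStreamSynchronize(stream); // buffer now holds one FIELD row per index
cudaStreamDestroy(stream);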


◆ async_scatter_any_fields()

void olb::gpu::cuda::async_scatter_any_fields ( cudaStream_t stream,
thrust::device_vector< AnyDeviceFieldArrayD > & fields,
const thrust::device_vector< CellID > & indices,
std::uint8_t * buffer )

Non-blocking scatter of fields data in buffer to given indices.

Definition at line 330 of file communicator.hh.

{
  const auto block_size = 32;
  const auto block_count = (indices.size() + block_size - 1) / block_size;
  kernel::scatter_any_fields<<<block_count,block_size,0,stream>>>(
    fields.data().get(), fields.size(),
    indices.data().get(), indices.size(),
    buffer);
  device::check();
}

References olb::gpu::cuda::device::check().


◆ async_scatter_field()

template<typename FIELD , typename CONTEXT >
void olb::gpu::cuda::async_scatter_field ( cudaStream_t stream,
CONTEXT & lattice,
const thrust::device_vector< CellID > & indices,
std::uint8_t * buffer )

Non-blocking scatter of FIELD data in buffer to given indices.

Definition at line 303 of file communicator.hh.

{
  const auto block_size = 32;
  const auto block_count = (indices.size() + block_size - 1) / block_size;
  kernel::scatter_field<CONTEXT,FIELD><<<block_count,block_size,0,stream>>>(
    lattice,
    indices.data().get(), indices.size(),
    reinterpret_cast<typename FIELD::template value_type<typename CONTEXT::value_t>*>(buffer));
  device::check();
}

References olb::gpu::cuda::device::check().


◆ call_coupling_operators()

template<typename CONTEXT , typename... ARGS>
void olb::gpu::cuda::call_coupling_operators ( CONTEXT & lattices,
bool * subdomain,
ARGS &&... args )

Apply coupling on subdomain.

Definition at line 453 of file operator.hh.

{
  const auto nCells = lattices.template get<0>().getNcells();
  const auto block_size = 32;
  const auto block_count = (nCells + block_size - 1) / block_size;
  kernel::call_coupling_operators<CONTEXT,ARGS...><<<block_count,block_size>>>(
    lattices, subdomain, std::forward<decltype(args)>(args)...);
  device::check();
}

References olb::gpu::cuda::kernel::call_coupling_operators(), and olb::gpu::cuda::device::check().
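
A hedged sketch of the assumed call pattern, mirroring the collision launchers above; lattices, subdomain and COUPLING are placeholders, and the UnmaskedCoupling argument form is an assumption based on the class overview:

call_coupling_operators(
  lattices,                      // views of the coupled lattices; lattice 0 fixes the cell count
  subdomain,                     // on-device mask selecting the coupled subdomain
  UnmaskedCoupling<COUPLING>{}); // unrestricted application of COUPLING::apply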


◆ call_list_operators()

template<typename CONTEXT , typename... ARGS>
void olb::gpu::cuda::call_list_operators ( CONTEXT & lattice,
const gpu::cuda::Column< CellID > & cells,
ARGS &&... args )

Apply operators to listed cell indices.

Used to call post processors in ConcreteBlockO with OperatorScope::PerCell

Definition at line 409 of file operator.hh.

{
  const auto block_size = 32;
  const auto block_count = (cells.size() + block_size - 1) / block_size;
  kernel::call_list_operators<CONTEXT,ARGS...><<<block_count,block_size>>>(
    lattice,
    cells.deviceData(), cells.size(),
    std::forward<decltype(args)>(args)...);
  device::check();
}

References olb::gpu::cuda::kernel::call_list_operators(), olb::gpu::cuda::device::check(), olb::gpu::cuda::Column< T >::deviceData(), and olb::gpu::cuda::Column< T >::size().
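
A hedged sketch: T, DESCRIPTOR, a concrete DYNAMICS, a device context lattice and a populated cell list are assumed, and the ListedCollision constructor arguments depend on the chosen DYNAMICS:

gpu::cuda::Column<CellID> cells(nListedCells); // filled with target cell indices elsewhere
call_list_operators(
  lattice,
  cells,
  ListedCollision<T,DESCRIPTOR,DYNAMICS>{/* parameters required by DYNAMICS */});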


◆ call_operators()

template<typename CONTEXT , typename... ARGS>
void olb::gpu::cuda::call_operators ( CONTEXT & lattice,
bool * subdomain,
ARGS &&... args )

Apply masked collision operators to lattice.

ARGS are instances of MaskedCollision or DynamicDispatchCollision

Definition at line 363 of file operator.hh.

{
  const auto block_size = 32;
  const auto block_count = (lattice.getNcells() + block_size - 1) / block_size;
  kernel::call_operators<CONTEXT,ARGS...><<<block_count,block_size>>>(
    lattice, subdomain, std::forward<decltype(args)>(args)...);
  device::check();
}

References olb::gpu::cuda::kernel::call_operators(), and olb::gpu::cuda::device::check().
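
The fused collision helper getFusedCollisionO (see below) demonstrates the intended argument construction; a reduced sketch for a single DYNAMICS, assuming T, DESCRIPTOR, DYNAMICS and a ConcreteBlockLattice block are given:

bool* subdomain = block.template getData<CollisionSubdomainMask>().deviceData();
DeviceContext<T,DESCRIPTOR> lattice(block);
call_operators(
  lattice,
  subdomain,
  MaskedCollision<T,DESCRIPTOR,DYNAMICS>{ // applied only where the DYNAMICS mask is set
    block.template getData<OperatorParameters<DYNAMICS>>().parameters,
    block.template getData<DynamicsMask<DYNAMICS>>().deviceData()
  },
  DynamicDispatchCollision{});             // dynamic dispatch for all remaining cells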


◆ call_operators_with_statistics()

template<typename CONTEXT , typename... ARGS>
void olb::gpu::cuda::call_operators_with_statistics ( CONTEXT & lattice,
bool * subdomain,
ARGS &&... args )

Apply masked collision operators to lattice while tracking statistics.

ARGS are instances of MaskedCollision or DynamicDispatchCollision

Definition at line 386 of file operator.hh.

{
  const auto block_size = 32;
  const auto block_count = (lattice.getNcells() + block_size - 1) / block_size;
  kernel::call_operators_with_statistics<CONTEXT,ARGS...><<<block_count,block_size>>>(
    lattice, subdomain, std::forward<decltype(args)>(args)...);
  device::check();
}

References olb::gpu::cuda::kernel::call_operators_with_statistics(), and olb::gpu::cuda::device::check().


◆ gather_any_fields()

void olb::gpu::cuda::gather_any_fields ( thrust::device_vector< AnyDeviceFieldArrayD > & fields,
const thrust::device_vector< CellID > & indices,
std::uint8_t * buffer )

Blocking gather of fields at given indices into buffer.

Definition at line 249 of file communicator.hh.

{
  const auto block_size = 32;
  const auto block_count = (indices.size() + block_size - 1) / block_size;
  kernel::gather_any_fields<<<block_count,block_size>>>(
    fields.data().get(), fields.size(),
    indices.data().get(), indices.size(),
    buffer);
  device::check();
}

References olb::gpu::cuda::device::check().


◆ gather_field()

template<typename FIELD , typename CONTEXT >
void olb::gpu::cuda::gather_field ( CONTEXT & lattice,
const thrust::device_vector< CellID > & indices,
std::uint8_t * buffer )

Blocking gather of FIELD at given indices into buffer.

Definition at line 204 of file communicator.hh.

{
  const auto block_size = 32;
  const auto block_count = (indices.size() + block_size - 1) / block_size;
  kernel::gather_field<CONTEXT,FIELD><<<block_count,block_size>>>(
    lattice,
    indices.data().get(), indices.size(),
    reinterpret_cast<typename FIELD::template value_type<typename CONTEXT::value_t>*>(buffer));
  device::check();
}

References olb::gpu::cuda::device::check().
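
A hedged sketch of the blocking variant, including the assumed buffer sizing of one FIELD row per gathered cell; the scalar value type V and the DESCRIPTOR are taken to match the lattice context:

const std::size_t rowSize = DESCRIPTOR::template size<FIELD>(); // values stored per cell
thrust::device_vector<std::uint8_t> buffer(indices.size() * rowSize * sizeof(V));
gather_field<FIELD>(lattice, indices, buffer.data().get());     // blocking: buffer is ready on return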


◆ scatter_any_fields()

void olb::gpu::cuda::scatter_any_fields ( thrust::device_vector< AnyDeviceFieldArrayD > & fields,
const thrust::device_vector< CellID > & indices,
std::uint8_t * buffer )

Blocking scatter of fields data in buffer to given indices.

Definition at line 317 of file communicator.hh.

{
  const auto block_size = 32;
  const auto block_count = (indices.size() + block_size - 1) / block_size;
  kernel::scatter_any_fields<<<block_count,block_size>>>(
    fields.data().get(), fields.size(),
    indices.data().get(), indices.size(),
    buffer);
  device::check();
}

References olb::gpu::cuda::device::check().


◆ scatter_field()

template<typename FIELD , typename CONTEXT >
void olb::gpu::cuda::scatter_field ( CONTEXT & lattice,
const thrust::device_vector< CellID > & indices,
std::uint8_t * buffer )

Blocking scatter of FIELD data in buffer to given indices.

Definition at line 291 of file communicator.hh.

{
  const auto block_size = 32;
  const auto block_count = (indices.size() + block_size - 1) / block_size;
  kernel::scatter_field<CONTEXT,FIELD><<<block_count,block_size>>>(
    lattice,
    indices.data().get(), indices.size(),
    reinterpret_cast<typename FIELD::template value_type<typename CONTEXT::value_t>*>(buffer));
  device::check();
}

References olb::gpu::cuda::device::check().
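
As the inverse of gather_field, a buffer filled by a previous gather can be written back with matching FIELD and indices; a hedged sketch reusing the names from the gather_field example above:

scatter_field<FIELD>(lattice, indices, buffer.data().get()); // writes one FIELD row per listed cell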


Variable Documentation

◆ field_type_index

template<typename CONTEXT , typename TYPE >
__constant__ std::size_t olb::gpu::cuda::field_type_index

Mapping of TYPE in CONTEXT to runtime-fixed index.

Used for dynamic access to field arrays

Definition at line 40 of file registry.hh.

◆ getFusedCollisionO

template<typename T , typename DESCRIPTOR , typename... DYNAMICS>
std::function< void(ConcreteBlockLattice< T, DESCRIPTOR, Platform::GPU_CUDA > &)> olb::gpu::cuda::getFusedCollisionO ( )

Helper for constructing fused collision operators.

This is a convenient way of potentially improving performance by injecting application knowledge. E.g. if the lattice contains primarily BGK and BounceBack dynamics, this can be declared using:

superLattice.forBlocksOnPlatform<Platform::GPU_CUDA>([](auto& block) {
  block.setCollisionO(
    getFusedCollisionO<T,DESCRIPTOR,
                       /* the dominant DYNAMICS types, e.g. the BGK and BounceBack dynamics */>());
});

Each DYNAMICS listed this way is applied via a MaskedCollision in a single fused kernel, while DynamicDispatchCollision handles all remaining cells through dynamic dispatch (cf. the definition below).

Definition at line 233 of file operator.hh.

{
  return [](ConcreteBlockLattice<T,DESCRIPTOR,Platform::GPU_CUDA>& block) {
    bool* subdomain = block.template getData<CollisionSubdomainMask>().deviceData();
    DeviceContext<T,DESCRIPTOR> lattice(block);
    if (block.statisticsEnabled()) {
      call_operators_with_statistics(
        lattice,
        subdomain,
        MaskedCollision<T,DESCRIPTOR,DYNAMICS>{
          block.template getData<OperatorParameters<DYNAMICS>>().parameters,
          block.template getData<DynamicsMask<DYNAMICS>>().deviceData()
        }...,
        DynamicDispatchCollision{});
    } else {
      call_operators(
        lattice,
        subdomain,
        MaskedCollision<T,DESCRIPTOR,DYNAMICS>{
          block.template getData<OperatorParameters<DYNAMICS>>().parameters,
          block.template getData<DynamicsMask<DYNAMICS>>().deviceData()
        }...,
        DynamicDispatchCollision{});
    }
  };
}