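/*
 * tensor_field.c
 *
 * Helpers for dense rank-4 fields of T values (nx * ny * nz spatial points,
 * q components per point): allocation, initialisation, norm computation and
 * conversion to and from Futhark 4-D arrays.
 */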
#include "globals.h"
#include "tensor_field.h"
#include <math.h>
#include <stdlib.h>
#include <assert.h>
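
/* Dimension-compatibility predicates: same spatial grid (nx, ny, nz) and,
 * where required, the same number of components q. */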
bool tensor_field_consistant_spatial_size_tensor_field(tensor_field lhs, tensor_field rhs) {
    return (rhs.nx == lhs.nx && rhs.ny == lhs.ny && rhs.nz == lhs.nz);
}

bool tensor_field_consistant_size_tensor_field(tensor_field lhs, tensor_field rhs) {
    return (rhs.nx == lhs.nx && rhs.ny == lhs.ny && rhs.nz == lhs.nz && rhs.q == lhs.q);
}

bool tensor_field_consistant_size_scalar_field(tensor_field lhs, scalar_field rhs) {
    return (rhs.nx == lhs.nx && rhs.ny == lhs.ny && rhs.nz == lhs.nz);
}
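
/* Allocates a tensor_field: `data` is one contiguous block of nx*ny*nz*q
 * values of type T, and `grid` is a pointer table so that
 * grid[ix][iy][iz][iq] addresses the same storage as
 * data[iq + q * (iz + nz * (iy + ny * ix))]. The contents are left
 * uninitialised. */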
tensor_field tensor_field_allocate(fint nx, fint ny, fint nz, fint q) {
    tensor_field field;
    field.nx = nx;
    field.ny = ny;
    field.nz = nz;
    field.q = q;
    field.dim = 3;
    field.data = malloc(sizeof(T) * nx * ny * nz * q);
    field.grid = malloc(sizeof(T ***) * nx);
    for (fint ix = 0; ix < nx; ++ix) {
        field.grid[ix] = malloc(sizeof(T **) * ny);
        for (fint iy = 0; iy < ny; ++iy) {
            field.grid[ix][iy] = malloc(sizeof(T *) * nz);
            for (fint iz = 0; iz < nz; ++iz) {
                field.grid[ix][iy][iz] = field.data + q * (iz + nz * (iy + ny * ix));
            }
        }
    }
    return field;
}
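
/* Allocates a tensor_field and sets every grid point to the q-component
 * vector `ini`. */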
tensor_field tensor_field_allocate_and_init(fint nx, fint ny, fint nz, fint q, T ini[q]) {
    tensor_field tensor = tensor_field_allocate(nx, ny, nz, q);
    for (fint ix = 0; ix < nx; ++ix) {
        for (fint iy = 0; iy < ny; ++iy) {
            for (fint iz = 0; iz < nz; ++iz) {
                for (fint iq = 0; iq < q; ++iq) {
                    tensor.grid[ix][iy][iz][iq] = ini[iq];
                }
            }
        }
    }
    return tensor;
}
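
/* Allocates a tensor_field and sets every component of every grid point to
 * the scalar `ini`. */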
tensor_field tensor_field_allocate_and_init_from_scalar(fint nx, fint ny, fint nz, fint q, T ini) {
    tensor_field tensor = tensor_field_allocate(nx, ny, nz, q);
    for (fint ix = 0; ix < nx; ++ix) {
        for (fint iy = 0; iy < ny; ++iy) {
            for (fint iz = 0; iz < nz; ++iz) {
                for (fint iq = 0; iq < q; ++iq) {
                    tensor.grid[ix][iy][iz][iq] = ini;
                }
            }
        }
    }
    return tensor;
}
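
/* Frees the contiguous data block and the pointer tables built by
 * tensor_field_allocate. */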
void tensor_field_deallocate(tensor_field field) {
    free(field.data);
    for (fint ix = 0; ix < field.nx; ++ix) {
        for (fint iy = 0; iy < field.ny; ++iy) {
            free(field.grid[ix][iy]);
        }
        free(field.grid[ix]);
    }
    free(field.grid);
}
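
/* Copies the contents of a Futhark 4-D array into an already-allocated
 * tensor_field of matching shape. */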
void tensor_field_from_futhark_f32_4d_inplace(futhark_4d *f, struct futhark_context *ctx, tensor_field field) {
#ifndef NDEBUG
    /* In debug builds, check that the Futhark array has the same shape as the
     * destination field before copying. */
    const int64_t *shape = futhark_shape_4d(ctx, f);
    fint nx = shape[0];
    fint ny = shape[1];
    fint nz = shape[2];
    fint q = shape[3];
    assert(nx == field.nx);
    assert(ny == field.ny);
    assert(nz == field.nz);
    assert(q == field.q);
#endif
    futhark_values_4d(ctx, f, field.data);
}
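
/* Computes, at each grid point, the Euclidean norm over the q components of
 * `field` and stores it in the pre-allocated scalar_field `norm`. */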
void tensor_field_compute_norm_inplace(tensor_field field, scalar_field norm) {
    assert(norm.nx == field.nx);
    assert(norm.ny == field.ny);
    assert(norm.nz == field.nz);
    for (fint ix = 0; ix < field.nx; ++ix) {
        for (fint iy = 0; iy < field.ny; ++iy) {
            for (fint iz = 0; iz < field.nz; ++iz) {
                T n = 0.0;
                for (fint id = 0; id < field.q; ++id) {
                    n += field.grid[ix][iy][iz][id] * field.grid[ix][iy][iz][id];
                }
                norm.grid[ix][iy][iz] = sqrt(n);
            }
        }
    }
}
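
/* Allocates a tensor_field with the shape of the given Futhark 4-D array and
 * copies the array's contents into it. */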
tensor_field tensor_field_from_futhark_f32_4d(futhark_4d *f, struct futhark_context *ctx) {
    const int64_t *shape = futhark_shape_4d(ctx, f);
    fint nx = shape[0];
    fint ny = shape[1];
    fint nz = shape[2];
    fint q = shape[3];
    tensor_field field = tensor_field_allocate(nx, ny, nz, q);
    tensor_field_from_futhark_f32_4d_inplace(f, ctx, field);
    return field;
}
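
/* Creates a Futhark 4-D array from the field's contiguous data block. */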
futhark_4d *tensor_field_to_futhark_f32_4d(tensor_field field, struct futhark_context *ctx) {
    futhark_4d *f = futhark_new_4d(ctx, field.data, field.nx, field.ny, field.nz, field.q);
    assert(f != NULL);
    return f;
}
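
/* Allocates a scalar_field of matching spatial size and fills it with the
 * per-point norm of `field`. */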
scalar_field tensor_field_compute_norm(tensor_field field) {
    scalar_field norm = scalar_field_allocate(field.nx, field.ny, field.nz);
    tensor_field_compute_norm_inplace(field, norm);
    return norm;
}
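
/*
 * Illustrative usage sketch (not part of the original file): allocates a
 * field, fills it with a constant vector, computes its per-point norm and
 * releases everything. It assumes a scalar_field_deallocate() counterpart to
 * scalar_field_allocate() exists elsewhere in the project; adapt the name if
 * it differs. Compile with -DTENSOR_FIELD_USAGE_EXAMPLE to include it.
 */
#ifdef TENSOR_FIELD_USAGE_EXAMPLE
#include <stdio.h>

int tensor_field_usage_example(void) {
    /* A small 4 x 4 x 4 grid with q = 3 components per point. */
    T ini[3] = {1.0, 2.0, 2.0};
    tensor_field field = tensor_field_allocate_and_init(4, 4, 4, 3, ini);

    /* Every point holds (1, 2, 2), so every norm should be 3. */
    scalar_field norm = tensor_field_compute_norm(field);
    printf("norm at (0,0,0): %f\n", (double)norm.grid[0][0][0]);

    scalar_field_deallocate(norm); /* assumed counterpart of scalar_field_allocate */
    tensor_field_deallocate(field);
    return 0;
}
#endif /* TENSOR_FIELD_USAGE_EXAMPLE */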