New thermal diffusivity #7

Open
wants to merge 15 commits into newThermalDiffusivity
2 changes: 2 additions & 0 deletions python-wrapper/build.sh
@@ -0,0 +1,2 @@
CONDA_ENV_PATH=/opt/anaconda/miniconda3/envs/test_cython/lib/
python setup.py build_ext --inplace --bitpit-path=/home/jc.giret/bitpit/ --madeleine-path=/home/jc.giret/SOURCES/madeleine_parallel/build/src/ --extensions-source=coupling.pyx --metis-path=/home/jc.giret/.local/lib/ --lapack-path=$CONDA_ENV_PATH --mpi-include-path=$CONDA_ENV_PATH --petsc-path=$CONDA_ENV_PATH
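
Note: the `--*-path` flags above are custom options consumed by setup.py before setuptools sees the command line. A minimal sketch of how such a script might parse them, assuming it strips the custom flags from sys.argv and feeds the paths to a Cython Extension; the flag names come from build.sh above, but the actual setup.py in this repository may differ:

```python
# Hypothetical sketch; the real setup.py may parse these flags differently.
import sys
import argparse

from setuptools import setup
from setuptools.extension import Extension
from Cython.Build import cythonize

parser = argparse.ArgumentParser()
for flag in ("--bitpit-path", "--madeleine-path", "--metis-path",
             "--lapack-path", "--mpi-include-path", "--petsc-path"):
    parser.add_argument(flag)
parser.add_argument("--extensions-source", default="coupling.pyx")
args, remaining = parser.parse_known_args()
# Leave the standard arguments (build_ext --inplace) for setuptools.
sys.argv = [sys.argv[0]] + remaining

ext = Extension(
    "coupling",
    sources=[args.extensions_source],
    language="c++",
    include_dirs=[p for p in (args.mpi_include_path, args.petsc_path) if p],
    library_dirs=[p for p in (args.bitpit_path, args.madeleine_path,
                              args.metis_path, args.lapack_path,
                              args.petsc_path) if p],
)
setup(name="coupling", ext_modules=cythonize([ext]))
```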
2 changes: 2 additions & 0 deletions python-wrapper/env.sh
@@ -0,0 +1,2 @@
export LD_LIBRARY_PATH=/home/jc.giret/bitpit/lib/:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=/home/jc.giret/SOURCES/madeleine_parallel/build/src/:$LD_LIBRARY_PATH
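
A quick way to confirm these paths are picked up is to try loading the shared libraries from Python after sourcing env.sh. The sonames below are assumptions (check the bitpit and madeleine build directories for the actual names):

```python
# Hypothetical sanity check; library names are guesses, adjust to your build.
import ctypes

for lib in ("libbitpit_MPI.so", "libmadeleine.so"):
    try:
        ctypes.CDLL(lib)
        print(lib, "loads OK")
    except OSError as exc:
        print(lib, "failed to load:", exc)
```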
143 changes: 90 additions & 53 deletions python-wrapper/gems_wrapper.py
@@ -7,23 +7,44 @@
# initial documentation
# :author: Francois Gallard
# OTHER AUTHORS - MACROSCOPIC CHANGES
from __future__ import print_function
from numpy import ones
import numpy as np
from petsc4py import PETSc
import logging

from gems.core.discipline import MDODiscipline
from gems.parallel.api import create_execution_context, create_user_partition
from gems.parallel.core.mpi_manager import get_world_comm
from gems_mpi_plugins.api import create_execution_context, create_user_partition
from gems_mpi_plugins.api import configure_logger
from gems_mpi_plugins.core.mpi_manager import MPIManager
import coupling

LOGGER = logging.getLogger(__file__)

class ToySphereDiscipline(MDODiscipline):

def __init__(self, name, inputs, outputs, mesh_file,
neutral_mesh_file, sphere_radius=1.0, sphere_neutral_radius=1.0,
sphere_thickness=0.001, is_inner_sphere=True,
source_intensity=1.0, source_direction=None,
n_cpus=1, kernel=1):
PETSC_DETERMINE = PETSc.DETERMINE


class ToySphereDiscipline(MDODiscipline):
def __init__(
self,
name,
inputs,
outputs,
mesh_file,
neutral_mesh_file,
sphere_radius=1.0,
sphere_neutral_radius=1.0,
sphere_thickness=0.001,
is_inner_sphere=True,
source_intensity=10.0,
source_direction=None,
thermalDiffusivityCoefficient=0.5,
emissivity=0.5,
infinityTemperature=400.0,
n_cpus=1,
kernel=1,
):

if source_direction is None:
source_direction = [1.0, 0.0, 0.0]
@@ -37,7 +58,7 @@ def __init__(self, name, inputs, outputs, mesh_file,

super(ToySphereDiscipline, self).__init__(name)

self.input_grammar.initialize_from_data_names([inputs[0], 'r'])
self.input_grammar.initialize_from_data_names([inputs[0], "r"])
self.output_grammar.initialize_from_data_names([outputs[0]])

# Creation of the execution context and register
@@ -49,55 +70,65 @@ def __init__(self, name, inputs, outputs, mesh_file,
local_size = 0
all_sizes = None
if self.execution_context.is_alive():
cell_indices_per_rank = coupling.Py_computeMeshFilePartitioning(neutral_mesh_file,
comm)
cell_indices_per_rank = coupling.Py_computeMeshFilePartitioning(
neutral_mesh_file.encode(), comm
)
local_indices = [i for i in cell_indices_per_rank if i == comm.rank]
local_size = len(local_indices)
all_sizes = comm.allgather(local_size)

# Broadcast all_sizes to all the ranks of comm_world
comm_world = get_world_comm()
comm_world = MPIManager().main_comm
root = self.execution_context.comm_world_ranks[0]
all_sizes = comm_world.bcast(all_sizes, root=root)

obj_variables = [0] * n_cpus
obj_variables[0] = 1

variables_sizes = {inputs[0]: all_sizes,
outputs[0]: all_sizes,
"r": obj_variables}
variables_sizes = {
inputs[0]: all_sizes,
outputs[0]: all_sizes,
"r": obj_variables,
}

partition = create_user_partition(self.execution_context,
variables_sizes)
partition = create_user_partition(self.execution_context, variables_sizes)

# Init the discipline only if the ranks belong to the discipline
# subcommunicator
partition.initialize_default_inputs(1.)

partition.initialize_default_inputs(1.0)

if self.execution_context.is_alive():
if comm.rank == 0:
self.default_inputs['r'] = ones(1)*sphere_radius
self.default_inputs["r"] = ones(1) * sphere_radius

if self.execution_context.is_alive():
self.mesh_coupling = coupling.Py_MeshCoupling(inputs,
outputs,
name,
comm)

self.mesh_coupling.initialize(mesh_file,
neutral_mesh_file,
sphere_radius,
sphere_neutral_radius,
sphere_thickness,
is_inner_sphere,
source_intensity,
source_direction,
cell_indices_per_rank,
self.kernel)
inputs_encode = [inp.encode() for inp in inputs]
outputs_encode = [out.encode() for out in outputs]
self.mesh_coupling = coupling.Py_MeshCoupling(
inputs_encode, outputs_encode, name.encode(), comm
)

self.mesh_coupling.initialize(
mesh_file.encode(),
neutral_mesh_file.encode(),
sphere_radius,
sphere_neutral_radius,
sphere_thickness,
is_inner_sphere,
source_intensity,
source_direction,
thermalDiffusivityCoefficient,
emissivity,
infinityTemperature,
cell_indices_per_rank,
self.kernel,
)

def _run(self):
LOGGER.info("Running discipline")
input_vector = self.local_data[self.inputs[0]]
r = self.local_data['r']
r = self.local_data["r"]

comm = self.execution_context.comm
r = comm.bcast(r, root=0)
@@ -106,13 +137,15 @@ def _run(self):
self.mesh_coupling.compute(input_vector, r, r)
output = {self.outputs[0]: input_vector}
self.store_local_data(**output)
LOGGER.info("End running discipline")

def close(self):
self.mesh_coupling.close()

def _compute_jacobian(self, inputs=None, outputs=None):
inputs = self.get_input_data_names()
outputs = self.get_output_data_names()

inputs = list(self.get_input_data_names())
outputs = list(self.get_output_data_names())

# Compute the global number of neutral cells
nofNeutralLocalCells = self.mesh_coupling.getNeutralMeshSize()
@@ -121,39 +154,43 @@ def _compute_jacobian(self, inputs=None, outputs=None):

# Initialize Jacobian matrices
self._init_jacobian(with_zeros=True)
self.jac[outputs[0]] = {}
# mat = PETSc.Mat().createAIJ(nofNeutralGlobalCells, nofNeutralGlobalCells, comm=comm)  # Non-preallocated, mallocs at each setValue
matInputs = PETSc.Mat().create(comm=comm)
matInputs.setSizes(((nofNeutralLocalCells, PETSC_DETERMINE), (nofNeutralLocalCells, PETSC_DETERMINE)))
matInputs.setSizes(
(
(nofNeutralLocalCells, PETSC_DETERMINE),
(nofNeutralLocalCells, PETSC_DETERMINE),
)
)
matInputs.setType("dense")
matInputs.setUp()

for i in range(nofNeutralLocalCells):
rowId,colIds,values = self.mesh_coupling.extractOutputInputJacobianRow(i)
rowId, colIds, values = self.mesh_coupling.extractOutputInputJacobianRow(i)
rowIds = [rowId]
#CAVEAT: petsc4py accepts only int32 by default. bitpit indices are long integers. Cast is possible but very large meshes are not feasible
matInputs.setValues(rowIds,colIds,values, addv=1)
# CAVEAT: petsc4py accepts only int32 by default. bitpit indices are long integers. Cast is possible but very large meshes are not feasible
matInputs.setValues(rowIds, colIds, values)

matInputs.assemblyBegin()
matInputs.assemblyEnd()
self.jac[outpus[0]][inputs[0]] = matInputs
viewer = PETSc.Viewer().createASCII("Jac.dat",mode=1,comm=comm)
viewer.view(self.jac[outpus[0]][inputs[0]])
self.jac[outputs[0]][inputs[0]] = matInputs

viewer = PETSc.Viewer().createASCII("CouplingJac.dat", mode=1, comm=comm)
viewer.view(self.jac[outputs[0]][inputs[0]])

matControl = PETSc.Mat().create(comm=comm)
matControl.setSizes(((nofNeutralLocalCells, PETSC_DETERMINE), (1, 1)))
matControl.setType("dense")
matControl.setUp()

for i in range(nofNeutralLocalCells):
rowId,colIds,values = self.mesh_coupling.extractOutputControlJacobianRow(i)
rowId, colIds, values = self.mesh_coupling.extractOutputControlJacobianRow(
i
)
rowIds = [rowId]
#CAVEAT: petsc4py accepts only int32 by default. bitpit indices are long integers. Cast is possible but very large meshes are not feasible
mat.setValues(rowIds,colIds,values, addv=1)
colIds = [0]
# CAVEAT: petsc4py accepts only int32 by default. bitpit indices are long integers. Cast is possible but very large meshes are not feasible
matControl.setValues(rowIds, colIds, values, addv=1)

matControl.assemblyBegin()
matControl.assemblyEnd()
self.jac[outputs[0]]['r'] = matControl

viewer = PETSc.Viewer().createASCII("RadiusJac.dat",mode=1,comm=comm)
viewer.view(self.jac[outputs[0]]['r'])
self.jac[outputs[0]]["r"] = matControl
103 changes: 74 additions & 29 deletions python-wrapper/test_chain.py
@@ -7,46 +7,91 @@
# initial documentation
# :author: Francois Gallard
# OTHER AUTHORS - MACROSCOPIC CHANGES
from __future__ import print_function, unicode_literals

import unittest
from numpy import ones
from math import pi
from numpy import ones, full, mean
from numpy.random import random

from gems.parallel.core.mpi_manager import MPIManager, get_world_comm
from gems.parallel.core.parallel_chain import MDOParallelMPIChain
from gems_mpi_plugins.core.mpi_manager import MPIManager

from gems_mpi_plugins.core.parallel_chain import MDOParallelMPIChain
from gems_wrapper import ToySphereDiscipline

COMM = get_world_comm()
COMM = MPIManager().main_comm
SIZE = MPIManager().main_comm.size


def test_basic():
MPIManager().clear(1)

mesh_file = "../examples/data/unitSphere5.stl"
neutral_mesh_file = "../examples/data/unitSphere4.stl"

toy_inner = ToySphereDiscipline(
"Sphere1",
["T_out"],
["T_in"],
mesh_file,
neutral_mesh_file,
sphere_radius=1.0,
n_cpus=1,
is_inner_sphere=True,
source_intensity=1.0,
source_direction=None,
thermalDiffusivityCoefficient=0.0,
emissivity=1e-6,
infinityTemperature=350.0,
)

toy_outer = ToySphereDiscipline(
"Sphere1",
["T_in"],
["T_out"],
mesh_file,
neutral_mesh_file,
sphere_radius=1.0,
n_cpus=1,
is_inner_sphere=False,
source_intensity=30.0,
source_direction=None,
thermalDiffusivityCoefficient=0.25,
emissivity=0.1,
infinityTemperature=400.0,
)

class TestGEMSWrapper(unittest.TestCase):
mesh_file = "../examples/data/unitSphere5.stl"
neutral_mesh_file = "../examples/data/unitSphere4.stl"

def test_basic(self):
MPIManager().clear(2)
chain = MDOParallelMPIChain([toy_outer, toy_inner])
default_inputs = chain.default_inputs

mesh_file = "../examples/data/unitSphere5.stl"
neutral_mesh_file = "../examples/data/unitSphere4.stl"
inputs = None
if chain.execution_context.is_rank_on_mpi_group():
r = full(1, 10.0)
neutral_size = default_inputs["T_in"].shape[0]
t_array = full(neutral_size, 300.0) + random(neutral_size) * 10.0
t_out = {"T_out": t_array}
inputs = {"r": r}
inputs.update(t_out)

toy1 = ToySphereDiscipline("Sphere1", ["Forces"], ["Pressure"], mesh_file,
neutral_mesh_file, sphere_radius=1.0,
n_cpus=2)
out = chain.execute(inputs)

toy2 = ToySphereDiscipline("Sphere1", ["Pressure"], ["Forces"], mesh_file,
neutral_mesh_file, sphere_radius=1.0,
n_cpus=2)
for i in range(50):
out = chain.execute(out)

chain = MDOParallelMPIChain([toy1, toy2])
default_inputs = chain.default_inputs
inputs = None
if chain.execution_context.is_rank_on_mpi_group():
r = ones(1)
size_forces = default_inputs['Forces'].shape[0]
forces = ones(size_forces)*10.
inputs = {'r': r, 'Forces': forces}
if chain.execution_context.is_rank_on_mpi_group():
target = 500.0
obj = 0.5 * mean((out["T_in"] - target) ** 2)
obj2 = 0.5 * mean((out["T_in"] - target) ** 2) * 4 * pi * r[0] ** 2
print("Obj is", obj, obj2)

out = chain.execute(inputs)
# COMM.Barrier()
# if toy_inner.execution_context.is_rank_on_mpi_group():
# toy_inner.close()
# if toy_outer.execution_context.is_rank_on_mpi_group():
# toy_outer.close()

if chain.execution_context.is_rank_on_mpi_group():
print out

if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
test_basic()
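
For reference, `test_basic` couples the two spheres with a fixed 50-pass Gauss-Seidel loop before evaluating the objective 0.5 * mean((T_in - target)^2). A hedged variant with a relative-residual stopping test (hypothetical helper, assuming T_in is the coupling variable of interest; not part of this PR):

```python
# Hypothetical convergence-checked variant of the fixed 50-iteration
# loop in test_basic; not part of this PR.
import numpy as np


def run_until_converged(chain, inputs, var="T_in", tol=1e-6, max_iter=50):
    out = chain.execute(inputs)
    for _ in range(max_iter):
        prev = out[var].copy()
        out = chain.execute(out)
        # Relative change of the coupling variable between two passes.
        if np.linalg.norm(out[var] - prev) <= tol * np.linalg.norm(prev):
            break
    return out
```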