Tutorial 7: Resolution of an inverse problem
============================================

Introduction to the inverse problem
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This tutorial shows how to solve an inverse Poisson problem with
Physics-Informed Neural Networks. The problem is a Poisson problem
with homogeneous boundary conditions, and it reads:

.. math::

   \begin{cases}
   \Delta u = e^{-2(x-\mu_1)^2-2(y-\mu_2)^2} \text{ in } \Omega\, ,\\
   u = 0 \text{ on } \partial\Omega,\\
   u(\mu_1, \mu_2) = \text{data}
   \end{cases}

where :math:`\Omega` is the square domain
:math:`[-2, 2] \times [-2, 2]`, and
:math:`\partial \Omega = \Gamma_1 \cup \Gamma_2 \cup \Gamma_3 \cup \Gamma_4`
is its boundary, written as the union of the four edges of the square.

This kind of problem, namely the “inverse problem”, has two main goals:

- find the solution :math:`u` that satisfies the Poisson equation;
- find the unknown parameters (:math:`\mu_1`, :math:`\mu_2`) that best
  fit some given data (third equation in the system above).

In order to achieve both goals, we need to define an
``InverseProblem`` in PINA.

Let’s start with the useful imports.
.. code:: ipython3

    import matplotlib.pyplot as plt
    import torch
    from pytorch_lightning.callbacks import Callback
    from pina.problem import SpatialProblem, InverseProblem
    from pina.operators import laplacian
    from pina.model import FeedForward
    from pina.equation import Equation, FixedValue
    from pina import Condition, Trainer
    from pina.solvers import PINN
    from pina.geometry import CartesianDomain
Then, we import the pre-saved data for
(:math:`\mu_1`, :math:`\mu_2`) = (:math:`0.5`, :math:`0.5`). These two
values are the optimal parameters that we want to recover through the
neural network training. In particular, we import the ``input_points``
(the spatial coordinates) and the ``output_points`` (the corresponding
:math:`u` values evaluated at the ``input_points``).
.. code:: ipython3

    data_output = torch.load('data/pinn_solution_0.5_0.5').detach()
    data_input = torch.load('data/pts_0.5_0.5')
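
Before moving on, we can quickly inspect what we loaded. A minimal
sanity check, assuming both objects are PINA ``LabelTensor`` tensors
(i.e., they expose the ``labels`` attribute and the ``extract`` method
used below):

.. code:: ipython3

    # quick look at the imported data (assumes LabelTensor objects)
    print(data_input.labels)
    print(data_input.shape, data_output.shape)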
|
Let’s also plot the data points and the reference solution: this is the
expected output of the neural network.
.. code:: ipython3

    points = data_input.extract(['x', 'y']).detach().numpy()
    truth = data_output.detach().numpy()

    plt.scatter(points[:, 0], points[:, 1], c=truth, s=8)
    plt.axis('equal')
    plt.colorbar()
    plt.show()

.. image:: tutorial_files/output_8_0.png

Inverse problem definition in PINA
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Next, we initialize the Poisson problem, which inherits from both the
``SpatialProblem`` and the ``InverseProblem`` classes. Here we have to
define all the variables, as well as the domain where the unknown
parameters (:math:`\mu_1`, :math:`\mu_2`) live. Notice that the Laplace
equation also takes the unknown variables as input; they are treated as
parameters that the neural network optimizes during the training
process.
.. code:: ipython3

    ### Define ranges of variables
    x_min = -2
    x_max = 2
    y_min = -2
    y_max = 2

    class Poisson(SpatialProblem, InverseProblem):
        '''
        Problem definition for the Poisson equation.
        '''
        output_variables = ['u']
        spatial_domain = CartesianDomain(
            {'x': [x_min, x_max], 'y': [y_min, y_max]})
        # define the ranges for the unknown parameters
        unknown_parameter_domain = CartesianDomain(
            {'mu1': [-1, 1], 'mu2': [-1, 1]})

        def laplace_equation(input_, output_, params_):
            '''
            Laplace equation with a force term.
            '''
            force_term = torch.exp(
                - 2 * (input_.extract(['x']) - params_['mu1'])**2
                - 2 * (input_.extract(['y']) - params_['mu2'])**2)
            delta_u = laplacian(output_, input_, components=['u'], d=['x', 'y'])

            return delta_u - force_term

        # define the conditions for the loss (boundary conditions, equation, data)
        conditions = {
            'gamma1': Condition(
                location=CartesianDomain({'x': [x_min, x_max], 'y': y_max}),
                equation=FixedValue(0.0, components=['u'])),
            'gamma2': Condition(
                location=CartesianDomain({'x': [x_min, x_max], 'y': y_min}),
                equation=FixedValue(0.0, components=['u'])),
            'gamma3': Condition(
                location=CartesianDomain({'x': x_max, 'y': [y_min, y_max]}),
                equation=FixedValue(0.0, components=['u'])),
            'gamma4': Condition(
                location=CartesianDomain({'x': x_min, 'y': [y_min, y_max]}),
                equation=FixedValue(0.0, components=['u'])),
            'D': Condition(
                location=CartesianDomain({'x': [x_min, x_max], 'y': [y_min, y_max]}),
                equation=Equation(laplace_equation)),
            'data': Condition(
                input_points=data_input.extract(['x', 'y']),
                output_points=data_output)
        }

    problem = Poisson()
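
Since ``Poisson`` inherits from ``InverseProblem``, the learnable
parameters are exposed through the ``unknown_parameters`` attribute
(the same attribute the callback below reads during training). We can
check their initial values, which should lie inside
``unknown_parameter_domain``:

.. code:: ipython3

    # the unknown parameters start from values inside their declared domain
    print(problem.unknown_parameters)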
|
Then, we define the neural network model we want to use: a simple
``FeedForward`` network. Note that, with this model, the homogeneous
boundary conditions are enforced weakly, through the four ``gamma``
conditions defined above.
.. code:: ipython3

    model = FeedForward(
        layers=[20, 20, 20],
        func=torch.nn.Softplus,
        output_dimensions=len(problem.output_variables),
        input_dimensions=len(problem.input_variables)
    )
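
Alternatively, one could impose the boundary conditions exactly (hard
constraints), as done in the Wave tutorial. Below is a hedged sketch of
such a model (``HardConstraintNet`` is a hypothetical name, not part of
PINA): the network output is multiplied by a polynomial that vanishes on
the four edges, which would make the ``gamma`` conditions redundant.

.. code:: ipython3

    class HardConstraintNet(torch.nn.Module):
        '''
        Hypothetical model imposing u = 0 on the boundary exactly.
        '''
        def __init__(self):
            super().__init__()
            self.net = FeedForward(
                layers=[20, 20, 20],
                func=torch.nn.Softplus,
                output_dimensions=len(problem.output_variables),
                input_dimensions=len(problem.input_variables))

        def forward(self, pts):
            x = pts.extract(['x'])
            y = pts.extract(['y'])
            # this factor vanishes on x = -2, x = 2, y = -2 and y = 2
            mask = (x - x_min) * (x_max - x) * (y - y_min) * (y_max - y)
            return mask * self.net(pts)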
|
After that, we discretize the spatial domain: points on a grid inside
the domain for the equation residual (condition ``D``), and randomly
sampled points on the four boundaries.
.. code:: ipython3

    problem.discretise_domain(20, 'grid', locations=['D'], variables=['x', 'y'])
    problem.discretise_domain(
        1000, 'random',
        locations=['gamma1', 'gamma2', 'gamma3', 'gamma4'],
        variables=['x', 'y'])
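
To double-check where the collocation points ended up, one can plot
them. A hedged sketch, assuming the sampled points are exposed through
a ``problem.input_pts`` dictionary keyed by condition name:

.. code:: ipython3

    # hedged sketch: visualize the sampled collocation points
    # (assumes discretised points are exposed via problem.input_pts)
    for name in ['D', 'gamma1', 'gamma2', 'gamma3', 'gamma4']:
        pts = problem.input_pts[name].extract(['x', 'y']).detach().numpy()
        plt.scatter(pts[:, 0], pts[:, 1], s=2, label=name)
    plt.axis('equal')
    plt.legend()
    plt.show()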
|
Here, we define a simple callback for the trainer, which we use to save
the parameters predicted by the neural network during training. The
parameters are saved every 100 epochs as ``torch`` tensors in a
specified directory (``tmp_dir`` in our case). The goal is to read the
saved parameters after training and plot their trend across the epochs.
.. code:: ipython3

    import os

    # temporary directory for saving logs of training
    tmp_dir = "tmp_poisson_inverse"
    # make sure the directory exists before torch.save writes into it
    os.makedirs(tmp_dir, exist_ok=True)

    class SaveParameters(Callback):
        '''
        Callback to save the parameters of the model every 100 epochs.
        '''
        def on_train_epoch_end(self, trainer, pl_module):
            if trainer.current_epoch % 100 == 99:
                torch.save(
                    trainer.solver.problem.unknown_parameters,
                    '{}/parameters_epoch{}'.format(tmp_dir, trainer.current_epoch))
Then, we define the ``PINN`` object and train the solver using the
``Trainer``.
.. code:: ipython3

    ### train the problem with PINN
    max_epochs = 5000
    pinn = PINN(problem, model, optimizer_kwargs={'lr': 0.005})
    # define the trainer for the solver
    trainer = Trainer(solver=pinn, accelerator='cpu', max_epochs=max_epochs,
                      default_root_dir=tmp_dir, callbacks=[SaveParameters()])
    trainer.train()
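
After training, the learned parameters can be read directly from the
problem; if the optimization succeeded, they should be close to the
true values :math:`(0.5, 0.5)`:

.. code:: ipython3

    # final values of the learned parameters
    for var in pinn.problem.unknown_variables:
        print(var, pinn.problem.unknown_parameters[var].data)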
|
One can now see how the parameters vary during the training by reading
the saved values and plotting them. The plot shows that the parameters
stabilize at their true values before epoch :math:`1000`!
.. code:: ipython3

    epochs_saved = range(99, max_epochs, 100)
    parameters = torch.empty((int(max_epochs / 100), 2))
    for i, epoch in enumerate(epochs_saved):
        params_torch = torch.load('{}/parameters_epoch{}'.format(tmp_dir, epoch))
        for e, var in enumerate(pinn.problem.unknown_variables):
            parameters[i, e] = params_torch[var].data

    # plot the parameters across the training epochs
    plt.close()
    plt.plot(epochs_saved, parameters[:, 0], label='mu1', marker='o')
    plt.plot(epochs_saved, parameters[:, 1], label='mu2', marker='s')
    plt.ylim(-1, 1)
    plt.grid()
    plt.legend()
    plt.xlabel('Epoch')
    plt.ylabel('Parameter value')
    plt.show()

.. image:: tutorial_files/output_21_0.png

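
As a final check, we can compare the trained network against the
reference data. A minimal sketch, assuming the ``PINN`` solver can be
evaluated directly on the input points:

.. code:: ipython3

    # hedged sketch: relative L2 error of the trained solver on the data points
    pred = pinn(data_input.extract(['x', 'y'])).detach()
    error = torch.linalg.norm(pred - data_output) / torch.linalg.norm(data_output)
    print(f'Relative L2 error on the data points: {error.item():.2e}')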