%
% Copyright © 2014 Peeter Joot. All Rights Reserved.
% Licensed as described in the file LICENSE under the root directory of this GIT repository.
%
\section{Summary of factorization costs.}
\index{LU decomposition}
\paragraph{LU (dense)}
\begin{itemize}
\item cost: \( O(n^3) \)
\item cost depends only on size
\end{itemize}
\paragraph{LU (sparse)}
\begin{itemize}
\item cost: diagonal and tridiagonal systems cost \( O(n) \), but the cost can be as high as \( O(n^3) \), depending on the sparsity structure and on how that sparsity is handled.
\item cost depends on size and sparsity
\end{itemize}
%
Computation can be affordable for systems of up to a few million unknowns.
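%
As a rough, purely illustrative comparison of these costs, the following Python sketch (not from the lecture; the problem size and the use of \texttt{scipy} are assumptions) factors the same tridiagonal system both densely and sparsely:
\begin{verbatim}
import numpy as np
import scipy.linalg as la
import scipy.sparse as sp
import scipy.sparse.linalg as spla

n = 2000
# Tridiagonal test system: O(n) to factor when sparsity is exploited.
A_sparse = sp.diags([-1.0, 2.0, -1.0], [-1, 0, 1],
                    shape=(n, n), format='csc')
A_dense = A_sparse.toarray()
b = np.ones(n)

# Dense LU: O(n^3), blind to the zero structure.
lu, piv = la.lu_factor(A_dense)
x_dense = la.lu_solve((lu, piv), b)

# Sparse LU: near O(n) for this banded structure.
x_sparse = spla.splu(A_sparse).solve(b)

assert np.allclose(x_dense, x_sparse)
\end{verbatim}
Timing the two factorizations as \( n \) grows makes the \( O(n^3) \) versus \( O(n) \) gap evident.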
\paragraph{Iterative methods}
Can be cheap if done right. Convergence requires careful preconditioning.
\section{Iterative methods.}
Given an initial guess \( \Bx_0 \), iterative methods are generally of the form
%
\makealgorithm{Iterative methods.}{alg:multiphysicsL7B:1}{
\REPEAT
\STATE \(\Br = \Bb - M \Bx_i\)
\STATE update \( \Bx_{i+1} \) using \( \Bx_i \) and \( \Br \)
\UNTIL{\(\Norm{\Br} < \epsilon \).}
}
%
The difference \( \Br \) is called the residual. As long as the residual is larger than desired, the estimate \( \Bx_i \) is improved and the iteration continues.
%
Each matrix-vector product \( M \Bx_i \) costs \( O(n^2) \) when \( M \) is dense. Suppose, for example, that the solution is found in ten iterations. For a dense matrix the total cost is then \( O(10\, n^2) \), cheaper than an \( O(n^3) \) factorization for large \( n \). For a sparse matrix, however, a sparse direct factorization can be cheap enough that iteration is no better, and possibly worse, than direct computation.
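%
The update step in the loop above is deliberately generic. A minimal sketch in Python, assuming the simplest possible update (a Richardson step \( \Bx_{i+1} = \Bx_i + \alpha \Br \) with a fixed, hand-picked \( \alpha \); the function name and defaults are illustrative only) is
\begin{verbatim}
import numpy as np

def iterate(M, b, alpha, eps=1e-10, max_iter=1000):
    """Residual-driven iteration for M x = b.  The update
    x += alpha * r is the simplest (Richardson) choice; for
    SPD M it converges when 0 < alpha < 2 / lambda_max(M)."""
    x = np.zeros_like(b)
    for _ in range(max_iter):
        r = b - M @ x        # residual: O(n^2) dense, O(nnz) sparse
        if np.linalg.norm(r) < eps:
            break
        x = x + alpha * r    # improve the estimate
    return x
\end{verbatim}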
\section{Gradient method.}
This is a method for iterative solution of the equation \( M \Bx = \Bb \).
%
This requires a symmetric positive definite matrix \( M = M^\T \), with \( M > 0 \), for which an \textAndIndex{energy function} is defined
%
\begin{equation}\label{eqn:multiphysicsL7:60}
\Psi(\By) \equiv \inv{2} \By^\T M \By - \By^\T \Bb.
\end{equation}
For a two variable system this is illustrated in \cref{fig:lecture7:lecture7Fig1}.
\imageFigure{../figures/ece1254-multiphysics/lecture7Fig1}{Positive definite energy function.}{fig:lecture7:lecture7Fig1}{0.3}
%
\index{energy function}
\maketheorem{Energy function minimum.}{thm:multiphysicsL7:460}{
The energy function \cref{eqn:multiphysicsL7:60} has a minimum at
\begin{equation}\label{eqn:multiphysicsL7:80}
\By = M^{-1} \Bb = \Bx.
\end{equation}
%
To prove this, consider the coordinate representation
\begin{equation}\label{eqn:multiphysicsL7:480}
\Psi = \inv{2} y_a M_{ab} y_b - y_b b_b,
\end{equation}
for which the derivatives are
\begin{equation}\label{eqn:multiphysicsL7:500}
\begin{aligned}
\PD{y_i}{\Psi} &=
\inv{2} M_{ib} y_b
+
\inv{2} y_a M_{ai}
- b_i
\\ &=
\lr{ M \By - \Bb }_i.
\end{aligned}
\end{equation}
%
The last operation above was possible because \( M = M^\T \). Setting all of these derivatives to zero, and rewriting as a matrix relation, gives
\begin{equation}\label{eqn:multiphysicsL7:520}
M \By = \Bb,
\end{equation}
so the critical point is \( \By = M^{-1} \Bb \). Since \( M > 0 \), this critical point is a minimum, as asserted.
}
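%
As a quick numerical sanity check of this theorem (not part of the lecture; the random construction below is just one convenient way to obtain a symmetric positive definite \( M \)), a finite difference gradient of \( \Psi \) can be compared against \( M \By - \Bb \):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
n = 5
A = rng.standard_normal((n, n))
M = A @ A.T + n * np.eye(n)      # symmetric positive definite
b = rng.standard_normal(n)

def psi(y):
    return 0.5 * y @ M @ y - y @ b

y = rng.standard_normal(n)
h = 1e-6
grad_fd = np.array([(psi(y + h * e) - psi(y - h * e)) / (2 * h)
                    for e in np.eye(n)])

assert np.allclose(grad_fd, M @ y - b, atol=1e-4)  # gradient is M y - b
x = np.linalg.solve(M, b)
assert psi(x) <= psi(y)          # y = M^{-1} b minimizes Psi
\end{verbatim}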
%
This is called the gradient method because the iterate moves along the negative gradient, the direction of steepest descent, toward the minimum, if one exists.
%
The method is
\begin{equation}\label{eqn:multiphysicsL7:100}
\Bx^{(k+1)} = \Bx^{(k)} +
\mathLabelBox
{ \alpha_k }{step size}
\mathLabelBox
[
labelstyle={below of=m\themathLableNode, below of=m\themathLableNode}
]
{
\Bd^{(k)}
}{direction},
\end{equation}
where the direction is
\begin{equation}\label{eqn:multiphysicsL7:120}
\begin{aligned}
\Bd^{(k)}
&= - \spacegrad \Psi
\\ &= \Bb - M \Bx^{(k)}
\\ &= \Br^{(k)}.
\end{aligned}
\end{equation}
\paragraph{Optimal step size}
\index{step size}
%
The energy function evaluated at the next iterate, \( \Psi \lr{ \Bx^{(k+1)} } \), is
\begin{equation}\label{eqn:multiphysicsL7:140}
\begin{aligned}
\Psi \lr{ \Bx^{(k+1)} }
&= \Psi\lr{ \Bx^{(k)} + \alpha_k \Bd^{(k)} }
\\ &=
\inv{2}
\lr{ \Bx^{(k)} + \alpha_k \Bd^{(k)} }^\T
M
\lr{ \Bx^{(k)} + \alpha_k \Bd^{(k)} }
-
\lr{ \Bx^{(k)} + \alpha_k \Bd^{(k)} }^\T \Bb.
\end{aligned}
\end{equation}
%
To find the minimizing step size, take the derivative with respect to \( \alpha_k \) and set it to zero
\begin{equation}\label{eqn:multiphysicsL7:540}
0 =
\inv{2}
\lr{ \Bd^{(k)} }^\T
M
\Bx^{(k)}
+
\inv{2}
\lr{ \Bx^{(k)} }^\T
M
\Bd^{(k)}
+
\alpha_k \lr{ \Bd^{(k)} }^\T
M
\Bd^{(k)}
-
\lr{ \Bd^{(k)} }^\T \Bb.
\end{equation}
Because \( M \) is symmetric, this is
\begin{equation}\label{eqn:multiphysicsL7:560}
\alpha_k \lr{ \Bd^{(k)} }^\T
M
\Bd^{(k)}
=
\lr{ \Bd^{(k)} }^\T \lr{ \Bb - M \Bx^{(k)}}
=
\lr{ \Bd^{(k)} }^\T \Br^{(k)},
\end{equation}
or, after picking the steepest descent direction \( \Bd^{(k)} = \Br^{(k)} \),
\begin{equation}\label{eqn:multiphysicsL7:160}
\alpha_k
= \frac{
\lr{\Br^{(k)}}^\T
\Br^{(k)}
}{
\lr{\Br^{(k)}}^\T
M
\Br^{(k)}
}.
\end{equation}
%
Although \( \alpha_k \) is optimal along each search direction, the method as a whole is not optimal: each iteration picks a fresh steepest descent direction with no memory of earlier ones, and because successive residuals are orthogonal the iterates can zigzag slowly toward the minimum.
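%
Putting the pieces together, a minimal NumPy sketch of the gradient method with the optimal step size of \cref{eqn:multiphysicsL7:160} might look like the following; the function name, tolerance, and iteration cap are assumptions of the sketch rather than lecture material.
\begin{verbatim}
import numpy as np

def gradient_method(M, b, eps=1e-10, max_iter=10000):
    """Steepest descent for M x = b, with M symmetric positive
    definite, using the optimal step alpha = (r.r)/(r.M r)."""
    x = np.zeros_like(b)
    for _ in range(max_iter):
        r = b - M @ x          # d^(k) = r^(k), steepest descent
        if np.linalg.norm(r) < eps:
            break
        alpha = (r @ r) / (r @ (M @ r))
        x = x + alpha * r
    return x
\end{verbatim}
For the symmetric positive definite \( M \) and \( \Bb \) constructed in the earlier sanity check, \texttt{gradient\_method(M, b)} should agree with \texttt{np.linalg.solve(M, b)} to the requested tolerance.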
%\EndArticle
%\EndNoBibArticle