% long-presentation.tex (shared LaTeX test file; originally edited in CoCalc)
\documentclass{beamer}

% Beamer setup: Madrid theme recolored with a custom purple palette.
% The mycolor* names are \definecolor'd just below this block.
\mode<presentation> {
  \usetheme{Madrid}% alternatives tried: CambridgeUS, Hannover, default, AnnArbor, Goettingen, Antibes, Frankfurt, Warsaw
   \setbeamercolor*{palette primary}{use=structure,fg=white,bg=mycolor1}
  \setbeamercolor*{palette tertiary}{use=structure,fg=white,bg=mycolor3}
  \setbeamercolor*{palette secondary}{use=structure,fg=white,bg=mycolor2}
  \setbeamercolor{item projected}{bg=mycolor1}
 % Restyle theorem-like blocks (Theorem, Definition, ...) to match the palette.
 \addtobeamertemplate{theorem begin}{%
  \setbeamercolor{block title}{fg=white,bg=mycolor1}%
  \setbeamercolor{block body}{fg=black,bg=mycolor4}%
}{}
%  \setbeamercolor{itemize item}{fg=mycolor1}
%  \setbeamercolor{enumerate item}{fg=mycolor1}
%\setbeamercovered{transparent}
}
%  \setbeamercolor*{palette tertiary}{use=structure,fg=white,bg=green}
% Custom palette. NOTE(review): these are defined after their use in the
% \mode block above; beamer/xcolor resolve color names when frames are
% typeset, so this compiles, but defining them first would be clearer.
\definecolor{mycolor1}{RGB}{51, 0, 102}% dark purple: primary palette, items, theorem titles
\definecolor{mycolor2}{RGB}{51, 0, 90}% darker purple: secondary palette
\definecolor{mycolor3}{RGB}{51, 0, 80}% darkest purple: tertiary palette
\definecolor{mycolor4}{RGB}{255,250,250}% snow white: theorem block body background

% Package loads.
\usepackage{amsmath,amssymb,comment}
\usepackage{amsfonts}
\usepackage{makeidx}
\usepackage{xcolor}
% graphicx replaces the obsolete epsfig package (see l2tabu); it provides
% \includegraphics, which is the only graphics command this file uses.
\usepackage{graphicx}
\usepackage{mathrsfs}
% \documentclass{article}
% Shorthand macros for this talk.  \newcommand is used for new names (it
% errors on accidental redefinition, unlike \def, which overwrites silently);
% \renewcommand marks deliberate overrides of existing LaTeX commands.
\newcommand{\bigo}[1]{O\left( #1 \right) }% big-O with auto-sized parentheses
\newtheorem{conjecture}{Conjecture}
\newtheorem{mydef}{Definition}% "mydef" avoids clashing with beamer's own "definition" env
\newtheorem{thm}{Theorem}
\newtheorem{cor}{Corollary}
\newcommand{\nnn}{\nonumber}
\newcommand{\OO}{\mathcal{O}}
\newcommand{\vv}{\textbf{v}}
\newcommand{\x}{\textbf{x}}
\newcommand{\xx}{\boldsymbol{\xi}}
\newcommand{\w}{\textbf{w}}
% NOTE(review): the next four overrides clobber standard text commands --
% \l (barred l), \r (ring accent), \b (bar-under accent), \v (caron accent).
% Harmless here since no accented text is used, but risky to copy elsewhere.
\renewcommand{\l}{\left}
\renewcommand{\r}{\right}
\renewcommand{\b}{\vec{b}}
\renewcommand{\v}{\vec{v}}
\newcommand{\f}{\textbf{f}}

\title[Perturbation Theory]{Introduction to Ordinary Differential Equations}
\author[Emily Weymier]{Emily Weymier}
\institute[SFA]{\begin{tabular}{l} Department of Mathematics \& Statistics \\Stephen F. Austin State University, Nacogdoches, TX
\end{tabular}}
\date{September 22, 2017}

\begin{document}

\frame{\titlepage}

\begin{frame}{Outline}
\begin{enumerate}
\item What is a differential equation? \vfill
\item Initial Value Problems %\vfill
\begin{itemize}
    \item Linear first order differential equations
    \item Second order differential equations
    \item Recasting high order differential equations as a system of first order differential equations
    \end{itemize} \vfill
\item Boundary Value Problems \vfill
\item Solution techniques for nonlinear differential equations %\vfill
    \begin{itemize}
    \item Power series solutions
    \item Perturbation theory concept
    \end{itemize} \vfill
\item Concluding Remarks \vfill
\end{enumerate}
\end{frame}

\section{What is a differential equation?}

\begin{frame}{Differential Equations: The Basics}
\begin{itemize}
\item Ordinary differential equations are used to model change over an independent variable (for our purposes it will usually be $t$ for time) without using partial derivatives. \pause
\vfill
\item Differential equations contain three types of variables: an independent variable, at least one dependent variable (these will be functions of the independent variable), and the parameters.\pause
\vfill
\item ODE's can contain multiple iterations of derivatives. They are named accordingly (i.e. if there are only first derivatives, then the ODE is called a first order ODE).\pause
\vfill
%\item 
%\vfill
\end{itemize}
\end{frame}

\begin{frame}{A Simple Example: Population Modeling}

Population growth is commonly modeled with differential equations. In the following equation: $t=$ time, $P=$ population and $k=$ proportionality constant. $k$ represents the constant ratio between the growth rate of the population and the size of the population. 
\begin{eqnarray*}
\frac{dP}{dt} = kP
\end{eqnarray*}

In this particular equation, the left hand side represents the growth rate of the population being proportional to the size of the population $P$. This is a very simple example of a first order, ordinary differential equation. The equation only contains first order derivatives and there are no partial derivatives. %I think it is slipping my mind right now, but is there a word for non partial derivatives? That is probably a stupid question but I am blanking on it right now. 
\end{frame}



\section{Initial Value Problems}

\begin{frame}{Initial Value Problems}

An initial value problem consists of a differential equation and an initial condition. So, going back to the population example, the following is an example of an initial value problem:

\begin{eqnarray*}
\frac{dP}{dt} = kP, P(0)=P_0
\end{eqnarray*}

The solution to this set of equations is a function, call it $P(t)$, that satisfies both equations. 

\end{frame}

\begin{frame} {Linear First Order Differential Equations}
\begin{itemize}

\item The standard form for a first-order differential equation is
\begin{eqnarray*}
\frac{dy}{dt} = f(t,y)
\end{eqnarray*}
where the right hand side represents the function $f$ that depends on the independent variable, $t$, and the dependent variable, $y$.

\end{itemize}
\end{frame}



\begin{frame}{General Solutions to a Differential Equation}

Let's look at a simple example and walk through the steps of finding a general solution to the following equation
\begin{eqnarray*}
\frac{dy}{dt} = (ty)^2
\end{eqnarray*}
\vfill
We will simply ``separate'' the variables then integrate both sides of the equation to find the general solution.

\begin{eqnarray*}
\frac{dy}{dt} &=& t^2 y^2
\\ \frac{1}{y^2} \,dy &=& t^2 \, dt
\\ \int \frac{1}{y^2} \,dy &=& \int t^2 \, dt
\end{eqnarray*}

\end{frame}

\begin{frame}

\begin{eqnarray*}
- y^{-1} &=& \frac{t^3}{3} + c
\\ -\frac{1}{y} &=& \frac{t^3}{3} + c
\\ y &=& -\frac{1}{\frac{t^3}{3} + c}
\\ \Rightarrow y(t) &=& -\frac{3}{t^3 + c_1}
\end{eqnarray*}
where $c_1$ is any real number.
\end{frame}

\section{Solving Initial Value Problems}

\begin{frame}{Linear First Order Differential Equations}

Initial value problems consist of a differential equation and an initial value. We will work through the example below:

\begin{eqnarray*}
\frac{dx}{dt} = -xt;~~~~~ x(0) = \frac{1}{\sqrt{\pi}}
\end{eqnarray*}

First we will need to find the general solution to $\frac{dx}{dt} = -xt$, then use the initial value $x(0)=\frac{1}{\sqrt{\pi}}$ to solve for $c$. Since we do not know what $x(t)$ is, we will need to ``separate'' the equation before integrating. 
\begin{eqnarray*}
\frac{dx}{dt} &=& -x t
\\ -\frac{1}{x} \,dx &=& t \,dt % "separate" the variables
\\ \int -\frac{1}{x} \,dx &=& \int t \,dt % integrate both sides with respect to the appropriate variable
\end{eqnarray*}

\end{frame}



\begin{frame}{Linear First Order Differential Equations Continued}
\begin{eqnarray*}
-\ln{x} &=& \frac{t^2}{2} + c 
\\ x &=& e^{-(\frac{t^2}{2} + c)}
\\ x &=& e^{-(\frac{t^2}{2})} e^{-c}
\\ x &=& k e^{-\frac{t^2}{2}}
\end{eqnarray*}

The above function of $t$ is the general solution to $\frac{dx}{dt} = -xt$ where $k$ is some constant. Since we have the initial value $x(0) = \frac{1}{\sqrt{\pi}}$, we can solve for $k$.
\end{frame}

\begin{frame}{Solving Initial Value Problems}

Thus we can see that the solution to the initial value problem 
\begin{eqnarray*}
\frac{dx}{dt} = -xt; x(0) = \frac{1}{\sqrt{\pi}}
\end{eqnarray*} 
is 

$$x(0) = \frac{1}{\sqrt{\pi}} = k e^{-\frac{0^2}{2}}$$

$$x(t) = \frac{1}{\sqrt{\pi}} \, e^{-\frac{t^2}{2}}$$ %is this even right? OK I think I got it this time.

\end{frame}

\begin{frame}

Let's verify that this solution is correct. We will need to show 
\begin{eqnarray*}
\frac{dx}{dt} &=& x'(t) = f(t, x(t)) = -t \, x(t)
\\ \frac{d}{dt}\bigg(\frac{1}{\sqrt{\pi}} e^{-\frac{t^2}{2}} \bigg) &=& \frac{1}{\sqrt{\pi}} e^{-\frac{t^2}{2}} \cdot (-t)
\\ &=& -t \cdot \frac{1}{\sqrt{\pi}} e^{-\frac{t^2}{2}} = -t \, x(t)
\end{eqnarray*}

\end{frame}


\begin{frame}{Second Order Differential Equations}
Second order differential equations simply have a second derivative of the dependent variable. The following is a common example that models a simple harmonic oscillator:

\begin{eqnarray*}
\frac{d^2y}{dt^2} + \frac{k}{m} y = 0
\end{eqnarray*}
where $m$ and $k$ are determined by the mass and spring involved. This second order differential equation can be rewritten as the following first order differential equation:

\begin{eqnarray*}
\frac{dv}{dt} = -\frac{k}{m} y
\end{eqnarray*}
where $v$ denotes velocity.
\end{frame}

\begin{frame}{Second Order Differential Equations Continued}

Referring back to some calculus knowledge, if $v(t)$ is velocity, then $v=\frac{dy}{dt}$. Thus, we can substitute in $\frac{dv}{dt}$ into our second order differential equation and essentially turn it into a first order differential equation.

\begin{eqnarray*}
\frac{d^2y}{dt^2} = -\frac{k}{m} y \Leftrightarrow \frac{dv}{dt} = -\frac{k}{m} y
\end{eqnarray*}

Now we have the following system of first order differential equations to describe the original second order differential equation:

\begin{eqnarray*}
\frac{dy}{dt} &=& v
\\ \frac{dv}{dt} &=& -\frac{k}{m} y
\end{eqnarray*}
\end{frame}

\begin{frame}{Second Order Differential Equations Continued}
Consider the following initial value problem:

$$\frac{d^2y}{dt^2} + y = 0$$

with $y(0) = 0$ and $y'(0) = v(0) = 1$. Let's show that $y(t) = \sin(t)$ is a solution. Let $v=\frac{dy}{dt}$, then we have the following system:

\begin{eqnarray*}
\frac{dy}{dt} &=& v
\\ \frac{dv}{dt} &=& -y
\end{eqnarray*}

\end{frame}



\begin{frame}{Second Order Differential Equations Continued}

\begin{eqnarray*}
\frac{dy}{dt} &=& \frac{d}{dt} \sin(t) = \cos(t) = v
\\ \frac{dv}{dt} &=& -\sin(t) = -y
\\ \Rightarrow \frac{d^2y}{dt^2} &=& -\sin(t)
\\ \Rightarrow \frac{d^2y}{dt^2} + y &=& \frac{d^2 (\sin(t))}{dt^2} + \sin(t)
\\ &=& -\sin(t) + \sin(t) = 0
\end{eqnarray*}

\end{frame}



\begin{frame}{High Order Differential Equations as a System}



\end{frame}

\section{Boundary Value Problems}

\begin{frame}{Boundary Value Problems: The Basics}

\end{frame}

\section{Solution Techniques for Nonlinear Differential Equations}
\begin{frame}{Power Series Solutions}
To demonstrate how to use power series to solve a differential equation with variable coefficients, we will look at Hermite's Equation:

\begin{eqnarray*}
\frac{d^2y}{dt^2} - 2t \frac{dy}{dt} + 2 py = 0
\end{eqnarray*}
We will use the following power series and its first and second derivatives to make a guess:

\begin{eqnarray}
\label{function} y(t) &=& a_0 + a_1 t + a_2 t^2 + a_3 t^3 + ... = \sum_{n=0}^{\infty} a_n t^n
\\ \label{der} \frac{dy}{dt} &=& a_1 + 2a_2 t + 3a_3 t^2 + 4a_4 t^3 + ... = \sum_{n=1}^{\infty} n a_n t^{n-1}
\\ \label{der2} \frac{d^2y}{dt^2} &=& 2 a_2 + 6 a_3 t + 12 a_4 t^2 + ... = \sum_{n=2}^{\infty} n(n-1) a_n t^{n-2}
\end{eqnarray}

\end{frame}


\begin{frame}

From the previous equations we can conclude that 

\begin{eqnarray*}
y(0) &=& a_0
\\ y'(0) &=& a_1
\end{eqnarray*}

Next we will substitute (\ref{function}), (\ref{der}) and (\ref{der2}) into Hermite's Equation and collect like terms.

\begin{eqnarray*}
\frac{d^2y}{dt^2} - 2t \frac{dy}{dt} + 2 py = 0 = (2 a_2 + 6 a_3 t + 12 a_4 t^2 + ...) 
\\  - 2t (a_1 + 2a_2 t + 3a_3 t^2 + 4a_4 t^3 + ...) 
\\  +2p (a_0 + a_1 t + a_2 t^2 + a_3 t^3 + ...)
\\ \Rightarrow (2pa_0 + 2a_2) + (2pa_1 - 2a_1 + 6a_3)t + 
\\ (2pa_2 - 4a_2 + 12a_4)t^2 + (2pa_3 - 6a_3 + 20a_5)t^3 = 0
\end{eqnarray*}

\end{frame}



\begin{frame}
Then from here, we will set each coefficient equal to $0$: since the power series on the left-hand side must equal $0$ for \emph{every} value of $t$, each of its coefficients must vanish.
We get the following sequence of equations:

\begin{eqnarray*}
2pa_0 + 2a_2 &=& 0
\\ 2pa_1 - 2a_1 + 6a_3 &=& 0
\\ 2pa_2 - 4a_2 + 12a_4 &=& 0
\\ 2pa_3 - 6a_3 + 20a_5 &=& 0
\end{eqnarray*}

\end{frame}



\begin{frame}

Then, with several substitutions, we arrive at the following set of equations:

\begin{eqnarray*}
a_2 &=& -pa_0
\\ a_3 &=& - \frac{p-1}{3} a_1
\\ a_4 &=& -\frac{p-2}{6} a_2 = \frac{(p-2)p}{6} a_0
\\ a_5 &=& -\frac{p-3}{10} a_3 = \frac{(p-3)(p-1)}{30} a_1
\end{eqnarray*}

\end{frame}




\begin{frame}{Perturbation Theory Concept}
Perturbation theory is used when a mathematical equation involves a small perturbation, usually $\epsilon$. From here we create $y(x)$ such that it is an expansion in terms of $\epsilon$. For example 
\begin{eqnarray*}
y(x) = y_0(x) + \epsilon y_1(x) + \epsilon^2 y_2(x)+\cdots 
\end{eqnarray*}
This summation is called a perturbation series and it has a nice feature that allows each $y_i$ to be solved using the previous $y_i$'s.
Consider the equation,
\begin{eqnarray}
\label{ex1} x^2+x+6\epsilon &=& 0, \hspace{.5cm} \epsilon \ll 1
\end{eqnarray}

\noindent Let's consider using perturbation theory to determine approximations for the roots of Equation (\ref{ex1}).


\end{frame}


\begin{frame}{Perturbation Theory Concept Continued}

Notice this equation is a perturbation of $x^2+x=0$. Let $x(\epsilon) = \sum_{n=0}^{\infty} a_n \epsilon^n$. This series will be substituted into (\ref{ex1}) and powers of $\epsilon$ will be collected. %We wish for the equation to be true for any $\epsilon \ll 1$.
Next we will calculate the first term of the series by setting $\epsilon =0$ in (\ref{ex1}). So the leading order equation is

\begin{eqnarray}
\label{LOE} a_0^2 + a_0 = 0
\end{eqnarray}

\noindent with solutions $x = -1, 0$. Thus $x(0) = a_0 = -1, 0$. 
Now the perturbation series are as follows

\begin{eqnarray*}
x^2 + x + 6\epsilon &=& 1 - a_1 \epsilon - a_2 \epsilon^2 - a_1 \epsilon + a_1^2 \epsilon^2 + a_1 a_2 \epsilon^3 - a_2 \epsilon^2 + a_1 a_2 \epsilon^3 + a_2^2 \epsilon^4 - 1 + a_1 \epsilon + a_2 \epsilon^2 + 6 \epsilon
\\ &=& (1-1) + (-2 a_1 + a_1 + 6)\epsilon + (-2 a_2 + a_1^2 +a_2)\epsilon^2 + \mathcal{O}(\epsilon^3) % collecting like powers of epsilon
\end{eqnarray*}

\end{frame}



\begin{frame}{Perturbation Theory Concept Continued}

\begin{eqnarray}
\label{root1} x_1(\epsilon) &=& -1 + a_1 \epsilon + a_2 \epsilon^2 + \mathcal{O}(\epsilon^3)
\end{eqnarray} 
and 
\begin{eqnarray}
\label{root2} x_2(\epsilon) &=& 0 + b_1 \epsilon + b_2 \epsilon^2 + \mathcal{O}(\epsilon^3)
\end{eqnarray}

Next, we will substitute in (\ref{root1}) into (\ref{ex1}) while ignoring powers of $\epsilon$ greater than $2$. Since we are only approximating the solution to the second-order, we can disregard the powers of $\epsilon$ greater than $2$.

\begin{align*}
x^2 + x + 6 \epsilon &= (-1 + a_1 \epsilon + a_2 \epsilon^2)^2 + (-1 + a_1 \epsilon + a_2 \epsilon^2) + 6 \epsilon
\\ &\Rightarrow (- a_1 + 6)\epsilon + (- a_2 + a_1^2)\epsilon^2 + \mathcal{O}(\epsilon^3)
\end{align*}

\end{frame}


\begin{frame}{Perturbation Theory Concept Continued}

From here we take the coefficient of each power of $\epsilon$ and set it equal to zero. This step is justified because (\ref{ex1}) must hold for \emph{every} sufficiently small $\epsilon$, so the coefficient of each power of $\epsilon$ must vanish. Thus we have the following equations
\begin{align*}
\mathcal{O}(\epsilon^1) &: -a_1 + 6 = 0
\\ \mathcal{O}(\epsilon^2) &: a_1^2 - a_2 = 0
\end{align*}
These equations will be solved sequentially. The results are $a_1 = 6$ and $a_2 = 36$. Thus the perturbation expansion for the root $x_1 = -1$ is: 

$$x_1(\epsilon) = -1 + 6 \epsilon + 36 \epsilon^2 + \mathcal{O}(\epsilon^3)$$



The same process can be repeated for $x_2$ with the perturbation expansion for the root $x_2 = 0$ resulting in 

$$x_2(\epsilon) =  -6 \epsilon - 36 \epsilon^2 + \mathcal{O}(\epsilon^3)$$
\end{frame}


%Here's the tex and work for the above:

%Now, we will plug equation (\ref{root2}) into (\ref{ex1}) while disregarding powers of $\epsilon$ greater than $2$. This will create the perturbation expansion for the root $x_2 = 0$.

%\begin{align*}
%x^2 + x + 6 \epsilon &= (0 + b_1 \epsilon + b_2 \epsilon^2)^2 + (0 + b_1 \epsilon + b_2 \epsilon^2) + 6 \epsilon
% &= b_1^2 \epsilon^2 + 2 b_1 b_2 \epsilon^3 + b_2^2 \epsilon^4 + b_1 \epsilon + b_2 \epsilon^2 + 6 \epsilon
%\\ &= (b_1 + 6) \epsilon + (b_1^2 + b_2) \epsilon^2 + \mathcal{O}(\epsilon^3)
%\end{align*}
%From here we take the coefficient of each power of $\epsilon$ and set it equal to zero. This step is justified because (\ref{ex1}) is equal to zero and $\epsilon \neq 0$ so each coefficient must be equal to zero. Thus we have the following equations

%\begin{align*}
%\mathcal{O}(\epsilon^1) &: b_1 + 6 = 0
%\\ \mathcal{O}(\epsilon^2) &: b_1^2 + b_2 = 0
%\end{align*}

%These equations will be solved sequentially. The results are

%\begin{align*}
%b_1 &= -6
%\\ b_1^2 + b_2 &= 0
%\\ (-6)^2 + b_2 &= 0
%\\ b_2 &= -36
%\end{align*}

%Thus the perturbation expansion for the root $x_2 = 0$ is 

%$$x_2(\epsilon) =  -6 \epsilon - 36 \epsilon^2 + \mathcal{O}(\epsilon^3)$$





%\begin{frame}{Numerical Experiments}
%\begin{itemize}
%\item Example 1 - Test convergence rate, computational efficiency, and accuracy with known exact solution\vfill\pause
%\item Example 2 - Test convergence rate, computational efficiency, and accuracy with nonlinear model\vfill\pause
%\item Example 3 - convergence rate, computational efficiency of three-species model\vfill
%\end{itemize}
%\end{frame}



\begin{frame}{Concluding Remarks}


\end{frame}

%\begin{frame}{Efficiency}
%\begin{minipage}{1.0\textwidth}
%\begin{figure}[h]
%\begin{center}
%\includegraphics[scale=.3]{Example1CompTime.pdf}
%\caption{A log-log plot of the computational time, in seconds, versus $N$ after $1000$ iterations.  The temporal step is held constant, $\tau = 10^{-6}$, while $h=1/(N-1)$.  A linear least squares approximates the slope of the line to be $1.654628$. This indicates that the computational time is scales as $N^{1.654628}$.  The computational time of an efficient scheme should scale no slower than $N^2$.  This indicates that the proposed nonlinear splitting scheme is highly efficient.}
%\end{center}
%\end{figure}
%\end{minipage}
%\begin{minipage}{1.0\textwidth}
%\begin{itemize}
%\item Let $\tau = 10^{-6}$ and consider the computational time for $1000$ temporal steps for $N=21,~31, \ldots, 401$.\vfill\pause
%\item The computational time scales as $N^{1.654628}$\vfill
%\end{itemize}
%\end{minipage}
%\end{frame}

%\begin{frame}{Example 2 - Invasive Species Model}

%\end{frame}

%\begin{frame}{Nonlinear Splitting Algorithm}

%\end{frame}

%\begin{frame}{All in the details}


%\end{frame}

%\begin{frame}{Concluding Remarks}

%\begin{itemize}
%\item In summary:
%   \begin{enumerate}
%    \item \textbf{Positivity} of numerical solution guaranteed.
%    \vfill
%    \item \textbf{Nonlinear} stability and convergence were shown.
%    \vfill
%    \item \textbf{Computationally efficient} design $\rightarrow$ Computational Time scales less than $N^2$.
%    \end{enumerate}
%    \vfill
%\end{itemize}
%\end{frame}

\begin{frame}{Questions?}


\end{frame}

\end{document}