\documentclass[a4paper]{article}
\usepackage{fullpage} % Package to use full page
\usepackage{parskip} % Package to tweak paragraph skipping
\usepackage{tikz} % Package for drawing
\usepackage{amsmath}
\usepackage{hyperref}
\usepackage{enumitem}
\title{HW1: Linear System Theory (ECE532)}
\author{Thanh T. Nguyen}
\date{2016/03/10}
\begin{document}
\maketitle
\section*{Problem 1}
\begin{enumerate}[label=(\alph*)]
\item Assume that the output $y(t) = h$ (a constant) in the steady state and $A <0$. In the steady state, the state variable $x$ no longer depends on time, i.e., $\dot{x}(t) = \frac{d}{dt}x(t) = 0$. Therefore, the state-space equations become:
\begin{align}
0 &= Ax+Bu \\
h &= Cx
\end{align}
Therefore, $u(t) = -\frac{A}{B} x(t) = - \frac{A}{CB}h$ (assuming $B \neq 0$ and $C \neq 0$), which proves the required claim.
\item With the steady-state controller $u(t) = - \frac{A}{CB}h$, we can solve the state-space equations using the Laplace transform. Substituting $u(t) = - \frac{A}{CB}h$ into the state equation and defining $z(t) = x(t) - \frac{h}{C}$, the state equation becomes:
\begin{align}
\dot{z}(t) &= Az(t)
\end{align}
Applying the Laplace transform and then the inverse Laplace transform, and assuming a zero initial state $x(0) = 0$, we get:
\begin{align*}
\mathcal{L} [\dot{z}(t)] &= \mathcal{L}[Az(t)]\\
sZ(s) - z(0) &= AZ(s)\\
Z(s) &= \frac{z(0)}{s - A}\\
\mathcal{L}^{-1} [Z(s)] &= \mathcal{L}^{-1} \left[\frac{z(0)}{s - A}\right] \\
z(t) &= e^{At}z(0)\\
x(t) - \frac{h}{C} &= e^{At}(x(0) -\frac{h}{C}) \\
x(t) &= \frac{h}{C}(1 - e^{At})
\end{align*}
Thus,
\begin{align}
y(t) &= Cx(t) = h(1 - e^{At})
\end{align}
for $t \geq 0$.
For $A < 0$,
\begin{align*}
\lim_{t \to \infty} y(t) &= \lim_{t \to \infty} h(1 - e^{At})\\
&= h
\end{align*}
\item For $A > 0$,
\begin{align*}
\lim_{t \to \infty} y(t) &= \lim_{t \to \infty} h(1 - e^{At})
\in \{\infty, -\infty, 0\}
\end{align*}
depending on whether $h$ is negative, positive or zero, respectively.
\item Simulation using MATLAB Simulink:
\begin{figure}[!h]
\begin{center}
\includegraphics[width=10cm]{siso_lti_ss.png}
\end{center}
\caption{MATLAB Simulink Configuration for ($A,B,C,h$) = ($-2,1,1,0.5$)}\label{case1}
\end{figure}
\begin{figure}[!h]
\begin{center}
\includegraphics[width=10cm]{plt1.jpg}
\end{center}
\caption{The time response plot for ($A,B,C,h$) = ($-2,1,1,0.5$)}\label{plcase1}
\end{figure}
\begin{figure}[!h]
\begin{center}
\includegraphics[width=10cm]{siso_lti_ss2.png}
\end{center}
\caption{MATLAB Simulink Configuration for ($A,B,C,h$) = ($2,1,1,0.5$)}\label{case2}
\end{figure}
\end{enumerate}
\begin{figure}[!htbp]
\begin{center}
\includegraphics[width=10cm]{plt2.jpg}
\end{center}
\caption{The time response plot for ($A,B,C,h$) = ($2,1,1,0.5$)}\label{plcase2}
\end{figure}
These plots confirm the steady-state behavior of the output $y(t)$ derived in parts (b) and (c).
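For reference, the same experiment can be reproduced without Simulink. The following Python sketch (a minimal forward-Euler integration; the step size and horizon are chosen arbitrarily) applies the steady-state controller from part (a) and prints the final output for both signs of $A$:
\begin{verbatim}
import numpy as np

def simulate(A, B, C, h, T=5.0, dt=1e-3):
    # Forward-Euler integration of x' = A x + B u with the constant
    # input u = -(A/(C*B)) * h from part (a) and zero initial state.
    u = -A / (C * B) * h
    x, ys = 0.0, []
    for _ in np.arange(0.0, T, dt):
        x += dt * (A * x + B * u)
        ys.append(C * x)
    return np.array(ys)

for A in (-2.0, 2.0):
    y = simulate(A, B=1.0, C=1.0, h=0.5)
    print(A, y[-1])   # y(T) is close to h = 0.5 for A < 0; diverges for A > 0
\end{verbatim}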
\section*{Problem 2}
\begin{enumerate}[label=(\alph*)]
\item
Given the hard disk drive equations, that is,
\begin{align}
I_{1} \ddot{\theta_1} + b(\dot{\theta_1} - \dot{\theta_2}) + k(\theta_1 - \theta_2) &= M_C + M_D\\
I_{2} \ddot{\theta_2} + b(\dot{\theta_2} - \dot{\theta_1}) + k(\theta_2 - \theta_1) &= 0
\end{align}
we can develop a state equation by choosing $\mathbf{x}(t)=\begin{bmatrix}
\theta_1\\
\dot{\theta_1}\\
\theta_2\\
\dot{\theta_2}
\end{bmatrix}$ as state variables, $\mathbf{u}(t) = \begin{bmatrix}
M_C\\
M_D
\end{bmatrix}$ as input variables and $y = \theta_2$ as output variable. For this choice, the state equation for this system is:
\begin{align*}
\mathbf{\dot{x}} &= \begin{bmatrix}
\dot{\theta_1}\\
\ddot{\theta_1}\\
\dot{\theta_2}\\
\ddot{\theta_2}
\end{bmatrix}
= \begin{bmatrix}
0 & 1 & 0 & 0\\
-\frac{k}{I_1} & -\frac{b}{I_1} & \frac{k}{I_1} & \frac{b}{I_1}\\
0 & 0 & 0 & 1\\
\frac{k}{I_2} & \frac{b}{I_2} & -\frac{k}{I_2} & -\frac{b}{I_2}
\end{bmatrix}
\mathbf{x}
+
\begin{bmatrix}
0 & 0\\
\frac{1}{I_1} & \frac{1}{I_1}\\
0 & 0\\
0 & 0
\end{bmatrix}
\mathbf{u} \\
&= A\mathbf{x} + B\mathbf{u}\\
y &= \begin{bmatrix}
0 & 0 & 1 & 0
\end{bmatrix} \mathbf{x} + 0 \cdot \mathbf{u}\\
&=C\mathbf{x}
\end{align*}
where $A = \begin{bmatrix}
0 & 1 & 0 & 0\\
-\frac{k}{I_1} & -\frac{b}{I_1} & \frac{k}{I_1} & \frac{b}{I_1}\\
0 & 0 & 0 & 1\\
\frac{k}{I_2} & \frac{b}{I_2} & -\frac{k}{I_2} & -\frac{b}{I_2}
\end{bmatrix}$,
$B = \begin{bmatrix}
0 & 0\\
\frac{1}{I_1} & \frac{1}{I_1}\\
0 & 0\\
0 & 0
\end{bmatrix}$, and
$C = \begin{bmatrix}
0 & 0 & 1 & 0
\end{bmatrix}$.
\item
For $M_D = 0$, $b=0$, and $\mathbf{y} = \begin{bmatrix}
\theta_1\\
\theta_2
\end{bmatrix}
$ as the output variables, let $u = M_C$ be the input variable. The state-space equations become
\begin{align*}
\mathbf{\dot{x}} &= A\mathbf{x} + Bu\\
\mathbf{y} &= C\mathbf{x}
\end{align*}
where $A = \begin{bmatrix}
0 & 1 & 0 & 0\\
-\frac{k}{I_1} & 0 & \frac{k}{I_1} & 0\\
0 & 0 & 0 & 1\\
\frac{k}{I_2} & 0 & -\frac{k}{I_2} & 0
\end{bmatrix}$,
$B = \begin{bmatrix}
0\\
\frac{1}{I_1}\\
0\\
0
\end{bmatrix}$,
$C = \begin{bmatrix}
1 & 0 & 0 & 0\\
0 & 0 & 1 & 0
\end{bmatrix}$, and
$u = M_C$.
Taking the Laplace transform of both sides of the state-space equations (with zero initial state) gives the transfer function below (a symbolic cross-check is sketched at the end of this problem):
\begin{align*}
\mathbf{H}(s)
&= \begin{bmatrix}
H_1(s)\\
H_2(s)
\end{bmatrix}\\
&= \begin{bmatrix}
\frac{Y_1(s)}{U(s)}\\
\frac{Y_2(s)}{U(s)}\\
\end{bmatrix}\\
&= C (sI-A)^{-1} B\\
&= \begin{bmatrix}
\frac{(1/I_1) s^2 + k/(I_1 I_2)}{s^4 + (k/I_1 + k/I_2) s^2}\\
\\
\frac{k/(I_1 I_2)}{s^4 + (k/I_1 + k/I_2) s^2}
\end{bmatrix}
\end{align*}
\end{enumerate}
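As a sanity check (not part of the required solution), the symbolic computation $C(sI-A)^{-1}B$ for part (b) can be reproduced with SymPy, using the matrices derived above:
\begin{verbatim}
import sympy as sp

s, k, I1, I2 = sp.symbols('s k I1 I2', positive=True)
A = sp.Matrix([[0, 1, 0, 0],
               [-k/I1, 0, k/I1, 0],
               [0, 0, 0, 1],
               [k/I2, 0, -k/I2, 0]])
B = sp.Matrix([0, 1/I1, 0, 0])
C = sp.Matrix([[1, 0, 0, 0],
               [0, 0, 1, 0]])

H = sp.simplify(C * (s*sp.eye(4) - A).inv() * B)
print(H)  # equal to H_1(s), H_2(s) above up to algebraic rearrangement;
          # both entries share the denominator s**4 + (k/I1 + k/I2)*s**2
\end{verbatim}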
\section*{Problem 3}
Assume that the system is operating about the equilibrium point $(\mathbf{x_0}, \mathbf{u_0}) = (\mathbf{0}, \mathbf{0})$ and that the variations of $\mathbf{f}(\mathbf{x}(t), \mathbf{u}(t))$ around the equilibrium point are sufficiently small. Then we can write $\mathbf{x}(t) = \mathbf{x_0} + \delta \mathbf{x}(t)$ and $\mathbf{u}(t) = \mathbf{u_0} + \delta \mathbf{u}(t)$.
Recall the vector equation $\dot{\mathbf{x}}(t) = \mathbf{f}(\mathbf{x}(t),\mathbf{u}(t))$; each component equation $\dot{x}_i(t) = f_i (\mathbf{x}(t), \mathbf{u}(t))$ can be expanded in a Taylor series as
\begin{align}
\frac{d}{dt}(x_{0i} + \delta x_i) &= f_i(\mathbf{x_0} + \delta \mathbf{x}(t), \mathbf{u_0} + \delta\mathbf{u}(t))\\
&\approx f_i(\mathbf{x_0},\mathbf{u_0}) + \left.\frac{\partial f_i} {\partial \mathbf{x}}\right |_{\mathbf{x} = \mathbf{x_0}} \delta \mathbf{x} + \left.\frac{\partial f_i}{\partial \mathbf{u}}\right |_{\mathbf{u} = \mathbf{u_0}} \delta \mathbf{u}
\end{align}
The variations should be small enough for this approximation to hold. Since $\frac{d}{dt} x_{0i} = f_i(\mathbf{x_0},\mathbf{u_0})$, we thus have
\begin{align}
\frac{d}{dt}\delta x_i &\approx \left.\frac{\partial f_i} {\partial \mathbf{x}}\right |_{\mathbf{x} = \mathbf{x_0}} \delta \mathbf{x} + \left.\frac{\partial f_i}{\partial \mathbf{u}}\right |_{\mathbf{u} = \mathbf{u_0}} \delta \mathbf{u}
\end{align}
Combining all $n$ state equations, and replacing ``$\approx$'' with ``$=$'' in (9), gives
\begin{align}
\frac{d}{dt} \delta \mathbf{x}
&=
\begin{bmatrix}
\left.\frac{\partial f_1}{\partial \mathbf{x}} \right |_{\mathbf{x} = \mathbf{x_0}} \\
\left.\frac{\partial f_2}{\partial \mathbf{x}} \right |_{\mathbf{x} = \mathbf{x_0}} \\
\vdots\\
\left.\frac{\partial f_n}{\partial \mathbf{x}} \right |_{\mathbf{x} = \mathbf{x_0}} \\
\end{bmatrix}
\delta \mathbf{x} +
\begin{bmatrix}
\left.\frac{\partial f_1}{\partial \mathbf{u}} \right |_{\mathbf{u} = \mathbf{u_0}} \\
\left.\frac{\partial f_2}{\partial \mathbf{u}} \right |_{\mathbf{u} = \mathbf{u_0}} \\
\vdots\\
\left.\frac{\partial f_n}{\partial \mathbf{u}} \right |_{\mathbf{u} = \mathbf{u_0}} \\
\end{bmatrix}
\delta \mathbf{u}\\
&= A \delta \mathbf{x} + B \delta \mathbf{u}
\end{align}
where
$
A = \left.\begin{bmatrix}
\frac{\partial f_1}{\partial x_1} & \frac{\partial f_1}{\partial x_2} & \cdots & \frac{\partial f_1}{\partial x_n}\\
\frac{\partial f_2}{\partial x_1} & \frac{\partial f_2}{\partial x_2} & \cdots & \frac{\partial f_2}{\partial x_n}\\
& & \vdots & \\
\frac{\partial f_n}{\partial x_1} & \frac{\partial f_n}{\partial x_2} & \cdots & \frac{\partial f_n}{\partial x_n}\\
\end{bmatrix}\right |_{\mathbf{x} = \mathbf{x_0}}
$
and
$
B = \left.\begin{bmatrix}
\frac{\partial f_1}{\partial u_1} & \frac{\partial f_1}{\partial u_2} & \cdots & \frac{\partial f_1}{\partial u_m}\\
\frac{\partial f_2}{\partial u_1} & \frac{\partial f_2}{\partial u_2} & \cdots & \frac{\partial f_2}{\partial u_m}\\
& & \vdots & \\
\frac{\partial f_n}{\partial u_1} & \frac{\partial f_n}{\partial u_2} & \cdots & \frac{\partial f_n}{\partial u_m}\\
\end{bmatrix}\right |_{\mathbf{u} = \mathbf{u_0}}
$, where $m$ denotes the number of inputs.
Since $\mathbf{x_0} = \mathbf{0}$ and $\mathbf{u_0} = \mathbf{0}$, we have $\mathbf{x}(t) = \mathbf{x_0} + \delta \mathbf{x}(t) = \delta \mathbf{x}(t)$ and $\mathbf{u}(t) = \mathbf{u_0} + \delta \mathbf{u}(t) = \delta \mathbf{u}(t)$, so (11) becomes
\begin{align*}
\mathbf{\dot{x}}(t) &= A \mathbf{x}(t) + B \mathbf{u}(t)
\end{align*}
\section*{Problem 4}
\begin{enumerate}[label=(\alph*)]
\item
Choosing
$
\mathbf{x} =
\begin{bmatrix}
x_1 \\ x_2 \\ x_3 \\ x_4
\end{bmatrix} =
\begin{bmatrix}
r \\ \dot{r} \\ \theta \\\dot{\theta}
\end{bmatrix}
$
as state variables, $\mathbf{y} =
\begin{bmatrix}
r\\
\theta
\end{bmatrix}
$ as output variables, and
$
\mathbf{u} =
\begin{bmatrix}
u_r\\
u_{\theta}
\end{bmatrix}
$
as input variables gives the nonlinear state space equation as
\begin{align}
\mathbf{\dot{x}}
&=
\begin{bmatrix}
\dot{r}\\
\ddot{r}\\
\dot{\theta}\\
\ddot{\theta}
\end{bmatrix}
= \mathbf{f}(\mathbf{x},\mathbf{u}) =
\begin{bmatrix}
\dot{r}\\
r \dot{\theta}^2 - k/r^2 + u_r\\
\dot{\theta}\\
-2 \dot{r} \dot{\theta}/r + u_{\theta}/ r
\end{bmatrix}
\end{align}
\item
Let $k = r_0^3 \omega_0 ^2$. We check that $\mathbf{x_0} =
\begin{bmatrix}
r_0\\
0\\
\omega_0 t\\
\omega_0
\end{bmatrix}
$ and
$ \mathbf{u_0} =
\begin{bmatrix}
0\\
0
\end{bmatrix}
$ is a solution to the state-space equation (12). Indeed, we can easily see that
$\mathbf{\dot{x}_0} =
\begin{bmatrix}
0\\
0\\
\omega_0\\
0
\end{bmatrix}
$ and
$
\mathbf{f}(\mathbf{x_0},\mathbf{u_0}) =
\begin{bmatrix}
0\\
r_0 \omega_0 ^2 - k/r_0^2 + 0\\
\omega_0\\
-2 (0) \omega_0 / r_0 + 0
\end{bmatrix} =
\begin{bmatrix}
0\\
0\\
\omega_0\\
0
\end{bmatrix}
$. So, $\mathbf{\dot{x}_0} = \mathbf{f}(\mathbf{x_0}, \mathbf{u_0})$. We can now obtain a linearized system around the point $(\mathbf{x_0}, \mathbf{u_0})$ by using the equations derived in Problem 3 (the resulting Jacobians are cross-checked symbolically at the end of this problem). That is,
\begin{align*}
\delta \mathbf{\dot{x}} &= A \delta \mathbf{x} + B \delta \mathbf{u}\\
\delta \mathbf{y} &= C \delta \mathbf{x}
\end{align*}
where
\begin{align*}
A &= \left.\begin{bmatrix}
\frac{\partial f_1}{\partial x_1} & \frac{\partial f_1}{\partial x_2} & \cdots & \frac{\partial f_1}{\partial x_4}\\
\frac{\partial f_2}{\partial x_1} & \frac{\partial f_2}{\partial x_2} & \cdots & \frac{\partial f_2}{\partial x_4}\\
& & \vdots & \\
\frac{\partial f_4}{\partial x_1} & \frac{\partial f_4}{\partial x_2} & \cdots & \frac{\partial f_4}{\partial x_4}\\
\end{bmatrix}\right |_{\mathbf{x} = \mathbf{x_0}}
=
\begin{bmatrix}
0 & 1 & 0 & 0\\
3 \omega_0 ^2 & 0 & 0 & 2 r_0 \omega_0\\
0 & 0 & 0 & 1\\
0 & -2 \omega_0 / r_0 & 0 & 0
\end{bmatrix}
\end{align*}
\begin{align*}
B &= \left.\begin{bmatrix}
\frac{\partial f_1}{\partial u_1} & \frac{\partial f_1}{\partial u_2}\\
\frac{\partial f_2}{\partial u_1} & \frac{\partial f_2}{\partial u_2} \\
\frac{\partial f_3}{\partial u_1} & \frac{\partial f_3}{\partial u_2} \\
\frac{\partial f_4}{\partial u_1} & \frac{\partial f_4}{\partial u_2}
\end{bmatrix}\right |_{\mathbf{u} = \mathbf{u_0}} =
\begin{bmatrix}
0 & 0\\
1 & 0 \\
0 & 0 \\
0 & 1/r_0
\end{bmatrix}
\end{align*}
\begin{align*}
C &=
\begin{bmatrix}
1 & 0 & 0 & 0\\
0 & 0 & 1 & 0
\end{bmatrix}
\end{align*}
\end{enumerate}
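As a cross-check of the Jacobians in part (b) (not required by the assignment), the linearization can also be computed symbolically from the nonlinear model (12):
\begin{verbatim}
import sympy as sp

r, rdot, theta, thetadot, ur, utheta = sp.symbols(
    'r rdot theta thetadot u_r u_theta')
r0, w0 = sp.symbols('r0 omega0', positive=True)
k = r0**3 * w0**2

x = sp.Matrix([r, rdot, theta, thetadot])
u = sp.Matrix([ur, utheta])
f = sp.Matrix([rdot,
               r*thetadot**2 - k/r**2 + ur,
               thetadot,
               -2*rdot*thetadot/r + utheta/r])

eq = {r: r0, rdot: 0, thetadot: w0, ur: 0, utheta: 0}
A = sp.simplify(f.jacobian(x).subs(eq))
B = sp.simplify(f.jacobian(u).subs(eq))
print(A)  # rows: [0,1,0,0], [3*omega0**2,0,0,2*omega0*r0],
          #       [0,0,0,1], [0,-2*omega0/r0,0,0]
print(B)  # rows: [0,0], [1,0], [0,0], [0,1/r0]
\end{verbatim}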
\section*{Problem 5}
The system in Figure (a) is linear, and the systems in Figures (b) and (c) are nonlinear. In Figure (a), $y(t) = f(x(t)) = k x(t)$ for some non-zero $k$, which satisfies the additivity and homogeneity properties of a linear system. In Figure (b), $y(t) = f(x(t)) = k x(t) + y_0$ does not satisfy the additivity condition; that is, $f(x_1(t) + x_2(t)) = k(x_1(t)+ x_2(t)) + y_0 \neq f(x_1(t)) + f(x_2(t)) = kx_1(t) + kx_2(t) + 2 y_0$. In Figure (c), the input-output graph is a nonlinear curve, so the system is not linear. \\
In Figure (b), however, the system with the shifted output $\bar{y}(t) = y(t) - y_0 = g(x(t)) = kx(t)$ is linear.
\section*{Problem 6}
Let $f: u(t) \rightarrow y(t)$ denote the input-output map of the system in the time domain, and let $1(\cdot)$ denote the indicator function, whose value is $1$ if its argument is true and $0$ otherwise.
\begin{enumerate}[label=(\alph*)]
\item \textbf{Linearity}
\begin{itemize}
\item Additivity
\begin{align*}
f(u_1(t) + u_2(t))
&= 1(t \leq \alpha) (u_1(t) + u_2(t))\\
&= 1(t \leq \alpha) u_1(t) + 1(t \leq \alpha) u_2(t)\\
& = f(u_1(t)) + f(u_2(t))
\end{align*}
for any inputs $u_1(t)$ and $u_2(t)$.
\item
Homogeneity
\begin{align*}
f(ku(t))
&= 1(t \leq \alpha) ku(t)\\
&= k 1(t \leq \alpha) u(t)\\
&= k f(u(t))
\end{align*}
for any constant $k$ and input $u(t)$.
\end{itemize}
Therefore, the system is \textbf{linear}.
\item \textbf{Time-Invariance}
Consider the constant input $u(t) = 1$ and a shift $T$ with $0 < T < \alpha$, so that $y(t) = f(u(t)) = 1(t \leq \alpha )$. We thus have $y(t-T) = 1(t - T \leq \alpha ) = 1(t \leq \alpha + T )$. On the other hand, $f(u(t-T)) = f(1) = 1(t\leq \alpha)$. Since $f(u(t-T)) \neq y(t-T)$, the system is \textbf{time-variant}; this is also illustrated numerically at the end of this problem.
\item \textbf{Causality}
The output does not depend on future inputs, so the system is \textbf{causal}.
\end{enumerate}
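The time-variance argument in (b) can also be illustrated numerically. A minimal sketch (assuming the system simply truncates its input to zero after time $\alpha$) is:
\begin{verbatim}
import numpy as np

alpha, T = 1.0, 0.5            # truncation time and delay, with 0 < T < alpha
t = np.linspace(0.0, 3.0, 301)

def system(u):
    # y(t) = u(t) for t <= alpha, and y(t) = 0 afterwards
    return np.where(t <= alpha, u, 0.0)

u = np.ones_like(t)            # constant input u(t) = 1, so u(t - T) = 1 as well
y_delayed = np.where(t - T <= alpha, 1.0, 0.0)  # y(t - T) = 1(t <= alpha + T)
resp_to_delayed_input = system(u)               # f(u(t - T)) = f(1) = 1(t <= alpha)
print(np.array_equal(y_delayed, resp_to_delayed_input))  # False => time-variant
\end{verbatim}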
\section*{Problem 7}
Consider the following network
\begin{figure}[!htbp]
\begin{center}
\includegraphics[width=10cm]{fig_p7.png}
\end{center}
\caption{The circuit network}\label{p7_1}
\end{figure}
Applying Kirchhoff's current law at node A yields $C_2 \dot{x}_2 = x_3$, and at node B yields
$\frac{u-x_1}{R} = C_1 \dot{x}_1 + C_2 \dot{x}_2 = C_1 \dot{x}_1 + x_3 $. We thus have
\begin{align*}
\dot{x}_1 &= -\frac{1}{R C_1} x_1 - \frac{1}{C_1} x_3 + \frac{1}{R C_1} u\\
\dot{x}_2 &= \frac{1}{C_2} x_3
\end{align*}
Applying Kirchhoff's voltage law to the right-hand loop yields $x_1 - x_2 = L \dot{x}_3$, so the output (the voltage across the inductor) is
\begin{align*}
y &= L \dot{x}_3 = x_1 - x_2
\end{align*}
Choosing
$ \mathbf{x} =
\begin{bmatrix}
x_1\\
x_2\\
x_3
\end{bmatrix}
$ as state variables,
$u$ as input variable, and
$y$ as output variable gives the state space equations for the system
\begin{align*}
\mathbf{\dot{x}} &=
\begin{bmatrix}
-1/RC_1 & 0 & -1/C_1\\
0 & 0 & 1/C_2\\
1/L & -1/L & 0
\end{bmatrix}
\mathbf{x} +
\begin{bmatrix}
1/RC_1 \\
0\\
0
\end{bmatrix}
u\\
y &=
\begin{bmatrix}
1 & -1 & 0
\end{bmatrix}
\mathbf{x} + 0 \cdot u
\end{align*}
Assuming zero initial state and taking the Laplace transform of both sides of the state-space equations, we have
\begin{align*}
s\mathbf{X}(s) &= A\mathbf{X}(s) + BU(s)\\
Y(s) &= C\mathbf{X}(s)
\end{align*}
Therefore, the transfer function is
\begin{align*}
H(s) &= \frac{Y(s)}{U(s)}\\
&= \frac{C\mathbf{X}(s)}{U(s)}\\
&= C(sI-A)^{-1} B\\
&= \frac{\frac{1}{R C_1} s^2}{ s^3 + \frac{1}{R C_1} s^2 + \frac{1}{L}(\frac{1}{C_1} + \frac{1}{C_2}) s + \frac{1}{C_1 C_2 L R}}
\end{align*}
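The transfer function above can be double-checked symbolically; a short SymPy sketch (not part of the original solution) using the $A$, $B$, $C$ matrices derived for this network is:
\begin{verbatim}
import sympy as sp

s, R, L, C1, C2 = sp.symbols('s R L C1 C2', positive=True)
A = sp.Matrix([[-1/(R*C1), 0, -1/C1],
               [0, 0, 1/C2],
               [1/L, -1/L, 0]])
B = sp.Matrix([1/(R*C1), 0, 0])
C = sp.Matrix([[1, -1, 0]])

H = sp.simplify((C * (s*sp.eye(3) - A).inv() * B)[0, 0])
print(H)  # equal to H(s) above up to rearrangement; note the positive
          # numerator (1/(R*C1))*s**2
\end{verbatim}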
\section*{Problem 8}
Consider the discrete-time system represented by the difference equation
\begin{align*}
y(k+3) + 2 y(k+2) + 3 y(k+1) + y(k) &= u(k)
\end{align*}
Choosing $\mathbf{x}(k) =
\begin{bmatrix}
y(k+2)\\
y(k+1)\\
y(k)
\end{bmatrix}
$ as state variables, $u(k)$ as the input variable, and $y(k)$ as the output variable gives the following state-space equations
\begin{align*}
\mathbf{x}(k+1)
&= \begin{bmatrix}
y(k+3)\\
y(k+2)\\
y(k+1)
\end{bmatrix}\\
&= \begin{bmatrix}
-2 & -3 & -1\\
1 & 0 & 0\\
0 & 1 & 0
\end{bmatrix}
\mathbf{x}(k) +
\begin{bmatrix}
1\\
0\\
0
\end{bmatrix}
u(k)\\
y(k) &= \begin{bmatrix}
0 & 0 & 1
\end{bmatrix}
\mathbf{x}(k)
\end{align*}
or
\begin{align*}
\mathbf{x}(k+1) &= A \mathbf{x}(k) + B u(k)\\
y(k) &= C \mathbf{x}(k) + D u(k)
\end{align*}
where
$A = \begin{bmatrix}
-2 & -3 & -1\\
1 & 0 & 0\\
0 & 1 & 0
\end{bmatrix}$, $B =\begin{bmatrix}
1\\
0\\
0
\end{bmatrix} $, $C = \begin{bmatrix}
0 & 0 & 1
\end{bmatrix}$, and $D = 0$.
The transfer function can be obtained by directly applying the $z$-transform to both sides of the difference equation (assuming zero initial conditions):
\begin{align*}
Y(z) z^3 + 2 Y(z) z^2 + 3 Y(z) z + Y(z) &= U(z)
\end{align*}
So, the transfer function is
\begin{align*}
H(z) &= \frac{Y(z)}{U(z)}\\
&= \frac{1}{z^3 + 2 z^2 + 3 z + 1}
\end{align*}
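The realization can be verified by simulating the state-space model and the original difference equation side by side; a minimal Python sketch (with an arbitrary random input and zero initial conditions) follows:
\begin{verbatim}
import numpy as np

A = np.array([[-2.0, -3.0, -1.0],
              [ 1.0,  0.0,  0.0],
              [ 0.0,  1.0,  0.0]])
B = np.array([1.0, 0.0, 0.0])
C = np.array([0.0, 0.0, 1.0])

u = np.random.default_rng(0).standard_normal(50)  # arbitrary input sequence

# State-space simulation, x(0) = 0.
x, y_ss = np.zeros(3), []
for uk in u:
    y_ss.append(C @ x)
    x = A @ x + B * uk

# Direct simulation of y(k+3) = -2 y(k+2) - 3 y(k+1) - y(k) + u(k).
y = np.zeros(53)
for k in range(50):
    y[k + 3] = -2*y[k + 2] - 3*y[k + 1] - y[k] + u[k]

print(np.allclose(y_ss, y[:50]))  # True: the two models agree
\end{verbatim}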
\section*{Problem 9}
\begin{enumerate}[label=(\alph*)]
\item
Consider the transfer function
$$\hat{g}(s) = \frac{Y(s)}{U(s)} = \frac{k \omega_n ^2}{s^2 + 2 \xi \omega_n s + \omega_n ^2}$$
Cross-multiplying and taking the inverse Laplace transform of both sides gives the differential equation
\begin{align*}
\ddot{y} + 2 \xi \omega_n \dot{y} + \omega_n ^2 y = k \omega_n ^2 u
\end{align*}
By choosing $\mathbf{x} = \begin{bmatrix}
y\\
\dot{y}
\end{bmatrix}$ as state variables,
$u$ as the input variable and $y$ as the output variable (this realization is verified numerically at the end of this problem), we have
\begin{align*}
\mathbf{\dot{x}}
&= \begin{bmatrix}
\dot{y}\\
\ddot{y}
\end{bmatrix}\\
&= \begin{bmatrix}
0 & 1\\
-\omega_n ^2 & -2 \xi \omega_n
\end{bmatrix}
\mathbf{x} +
\begin{bmatrix}
0 \\
k \omega_n ^2
\end{bmatrix}
u\\
y &= \begin{bmatrix}
1 & 0
\end{bmatrix}
\mathbf{x} + 0 \cdot u
\end{align*}
\item
With the transfer function
$$\hat{g}(s) = \frac{Y(s)}{U(s)} = \frac{s + a}{s^2 + 2 \xi \omega_n s + \omega_n ^2}$$
the differential equation becomes
\begin{align*}
\ddot{y} + 2 \xi \omega_n \dot{y} + \omega_n ^2 y &= \dot{u} + a u
\end{align*}
Now, choose $\mathbf{x} = \begin{bmatrix}
y\\
\dot{y}\\
u\\
\end{bmatrix}$ as state variables, $\mathbf{u} =
\begin{bmatrix}
u\\
\dot{u}
\end{bmatrix}
$ as the input vector, and $y$ as the output variable. We thus have
\begin{align*}
\mathbf{\dot{x}}
&= \begin{bmatrix}
\dot{y}\\
\ddot{y}\\
\dot{u}\\
\end{bmatrix}\\
&= \begin{bmatrix}
0 & 1 & 0\\
-\omega_n ^2 & -2 \xi \omega_n & a\\
0 & 0 & 0
\end{bmatrix}
\mathbf{x} +
\begin{bmatrix}
0 & 0\\
0 & 1\\
0 & 1
\end{bmatrix}
\mathbf{u}\\
y &= \begin{bmatrix}
1 & 0 & 0
\end{bmatrix}
\mathbf{x} + 0 \cdot \mathbf{u}
\end{align*}
\end{enumerate}
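As a numerical check of the realization in part (a) (a sketch with arbitrary parameter values, not part of the required solution), SciPy's ss2tf recovers the intended transfer function:
\begin{verbatim}
import numpy as np
from scipy import signal

k, xi, wn = 2.0, 0.7, 3.0          # arbitrary values for k, xi, omega_n
A = [[0.0, 1.0], [-wn**2, -2*xi*wn]]
B = [[0.0], [k*wn**2]]
C = [[1.0, 0.0]]
D = [[0.0]]

num, den = signal.ss2tf(A, B, C, D)
print(num)  # approximately [[0, 0, k*wn**2]]  -> numerator k*omega_n^2
print(den)  # [1, 2*xi*wn, wn**2]              -> s^2 + 2*xi*omega_n*s + omega_n^2
\end{verbatim}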
\section*{Problem 10}
First, choose $\mathbf{x} =
\begin{bmatrix}
y_1\\
\dot{y_1}\\
y_2
\end{bmatrix}$
as state variables, $\mathbf{u} =
\begin{bmatrix}
u_1\\
u_2
\end{bmatrix}
$ as input variables, and $\mathbf{y} =
\begin{bmatrix}
y_1\\
y_2
\end{bmatrix}
$ as the output variables. The state-space equations of the system are
\begin{align*}
\mathbf{\dot{x}}
&= \begin{bmatrix}
\dot{y_1}\\
\ddot{y_1}\\
\dot{y_2}
\end{bmatrix}\\
& = \begin{bmatrix}
0 & 1 & 0\\
-k_2 & -k_1 & 0\\
0 & -k_5 & -k_4
\end{bmatrix}
\mathbf{x} +
\begin{bmatrix}
0 & 0\\
1 & k_3\\
k_6 & 0
\end{bmatrix}
\mathbf{u}\\
\mathbf{y} &= \begin{bmatrix}
1 & 0 & 0\\
0 & 0 & 1
\end{bmatrix}
\mathbf{x} + 0 \cdot \mathbf{u}
\end{align*}
\bibliographystyle{plain}
%\bibliography{bibliography.bib}
\end{document}