\documentclass{tufte-book}
\hypersetup{colorlinks}% uncomment this line if you prefer colored hyperlinks (e.g., for onscreen viewing)
%%
% Book metadata
\title{Physics I - Quantum Mechanics}
\author{Ragnamus}
%%
% If they're installed, use Bergamo and Chantilly from www.fontsite.com.
% They're clones of Bembo and Gill Sans, respectively.
%\IfFileExists{bergamo.sty}{\usepackage[osf]{bergamo}}{}% Bembo
%\IfFileExists{chantill.sty}{\usepackage{chantill}}{}% Gill Sans
%\usepackage{microtype}
%%
% For nicely typeset tabular material
\usepackage{booktabs}
%%
% For graphics / images
\usepackage{graphicx}
\setkeys{Gin}{width=\linewidth,totalheight=\textheight,keepaspectratio}
\graphicspath{{res/}}
% The fancyvrb package lets us customize the formatting of verbatim
% environments. We use a slightly smaller font.
\usepackage{fancyvrb}
\fvset{fontsize=\normalsize}
%%
% Prints argument within hanging parentheses (i.e., parentheses that take
% up no horizontal space). Useful in tabular environments.
\newcommand{\hangp}[1]{\makebox[0pt][r]{(}#1\makebox[0pt][l]{)}}
%%
% Prints an asterisk that takes up no horizontal space.
% Useful in tabular environments.
\newcommand{\hangstar}{\makebox[0pt][l]{*}}
%%
% Prints a trailing space in a smart way.
\usepackage{xspace}
%%
% Math packages
\usepackage{braket}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{mathtools}
%%
% Some shortcuts for Tufte's book titles. The lowercase commands will
% produce the initials of the book title in italics. The all-caps commands
% will print out the full title of the book in italics.
\newcommand{\vdqi}{\textit{VDQI}\xspace}
\newcommand{\ei}{\textit{EI}\xspace}
\newcommand{\ve}{\textit{VE}\xspace}
\newcommand{\be}{\textit{BE}\xspace}
\newcommand{\VDQI}{\textit{The Visual Display of Quantitative Information}\xspace}
\newcommand{\EI}{\textit{Envisioning Information}\xspace}
\newcommand{\VE}{\textit{Visual Explanations}\xspace}
\newcommand{\BE}{\textit{Beautiful Evidence}\xspace}
\newcommand{\TL}{Tufte-\LaTeX\xspace}
% Prints the month name (e.g., January) and the year (e.g., 2008)
\newcommand{\monthyear}{%
\ifcase\month\or January\or February\or March\or April\or May\or June\or
July\or August\or September\or October\or November\or
December\fi\space\number\year
}
% Prints an epigraph and speaker in sans serif, all-caps type.
\newcommand{\openepigraph}[2]{%
%\sffamily\fontsize{14}{16}\selectfont
\begin{fullwidth}
\sffamily\large
\begin{doublespace}
\noindent\allcaps{#1}\\% epigraph
\noindent\allcaps{#2}% author
\end{doublespace}
\end{fullwidth}
}
% Inserts a blank page
\newcommand{\blankpage}{\newpage\hbox{}\thispagestyle{empty}\newpage}
\usepackage{units}
% Typesets the font size, leading, and measure in the form of 10/12x26 pc.
\newcommand{\measure}[3]{#1/#2$\times$\unit[#3]{pc}}
% Macros for typesetting the documentation
\newcommand{\hlred}[1]{\textcolor{Maroon}{#1}}% prints in red
\newcommand{\hangleft}[1]{\makebox[0pt][r]{#1}}
\newcommand{\hairsp}{\hspace{1pt}}% hair space
\newcommand{\hquad}{\hskip0.5em\relax}% half quad space
\newcommand{\TODO}{\textcolor{red}{\bf TODO!}\xspace}
\newcommand{\na}{\quad--}% used in tables for N/A cells
\providecommand{\XeLaTeX}{X\lower.5ex\hbox{\kern-0.15em\reflectbox{E}}\kern-0.1em\LaTeX}
\newcommand{\tXeLaTeX}{\XeLaTeX\index{XeLaTeX@\protect\XeLaTeX}}
% \index{\texttt{\textbackslash xyz}@\hangleft{\texttt{\textbackslash}}\texttt{xyz}}
\newcommand{\tuftebs}{\symbol{'134}}% a backslash in tt type in OT1/T1
\newcommand{\doccmdnoindex}[2][]{\texttt{\tuftebs#2}}% command name -- adds backslash automatically (and doesn't add cmd to the index)
\newcommand{\doccmddef}[2][]{%
\hlred{\texttt{\tuftebs#2}}\label{cmd:#2}%
\ifthenelse{\isempty{#1}}%
{% add the command to the index
\index{#2 command@\protect\hangleft{\texttt{\tuftebs}}\texttt{#2}}% command name
}%
{% add the command and package to the index
\index{#2 command@\protect\hangleft{\texttt{\tuftebs}}\texttt{#2} (\texttt{#1} package)}% command name
\index{#1 package@\texttt{#1} package}\index{packages!#1@\texttt{#1}}% package name
}%
}% command name -- adds backslash automatically
\newcommand{\doccmd}[2][]{%
\texttt{\tuftebs#2}%
\ifthenelse{\isempty{#1}}%
{% add the command to the index
\index{#2 command@\protect\hangleft{\texttt{\tuftebs}}\texttt{#2}}% command name
}%
{% add the command and package to the index
\index{#2 command@\protect\hangleft{\texttt{\tuftebs}}\texttt{#2} (\texttt{#1} package)}% command name
\index{#1 package@\texttt{#1} package}\index{packages!#1@\texttt{#1}}% package name
}%
}% command name -- adds backslash automatically
\newcommand{\docopt}[1]{\ensuremath{\langle}\textrm{\textit{#1}}\ensuremath{\rangle}}% optional command argument
\newcommand{\docarg}[1]{\textrm{\textit{#1}}}% (required) command argument
\newenvironment{docspec}{\begin{quotation}\ttfamily\parskip0pt\parindent0pt\ignorespaces}{\end{quotation}}% command specification environment
\newcommand{\docenv}[1]{\texttt{#1}\index{#1 environment@\texttt{#1} environment}\index{environments!#1@\texttt{#1}}}% environment name
\newcommand{\docenvdef}[1]{\hlred{\texttt{#1}}\label{env:#1}\index{#1 environment@\texttt{#1} environment}\index{environments!#1@\texttt{#1}}}% environment name
\newcommand{\docpkg}[1]{\texttt{#1}\index{#1 package@\texttt{#1} package}\index{packages!#1@\texttt{#1}}}% package name
\newcommand{\doccls}[1]{\texttt{#1}}% document class name
\newcommand{\docclsopt}[1]{\texttt{#1}\index{#1 class option@\texttt{#1} class option}\index{class options!#1@\texttt{#1}}}% document class option name
\newcommand{\docclsoptdef}[1]{\hlred{\texttt{#1}}\label{clsopt:#1}\index{#1 class option@\texttt{#1} class option}\index{class options!#1@\texttt{#1}}}% document class option name defined
\newcommand{\docmsg}[2]{\bigskip\begin{fullwidth}\noindent\ttfamily#1\end{fullwidth}\medskip\par\noindent#2}
\newcommand{\docfilehook}[2]{\texttt{#1}\index{file hooks!#2}\index{#1@\texttt{#1}}}
\newcommand{\doccounter}[1]{\texttt{#1}\index{#1 counter@\texttt{#1} counter}}
% MATH STUFF
\newtheorem{axiom}{Axiom}
\newtheorem{theorem}{Theorem}
\newtheorem{definition}{Definition}
\newtheorem{method}{Method}
\DeclarePairedDelimiterX{\norm}[1]{\lVert}{\rVert}{#1}
\newcommand{\commutator}[3][1.2em]{[ \makebox[#1]{$#2$} , \makebox[#1]{$#3$} ]}
\newcommand{\infint}{\int^{\infty}_{-\infty}}
\immediate\write18{bibtex \jobname}
% Generates the index
\usepackage{makeidx}
\makeindex
\begin{document}
% Front matter
\frontmatter
% r.1 blank page
\blankpage
% v.2 epigraphs
% r.3 full title page
\maketitle
% v.4 copyright page
% r.5 contents
\tableofcontents
\listoffigures
\listoftables
% r.7 dedication
% r.9 introduction
\cleardoublepage
\chapter*{Introduction}
This is based on notes from various quantum mechanics books.\cite{Shankar1994}
\chapter*{Notation}
Braket notation will be used, along with the summation convention. Typically scalars are represented by lower-case characters; indices are also lower case, usually $i$; and vectors are upper case. Operators are upper-case Greek letters and their corresponding vectors are lower-case Greek.
%%
% Start the main matter (normal chapters)
\mainmatter
\chapter{Vectors and Dirac Notation}
\label{ch:braket}
Vectors in quantum mechanics will be written in Dirac's braket notation. Vectors occupy a linear vector space $\mathcal{V}$. The following axioms apply:
\begin{axiom}
\label{ax:1}
The addition of vectors $\ket{A} + \ket{B}$ produces a new vector that lies in the vector space $\mathcal{V}$.
\end{axiom}
\begin{axiom}
\label{ax:2}
The scalar multiplication of vector $\ket{A}$ with scalar $a$ produces a new vector that lies in the vector space $\mathcal{V}$.
\end{axiom}
\begin{axiom}
\label{ax:3}
Scalar multiplication is distributive both ways:
\[
a(\ket{A} + \ket{B}) = a\ket{A} + a\ket{B}
\]
\[
(a+b)\ket{A} = a\ket{A} + b\ket{A}
\]
\end{axiom}
\begin{axiom}
\label{ax:4}
Scalar multiplication is associative, $a(b\ket{A}) = ab\ket{A}$.
\end{axiom}
\begin{axiom}
\label{ax:5}
Addition is commutative, $\ket{A} + \ket{B} = \ket{B} + \ket{A}$.
\end{axiom}
\begin{axiom}
\label{ax:6}
Addition is associative, $\ket{A} + (\ket{B} + \ket{C}) = (\ket{A} + \ket{B}) + \ket{C}$.
\end{axiom}
\begin{axiom}
\label{ax:7}
There exists a null vector, called $\ket{0}$, such that $\ket{A} + \ket{0} = \ket{A}$.
\end{axiom}
\begin{axiom}
\label{ax:8}
There exists an inverse vector for each vector such that $\ket{A} + \ket{-A} = \ket{0}$.
\end{axiom}
\begin{definition}
\label{def:1}
A set of vectors $\{\ket{i}\}$ is linearly independent if the only way to satisfy
\[
a_i\ket{i} = \ket{0}
\]
is to set all coefficients $a_i$ to zero.
\end{definition}
One can choose a set of linearly independent vectors as basis vectors and write any vector in the space in terms of them:
\begin{equation}
\label{eq:1}
\ket{A} = a_i\ket{i}
\end{equation}
\begin{definition}
\label{def:2}
The adjoint is an operation that acts on vectors, numbers and operators. Applying the adjoint twice returns the original vector, number or operator. It is represented by the dagger symbol and satisfies the following axioms.
\end{definition}
\begin{axiom}
\label{ax:9}
The adjoint of a bra vector is a ket vector. The adjoint of a ket vector is a bra vector: $\ket{A}^{\dag} = \bra{A}$.
\end{axiom}
\begin{axiom}
\label{ax:10}
The adjoint of a number is its complex conjugate, $a^{\dag} = a^*$.
\end{axiom}
\begin{definition}
\label{def:3}
The inner product is a function that takes in a bra and a ket vector in that order, and returns a scalar. It satisfies the following axioms
\end{definition}
\begin{axiom}
\label{ax:11}
$\braket{A|B} = \braket{B|A}^*$
\end{axiom}
\begin{axiom}
\label{ax:12}
The inner product of a vector with itself is greater than zero, with equality only for the null vector.
\end{axiom}
\begin{axiom}
\label{ax:13}
The inner product satisfies distributivity, $\bra{C}(a\ket{A} + b\ket{B}) \equiv \braket{C|aA + bB} = a\braket{C|A} + b\braket{C|B}$.
\end{axiom}
\begin{definition}
\label{def:4}
The norm of a vector is equal to the square root of the inner product of the vector with itself, $\norm{A} = \sqrt{\braket{A|A}}$.
\end{definition}
\begin{definition}
\label{def:5}
The Kronecker delta $\delta_{ij}$ takes in two indices and returns 1 if they are equal and 0 otherwise. It is useful because, for an orthonormal basis, the inner product of two basis vectors is exactly the Kronecker delta, $\braket{i|j} = \delta_{ij}$.
\end{definition}
One can write the inner product between two vectors as
\begin{equation}
\label{eq:2}
\braket{A|B} = a_i^*b_i
\end{equation}
\begin{proof}
Using axiom \ref{ax:9} and equation \ref{eq:1},
\[\braket{A|B} = a_i^*b_j\braket{i|j}\]
By definition \ref{def:5}, one gets equation \ref{eq:2}
\end{proof}
To get a coefficient of a vector, take the inner product between the corresponding basis vector and the vector:
\begin{equation}
\label{eq:3}
\braket{j|A} = a_j
\end{equation}
\begin{proof}
Using equation \ref{eq:1} and definition \ref{def:5},
\[\braket{j|A} = a_i\braket{j|i} = a_j\]
\end{proof}
The Gram-Schmidt process is a way of orthonormalizing a set of vectors in an inner product space. One normalizes the first vector; then, for each subsequent vector, one subtracts its projections onto the vectors already produced and normalizes the result. The method is:
\begin{method}
\label{meth:1}
Let $\ket{1}, \ket{2}, \ldots, \ket{n}$ be a linearly independent basis and let $\ket{A}, \ket{B}, \ldots, \ket{N}$ be the orthonormalized basis. For each vector $\ket{n}$:
\begin{enumerate}
\item $\ket{N'} = \ket{n} - \ket{A}\braket{A|n} - \ldots - \ket{N-1}\braket{N-1|n}$
\item $\ket{N} = \frac{\ket{N'}}{\norm{N'}}$
\end{enumerate}
\end{method}
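As a minimal illustration of the method, for just two linearly independent vectors $\ket{1}$ and $\ket{2}$ the process reads
\[
\ket{A} = \frac{\ket{1}}{\norm{1}}, \qquad
\ket{B'} = \ket{2} - \ket{A}\braket{A|2}, \qquad
\ket{B} = \frac{\ket{B'}}{\norm{B'}}
\]
so that $\braket{A|B} = 0$ and $\braket{A|A} = \braket{B|B} = 1$ by construction.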
An operator transforms vectors and can be represented as a matrix: it takes in a vector and outputs a new vector. Operators are linear and thus obey the standard linearity rules.
\begin{definition}
\label{def:6}
The commutator takes in two operators and returns 0 if the operators commute. It is defined by $\commutator{\Omega}{\Lambda} \equiv \Omega\Lambda - \Lambda\Omega$.
\end{definition}
\begin{definition}
\label{def:7}
The inverse of an operator works like any other inverse, $\Omega\Omega^{-1} = \Omega^{-1}\Omega = I$.
\end{definition}
Additionally,
\begin{equation}
\label{eq:4}
(\Omega\Lambda)^{-1} = \Lambda^{-1}\Omega^{-1}
\end{equation}
Operators can act on basis vectors; the standard construction is bra, then operator, then ket. If a basis vector is transformed as $\ket{j'} = \Omega\ket{j}$, then the inner product between a basis vector and a transformed basis vector is just a component of the operator matrix:
\begin{equation}
\label{eq:5}
\braket{i|j'} = \braket{i|\Omega|j} = \Omega_{ij}
\end{equation}
Vector transformation can be expressed as
\begin{equation}
\label{eq:6}
v_i' = \Omega_{ij}v_j
\end{equation}
\begin{definition}
\label{def:8}
Define the identity operator as one that operates on vectors and leaves them unchanged. Its matrix form is just the Kronecker delta. One can also write it in terms of ket and bra vectors (since a ket-bra composition is an operator): $I = \ket{i}\bra{i}$, with summation over $i$ implied.
\end{definition}
\begin{definition}
\label{def:9}
Define the projection operator $P_i = \ket{i}\bra{i}$ (no sum) as one that projects a vector onto the direction of a basis vector: $P_i\ket{A} = \ket{i}\braket{i|A} = a_i\ket{i}$, so applying a projection operator is the same as taking a component! The sum of the projection operators gives the identity operator, $I = \sum_i P_i$.
\end{definition}
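Two standard properties follow directly from the orthonormality of the basis (no sum over repeated indices here):
\[
P_iP_j = \ket{i}\braket{i|j}\bra{j} = \delta_{ij}P_j, \qquad P_i^{\dag} = P_i
\]
so each $P_i$ is Hermitian and idempotent, $P_i^2 = P_i$.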
A result of definition \ref{def:2} is that
\begin{equation}
\label{eq:7}
(\Omega^{\dag})_{ij} = \Omega_{ji}^*
\end{equation}
and
\begin{equation}
\label{eq:8}
(\Omega\Lambda)^{\dag} = \Lambda^{\dag}\Omega^{\dag}
\end{equation}
\begin{definition}
\label{def:10}
A Hermitian operator is self-adjoint: the adjoint of a Hermitian operator equals the original operator, $\Omega^{\dag} = \Omega$. The adjoint of an anti-Hermitian operator equals the negative of the original operator, $\Omega^{\dag} = -\Omega$. One can write any operator as a sum of Hermitian and anti-Hermitian parts: the Hermitian part is the sum of the operator and its adjoint over two, and the anti-Hermitian part is their difference over two. The eigenvalues of a Hermitian operator are real.
\end{definition}
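Written out, the decomposition just described is
\[
\Omega = \frac{\Omega + \Omega^{\dag}}{2} + \frac{\Omega - \Omega^{\dag}}{2}
\]
where the first term is Hermitian and the second anti-Hermitian, as is easily checked by taking the adjoint of each term.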
\begin{definition}
\label{def:11}
A unitary operator acting on its adjoint produces the identity operator, $UU^{\dag} = U^{\dag}U = I$. Hence it preserves the inner product, and the columns of its matrix representation form an orthonormal set of vectors.
\end{definition}
When an operator acts on a vector, it is referred to as an active transformation. When an operator is sandwiched between two unitary operators, it is referred to as a passive transformation. Finally, since operators are like matrices, they have eigenvalues and eigenvectors. Hermitian matrices can be diagonalized: their normalized eigenvectors form the columns of a unitary matrix, and the passive transformation of a Hermitian operator by this unitary matrix diagonalizes it. Once diagonalized, the diagonal components are the eigenvalues of the operator. Two commuting Hermitian operators can be simultaneously diagonalized. A function of a Hermitian operator can also be diagonalized, and its diagonal components are simply the function applied to each diagonal component of the original diagonalized matrix.
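Concretely, if the columns of $U$ are the normalized eigenvectors of a Hermitian operator $\Omega$ with eigenvalues $\omega_1, \ldots, \omega_n$, then
\[
U^{\dag}\Omega U = \begin{pmatrix} \omega_1 & & \\ & \ddots & \\ & & \omega_n \end{pmatrix}, \qquad
U^{\dag}f(\Omega)U = \begin{pmatrix} f(\omega_1) & & \\ & \ddots & \\ & & f(\omega_n) \end{pmatrix}
\]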
We can generalize all of this to continuous spaces: sums change to integrals and the Kronecker delta becomes the Dirac delta function, defined as follows.
\begin{definition}
\label{def:12}
The Dirac delta function $\delta(x - x')$ takes in $x$ and $x'$ and returns 0 unless they are equal; its integral is equal to one.
\end{definition}
This acts on functions like
\begin{equation}
\label{eq:dd}
\int\delta(x-x')f(x)dx = f(x')
\end{equation}
Derivatives of such a function have a straightforward operational result (valid under an integral over $x'$):
\begin{equation}
\label{eq:9}
\delta^{(n)}(x-x') = \delta(x-x')\frac{d^n}{dx'^n}
\end{equation}
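For example, combining equations \ref{eq:dd} and \ref{eq:9} with $n = 1$,
\[
\infint \delta'(x-x')f(x')\,dx' = \infint \delta(x-x')\frac{df}{dx'}\,dx' = \frac{df}{dx}
\]
which is exactly the form used in the derivations below.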
In the continuous case, components of vectors along basis vectors become functions of continuous variables, operators acting on vectors become operators acting on functions, and a function's argument is picked out by integration against a Dirac delta. It is worth defining Fourier transforms at this point.
\begin{equation}
\label{eq:10}
\widetilde{f}(k) = \frac{1}{\sqrt{2\pi}} \infint e^{-ikx}f(x)dx
\end{equation}
\begin{equation}
\label{eq:11}
f(x) = \frac{1}{\sqrt{2\pi}} \infint e^{ikx}\widetilde{f}(k)dk
\end{equation}
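Substituting one transform into the other yields a useful integral representation of the Dirac delta:
\[
f(x) = \frac{1}{2\pi}\infint\infint e^{ik(x-x')}f(x')\,dx'\,dk
\quad\Rightarrow\quad
\delta(x-x') = \frac{1}{2\pi}\infint e^{ik(x-x')}\,dk
\]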
\begin{definition}
\label{def:13}
The differential operator is labeled $D$ and, unsurprisingly, it differentiates a function: $D\ket{f} = \ket{\frac{df}{dx}}$. Here $x$ is a position-space variable.
\end{definition}
Consider the combination $\braket{x|D|f}$. The operator acts on the ket by definition \ref{def:13}, and projecting the result onto the basis bra gives $df(x)/dx$. The operator's matrix elements between two position basis vectors $\bra{x}$ and $\ket{x'}$ follow by inserting the identity into $\braket{x|D|f}$.
\begin{equation}
\label{eq:12}
\braket{x|D|x'} = D_{xx'} = \delta'(x-x') = \delta(x-x')\frac{d}{dx'}
\end{equation}
This operator is not particularly useful because it is not Hermitian. We want to deal with Hermitian operators because they give measurable results; such operators are often called observables.
\begin{definition}
\label{def:14}
The operator K is defined by $K = -iD$ and is Hermitian. This operator generates k-space.
\end{definition}
\begin{definition}
\label{def:15}
The operator X generates x-space. It has the effect of multiplying $f(x)$ by $x$.
\end{definition}
These two operators also have the eigenvalue equations
\begin{equation}
\label{eq:13}
X\ket{x} = x\ket{x}
\end{equation}
\begin{equation}
\label{eq:14}
K\ket{k} = k\ket{k}
\end{equation}
It does not take much work to deduce that
\begin{equation}
\label{eq:15}
\braket{k|K|k'} = k'\braket{k|k'} = k'\delta(k-k')
\end{equation}
\begin{equation}
\label{eq:16}
\braket{x|X|x'} = x'\braket{x|x'} = x'\delta(x-x')
\end{equation}
To evaluate matrix elements that mix the two bases, we insert as many resolutions of the identity as necessary. For example
\begin{align*}
\braket{x|K|k} &= k\braket{x|k}\\
\int\braket{x|K|x'}\braket{x'|k}dx' & = k\psi_k(x) & \ref{def:8} \\
\int -i\delta(x-x')\frac{d}{dx'}\psi_k(x')dx' & = k\psi_k(x) & \ref{eq:12} \\
-i\frac{d}{dx}\psi_k(x) & = k\psi_k(x) & \ref{eq:dd} \\
\psi_k(x) &= Ae^{ikx}
\end{align*}
The last step immediately follows and defines $\braket{x|k}$. After normalization,
\begin{equation}
\label{eq:17}
\braket{x|k} = \frac{1}{\sqrt{2\pi}}e^{ikx}
\end{equation}
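As a check, this normalization reproduces the continuous orthonormality relation:
\[
\braket{k|k'} = \infint\braket{k|x}\braket{x|k'}dx = \frac{1}{2\pi}\infint e^{i(k'-k)x}dx = \delta(k-k')
\]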
\begin{align*}
\braket{k|X|k'} & = \infint\infint\braket{k|x}\braket{x|X|x'}\braket{x'|k'}dxdx' & \ref{def:8} \\
& = \frac{1}{2\pi}\infint\infint e^{-ikx}x\delta(x-x')e^{ik'x'}dxdx' & \ref{eq:17}, \ref{eq:16} \\
& = \frac{1}{2\pi}\infint e^{-ikx}xe^{ik'x}dx & \ref{eq:dd} \\
&= i\frac{d}{dk}\left(\frac{1}{2\pi}\infint e^{i(k'-k)x}dx\right) \\
&= i\delta'(k-k')
\end{align*}
Thus the relations
\begin{equation}
\label{eq:18}
\braket{k|X|k'} = i\delta'(k-k')
\end{equation}
\begin{equation}
\label{eq:19}
\braket{x|K|x'} = -i\delta'(x-x')
\end{equation}
\begin{equation}
\label{eq:20}
\braket{x|X|f} = xf(x)
\end{equation}
\begin{equation}
\label{eq:21}
\braket{x|K|f} = -i\frac{df(x)}{dx}
\end{equation}
\begin{equation}
\label{eq:22}
\braket{x|XK|f} = -ix\frac{df(x)}{dx}
\end{equation}
\begin{equation}
\label{eq:23}
\braket{x|KX|f} = -i\frac{d}{dx}\left(xf(x)\right)
\end{equation}
\begin{equation}
\label{eq:24}
\commutator{X}{K} = iI
\end{equation}
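Equation \ref{eq:24} follows by subtracting \ref{eq:23} from \ref{eq:22}:
\[
\braket{x|\commutator{X}{K}|f} = -ix\frac{df}{dx} + i\frac{d}{dx}\left(xf(x)\right) = if(x) = \braket{x|iI|f}
\]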
\begin{equation}
\label{eq:25}
X\ket{f(x)} = \ket{xf(x)}
\end{equation}
\begin{equation}
\label{eq:26}
K\ket{f(x)} = \ket{-i\frac{df}{dx}}
\end{equation}
\begin{equation}
\label{eq:27}
X\ket{g(k)} = \ket{i\frac{dg}{dk}}
\end{equation}
\begin{equation}
\label{eq:28}
K\ket{g(k)} = \ket{kg(k)}
\end{equation}
\chapter{The Postulates of Quantum Mechanics}
\begin{definition}
\label{def:16}
The momentum is related to the $k$ defined earlier by $p = \hbar k$.
\end{definition}
There are four postulates of quantum mechanics.
\begin{definition}
\label{def:17}
Postulate 1. The state of a particle is represented by a vector $\ket{\psi(t)}$ in a Hilbert space.
\end{definition}
\begin{definition}
\label{def:18}
Postulate 2. The independent variables $x$ and $p$ of classical mechanics are represented by Hermitian operators $X$ and $P$ with the following matrix elements in the eigenbasis of $X$
\[\braket{x|X|x'} = x\delta(x-x')\]
\[\braket{x|P|x'} = -i\hbar\delta'(x-x')\]
The operators corresponding to dependent variables $\omega(x, p)$ are given Hermitian operators
\[\Omega(X, P) = \omega(x\rightarrow X, p\rightarrow P)\]
\end{definition}
\begin{definition}
\label{def:19}
Postulate 3. If the particle is in a state $\ket{\psi}$, measurement of the variable corresponding to $\Omega$ will yield one of the eigenvalues $\omega$ with probability $P(\omega)\propto|\braket{\omega|\psi}|^2$. The state of the system will change from $\ket{\psi}$ to $\ket{\omega}$ as a result of the measurement.
\end{definition}
\begin{definition}
\label{def:20}
Postulate 4. The state vector obeys the Schr\"odinger equation
\[i\hbar\frac{d}{dt}\ket{\psi(t)} = H\ket{\psi(t)}\]
where $H$ is the quantum Hamiltonian operator.
\end{definition}
We can directly use these postulates in probability problems. At any given time, instead of having definite states, entities are in a superposition of states. Given a state, one can find the possible results of measuring some observable at that moment or any subsequent moment, along with the associated probabilities.
\begin{method}
\label{meth:2}
\begin{enumerate}
\item Construct an operator built out of the quantum equivalents to the classical position and momentum terms. This is due to Ehrenfest's Theorem.
\item Find the orthonormal eigenvectors $\ket{\omega_i}$ and eigenvalues $\omega_i$ of $\Omega$.
\item Expand $\ket{\psi}$ in this basis, $\ket{\psi} = \sum_i\ket{\omega_i}\braket{\omega_i|\psi}$
\item The probability $P(\omega)$ that the result $\omega$ will be obtained is proportional to $|\braket{\omega|\psi}|^2$. In terms of the projection operator $P_{\omega} = \ket{\omega}\bra{\omega}$, this becomes $\braket{\psi|P_{\omega}|\psi} = \braket{\psi|P_{\omega}P_{\omega}|\psi} = \braket{P_{\omega}\psi|P_{\omega}\psi}$.
\end{enumerate}
\end{method}
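As a minimal illustrative sketch (the state here is hypothetical, not taken from the text), suppose $\ket{\psi} = \frac{1}{\sqrt{2}}(\ket{\omega_1} + \ket{\omega_2})$ with $\omega_1 \neq \omega_2$. Then
\[
P(\omega_1) = |\braket{\omega_1|\psi}|^2 = \frac{1}{2}, \qquad
P(\omega_2) = |\braket{\omega_2|\psi}|^2 = \frac{1}{2}
\]
and a measurement of $\Omega$ leaves the system in $\ket{\omega_1}$ or $\ket{\omega_2}$ accordingly.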
To get absolute probabilities out of all the relative probabilities, just normalize them. If a state is already an eigenstate, a measurement simply returns the corresponding eigenvalue. A superposition of states results in a distribution of probabilities. For a different variable, the process is repeated for that variable. Since the bases of two variables $\Omega$ and $\Lambda$ are generally different, the following is necessary. If we are working in the $\Omega$ basis,
\begin{equation}
\label{eq:29}
\ket{\psi} = \sum_i\ket{\omega_i}\braket{\omega_i|\psi}
\end{equation}
and $P(\omega_i) = |\braket{\omega_i|\psi}|^2$. If we want $P(\lambda_i)$, we take the operator $\Lambda$, which in this basis is the matrix with elements $\Lambda_{ij} = \braket{\omega_i|\Lambda|\omega_j}$, find its eigenvectors $\ket{\lambda_i}$, and take the inner product $\braket{\lambda_i|\psi}$ in this basis
\begin{equation}
\label{eq:30}
\braket{\lambda_i|\psi} = \sum_j\braket{\lambda_i|\omega_j}\braket{\omega_j|\psi}
\end{equation}
If a classical variable is built from non-commuting operators, its quantization is ambiguous; for example, $\omega = xp$ could map to $XP$ or $PX$. In this case, we take the symmetrized combination $\Omega = (XP + PX)/2$, which resolves the ambiguity and makes the operator Hermitian. If an eigenvalue is degenerate, multiple eigenvectors share it, say $\omega_1 = \omega_2 = \omega$. In this case the probability is found by selecting some orthonormal basis $\ket{\omega, 1}$ and $\ket{\omega, 2}$ for the degenerate eigenspace. Then
\begin{equation}
\label{eq:31}
P(\omega) = |\braket{\omega, 1|\psi}|^2 + |\braket{\omega, 2|\psi}|^2
\end{equation}
In the case of continuous variables, the state is expanded over an integral instead of a sum. If the variable has no classical counterpart, finding the operator is a matter of intuition and semi-classical reasoning. Since we are dealing with probabilities, it makes sense to define expectation values and variances. The expectation value is defined
\begin{equation}
\label{eq:32}
\langle\Omega\rangle = \sum_i P(\omega_i)\omega_i = \sum_i\braket{\psi|\omega_i}\braket{\omega_i|\psi}\omega_i
\end{equation}
However since $\omega_i\ket{\omega_i} = \Omega\ket{\omega_i}$,
\begin{equation}
\label{eq:33}
\langle\Omega\rangle = \sum_i\braket{\psi|\Omega|\omega_i}\braket{\omega_i|\psi} = \braket{\psi|\Omega|\psi}
\end{equation}
The variance is defined
\begin{equation}
\label{eq:34}
(\Delta\Omega)^2 = \langle(\Omega-\langle\Omega\rangle)^2\rangle
\end{equation}
In the discrete case, this is
\begin{equation}
\label{eq:35}
(\Delta\Omega)^2 = \sum_iP(\omega_i)(\omega_i-\langle\Omega\rangle)^2
\end{equation}
The continuous case trivially follows
\begin{equation}
\label{eq:36}
(\Delta\Omega)^2 = \int P(\omega)(\omega-\langle\Omega\rangle)^2d\omega
\end{equation}
and given the state and the operator
\begin{equation}
\label{eq:37}
\Delta\Omega = [\braket{\psi|(\Omega - \langle\Omega\rangle)^2|\psi}]^{1/2}
\end{equation}
Now we attempt to recreate a multiple filtering process that takes a state and produces a new state with well-defined eigenvalues for two variables. The issue is that normally we find eigenvalues for each variable separately, but every measurement changes the state. The simple way to bypass this issue is to require that the eigenstates be the same for the two variables; that is, the variables themselves commute. If they do, they are compatible, and measuring one does not affect the measurement of the other.
In practice it is more likely that the members of an ensemble do not all share the same state. We effectively want to build a wave packet rather than ordinary plane waves.
\begin{definition}
\label{def:21}
The Gaussian distribution is defined in a few ways. It is the distribution with maximum entropy for a given mean and variance. It also minimizes the uncertainty of quantum mechanical systems. It can be written (calling the mean $\mu$ and the standard deviation $\sigma$)
\[
f(x|\mu,\sigma^2) = Ae^{-\frac{(x-\mu)^2}{2\sigma^2}}
\]
\end{definition}
If a state is Gaussian, normalizing it takes some work. The Gaussian integral is standard; the trick is to convert the one-dimensional problem into a two-dimensional problem in polar coordinates
\begin{equation}
\label{eq:gauss}
\infint e^{-ax^2}dx = \sqrt{\pi/a}
\end{equation}
\begin{proof}
Consider
\[
I(a) = \infint e^{-ax^2}dx
\]
Now square this
\[
I(a)^2 = \infint e^{-ax^2}dx \infint e^{-ay^2}dy = \infint\infint e^{-a(x^2+y^2)}dxdy
\]
Switching to polar coordinates,
\[
I(a)^2 = \int^{\infty}_{0}\int^{2\pi}_{0}e^{-a\rho^2}\rho \,d\phi\, d\rho = 2\pi\cdot\frac{1}{2a} = \frac{\pi}{a}
\]
\end{proof}
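A useful corollary follows by differentiating \ref{eq:gauss} with respect to $a$; it supplies the second moment needed for the variance computations below:
\[
\infint x^2e^{-ax^2}dx = -\frac{d}{da}\infint e^{-ax^2}dx = \frac{1}{2a}\sqrt{\frac{\pi}{a}}
\]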
Assume a Gaussian wavefunction
\begin{equation}
\label{eq:38}
\psi(x) = \braket{x|\psi} = Ae^{-\frac{(x-\mu)^2}{2\sigma^2}}
\end{equation}
Normalizing,
\begin{equation}
\label{eq:39}
1 = \braket{\psi|\psi} = \infint \braket{\psi|x}\braket{x|\psi}dx = \infint|\psi(x)|^2dx
\end{equation}
\begin{equation}
\label{eq:40}
1 = \infint A^2e^{-\frac{(x-\mu)^2}{\sigma^2}}dx = A^2(\pi\sigma^2)^{1/2}
\end{equation}
Hence
\begin{equation}
\label{eq:41}
\psi(x) = \frac{1}{(\pi\sigma^2)^{1/4}}e^{-\frac{(x-\mu)^2}{2\sigma^2}}
\end{equation}
and the probability of finding a particle between $x$ and $x + dx$ is
\begin{equation}
\label{eq:42}
P(x)dx = |\psi(x)|^2dx = \frac{1}{(\pi\sigma^2)^{1/2}}e^{-\frac{(x-\mu)^2}{\sigma^2}}dx
\end{equation}
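From \ref{eq:42} and the Gaussian moments above, one can verify the spread directly (shifting to $u = x - \mu$):
\[
(\Delta X)^2 = \frac{1}{(\pi\sigma^2)^{1/2}}\infint u^2e^{-u^2/\sigma^2}du = \frac{1}{(\pi\sigma^2)^{1/2}}\cdot\frac{\sigma^2}{2}(\pi\sigma^2)^{1/2} = \frac{\sigma^2}{2}
\]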
Hence the mean is $\mu$ as expected and the standard deviation is $\sigma/\sqrt{2}$. To find, say, $\braket{p|\psi}$, we write
\begin{align*}
\braket{p|\psi} &= \int\braket{p|x}\braket{x|\psi}dx \\
&= \int\psi^*_p(x)\psi(x)dx \\
&= \infint \frac{e^{-ipx/\hbar}}{(2\pi\hbar)^{1/2}}\frac{e^{-(x-\mu)^2/2\sigma^2}}{(\pi\sigma^2)^{1/4}}dx \\
&= \left(\frac{\sigma^2}{\pi\hbar^2}\right)^{1/4}e^{-ip\mu/\hbar}e^{-p^2\sigma^2/2\hbar^2}
\end{align*}
The modulus of $\psi(p)$ is a Gaussian with standard deviation $\hbar/(\sigma\sqrt{2})$ and zero mean. Hence the famous uncertainty relation, which in the Gaussian case holds with equality:
\begin{equation}
\label{eq:43}
\sigma_x\sigma_p = \hbar/2
\end{equation}
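For general states the product is bounded below, the Gaussian being the minimum-uncertainty state:
\[
\sigma_x\sigma_p \geq \frac{\hbar}{2}
\]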
States evolve in time according to the Schr\"odinger equation:
\begin{equation}
\label{eq:tdse}
i\hbar\frac{d}{dt}\ket{\psi(t)} = H\ket{\psi(t)}
\end{equation}
For a given Hamiltonian $H = T + U$, we replace the classical expression with the equivalent quantum one, substitute it in, and solve. If $H$ has no explicit time dependence, the equation is
\begin{equation}
\label{eq:s1}
i\hbar\ket{\dot{\psi}} = H\ket{\psi}
\end{equation}
The approach is to find the eigenvalues and eigenvectors of the Hamiltonian and then construct the propagator $U(t)$ in terms of these. Once we have $U(t)$, we can write
\begin{equation}
\label{eq:s2}
\ket{\psi(t)} = U(t)\ket{\psi(0)}
\end{equation}
Since the Schr\"odinger equation is first order in time, the initial state is sufficient; the initial value of the derivative is not required. Hence we solve the time-independent Schr\"odinger equation (which gives us the eigenkets and eigenvalues)
\begin{equation}
\label{eq:tise}
H\ket{E} = E\ket{E}
\end{equation}
Assuming we find these, we expand the state
\begin{equation}
\label{eq:s3}
\ket{\psi(t)} = \sum_E\ket{E}\braket{E|\psi(t)} \equiv \sum_E a_E(t)\ket{E}
\end{equation}
The equation for $a_E(t)$ follows if we act on both sides with $i\hbar\partial/\partial t - H$
\begin{equation}
0 = (i\hbar\partial/\partial t - H)\ket{\psi(t)} = \sum(i\hbar\dot{a}_E - Ea_E)\ket{E}
\end{equation}
Hence, since the kets are linearly independent,
\begin{equation}
\label{eq:s4}
i\hbar\dot{a}_E = Ea_E
\end{equation}
Trivially this has the solution
\begin{equation}
\label{eq:s5}
a_E(t) = a_E(0)e^{-iEt/\hbar}
\end{equation}
\begin{equation}
\label{eq:s6}
\braket{E|\psi(t)} = \braket{E|\psi(0)}e^{-iEt/\hbar}
\end{equation}
Hence
\begin{equation}
\label{eq:s7}
\ket{\psi(t)} = \sum_E\ket{E}\braket{E|\psi(0)}e^{-iEt/\hbar}
\end{equation}
\begin{equation}
\label{eq:s8}
U(t) = \sum_E\ket{E}\bra{E}e^{-iEt/\hbar}
\end{equation}
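Note that $U(t)$ is unitary, as needed to preserve the norm of the state; using the orthonormality of the energy eigenkets,
\[
U(t)U^{\dag}(t) = \sum_{E}\sum_{E'}\ket{E}\braket{E|E'}\bra{E'}e^{-i(E-E')t/\hbar} = \sum_E\ket{E}\bra{E} = I
\]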
In the case of degeneracy
\begin{equation}
\label{eq:s9}
U(t) = \sum_{\alpha}\sum_E\ket{E, \alpha}\bra{E, \alpha}e^{-iEt/\hbar}
\end{equation}
If $E$ is continuous, the sum must be replaced by an integral. The normal modes $\ket{E}e^{-iEt/\hbar}$ are stationary states: a system starting in one only accumulates a phase, so the probability distribution of any variable is constant in time.
%%
% The back matter contains appendices, bibliographies, indices, glossaries, etc.
\backmatter
\bibliography{1_quantum}
\bibliographystyle{plainnat}
\printindex
\end{document}