Moved chapters into includes to enable faster compilation via \includeonly.
This commit is contained in:
paul-loedige 2022-02-18 11:51:01 +01:00
parent 2e1601c846
commit 7fb1ba0e53
5 changed files with 85 additions and 52 deletions

View File

@ -1,18 +1,3 @@
\pagenumbering{roman}
%glossary
\printglossary[nonumberlist]
\newpage
%bibliography
\phantomsection
\addcontentsline{toc}{chapter}{Literatur}
\bibliographystyle{IEEEtran-de}
\bibliography{Bibliography.bib}
\newpage
%appendix
\appendix
\chapter{Anhang}

View File

@ -5,9 +5,7 @@
\newglossary{nomenclature}{nom}{ncl}{Nomenklatur}
\shorthandon{"}
%--------------------
%main glossary
%--------------------
% {{{ Main glossary%
\newglossaryentry{overfitting}{
name=Overfitting,
description={
@ -58,12 +56,13 @@
beeinflussen sich dabei aber nicht.} (\url{https://de.wikipedia.org/wiki/Unabh\%C3\%A4ngig_und_identisch_verteilte_Zufallsvariablen})
}
}
% }}} %
%--------------------
%acronyms
%--------------------
% {{{ acronyms%
\setabbreviationstyle[acronym]{long-short}
\newacronym{CNN}{CNN}{Convolutional Neural Network}
\newacronym{RNN}{RNN}{Recurrent Neural Network}
\newacronym{SSE}{SSE}{Summed Squared Error}
\newacronym{MSE}{MSE}{Mean Squared Error}
\newacronym{FRM}{FRM}{\gls{full_rank_matrix}}
@ -84,10 +83,11 @@
\newacronym{GPU}{GPU}{Graphic Processing Unit}
\newacronym{RMS}{RMS}{Root Mean Square}
%--------------------
%nomenclature
%--------------------
% }}} %
% {{{ Nomenclature%
% {{{ Nomenclature Commands %
%add new key
%\glsaddstoragekey{unit}{}{\glsentryunit}
\glsnoexpandfields
@ -113,6 +113,7 @@
\newcommand{\nomsym}[1]{\texorpdfstring{\glslink{#1}{\ensuremath{\glsentrysymbol{#1}}}}{#1}\xspace}
%use nomenclature entry (use in equation)
\newcommand{\nomeq}[1]{\glslink{#1}{\glsentrysymbol{#1}}}
% }}} Nomenclature Commands %
\newnom{summed_squared_error}{\gls{SSE}}{\text{\glsxtrshort{SSE}}}{\glsxtrfull{SSE}}
\newnom{mean_squared_error}{\gls{MSE}}{\text{\glsxtrshort{MSE}}}{\glsxtrfull{MSE}}
@ -138,6 +139,8 @@
\newnom{gaussian_process}{Gaußscher Prozess}{\mathcal{GP}}{}
\newnom{hyper_parameters}{Hyper"~Parameter}{\bm{\beta}}{}
\newnom{activation_function}{Aktivierungsfunktion}{\phi}{}
% }}} %
\shorthandoff{"}
\makeglossaries

View File

@ -17,6 +17,41 @@
\def \MODULECOMPACT{ML}
\def \DATE{\today}
% Restrict compilation to the chapters listed below; comment out entries to
% speed up partial builds. NOTE(review): names here carry no .tex extension,
% so every matching \include call must also omit the extension — otherwise
% the string comparison fails and the chapter is silently skipped.
\includeonly{
%Einleitung
chapters/Einleitung,
%Classical_Supervised_Learning
chapters/Classical_Supervised_Learning/Linear_Regression,
chapters/Classical_Supervised_Learning/Linear_Classification,
chapters/Classical_Supervised_Learning/Model_Selection,
chapters/Classical_Supervised_Learning/k-Nearest_Neighbors,
chapters/Classical_Supervised_Learning/Trees_and_Forests,
%Kernel_Methods
chapters/Kernel_Methods/Kernel-Regression,
chapters/Kernel_Methods/Support_Vector_Machines,
%Bayesian_Learning
chapters/Bayesian_Learning/Bayesian_Learning,
chapters/Bayesian_Learning/Bayesian_Regression_Algorithms,
%Neural_Networks
chapters/Neural_Networks/Basics,
chapters/Neural_Networks/Gradient_Descent,
chapters/Neural_Networks/Regularization,
chapters/Neural_Networks/Practical_Considerations,
chapters/Neural_Networks/CNNs_and_RNNs,
%Classical_Unsupervised_Learning
chapters/Classical_Unsupervised_Learning/Dimensionality_Reduction_and_Clustering,
chapters/Classical_Unsupervised_Learning/Density_Estimation_and_Mixture_Models,
chapters/Classical_Unsupervised_Learning/Variational_Auto-Encoders,
%Mathematische_Grundlagen
chapters/Mathematische_Grundlagen/Lineare_Algebra,
chapters/Mathematische_Grundlagen/Probability_Theory,
chapters/Mathematische_Grundlagen/Kernel_Basics,
chapters/Mathematische_Grundlagen/Sub-Gradients,
chapters/Mathematische_Grundlagen/Constraint_Optimization,
chapters/Mathematische_Grundlagen/Gaussian_Identities,
%Anhang
Appendix
}
\input{Glossary.tex}
\begin{document}
@ -29,49 +64,60 @@
% {{{ Main Content%
\pagenumbering{arabic}
\part{Einleitung}
\input{chapters/Einleitung.tex}
% No .tex extension: \include's argument must exactly match its \includeonly
% entry, or the chapter is silently skipped when \includeonly is active.
\include{chapters/Einleitung}
\part{Classical Supervised Learning}
\label{part:Classical Supervised Learning}
\input{chapters/Classical_Supervised_Learning/Linear_Regression.tex}
\input{chapters/Classical_Supervised_Learning/Linear_Classification.tex}
\input{chapters/Classical_Supervised_Learning/Model_Selection.tex}
\input{chapters/Classical_Supervised_Learning/k-Nearest_Neighbors.tex}
\input{chapters/Classical_Supervised_Learning/Trees_and_Forests.tex}
% Extension-less names: must match the \includeonly list exactly.
\include{chapters/Classical_Supervised_Learning/Linear_Regression}
\include{chapters/Classical_Supervised_Learning/Linear_Classification}
\include{chapters/Classical_Supervised_Learning/Model_Selection}
\include{chapters/Classical_Supervised_Learning/k-Nearest_Neighbors}
\include{chapters/Classical_Supervised_Learning/Trees_and_Forests}
\part{Kernel Methods}
\label{part:Kernel Methods}
\input{chapters/Kernel_Methods/Kernel-Regression.tex}
\input{chapters/Kernel_Methods/Support_Vector_Machines.tex}
% Extension-less names: must match the \includeonly list exactly.
\include{chapters/Kernel_Methods/Kernel-Regression}
\include{chapters/Kernel_Methods/Support_Vector_Machines}
\part{Bayesian Learning}
\label{part:Bayesian Learning}
\input{chapters/Bayesian_Learning/Bayesian_Learning.tex}
\input{chapters/Bayesian_Learning/Bayesian_Regression_Algorithms.tex}
% Extension-less names: must match the \includeonly list exactly.
\include{chapters/Bayesian_Learning/Bayesian_Learning}
\include{chapters/Bayesian_Learning/Bayesian_Regression_Algorithms}
\part{Neural Networks}
\label{part:Neural Networks}
\input{chapters/Neural_Networks/Basics.tex}
\input{chapters/Neural_Networks/Gradient_Descent.tex}
\input{chapters/Neural_Networks/Regularization.tex}
\input{chapters/Neural_Networks/Practical_Considerations.tex}
\input{chapters/Neural_Networks/CNNs_and_LSTMs.tex}
% Extension-less names: must match the \includeonly list exactly.
\include{chapters/Neural_Networks/Basics}
\include{chapters/Neural_Networks/Gradient_Descent}
\include{chapters/Neural_Networks/Regularization}
\include{chapters/Neural_Networks/Practical_Considerations}
\include{chapters/Neural_Networks/CNNs_and_RNNs}
\part{Classical Unsupervised Learning}
\label{part:Classical Unsupervised Learning}
\input{chapters/Classical_Unsupervised_Learning/Dimensionality_Reduction_and_Clustering.tex}
\input{chapters/Classical_Unsupervised_Learning/Density_Estimation_and_Mixture_Models.tex}
\input{chapters/Classical_Unsupervised_Learning/Variational_Auto-Encoders.tex}
% Extension-less names: must match the \includeonly list exactly.
\include{chapters/Classical_Unsupervised_Learning/Dimensionality_Reduction_and_Clustering}
\include{chapters/Classical_Unsupervised_Learning/Density_Estimation_and_Mixture_Models}
\include{chapters/Classical_Unsupervised_Learning/Variational_Auto-Encoders}
\part{Mathematische Grundlagen}
\label{part:Mathematische Grundlagen}
\input{chapters/Mathematische_Grundlagen/Lineare_Algebra.tex}
\input{chapters/Mathematische_Grundlagen/Probability_Theory.tex}
\input{chapters/Mathematische_Grundlagen/Kernel_Basics.tex}
\input{chapters/Mathematische_Grundlagen/Sub-Gradients.tex}
\input{chapters/Mathematische_Grundlagen/Constraint_Optimization.tex}
\input{chapters/Mathematische_Grundlagen/Gaussian_Identities.tex}
% Extension-less names: must match the \includeonly list exactly.
\include{chapters/Mathematische_Grundlagen/Lineare_Algebra}
\include{chapters/Mathematische_Grundlagen/Probability_Theory}
\include{chapters/Mathematische_Grundlagen/Kernel_Basics}
\include{chapters/Mathematische_Grundlagen/Sub-Gradients}
\include{chapters/Mathematische_Grundlagen/Constraint_Optimization}
\include{chapters/Mathematische_Grundlagen/Gaussian_Identities}
% }}} %
\input{Appendix.tex}
\pagenumbering{roman}
%glossary
\printglossary[nonumberlist]
\newpage
%bibliography
\phantomsection
\addcontentsline{toc}{chapter}{Literatur}
\bibliographystyle{IEEEtran-de}
\bibliography{Bibliography.bib}
% No .tex extension: must match the "Appendix" entry in \includeonly,
% otherwise the appendix (and the back matter it now contains) is skipped.
\include{Appendix}
\end{document}

View File

@ -1,3 +0,0 @@
\chapter{CNNs and LSTMs}%
\label{cha:CNNs and LSTMs}

View File

@ -0,0 +1,2 @@
% Chapter heading built from glossary acronym plurals: \glsxtrshortpl gives the
% short plural form for the typeset text; the second \texorpdfstring argument
% uses \glsfmtshortpl, the glossaries-extra variant intended for PDF bookmark
% strings (plain \glsxtrshortpl is not expandable there).
% Trailing % after \chapter{...} suppresses a spurious end-of-line space.
\chapter{\texorpdfstring{\glsxtrshortpl{CNN} and \glsxtrshortpl{RNN}}{\glsfmtshortpl{CNN} and \glsfmtshortpl{RNN}}}%
\label{cha:CNNs and RNNs}