···3344 \usepackage{graphicx} % Used to insert images into the paper
55 \usepackage{float}
66+ % NOTE: caption is loaded below with [justification=centering]; loading it here a second time would cause an option clash
77+ \interfootnotelinepenalty=10000 % Stops footnotes overflowing onto the next page
68 \usepackage[justification=centering]{caption} % Used for captions
79 \captionsetup[figure]{font=small} % Makes captions small
810 \newcommand\tab[1][0.5cm]{\hspace*{#1}} % Defines a new command to use 'tab' in text
···98100 their basic versions. In contrast, we will use different neural network
99101 architectures, as this method is currently the most used for image
100102 classification.
101101-102102- \todo{
103103- \\A couple of papers that may be useful (if needed):
104104- - LeNet: http://yann.lecun.com/exdb/publis/pdf/lecun-01a.pdf
105105- - AlexNet: http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks
106106- - General comparison of LeNet and AlexNet:
107107- "On the Performance of GoogLeNet and AlexNet Applied to Sketches", Pedro Ballester and Ricardo Matsumura Araujo
108108- - Deep NN Architecture:
109109- https://www-sciencedirect-com.ezproxy.lib.monash.edu.au/science/article/pii/S0925231216315533
110110- }
111103112104 \subsection{Classical Machine Learning Methods}
113105···173165 The Fully Convolutional Network (FCN) contains only one dense layer for the final binary classification step.
174166 In place of further dense layers, the FCN includes an extra convolutional layer, increasing the network's ability to abstract the input data relative to the other two configurations.
175167 \\
176176- \textbf{Insert image of LeNet from slides}
168168+ \todo{Insert image of LeNet from slides if time}
177169178170 \section{Method} \label{sec:method}
179171 \tab
···217209 \subsection{Neural Network Testing}\label{nnTesting}
218210 \tab After training each network, a separate test set of images (and labels) was used to evaluate the models.
219211 The result of this testing was expressed primarily in the form of an accuracy (percentage).
220220- These results as well as the other methods presented in this paper are given in Figure \textbf{[insert ref to results here]} of the Results section.
221221- \textbf{***********}
212212+ These results, together with those of the other methods presented in this paper, are given in Table \ref{tab:results}.
222213 % Kelvin Start
223214 \subsection{Benchmarking}\label{benchmarking}
224215···270261 % Kelvin End
271262272263 \section{Results} \label{sec:results}
264264+ \tab The time taken to train each of the neural networks and traditional approaches was measured and recorded alongside their accuracy (evaluated using a separate test dataset) in Table \ref{tab:results}.
265265+266266+ % Results table comparing accuracy and training time of all methods
267267+ \begin{table}[H]
268268+ \centering
269269+ \renewcommand{\arraystretch}{1.5} % Adds some space to the table
270270+ \begin{tabular}{|c|c|c|}
271271+ \hline
272272+ \textbf{Method} & \textbf{Test Accuracy} & \textbf{Training Time (s)}\\
273273+ \hline
274274+ LeNet & 87.86\% & 65.67\\
275275+ \hline
276276+ CNN & 95.63\% & 119.31\\
277277+ \hline
278278+ FCN & 94.66\% & 113.94\\
279279+ \hline
280280+ Support Vector Machine & 83.50\% & 5.90\\
281281+ \hline
282282+ K Nearest Neighbours & 67.96\% & 0.22\\
283283+ \hline
284284+ Gaussian Naive Bayes & 85.44\% & 0.15\\
285285+ \hline
286286+ Random Forest & 92.23\% & 0.92\\
287287+ \hline
288288+ \end{tabular}
289289+ \captionsetup{width=0.70\textwidth}
290290+ \caption{Comparison of the accuracy and training time of each neural network and traditional machine learning technique.}
291291+ \label{tab:results}
292292+ \end{table}
273293274294 \section{Conclusion} \label{sec:conclusion}
275295
+4-4
mini_proj/waldo_model.py
···4949 ## Define the model start and end
5050 model = Model(inputs=inputs, outputs=classif)
5151 # Optimizer recommended Adadelta values (lr=0.01)
5252- model.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=['accuracy', f1])
5252+ model.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=['accuracy'])
53535454 return model
5555···7979 ## Define the model start and end
8080 model = Model(inputs=inputs, outputs=classif)
8181 # Optimizer recommended Adadelta values (lr=0.01)
8282- model.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=['accuracy', f1])
8282+ model.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=['accuracy'])
83838484 return model
8585···107107108108 ## Define the model start and end
109109 model = Model(inputs=inputs, outputs=classif)
110110- model.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=['accuracy', f1])
110110+ model.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=['accuracy'])
111111112112 return model
113113···115115AlexNet architecture
116116'''
117117def AlexNet():
118118- inputs = Input(shape=(3, 64, 64))
118118+ #inputs = Input(shape=(3, 64, 64))
119119120120121121 return model