\relax
\@writefile{toc}{\contentsline {section}{\numberline {1}Introduction}{1}}
\newlabel{sec:introduction}{{1}{1}}
\citation{Quinlan1992}
\@writefile{toc}{\contentsline {section}{\numberline {2}Results}{2}}
\newlabel{sec:results}{{2}{2}}
\@writefile{toc}{\contentsline {subsection}{\numberline {2.1}Finding the Optimum Parameters}{2}}
\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces The percentages of correct classifications for different \textit {k} values. Although the values are very close to one another, the highest correct classification percentage is attained at \textit {k=5}.}}{3}}
\newlabel{fig:kvalues}{{1}{3}}
\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces Correct classification rates for different trees. Unlike \textit {k}-NN, the trees perform very differently depending on how deep they are allowed to grow. Here, the deepest tree is the one with at most $50$ instances in its leaf nodes; we use this tree as the optimum tree in our comparisons.}}{3}}
\newlabel{fig:tree}{{2}{3}}
\@writefile{lof}{\contentsline {figure}{\numberline {3}{\ignorespaces Comparison of the algorithms with their optimum parameters according to LOOCV. With these settings, the best-performing algorithm on the SPLICE dataset turns out to be M5; in other words, the tree learner performs better than both the linear-regression-based learner and the similarity-based learner.}}{3}}
\newlabel{fig:comparison}{{3}{3}}
\@writefile{toc}{\contentsline {subsection}{\numberline {2.2}Effect of Variable Selection on Performance}{4}}
\@writefile{lof}{\contentsline {figure}{\numberline {4}{\ignorespaces The variables selected by the Wrapper method.}}{4}}
\newlabel{fig:wrapper}{{4}{4}}
\bibstyle{abbrv}
\bibdata{myref}
\@writefile{lof}{\contentsline {figure}{\numberline {5}{\ignorespaces The number and percentage of correct classifications after variable selection, and a comparison of performance before and after variable selection through the Wrapper method. Variable selection improved performance in all cases; however, for linear regression and M5 the improvements are negligible, whereas for \textit {k}-NN the improvement is close to $3\%$.}}{5}}
\newlabel{fig:after-wrapper}{{5}{5}}
\@writefile{toc}{\contentsline {subsection}{\numberline {2.3}Effect of Cross-Validation on Performance}{5}}
\@writefile{lof}{\contentsline {figure}{\numberline {6}{\ignorespaces Comparison of performance between LOOCV and 10-fold cross-validation (10FCV).}}{5}}
\newlabel{fig:folds}{{6}{5}}