{\rm# GLOBALS: ``F'': frequency tables; ``I'': number of instances;}
{\rm#          ``C'': number of classes;  ``N'': instances per class}

{\bf function} update(class,train)
  {\rm# OUTPUT: changes to the globals.}
  {\rm# INPUT:  a ``train''ing example containing attribute/value pairs}
  {\rm#         plus that case's ``class''.}
  I++
  {\bf if} (++N[class]==1) {\bf then} C++ {\bf fi}
  {\bf for} attr,value {\bf in} train
    {\bf if} (value != "?") {\bf then} F[class,attr,value]++ {\bf fi}

{\bf function} classify(test)
  {\rm# OUTPUT: ``what'' is the most likely hypothesis for the test case.}
  {\rm# INPUT:  a ``test'' case containing attribute/value pairs.}
  k=1; m=2        {\rm# Controls for Laplace and M-estimates.}
  like = -100000  {\rm# Initial, impossibly small likelihood.}
  {\bf for} H {\bf in} N   {\rm# Check all hypotheses.}
  \{
    prior = (N[H]+k)/(I+(k*C))   {\rm#\(\Leftarrow\)\(P(H)\)}
    temp  = log(prior)
    {\bf for} attr,value {\bf in} test
    \{ {\bf if} (value != "?") {\bf then}
         inc   = (F[H,attr,value]+(m*prior))/(N[H]+m)   {\rm#\(\Leftarrow\)\(P(E\sb{i}{\given}H)\)}
         temp += log(inc)
       {\bf fi}
    \}
    {\bf if} (temp >= like) {\bf then} like = temp; what = H {\bf fi}
  \}
  {\bf return} what
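For concreteness, the following is a minimal Python sketch of the same update/classify scheme. It assumes (as an illustrative convention, not something fixed by the pseudocode) that training and test cases arrive as attribute\(\to\)value dictionaries, and it reuses the pseudocode's names (F, N, I, C, k, m).

\begin{verbatim}
import math
from collections import defaultdict

# Globals mirroring the pseudocode: F = frequency tables, I = number of
# instances seen, C = number of classes seen, N = instances per class.
F = defaultdict(int)   # F[(klass, attr, value)] -> count
N = defaultdict(int)   # N[klass]                -> count
I = 0
C = 0

def update(klass, train):
    """Fold one training case (dict of attr -> value) into the globals."""
    global I, C
    I += 1
    N[klass] += 1
    if N[klass] == 1:            # first time this class has been seen
        C += 1
    for attr, value in train.items():
        if value != "?":         # ignore missing values
            F[(klass, attr, value)] += 1

def classify(test, k=1, m=2):
    """Return the most likely class for a test case (dict of attr -> value)."""
    like, what = -100000, None   # initial, impossibly small likelihood
    for H in N:                  # check all hypotheses (classes)
        prior = (N[H] + k) / (I + k * C)          # P(H), Laplace-smoothed
        temp = math.log(prior)
        for attr, value in test.items():
            if value != "?":
                inc = (F[(H, attr, value)] + m * prior) / (N[H] + m)  # P(E_i|H)
                temp += math.log(inc)
        if temp >= like:
            like, what = temp, H
    return what

# Example usage (hypothetical data):
update("yes", {"outlook": "sunny", "windy": "false"})
update("no",  {"outlook": "rainy", "windy": "true"})
print(classify({"outlook": "sunny", "windy": "?"}))   # -> "yes"
\end{verbatim}

Working in log space, as above, avoids the floating-point underflow that would result from multiplying many small probabilities directly.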