\IfFileExists{../../../../../../dpi.inpe.br/banon-pc2@80/2006/11.01.13.53/doc/ISMM2007proceedings.cls}{% \documentclass[ams,twocolumn,extendedabstract]{../../../../../../dpi.inpe.br/banon-pc2@80/2006/11.01.13.53/doc/ISMM2007proceedings} }{% \IfFileExists{../../../../../dpi.inpe.br/banon-pc2@80/2006/11.01.13.53/doc/ISMM2007proceedings.cls}{% \documentclass[ams,twocolumn,extendedabstract]{../../../../../dpi.inpe.br/banon-pc2@80/2006/11.01.13.53/doc/ISMM2007proceedings} }{% \documentclass[ams,twocolumn,extendedabstract]{ISMM2007proceedings} } } \RequirePackage[english]{babel} \RequirePackage{tabularx} \RequirePackage{graphicx} \RequirePackage{subfigure} \RequirePackage{hyperref} \RequirePackage{amsrefs} \newcommand{\R}{\mathbb{R}} \newcommand{\F}{\mathbb{F}} \newcommand{\Z}{\mathbb{Z}} \newcommand{\E}{\mathbb{E}} \newcommand{\G}{\mathbb{G}} \newcommand{\B}{\mathbb{B}} \newcommand{\N}{\mathbb{N}} \newcommand{\LL}{\mathbb{L}} \newcommand{\A}{\mathbf{A}} \newcommand{\X}{\mathbf{X}} \newcommand{\FX}{\mathcal{F}(\X)} \newcommand{\vetx}{\mathbf{x}} \newcommand{\vety}{\mathbf{y}} \newcommand{\ima}{\mathbf{a}} \newcommand{\imb}{\mathbf{b}} \newcommand{\imc}{\mathbf{c}} \newcommand{\ims}{\mathbf{s}} \newcommand{\bb}{\begin{equation}} \newcommand{\ee}{\end{equation}} %\newcommand{\bbb}{\begin{align}} %\newcommand{\eee}{\end{align} } \def\amax{\kern 0em\hbox{\rm \kern .25em\lower.1ex\hbox{\rlap{$\vee$}}\kern -.07em\lower.2ex\hbox{$\square$}\kern.25em}} \def\amin{\kern 0em\hbox{\rm \kern .25em\lower.1ex\hbox{\rlap{$\wedge$}}\kern -.07em\lower.2ex\hbox{$\square$}\kern.25em}} \def\dualimp{\kern 0em\hbox{\rm \kern .25em\lower.1ex\hbox{\rlap{$\Rightarrow$}}\kern 0em\lower-1.2ex\hbox{$\overline{\hspace{2ex}}$}\kern.25em}} \def\circmax{\kern 0em\hbox{\rm \kern .25em\lower.1ex\hbox{\rlap{$\vee$}}\kern -.18em\lower-.1ex\hbox{$\bigcirc$}\kern.25em}} \def\circmin{\kern 0em\hbox{\rm \kern .25em\lower.1ex\hbox{\rlap{$\wedge$}}\kern -.18em\lower-.0ex\hbox{$\bigcirc$}\kern.25em}} \hypersetup{colorlinks=true,linkcolor=black,citecolor=black,urlcolor=black,filecolor=black} \begin{document} \title{A brief introduction to a two-layer morphological associative memory based on fuzzy operations} \begin{Authors} \Author{Peter Sussner} \Affilref[UNICAMP] \Author{Estev\~ao Esmi Laureano} \Affil [UNICAMP] {State University of Campinas (UNICAMP), Brazil \\* \email{\{sussner,ra050652\}@ime.unicamp.br}}% institutional e-mail address \end{Authors} \section{Introduction} \label{sec:Introduction} Morphological neural networks (MNNs) are a class of artificial neural networks that perform one of the elementary operations of mathematical morphology at every node. Morphological associative memories (MAMs) are among the types of MNNs that have emerged in recent years. Unlike many other models of neural associative memory, MAMs have been proposed from the outset for the storage and recall of real-valued patterns. Nevertheless, our focus in this paper is on the binary case. %Notable features of auto-associative morphological memories (AMMs) incude optimal absolute storage capacity and one-step %convergence. On the downside, the original MAM models suffer from a large number of spurious memories and limited error %correction capability. Several improved MAM versions have been developped in order to overcome these difficulties. %For example, P. Sussner presented a two-layer MAM based on variations of the so-called kernel and dual kernel methods %and a MAM based on fuzzified operations. 
%Both approaches preserve the extremely high storage capacity of
%$2^n$ patterns (where $n$ denotes the length of the input patterns)
%of the original AMM models. The latter approach relies on the
%fuzzification of the dilations and erosions used in the
%original AMM models, followed by a defuzzification algorithm. This approach also
%exhibits an excellent tolerance with respect to arbitrary noise
%and does not possess any spurious memories apart from a ground state. However,
%the defuzzification scheme mentioned above can be viewed as post-processing
%and does not represent an intrinsic part of the MAM model.
In this paper, we present a new two-layer MAM model. The first layer executes fuzzy operations that incorporate information on the kernel vectors corresponding to the fundamental memories. The second layer uses this information on the kernel vectors in order to recall the desired output pattern. This approach can be applied to the auto-associative as well as to the hetero-associative case. Our new MAM model outperformed several well-known neural associative memory models
%such as the projection recorded dynamic associative memory, the Hopfield net,
%and Kosko's bidirectional associative memory
as well as our previous MAM models in experiments concerning the error correction capability.

\section{Matrix operations in minimax algebra}
%and their Relationship to Mathematical Morphology
\label{MM}

The theories of {\em minimax algebra} and {\em mathematical morphology} are closely related, although they were developed for completely different purposes.
%In contrast to MM, which focuses on the complete lattice structure of sets of
%images, minimax algebra investigates the interactions between the lattice
%sup/inf structure and the group structure of real addition or multiplication.
%However, minimax algebra fails to exploit the lattice structure to the level
%that MM has, and these theories have neglected important concepts of MM such
%as adjunctions.
One of the basic algebraic structures occurring in minimax algebra is called a {\em blog} (bounded lattice ordered group). The set $\R_{\pm \infty} = \R \cup \{+ \infty, - \infty \}$,
%i.e. the reals extended by the symbols $+ \infty$ and $- \infty$,
together with the operations ``maximum'' ($\vee$), ``minimum'' ($\wedge$), ``addition'' ($+$), and ``dual addition'' ($+^{\prime}$), provides the canonical example of a blog. For the purposes of this paper, it often suffices to consider $\R$, the set of finite elements of $\R_{\pm \infty}$.
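As a concrete illustration, the two additions can be modeled on IEEE floating-point infinities. The convention below (the additions coincide except at opposite infinities) follows minimax algebra~\cite{cuninghame-green95}; the Python sketch itself, including all names, is our own illustration rather than part of the theory.
\begin{verbatim}
INF = float('inf')

def add(x, y):        # addition (+)
    if {x, y} == {INF, -INF}:
        return -INF   # (+inf) + (-inf) = -inf
    return x + y

def add_dual(x, y):   # dual addition (+')
    if {x, y} == {INF, -INF}:
        return INF    # (+inf) +' (-inf) = +inf
    return x + y

# max and min serve as the lattice operations
assert max(add(INF, -INF), 0) == 0
assert min(add_dual(INF, -INF), 0) == 0
\end{verbatim}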
%%The systems $(\R_{\pm \infty}, \vee, +)$ and
%%$(\R_{\pm \infty}, \wedge, +^\prime)$ represent belts.
%The additions $+$ and $+^\prime$ behave as expected and coincide in most cases with
%the following exceptions:
%\begin{eqnarray}
%(+\infty) + (-\infty) = (-\infty) + (+\infty) = -\infty \, , \\
%(+\infty) +^\prime (-\infty) = (-\infty) +^\prime (+\infty) = +\infty \, .
%\end{eqnarray}
%
%The theory of minimax algebra includes a theory of conjugation. The
%conjugate of a certain algebraic structure $\E$ is denoted by $\E^*$.
%An element $x \in \E$ corresponds to an element $x^* \in \E^*$ under the
%isomorphism of conjugation $\E \longrightarrow \E^*$. The element $x^* \in
%\E^*$ is called the {\em conjugate} of $x$. In the special case where
%$x \in \R_{\pm \infty}$, the conjugate $x^*$ is given by
%\begin{equation}
%x^* = \cases{
%-x & \; \; \mbox{ if } \; x \in \R \cr
%-\infty & \; \; \mbox{ if } \; x=+\infty \cr
%+\infty & \; \; \mbox{ if } \; x=-\infty \; . \cr}
%\end{equation}
%A matrix $A \in \R_{\pm \infty}^{m \times n}$ corresponds to a conjugate
%matrix $A^* \in \R_{\pm \infty}^{n \times m}$ given by
%\begin{equation}
%(a^*)_{ij}=(a_{ji})^* \; .
%\end{equation}
%
%The model of associative memories described in this paper employs
%products of matrices which are defined in
%minimax algebra \cite{cuninghame-green95}.
Two types of matrix products exist in minimax algebra~\cite{cuninghame-green95}. For $A \in \R^{m \times p}$ and $B \in \R_{\pm \infty}^{p \times n}$, the matrix $C=A \amax B$, also called the {\em max product} of $A$ and $B$, and the matrix $D=A \amin B$, also called the {\em min product} of $A$ and $B$, are defined by
\begin{equation} \label{boxmax}
c_{ij}= \bigvee \limits _{k=1}^{p} (a_{ik}+b_{kj}) \, , \; \;
d_{ij}= \bigwedge \limits _{k=1}^{p} (a_{ik} + b_{kj}) \, .
\end{equation}
Let $A \in \R^{m \times n}$. Let $\varepsilon_A$ and $\delta_A$ be such that $\varepsilon_A(\vetx) = A \amin \vetx$ and $\delta_A(\vetx) = A \amax \vetx$ for all $\vetx \in \R_{\pm \infty}^n$.
%Note that $\varepsilon_A$ and $\delta_A$ associate elements of the
%{\em complete lattice} $\R_{\pm \infty}^{n}$ with elements of the
%{\em complete lattice} $\R_{\pm \infty}^{m}$.
Obviously, $\varepsilon_A$ represents an {\em erosion} and $\delta_A$ represents a {\em dilation} from the {\em complete lattice} $\R_{\pm \infty}^{n}$ into the {\em complete lattice} $\R_{\pm \infty}^{m}$.
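To make Equation~\ref{boxmax} concrete, the following Python/NumPy sketch computes the max product $C = A \amax B$ and the min product $D = A \amin B$ of finite matrices. The function names are ours, and the code merely spells out the definitions.
\begin{verbatim}
import numpy as np

def max_product(A, B):
    # c_ij = max_k (a_ik + b_kj)
    return np.max(A[:, :, None] + B[None, :, :],
                  axis=1)

def min_product(A, B):
    # d_ij = min_k (a_ik + b_kj)
    return np.min(A[:, :, None] + B[None, :, :],
                  axis=1)

A = np.array([[0., 1.], [2., 3.]])
B = np.array([[0., -1.], [1., 0.]])
# max_product(A, B) yields [[2, 1], [4, 3]]
\end{verbatim}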
\section{Basic concepts of morphological associative memories}
\label{MAM}

%Associative neural memories (or short associative memories)
%are artificial neural networks which allow for the storage
%and retrieval of information (vector patterns). Unlike conventional
%computer associative memories, neural network models serving as
%associative memories are generally capable of retrieving a complete
%output pattern ${\bf y}$ even if the input pattern ${\bf x}$ is
%corrupted or incomplete. The purpose of {\em auto-associative}
%memories is the retrieval of ${\bf x}$ from corrupted
%or incomplete versions ${\tilde {\bf x}}$. If an artificial
%associative memory stores associations $({\bf x},{\bf y})$,
%where ${\bf x}$ cannot be viewed as a corrupted or incomplete version
%of ${\bf y}$, then we speak of a {\em hetero-associative} memory.
%Mathematically speaking, an associative memory describes a relation
%$R \subseteq {\R}^{m} \times {\R}^{n}$. If $({\bf x}^{\xi},{\bf y}^{\xi})
%\in R$ for $\xi = 1, \ldots , k$, i.e. if the input ${\bf x}^{\xi}$
%produces the output ${\bf y}^{\xi}$, then the associative memory is
%said to {\em store} or {\em record} the {\em memory associations}
%$({\bf x}^{1},{\bf y}^{1}), \ldots , ({\bf x}^{k},{\bf y}^{k})$.
Suppose that we wish to record $k$ vector pairs $\left( { {\bf x}^{1}, {\bf y}^{1}} \right) , \, \ldots \, , \, \left( { {\bf x}^{k}, {\bf y}^{k}} \right)$ using a {\em morphological associative memory} (MAM). Let $X = [ {\bf x}^{1}, \ldots , {\bf x}^{k} ]$ denote the matrix whose columns are the input patterns ${\bf x}^{1}, \, \ldots \, , \, {\bf x}^{k}$. Similarly, let $Y = [ {\bf y}^{1}, \ldots , {\bf y}^{k} ]$. We introduced two basic morphological memory models~\cite{ritter98}.
%Both models use a recording recipe which is similar to correlation recording.
The first approach consists of constructing an $m \times n$ matrix $W_{XY}$ as follows:
\begin{equation}
W_{XY}= Y \amin (-X)^{t} = \bigwedge \limits_{\xi =1}^{k} {{\bf y}^{\xi} \amin {(-{\bf x}^{\xi })}^{t}} \,.
\end{equation}
\noindent The second, dual approach consists of constructing an $m \times n$ matrix $M_{XY}$ of the form
\begin{equation}
M_{XY}= Y \amax (-X)^{t} = \bigvee \limits _{\xi =1}^{k} {{\bf y}^{\xi } \amax {(-{\bf x}^{\xi })}^{t}} \,.
\end{equation}
If the matrix $W_{XY}$ receives a vector ${\bf x}$ as input, the product $W_{XY} \amax {\bf x}$ is formed. Dually, if the matrix $M_{XY}$ receives a vector ${\bf x}$ as input, the product $M_{XY} \amin {\bf x}$ is formed.
%MAMs can be used to store real-valued or binary patterns. In this paper,
%our focus is on {\em binary MAMs}.
If $X=Y$,
%i.e., ${\bf y}^{\xi } = {\bf x}^{\xi }$ for $\xi =1, \, \ldots \, , \, k$,
we obtain the {\em autoassociative morphological memories} (AMMs) $W_{XX}$ and $M_{XX}$. The properties of AMMs include an optimal absolute storage capacity and one-step convergence.
%\begin{enumerate}
%\item The absolute storage capacity of a binary AMM is $2^{n}$
%if the patterns to be stored have length $n$.
%\item The output pattern remains stable under repeated applications
%of binary AMMs (``one-step convergence'').
%%\item The fixed points of both $W_{XX}$ and $M_{XX}$ include
%%the original patterns as well as a large number of spurious states.
%%\item The memory $W_{XX}$ exhibits tolerance with respect
%%to eroded, i.e. incomplete, patterns, and the
%%memory $M_{XX}$ exhibits tolerance with respect
%%to dilated patterns.
%%\item Both $W_{XX}$ and $M_{XX}$ are unsuited for patterns
%%corrupted by both erosive and dilative noise.
%\end{enumerate}

\noindent {\bf Example.} \hspace{.1cm}
%We provide a simple example to illustrate the observations above.
%Consider the ten pattern images ${\bf p}^{1}, \, \ldots \, , \,
%{\bf p}^{10}$ shown in Figure~\ref{fig:vowels}. Using the standard
%row-scan method, each pattern image ${\bf p}^{\xi }$ can be converted
%into a pattern vector ${\bf x}^{\xi } \in \{0, 1 \}^{49}$.
We used the ten pattern vectors ${\bf x}^{1}, \, \ldots \, , \, {\bf x}^{10} \in \{0, 1 \}^{49}$ corresponding to the images in Figure~\ref{fig:vowels} to construct the morphological memories $W_{XX}$ and $M_{XX}$. As expected, each individual pattern vector ${\bf x}^{\xi}$ was perfectly recalled in a single application of either $W_{XX}$ or $M_{XX}$ and remained stable under repeated applications of either memory.
\begin{figure}
\includegraphics[width=.95\columnwidth]{wow}
\caption{Ten binary patterns.}
\label{fig:vowels}
%The ten patterns used in constructing $W_{XX}$ and $M_{XX}$. The
%output of $W_{XX}$ and the output of $M_{XX}$
%is identical to the input pattern.
\end{figure}
%The same ten patterns served as bipolar inputs to a discrete Hopfield
%net. Due to the considerable amount of overlap between the patterns,
%the Hopfield net failed to recall any of the original patterns.
%The tolerance of the memory $W_{XX}$ with respect to incomplete
%patterns is exemplified in Figure 2. This type of robustness is due
%to the fact that the original pattern is the smallest fixed point
%$\geq$ the input pattern. Dually, the memory $M_{XX}$ is very robust
%with respect to dilative changes in the patterns.
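The recording recipes and the one-step perfect recall can be reproduced in a few lines of code. The following self-contained Python/NumPy sketch (our own illustration on two short patterns instead of the $49$-dimensional ones above) records $W_{XX}$ and $M_{XX}$ and verifies perfect recall of the stored patterns:
\begin{verbatim}
import numpy as np

def max_product(A, B):
    return np.max(A[:, :, None] + B[None, :, :],
                  axis=1)

def min_product(A, B):
    return np.min(A[:, :, None] + B[None, :, :],
                  axis=1)

# two binary patterns stored as the columns of X
X = np.array([[1., 0.],
              [0., 1.],
              [1., 1.],
              [0., 0.]])
W_XX = min_product(X, -X.T)  # X min-prod (-X)^t
M_XX = max_product(X, -X.T)  # X max-prod (-X)^t

for x in X.T:  # one-step perfect recall
    w = max_product(W_XX, x[:, None]).ravel()
    m = min_product(M_XX, x[:, None]).ravel()
    assert np.array_equal(w, x)
    assert np.array_equal(m, x)
\end{verbatim}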
%\begin{figure}
%\label{xeroded}
%\includegraphics[width=.95\columnwidth]{missingparts}
%\caption{Examples of corrupted input patterns which were perfectly
%recalled as the letter images ``${\bf A}$'', ``${\bf E}$'', and
%``${\bf I}$'' by the morphological memory $W_{XX}$ ($X$ consists of
%the patterns shown in Figure~\ref{fig:vowels}).}
%\end{figure}

\section{A new MAM based on fuzzy operations and the kernel method}
\label{NewMAM}

The AMMs $W_{XX}$ and $M_{XX}$ suffer from a large number of spurious memories and a limited error correction capability~\cite{sussner03a}.
Recently, some modified MAM models have been proposed in order to overcome these difficulties. For example, Sussner introduced a two-layer binary MAM
%(for the auto-associative as well as for the hetero-associative case)
that yields a greatly reduced number of spurious memories and an improved error correction capability~\cite{sussner03a}. Another approach is based on either one of the following input-output schemes~\cite{sussner03b}:
%This approach consists in incorporating fuzzy operations in the recall
%phase of binary AMMs. It yields a MAM with binary inputs $\vetx$ and
%fuzzy outputs $\vety$. The fuzzy pattern $\vety$ is defuzzified by means
%of a certain algorithm ${\mathcal T}$ or ${\mathcal T}^\prime$ which
%produces an approximation of the desired uncorrupted pattern. We use the
%symbol FAMMT (``fuzzy autoassociative morphological memory with
%thresholding'') to denote the model given by Equation \ref{fuzzAMM}.
\begin{eqnarray}
\label{fuzzAMM}
{\bf x} \, \longrightarrow \, W_{XX} {\tilde \amax} {\bf x} \, \longrightarrow \, {\rm Defuzz.} \, \longrightarrow \, {\bf y} \, , \\
\label{dualfuzzAMM}
{\bf x} \, \longrightarrow \, M_{XX} {\tilde \amin} {\bf x} \, \longrightarrow \, {\rm Defuzz.} \, \longrightarrow \, {\bf y} \, .
\end{eqnarray}
Equations~\ref{fuzzAMM} and \ref{dualfuzzAMM} involve the fuzzy max product $\tilde{\amax}$ and the fuzzy min product $\tilde{\amin}$~\cite{sussner03b}.
%These products are defined as follows:
%\begin{df}
%Let $W \in \{ -1, 0 \}^{n \times n}$ and $M \in \{ 0, 1 \}^{n \times n}$,
%and let ${\mathbf w}_i^t$ and ${\mathbf m}_i^t$ denote the transposes of
%the $i$-th rows of $W$ and $M$, respectively.
%Let ${\bf 0} \neq {\bf x} \in \{ 0, 1 \}^{n}$. If ${\mathcal C}({\mathbf m}_i^t)$,
%the complement of ${\mathbf m}_i^t$, differs from ${\bf 0} \in \{ 0, 1 \}^{n}$
%for all $i = 1, \ldots , n$, then we define $W {\tilde \amax} {\bf x}$,
%the {\em fuzzy max product} of $W$ and ${\bf x}$, and $M {\tilde \amin} {\bf x}$,
%the {\em fuzzy min product} of $M$ and ${\bf x}$, as follows:
%\begin{eqnarray}
%(W {\tilde \amax} {\bf x})_{i} = {\cal SP}({\bf x},-{\bf w}_i^t) \, , \\
%(M {\tilde \amin} {\bf x})_{i} = {\cal S}({\overline {\bf m}}_i^t, {\bf x}) \, ,
%\end{eqnarray}
%where
%\begin{eqnarray}
%{\cal SP}({\bf x},{\bf y}) =
%\frac{ \sum_{i=1}^{n} 0 \vee (x_{i}-y_{i})}{\sum_{i=1}^{n} x_{i}} \, , \\
%{\cal S}({\bf x},{\bf y})= 1 - {\cal SP}({\bf x},{\bf y}) \, .
%\end{eqnarray}
%\end{df}
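Since the formal definitions are beyond the scope of this extended abstract, we only sketch the flavor of the fuzzy min product in Python/NumPy. The subsethood-type measures below are our hedged reconstruction of one reading of~\cite{sussner03b} and should not be taken as the authoritative definition.
\begin{verbatim}
import numpy as np

def SP(x, y):
    # degree to which x fails to lie below y
    return (np.sum(np.maximum(0.0, x - y))
            / np.sum(x))

def fuzzy_min_product(M, x):
    # M binary, no all-ones row; x binary, x != 0;
    # i-th output: 1 - SP(complement of row i, x)
    return np.array([1.0 - SP(1.0 - m, x)
                     for m in M])
\end{verbatim}
Note that the output is a fuzzy vector in $[0,1]^p$ even though $M$ and $\vetx$ are binary.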
The new two-layer MAM model that we introduce in this paper is based on a combination of the two approaches mentioned above~\cite{sussner03a,sussner03b}.
%The hidden layer, which can be implemented using MaxNet, computes $h(\vetx)$,
%where $h_i(\vetx) = 1 \Leftrightarrow x_i \geq \bigvee_{j=1}^n x_j$.
%In the hidden layer, we apply a fuzzy min product followed by the
%thresholding function $h:\R^n\longrightarrow\{0,1\}^n$ given by
%\bb h_i(\vetx) = \left\{ \begin{array}{cl} 1 & \mbox{ if }
%\;\;\; x_i \geq \bigvee_{j=1}^n x_j \\ 0 & \mbox{ else. } \end{array} \right. \ee
Let $\{(\vetx^\xi,\vety^\xi): \xi=1,\ldots,k \}$ be the set of fundamental memories, where $\vetx^\xi \in \{0,1\}^n$ and $\vety^\xi \in \{0,1\}^m$.
%The symbol $X$ denotes the matrix in $\{0,1\}^{n \times k}$ whose columns
%are the vectors $\vetx^\xi$, and the symbol $Y$ denotes the matrix in
%$\{0,1\}^{m \times k}$ whose columns are the vectors $\vety^\xi$.
Let $Z$ be a matrix of the form $[\mathbf{z}^1, \mathbf{z}^2,\ldots,\mathbf{z}^k ] \in \{0,1\}^{p \times k}$ such that the columns $\mathbf{z}^\xi$ satisfy the conditions $\mathbf{z}^\xi \not\leq \mathbf{z}^\gamma$ and $\mathbf{z}^\xi \wedge \mathbf{z}^\gamma = \mathbf{0}$ for all $\gamma \not= \xi$.
%The vectors $\mathbf{z}^\xi$ represent intermediary patterns.
The following equations determine the recall phase of the proposed two-layer MAM. Recall that the symbol $M_X^{XZ}$ denotes $M_{XZ} \amin M_{XX}$~\cite{sussner03a}:
\begin{eqnarray}
\label{EqMAM1}
{\mathbf w} = h(M_{X}^{XZ} \tilde{\amin} \vetx) \, , \\
\label{EqMAM2}
\vety = W_{ZY} \amax {\mathbf w} \, .
\end{eqnarray}
Here, $h$ denotes the thresholding function given by $h_i(\vetx) = 1$ if $x_i \geq \bigvee_{j=1}^n x_j$ and $h_i(\vetx) = 0$ otherwise.
%Note that $M_{X}^{XZ} \tilde{\amin} \vetx \in [0,1]^{k}$ yields a fuzzy
%vector and that an application of $h$ defuzzifies this vector.
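For the reader's convenience, we also sketch the recording and recall phases of Equations~\ref{EqMAM1} and \ref{EqMAM2} in Python/NumPy, reusing the functions max\_product, min\_product, and fuzzy\_min\_product from the previous sketches; again, this is our own non-authoritative illustration.
\begin{verbatim}
import numpy as np

def h(v):
    # h_i(v) = 1  iff  v_i >= max_j v_j
    return (v >= np.max(v)).astype(float)

def record(X, Y, Z):
    # M_X^{XZ} = M_{XZ} min-prod M_{XX}
    M = min_product(max_product(Z, -X.T),
                    max_product(X, -X.T))
    W = min_product(Y, -Z.T)  # W_{ZY}
    return M, W

def recall(M, W, x):
    w = h(fuzzy_min_product(M, x))  # 1st layer
    y = max_product(W, w[:, None])  # 2nd layer
    return y.ravel()
\end{verbatim}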
\section{Experimental results}

\begin{figure}
\begin{center}
\includegraphics[width=.95\columnwidth]{result}
\caption{Percentage of perfect recall.}
\label{fig:result}
\end{center}
\end{figure}

We used an experiment from the literature to test our new model~\cite{sussner03b,oh94}. Consider the ten pattern vectors $\vetx^1, \ldots, \vetx^{10}$ corresponding to the images of Figure~\ref{fig:vowels}. We stored the five associations $({\bf x}^1,{\bf x}^1), \ldots , ({\bf x}^5, {\bf x}^5)$ using $W_{XX}$, a discrete Hopfield net, and a projection-recorded DAM~\cite{hassoun93}. Moreover, we stored $({\bf x}^1,{\bf x}^6), \ldots , ({\bf x}^5, {\bf x}^{10})$ using a correlation-recorded BAM~\cite{hassoun93}, the morphological model given by Equation~\ref{fuzzAMM} (FAMMT), and our new MAM model given by Equations~\ref{EqMAM1} and \ref{EqMAM2} for the special case where $Z = I \in \{0,1\}^{k \times k}$. We introduced random noise into each of the uppercase vowels
%${\bf p}^{\xi}$, where ${\xi} = 1, \ldots , 5,
by reversing each pixel with probability $p$.
%This process yields vectors ${\tilde {\bf x}}^{\xi}$ that represent
%corrupted versions of the original vectors $\vetx^\xi$.
For each probability $p$, Figure~\ref{fig:result} shows the mean percentage of perfect recalls of ${\bf x}^{\xi}$ over $1000$ trials for each $\xi$. Note that our new two-layer MAM model outperformed all other models we considered in this experiment.

\begin{bibsection}
\begin{biblist}[\resetbiblist{7}]

\bib{cuninghame-green95}{book}{
author = {R. Cuninghame-Green},
title = {Minimax Algebra and Applications},
booktitle = {Advances in Imaging and Electron Physics},
editor = {P. Hawkes},
volume = {90},
pages = {1\ndash 121},
year = {1995},
publisher = {Academic Press},
address = {New York, NY}
}

\bib{hassoun93}{book}{
author={M. H. Hassoun},
title={Dynamic Associative Neural Memories},
booktitle={Associative Neural Memories: Theory and Implementation},
editor={M. H. Hassoun},
publisher={Oxford University Press},
address={Oxford, U.K.},
date={1993}
}

\bib{oh94}{article}{
author={H. Oh},
author={S. C. Kothari},
title={Adaptation for Learning in Bidirectional Associative Memory},
date={1994},
journal={IEEE Transactions on Neural Networks},
volume={5},
number={4},
pages={576\ndash 583}
}

\bib{ritter98}{article}{
author={G. X. Ritter},
author={P. Sussner},
author={J. L. Diaz de Leon},
title={Morphological Associative Memories},
date={1998},
journal={IEEE Transactions on Neural Networks},
volume={9},
number={2},
pages={281\ndash 293}
}

\bib{sussner03a}{article}{
author={P. Sussner},
title={Associative Morphological Memories Based on Variations of the Kernel and Dual Kernel Methods},
date={2003},
journal={Neural Networks},
volume={16},
number={8},
pages={625\ndash 632}
}

\bib{sussner03b}{article}{
author={P. Sussner},
title={Generalizing Operations of Binary Morphological Autoassociative Memories Using Fuzzy Set Theory},
date={2003},
journal={Journal of Mathematical Imaging and Vision},
volume={19},
number={2},
pages={81\ndash 93}
}

\end{biblist}
\end{bibsection}

\end{document}