# PSYCO 452 Geometry of Delta learning
# Delta rule learning of 3D vectors
# This is a simple script that graphically demonstrates
# learning of associations between pairs of 3D basis vectors
# using the Delta rule
# Author: Michael R.W. Dawson
# This version: August 18, 2014

######################################################
# Initialization of TBL patterns, learning rate,
# and weight matrix
######################################################

# ---------- Basis Vectors ----
# Define 3 patterns to be used in learning.
# Note that each is a basis vector for 3D space!
p1 <- matrix(c(1, 0, 0), nrow = 3, ncol = 1) # pattern 1
p2 <- matrix(c(0, 1, 0), nrow = 3, ncol = 1) # pattern 2
p3 <- matrix(c(0, 0, 1), nrow = 3, ncol = 1) # pattern 3

# ---------- Linearly Independent Vectors ----
# Alternative patterns: three linearly independent vectors.
# Uncomment these (and comment out the basis vectors above) to use them.
# p1 <- matrix(c(0.1, 0.2, 0.3), nrow = 3, ncol = 1)   # pattern 1
# p2 <- matrix(c(0.4, -0.5, 0.6), nrow = 3, ncol = 1)  # pattern 2
# p3 <- matrix(c(0.7, -0.9, -0.5), nrow = 3, ncol = 1) # pattern 3

# ---------- Linearly Dependent Vectors ----
# Alternative patterns: three linearly dependent vectors.
# p1 <- matrix(c(0.1, 0.2, 0.3), nrow = 3, ncol = 1) # pattern 1
# p2 <- matrix(c(0.4, 0.5, 0.6), nrow = 3, ncol = 1) # pattern 2
# p3 <- matrix(c(0.5, 0.7, 0.9), nrow = 3, ncol = 1) # pattern 3

# ---------- Plotting Origin ----
# Origin used to plot network responses as vectors in a 3D space.
# Stored as a ROW vector because arrows3D takes row-vector endpoints.
origin <- matrix(c(0, 0, 0), nrow = 1, ncol = 3)

# ---------- Learning Rate ----
# Scales each Delta rule weight change.
LearningRate <- 0.1

# ---------- Maximum number of training sweeps ----
MaxEpochs <- 150

# ---------- Initial weight matrix ----
# Tabula rasa: start with all-zero connection weights.
# (The scalar 0 is recycled by matrix() to fill all 9 cells,
# replacing the original explicit c(0,0,0,0,0,0,0,0,0).)
Wts <- matrix(0, nrow = 3, ncol = 3)
# Or fill it with small random numbers -- noise! -- instead:
# Wts <- matrix(runif(9, min = -0.3, max = 0.3), nrow = 3, ncol = 3)
# Show the initial weights. print() is used throughout this
# section so the values appear even when the script is run via
# source(), where bare top-level expressions do not auto-print.
print(Wts)

# Make sure that the required R packages are installed;
# uncomment the two lines below if not!
# install.packages("rgl")
# install.packages("compositions")

# Load the required libraries; they handle the graphing
library("rgl")
library("compositions")

# Check to see what we have: each pattern, the learning rate,
# and the initial connection weights.
print(p1)
print(p2)
print(p3)
print(LearningRate)
print(Wts)

###############################################################
# Learn three different associations each epoch. After        #
# learning all three, test recall by presenting each pattern  #
# as a cue. Plot the result to show the geometry of learning. #
###############################################################

################################################################
# Start by drawing a 3D space in which to put plotted vectors  #
################################################################
open3d(windowRect = c(0, 0, 800, 800)) # rgl graphics window

# Use rgl to draw a coordinate system spanning [-1, 1] on each axis
decorate3d(c(-1, 1), c(-1, 1), c(-1, 1),
           xlab = "x", ylab = "y", zlab = "z",
           box = TRUE, axes = TRUE, main = NULL, sub = NULL)

# Draw the three patterns in the system, in black.
# Each black vector is a correct output in our graph!
# Each pattern is transposed because arrows3D plots row vectors.
arrows3D(origin, t(p1), length = 0.1, lwd = 5, col = "black")
arrows3D(origin, t(p2), length = 0.1, lwd = 5, col = "black")
arrows3D(origin, t(p3), length = 0.1, lwd = 5, col = "black")

#########################################################
# With the space drawn, let us do some learning. We will
# test recall, and plot each of the recalled vectors in
# the graph each sweep. How close do they come to the
# correct responses drawn in black?
#########################################################

# ----------------------------------------------------------------
# delta_learn: one Delta rule update of the weight matrix.
#
# W      current 3x3 weight matrix
# cue    3x1 column vector presented as the cue
# target 3x1 column vector that should be recalled
# rate   learning rate scaling the weight change
#
# Returns W + rate * (target - W %*% cue) %*% t(cue).
# The Delta rule is Hebb learning of the error vector (T - O)
# with the cue: the weight change is the outer product of the
# error and the cue, scaled by the learning rate. (The original
# built this with %o%, which yields a 3x1x1x3 array that was then
# re-wrapped with matrix(); the direct matrix product of the 3x1
# error with the 1x3 transposed cue gives the same 3x3 result.)
delta_learn <- function(W, cue, target, rate) {
  Recall <- W %*% cue        # output recalled by the current weights
  TminusO <- target - Recall # error vector = Target minus Output
  W + rate * (TminusO %*% t(cue))
}

# ----------------------------------------------------------------
# plot_recall: present `cue` to the weights and draw the recalled
# vector in `colour`. The recalled column vector is transposed
# because arrows3D plots row vectors from `origin`.
plot_recall <- function(W, cue, colour) {
  Recall <- W %*% cue # recall = weights premultiplying the cue vector
  arrows3D(origin, t(Recall), length = 0.1, lwd = 5, col = colour)
}

# --------------------------------------
# Put learning and recall in a for loop, until the desired
# maximum number of epochs is reached.
for (epoch in seq_len(MaxEpochs)) {
  ##########################################################
  # Learn three different associations between pattern pairs:
  #   p1 to cue p2
  #   p2 to cue p3
  #   p3 to cue p1
  # The Delta rule is used for all of this learning.
  ##########################################################
  Wts <- delta_learn(Wts, p1, p2, LearningRate) # association 1
  Wts <- delta_learn(Wts, p2, p3, LearningRate) # association 2
  Wts <- delta_learn(Wts, p3, p1, LearningRate) # association 3

  ############################################################
  # Now that this epoch's learning is done, test recall.
  # Present each cue and graph each recalled vector.
  # Watching the graph shows how the responses change over
  # time (i.e. over epochs): each coloured vector should
  # converge on a black target vector.
  ############################################################
  plot_recall(Wts, p1, "red")   # is p2 correctly recalled?
  plot_recall(Wts, p2, "blue")  # is p3 correctly recalled?
  plot_recall(Wts, p3, "green") # is p1 correctly recalled?
} # when the for loop stops, so does learning and graphing!