Basic Neural Network Implementation
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 17:48:52 2015

@author: gangireddy

Thanks to http://danielfrg.com/ for the basic neural network
implementation that this file modifies.
"""

import math
import random

import numpy as np

class NN:

    def __init__(self, NI, NH, NO, useinputs):
        # number of nodes in each layer
        self.ni = NI + 1  # +1 for the bias node
        self.nh = NH
        self.no = NO

        # node activations, all set to 1.0 initially
        self.ai = [1.0] * self.ni
        self.ah = [1.0] * self.nh
        self.ao = [1.0] * self.no

        # weight matrices, initialized to random values
        self.wi = np.zeros((self.ni, self.nh))
        self.wo = np.zeros((self.nh, self.no))
        randomizeMatrix(self.wi, -0.2, 0.2)
        randomizeMatrix(self.wo, -0.2, 0.2)

        # last weight changes, kept for the momentum term
        self.ci = np.zeros((self.ni, self.nh))
        self.co = np.zeros((self.nh, self.no))
        self.ct = np.zeros(self.ni - 1)

        # ltable: learnable values substituted for active binary inputs
        # when useinputs is False (see getInputs)
        self.ltable = [0.0] * (self.ni - 1)
        randomizeList(self.ltable, -0.2, 0.2)
        self.useinputs = useinputs
    def getInputs(self, x):
        # replace each active binary input with its learned ltable value
        inputs = [0.0] * self.ni
        for i in range(self.ni - 1):
            if x[i]:
                inputs[i] = self.ltable[i]
        inputs[-1] = 1.0  # bias slot
        return inputs
    def runNN(self, x):
        if len(x) != self.ni - 1:
            raise ValueError('incorrect number of inputs')
        if not self.useinputs:
            inputs = self.getInputs(x)
        else:
            inputs = x
        for i in range(self.ni - 1):
            self.ai[i] = inputs[i]
        # input -> hidden
        for j in range(self.nh):
            total = 0.0
            for i in range(self.ni):
                total += self.ai[i] * self.wi[i][j]
            self.ah[j] = sigmoid(total)
        # hidden -> output
        for k in range(self.no):
            total = 0.0
            for j in range(self.nh):
                total += self.ah[j] * self.wo[j][k]
            self.ao[k] = sigmoid(total)
        return self.ao
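
    # Forward pass in equations: ah[j] = tanh(sum_i ai[i] * wi[i][j]) and
    # ao[k] = tanh(sum_j ah[j] * wo[j][k]); the bias enters through ai[-1],
    # which stays fixed at 1.0.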
    def backPropagate(self, x, targets, N, M):
        # http://www.youtube.com/watch?v=aVId8KMsdUU&feature=BFa&list=LLldMCkmXl4j9_v0HeKdNcRA
        # calc output deltas
        output_deltas = [0.0] * self.no
        for k in range(self.no):
            error = targets[k] - self.ao[k]
            output_deltas[k] = error * dsigmoid(self.ao[k])
        # update output weights
        for j in range(self.nh):
            for k in range(self.no):
                # output_deltas[k] * self.ah[j] is the full derivative dError/dwo[j][k]
                change = output_deltas[k] * self.ah[j]
                self.wo[j][k] += N * change + M * self.co[j][k]
                self.co[j][k] = change
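        # The update rule is w += N * delta * activation + M * previous_change:
        # N is the learning rate and M scales the momentum term remembered in
        # self.co (and in self.ci for the input weights below).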
        # calc hidden deltas
        hidden_deltas = [0.0] * self.nh
        for j in range(self.nh):
            error = 0.0
            for k in range(self.no):
                error += output_deltas[k] * self.wo[j][k]
            hidden_deltas[j] = error * dsigmoid(self.ah[j])
        # update input weights; snapshot them first, because the ltable
        # update below needs the pre-update values
        temp = self.wi.copy()
        for i in range(self.ni):
            for j in range(self.nh):
                change = hidden_deltas[j] * self.ai[i]
                # print('activation', self.ai[i], 'synapse', i, j, 'change', change)
                self.wi[i][j] += N * change + M * self.ci[i][j]
                self.ci[i][j] = change
        # update ltable entries for the active inputs
        if not self.useinputs:
            for i in range(self.ni - 1):
                change = 0.0
                if x[i]:
                    for j in range(self.nh):
                        change += temp[i][j] * hidden_deltas[j]
                self.ltable[i] += N * change + M * self.ct[i]
                self.ct[i] = change
        # calc combined error, accumulated over all outputs
        # (1/2 for differential convenience, **2 for the magnitude)
        error = 0.0
        for k in range(len(targets)):
            error += 0.5 * (targets[k] - self.ao[k]) ** 2
        return error
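
    # Backward pass in equations: output_deltas[k] = (targets[k] - ao[k]) *
    # (1 - ao[k]**2) and hidden_deltas[j] = (sum_k output_deltas[k] * wo[j][k]) *
    # (1 - ah[j]**2); each weight then moves by N * delta * upstream activation
    # plus the momentum term.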
    def weights(self):
        print('Input weights:')
        for i in range(self.ni):
            print(self.wi[i])
        print('Output weights:')
        for j in range(self.nh):
            print(self.wo[j])
        print('')

    def test(self, patterns):
        inputs = patterns[0]
        print('Inputs:', patterns[0], '-->', self.runNN(inputs), '\tTarget', patterns[1])
    def train(self, patterns, errorRate=0.001, N=0.5, M=0.1):
        inputs = patterns[0]
        targets = patterns[1]
        # up to 100 epochs, stopping early once the error falls below errorRate
        for i in range(100):
            self.runNN(inputs)
            error = self.backPropagate(inputs, targets, N, M)
            if error < errorRate:
                break
            # self.test(patterns)
        # print('final convergence error rate:', error)

def sigmoid(x):
    # tanh activation, kept under the original 'sigmoid' name
    return math.tanh(x)

# the derivative of the sigmoid function in terms of its output
# proof here:
# http://www.math10.com/en/algebra/hyperbolic-functions/hyperbolic-functions.html
def dsigmoid(y):
    return 1 - y ** 2
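
# Since d/dx tanh(x) = 1 - tanh(x)**2, substituting y = tanh(x) gives
# dsigmoid(y) = 1 - y**2; this is why dsigmoid is applied to a node's
# output (self.ao[k], self.ah[j]) rather than to its pre-activation sum.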
def makeMatrix(I, J, fill=0.0):
    m = []
    for i in range(I):
        m.append([fill] * J)
    return m

def randomizeMatrix(matrix, a, b):
    for i in range(len(matrix)):
        for j in range(len(matrix[0])):
            matrix[i][j] = random.uniform(a, b)

def randomizeList(lst, a, b):
    for i in range(len(lst)):
        lst[i] = random.uniform(a, b)
'''
if __name__ == "__main__":
    pat = [ [[1,0], [-1]], [[0,1], [+1]] ]
    myNN = NN(2, 2, 1, useinputs=True)
    for i in pat:
        myNN.train(i)
'''
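
A minimal usage sketch of the ltable mode (an illustration, not part of the original gist; the toy patterns mirror the commented-out block above, with targets of ±1 to match tanh's output range):

if __name__ == "__main__":
    # same toy patterns as above: [inputs, targets]
    pat = [[[1, 0], [-1]], [[0, 1], [+1]]]
    # useinputs=False routes each active binary input through its learned
    # ltable value instead of feeding the raw 0/1 input directly
    myNN = NN(2, 2, 1, useinputs=False)
    for p in pat:
        myNN.train(p)
    for p in pat:
        myNN.test(p)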