
import numpy as np

# sigmoid function
def nonlin(x, deriv=False):
    # when deriv=True, x is assumed to already be the sigmoid output,
    # so the derivative simplifies to x*(1-x)
    if deriv:
        return x*(1-x)
    return 1/(1+np.exp(-x))

# input dataset
X = np.array([[0],
[0.008403361],
[0.035714286],
[0.054621849],
[0.067226891],
[0.117647059],
[0.243697479],
[0.369747899],
[0.495798319],
[0.621848739],
[0.74789916],
[0.87394958],
[1]])

# output dataset
y = np.array([[0.965022422],
[0.425112108],
[0.557847534],
[0.569506726],
[0.624215247],
[0.64573991],
[0.704035874],
[0.753363229],
[0.852017937],
[0.914798206],
[0.941704036],
[0.959641256],
[1]])
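# X and y each hold 13 samples, so both have shape (13, 1)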

# seed random numbers to make the calculation deterministic (just good practice)
np.random.seed(1)

# randomly initialize our weights with mean 0
syn0 = 2*np.random.random((1,3)) - 1
syn1 = 2*np.random.random((3,1)) - 1
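# syn0 maps the single input feature to 3 hidden units, and syn1 maps those
# 3 hidden units to the single output, so the network is a 1-3-1 architecture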

for j in range(60000):

    # Feed forward through layers 0, 1, and 2
    l0 = X
    l1 = nonlin(np.dot(l0, syn0))
    l2 = nonlin(np.dot(l1, syn1))

    # how much did we miss the target value?
    l2_error = y - l2

    if (j % 10000) == 0:
        print(l2_error)
        print("Error in Training:" + str(np.mean(np.abs(l2_error))))

    # in what direction is the target value?
    # were we really sure? if so, don't change too much.
    l2_delta = l2_error*nonlin(l2, deriv=True)

    # how much did each l1 value contribute to the l2 error (according to the weights)?
    l1_error = l2_delta.dot(syn1.T)

    # in what direction is the target l1?
    # were we really sure? if so, don't change too much.
    l1_delta = l1_error * nonlin(l1, deriv=True)

    # update the weights (full-batch gradient step, implicit learning rate of 1)
    syn1 += l1.T.dot(l2_delta)
    syn0 += l0.T.dot(l1_delta)
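
# training is done: syn0 and syn1 now hold the learned weights; the rest of
# the script evaluates them on held-out test data without further updates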

# test input dataset
X_test = np.array([[0.004201681],
[0.031512605],
[0.039915966],
[0.06302521],
[0.071428571],
[0.180672269],
[0.306722689],
[0.432773109],
[0.558823529],
[0.68487395],
[0.81092437],
[0.93697479]])

# test output dataset
y_test = np.array([[0.670852018],
[0.566816143],
[0.557847534],
[0.595515695],
[0.624215247],
[0.659192825],
[0.695067265],
[0.843049327],
[0.905829596],
[0.932735426],
[0.959641256],
[1]])

# forward pass on the test set using the trained weights
l0_test = X_test
l1_test = nonlin(np.dot(l0_test, syn0))
l2_test = nonlin(np.dot(l1_test, syn1))

# how far are the predictions from the test targets?
l2_test_error = y_test - l2_test

print(l2_test_error)
print("Error in Prediction:" + str(np.mean(np.abs(l2_test_error))))
