
Virtual neural network in Python

I just wrote a simple neural network in Python, since I have been studying them recently. I am using backpropagation. The activation function is a sigmoid. The inputs and weights are generated randomly, and the ideal output is 0. I am new to Python, so the code is written very inefficiently, but it should be readable. When I run the code, the output is always zero and I cannot figure out why. Also, I did not use any modules.

from random import uniform 
input_one_input_value = 1 
input_two_input_value = 0 
bias_value = 1 

#use global to globalize function-based variables 
#use lists to store data in future 
def hidden_One(): 
    global weighted_sum_hidden_one 
    weighted_sum_hidden_one = (input_one_input_value * weights[0]) + (input_two_input_value * weights[1]) + (bias_value * weights[2]) 
    hidden_one_output = activation(weighted_sum_hidden_one) 
    return hidden_one_output 

def hidden_Two(): 
    global weighted_sum_hidden_two 
    weighted_sum_hidden_two = (input_one_input_value * weights[3]) + (input_two_input_value * weights[4]) + (bias_value * weights[5]) 
    hidden_two_output = activation(weighted_sum_hidden_two) 
    return hidden_two_output 

def output_One(): 
    weighted_sum = (hidden_One() * weights[6]) + (hidden_Two() * weights[7]) + (bias_value * weights[8]) 
    return activation(weighted_sum) 

def activation(x): 
    sigmoid_value = 1/(1+(2.71828 ** x)) 
    return sigmoid_value 

def calculate_gradient(): 
    E = ideal_output - actual_output 
    output_delta = -E * activation(weights[6] + weights[7] + weights[8]) 
    h1_delta = activation(weighted_sum_hidden_one) * weights[6] * output_delta 
    h2_delta = activation(weighted_sum_hidden_two) * weights[7] * output_delta 
    b2_delta = activation(bias_value) * weights[8] * output_delta 
    i1_delta = activation(input_one_input_value) * ((weights[0] * h1_delta) + (weights[3] * h2_delta)) 
    i2_delta = activation(input_one_input_value) * ((weights[1] * h1_delta) + (weights[4] * h2_delta)) 
    b1_delta = activation(bias_value) * ((weights[2] * h1_delta) + (weights[5] * h2_delta)) 
    global w1_gradient 
    global w2_gradient 
    global w3_gradient 
    global w4_gradient 
    global w5_gradient 
    global w6_gradient 
    global w7_gradient 
    global w8_gradient 
    global w9_gradient 
    w1_gradient = input_one_input_value * h1_delta 
    w2_gradient = input_two_input_value * h1_delta 
    w3_gradient = bias_value * h1_delta 
    w4_gradient = input_one_input_value * h2_delta 
    w5_gradient = input_two_input_value * h2_delta 
    w6_gradient = bias_value * h2_delta 
    w7_gradient = hidden_One() * output_delta 
    w8_gradient = hidden_Two() * output_delta 
    w9_gradient = bias_value * output_delta 


def backpropogation(): 
    E = .7 #learning rate 
    a = .3 #momentum to prevent settling for local minima 
    global weightchanges_previous 
    global weight_change 
    weightchanges_previous = [] 
    weight_change = [] 
    if len(weightchanges_previous) == 0: 
     weight_change.append((E * w1_gradient)) 
     weight_change.append((E * w2_gradient)) 
     weight_change.append((E * w3_gradient)) 
     weight_change.append((E * w4_gradient)) 
     weight_change.append((E * w5_gradient)) 
     weight_change.append((E * w6_gradient)) 
     weight_change.append((E * w7_gradient)) 
     weight_change.append((E * w8_gradient)) 
     weight_change.append((E * w9_gradient)) 
     weightchanges_previous.append(weight_change[0]) 
     weightchanges_previous.append(weight_change[1]) 
     weightchanges_previous.append(weight_change[2]) 
     weightchanges_previous.append(weight_change[3]) 
     weightchanges_previous.append(weight_change[4]) 
     weightchanges_previous.append(weight_change[5]) 
     weightchanges_previous.append(weight_change[6]) 
     weightchanges_previous.append(weight_change[7]) 
     weightchanges_previous.append(weight_change[8]) 

    elif len(weightchanges_previous) != 0: 
     weight_change[0] = (E * w1_gradient) + (a * weightchanges_previous[0]) 
     weight_change[1] = (E * w2_gradient) + (a * weightchanges_previous[1]) 
     weight_change[2] = (E * w3_gradient) + (a * weightchanges_previous[2]) 
     weight_change[3] = (E * w4_gradient) + (a * weightchanges_previous[3]) 
     weight_change[4] = (E * w5_gradient) + (a * weightchanges_previous[4]) 
     weight_change[5] = (E * w6_gradient) + (a * weightchanges_previous[5]) 
     weight_change[6] = (E * w7_gradient) + (a * weightchanges_previous[6]) 
     weight_change[7] = (E * w8_gradient) + (a * weightchanges_previous[7]) 
     weight_change[8] = (E * w9_gradient) + (a * weightchanges_previous[8]) 
     while len(weightchanges_previous) > 0 : weightchanges_previous.pop() 
     weightchanges_previous.append((E * w1_gradient) + (a * weightchanges_previous[0])) 
     weightchanges_previous.append((E * w2_gradient) + (a * weightchanges_previous[1])) 
     weightchanges_previous.append((E * w3_gradient) + (a * weightchanges_previous[2])) 
     weightchanges_previous.append((E * w4_gradient) + (a * weightchanges_previous[3])) 
     weightchanges_previous.append((E * w5_gradient) + (a * weightchanges_previous[4])) 
     weightchanges_previous.append((E * w6_gradient) + (a * weightchanges_previous[5])) 
     weightchanges_previous.append((E * w7_gradient) + (a * weightchanges_previous[6])) 
     weightchanges_previous.append((E * w8_gradient) + (a * weightchanges_previous[7])) 
     weightchanges_previous.append((E * w9_gradient) + (a * weightchanges_previous[8])) 

def edit_weights(): 
    weights[0] += weight_change[0] 
    weights[1] += weight_change[1] 
    weights[2] += weight_change[2] 
    weights[3] += weight_change[3] 
    weights[4] += weight_change[4] 
    weights[5] += weight_change[5] 
    weights[6] += weight_change[6] 
    weights[7] += weight_change[7] 
    weights[8] += weight_change[8] 
    while len(weight_change) > 0 : weight_change.pop() 

weights = [] 
x = 0 
while x <=8: 
    weights.append(uniform(-10, 10)) 
    x += 1 

ideal_output = 0 
actual_output = output_One() 
print "Output %d" % output_One() 

x = 0 
while x <= 10: 
    output_One() 
    calculate_gradient() 
    backpropogation() 
    edit_weights() 
    print "Output %d" % output_One() 
    print "----------------------" 
    actual_output = output_One() 
    x += 1 

print "FINAL WEIGHTS:" 
print weights[0] 
print weights[1] 
print weights[2] 
print weights[3] 
print weights[4] 
print weights[5] 
print weights[6] 
print weights[7] 
print weights[8] 


Your problem is the output line:

print "Output %d" % output_One() 

You are using %d, which prints integer values: it converts the value to an integer, rounding it down (integer conversion). Use %f instead and the floating-point numbers should print correctly.
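For example, here is a minimal sketch of the difference (Python 2 syntax, to match the question's code; the value 0.73 is just a hypothetical sigmoid output):

value = 0.73  # hypothetical sigmoid output, somewhere between 0 and 1

# %d converts the value to an integer before printing, so anything in (0, 1)
# is displayed as 0 -- which is why the network's output appeared stuck at zero.
print "Output %d" % value    # prints: Output 0

# %f keeps the floating-point value.
print "Output %f" % value    # prints: Output 0.730000

# %.4f limits the output to four decimal places.
print "Output %.4f" % value  # prints: Output 0.7300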


@user2127869 Did this answer your question? If so, please mark it as such so that it doesn't remain unanswered. – nemo


Yeah, thanks! I figured it out a few hours ago. – mackintosh18


@user2127869: Excellent! The next step is to accept this answer. How to do that is described at http://stackoverflow.com/faq#howtoask. In short: check the box below the reputation counter of the answer you wish to accept. – nemo