I was running into quite a few errors (OOM, shape problems, and so on) that I had managed to fix somehow. Now I am getting this: InvalidArgumentError: logits and labels must be same size: logits_size=[3215,25] labels_size=[10,25]
But I cannot get my head around this error. I have searched a lot, and I also tried the cross-entropy-with-logits method in TensorFlow and the tf.squeeze function, but neither helped me resolve it. Here is the link to the code (it is a GitHub link with the whole stack trace and the errors).
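For reference, my understanding is that the op raising the error requires logits and labels to have exactly the same [batch_size, n_classes] shape; a minimal sketch of that contract (hypothetical shapes, TF 1.x API):

import tensorflow as tf
# both tensors must be [batch_size, n_classes]; here 10 x 25, matching my label batches
logits = tf.placeholder(tf.float32, shape=[10, 25])
labels = tf.placeholder(tf.float32, shape=[10, 25])
# if the first dimensions differed (e.g. 3215 vs 10), this op would raise the
# InvalidArgumentError above at run time
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)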
Here is the link to the dataset (it is about 500 MB).
Here is the code (just in case):
from PIL import Image
import numpy as np
import glob
from numpy import array
import pandas as pd
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
import h5py
import tensorflow as tf
def loading_saving_image_as_grayscale_train(img):
    ##combined_path='M:/PycharmProjects/AI+DL+CP/test_img'+img
    loading=Image.open(img)
    loading=loading.resize((28,28),Image.ANTIALIAS)
    loading=loading.convert('L')
    #loading.show()
    conversion_to_array=np.asarray(loading,dtype=float)
    train_data.append(conversion_to_array)
def loading_saving_image_as_grayscale_test(img):
    #combined_path = 'M:/PycharmProjects/AI+DL+CP/train_img/' + img
    #print(combined_path)
    loading=Image.open(img,'r')
    loading=loading.resize((28,28),Image.ANTIALIAS)
    loading=loading.convert('L')
    conversion_to_array=np.asarray(loading,dtype=float)
    test_data.append(conversion_to_array)
import os
import requests, zipfile, io
import pandas as pd
#url = requests.get('https://he-s3.s3.amazonaws.com/media/hackathon/deep-learning-challenge-1/identify-the-objects/a0409a00-8-dataset_dp.zip')
#data = zipfile.ZipFile(io.BytesIO(url.content))
#data.extractall()
#os.listdir()
test_data=[]
train_data=[]
train=glob.glob('train_img/*.png')
test=glob.glob('test_img/*.png')
#other=loading_saving_image_as_grayscale('M:/PycharmProjects/AI+DL+CP/test_img/test_1000b.png')
#print(Image.open('M:/PycharmProjects/AI+DL+CP/test_img/test_1000b.png'))
#print(test)
#loading_sample=Image.open('M:/PycharmProjects/AI+DL+CP/test_img/test_1000b.png')
#loading_sample.show()
#print(train)
#print(test)
for data in train:
    #print(data)
    loading_saving_image_as_grayscale_train(data)
for item in test:
    #print(item)
    loading_saving_image_as_grayscale_test(item)
#print(train_data)
#print(test_data)
'''with Image.fromarray(train_data[1]) as img:
    width,height=img.size
    print(width,height)
'''
def OneHot(label,n_classes):
    label=np.array(label).reshape(-1)
    label=np.eye(n_classes)[label]
    return label
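# quick sanity check of the helper above (hypothetical example values; the
# sklearn OneHotEncoder below is what actually encodes the real labels):
#     print(OneHot([0, 2, 1], 3))   # -> [[1,0,0],[0,0,1],[0,1,0]]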
dataframe=pd.read_csv('train.csv')
train_data=np.asarray(train_data)
test_data=np.asarray(test_data)
uni=dataframe['label']
dataframe1=pd.read_csv('test.csv')
dataframe1.index=dataframe1.index+1
only_index=dataframe1['image_id']  # image ids for the submission come from test.csv
label=LabelEncoder()
integer_encoding=label.fit_transform(uni)
#del uni
#del dataframe
#print(integer_encoding)
binary=OneHotEncoder(sparse=False)
integer_encoding=integer_encoding.reshape(len(integer_encoding),1)
onehot=binary.fit_transform(integer_encoding)
train_data=np.reshape(train_data,[-1,28,28,1])
test_data=np.reshape(test_data,[-1,28,28,1])
#onehot=np.reshape(onehot,[-1,10])
train_data=np.transpose(train_data,(0,2,1,3))
test_data=np.transpose(test_data,(0,2,1,3))
train_data=train_data.astype(np.float32)
test_data=test_data.astype(np.float32)
print(train_data.shape,test_data.shape,onehot.shape)
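# note: the first dimension printed for train_data here (3215, judging from the
# error message) is what later shows up as logits_size=[3215,25], because the
# whole train_data array is pushed through the network when the graph is built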
graph = tf.Graph()
with graph.as_default():
    # placeholders for input data batch_size x 28 x 28 x 1 and labels batch_size x 25
    data_placeholder = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
    label_placeholder = tf.placeholder(tf.int32, shape=[None, 25])
    # defining decaying learning rate
    global_step = tf.Variable(0)
    decay_rate = tf.train.exponential_decay(1e-4, global_step=global_step, decay_steps=10000, decay_rate=0.97)
    layer1_weights = tf.Variable(tf.truncated_normal([3, 3, 1, 64], stddev=0.1))
    layer1_biases = tf.Variable(tf.constant(0.1, shape=[64]))
    layer2_weights = tf.Variable(tf.truncated_normal([3, 3, 64, 32], stddev=0.1))
    layer2_biases = tf.Variable(tf.constant(0.1, shape=[32]))
    layer3_weights = tf.Variable(tf.truncated_normal([2, 2, 32, 20], stddev=0.1))
    layer3_biases = tf.Variable(tf.constant(0.1, shape=[20]))
    layer4_weights = tf.Variable(tf.truncated_normal([20, 25], stddev=0.1))
    layer4_biases = tf.Variable(tf.constant(0.1, shape=[25]))
    layer5_weights = tf.Variable(tf.truncated_normal([25, 25], stddev=0.1))
    layer5_biases = tf.Variable(tf.constant(0.1, shape=[25]))
    def layer_multiplication(data_input_given):
        # Convolutional layer 1: 28x28x1 -> 28x28x64
        #data_input_given=np.reshape(data_input_given,[-1,64,64,1])
        CNN1 = tf.nn.relu(tf.nn.conv2d(data_input_given, layer1_weights, strides=[1, 1, 1, 1], padding='SAME') + layer1_biases)
        print('CNN1 Done!!')
        # Pooling layer: 28x28 -> 7x7
        Pool1 = tf.nn.max_pool(CNN1, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding='SAME')
        print('Pool1 Done')
        # Second convolutional layer (bias added before the ReLU): 7x7x64 -> 7x7x32
        CNN2 = tf.nn.relu(tf.nn.conv2d(Pool1, layer2_weights, strides=[1, 1, 1, 1], padding='SAME') + layer2_biases)
        print('CNN2 Done')
        # Second pooling: 7x7 -> 4x4
        Pool2 = tf.nn.max_pool(CNN2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        print('Pool2 Done')
        # Third convolutional layer: 4x4x32 -> 4x4x20
        CNN3 = tf.nn.relu(tf.nn.conv2d(Pool2, layer3_weights, strides=[1, 1, 1, 1], padding='SAME') + layer3_biases)
        print('CNN3 Done')
        # Third pooling: 4x4 -> 2x2
        Pool3 = tf.nn.max_pool(CNN3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        print('Pool3 Done')
        # Fourth pooling (2x2 -> 1x1), then flatten for the fully connected layer
        Pool4 = tf.nn.max_pool(Pool3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        FullyCon = tf.reshape(Pool4, [-1, 20])
        FullyCon = tf.nn.relu(tf.matmul(FullyCon, layer4_weights) + layer4_biases)
        print('FullyConnected Done')
        dropout = tf.nn.dropout(FullyCon, 0.4)
        # note: dropout already has shape [batch, 25] here, so this reshape is a no-op
        dropout = tf.reshape(dropout, [-1, 25])
        dropout = tf.matmul(dropout, layer5_weights) + layer5_biases
        #print(dropout.shape)
        return dropout
    train_input = layer_multiplication(train_data)
    print(train_input.shape)
    loss = (tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=label_placeholder, logits=train_input))
            + 0.01 * tf.nn.l2_loss(layer1_weights)
            + 0.01 * tf.nn.l2_loss(layer2_weights)
            + 0.01 * tf.nn.l2_loss(layer3_weights)
            + 0.01 * tf.nn.l2_loss(layer4_weights)
            )
    #other=(tf.squeeze(label_placeholder))
    #print(tf.shape())
    optimizer = tf.train.GradientDescentOptimizer(name='Stochastic', learning_rate=decay_rate).minimize(loss, global_step=global_step)
    #print(train_input.shape)
batch_size = 10
num_steps = 10000
prediction = []
with tf.Session(graph=graph) as session:
    tf.global_variables_initializer().run()
    print('Initialized')
    for i in range(num_steps):
        print("in loop")
        offset = (i * batch_size) % (onehot.shape[0] - batch_size)
        batch_data = train_data[offset:(offset + batch_size), :, :]
        batch_labels = onehot[offset:(offset + batch_size), :]
        print("training")
        feed_dict = {data_placeholder: batch_data, label_placeholder: batch_labels}
        _, l, predictions = session.run(
            [optimizer, loss, train_input], feed_dict=feed_dict)
    print(session.run(tf.argmax(label_placeholder, 1), feed_dict={data_placeholder: test_data}))
    prediction.append(session.run(tf.argmax(label_placeholder, 1), feed_dict={data_placeholder: test_data}))
    print('Finished')
submit = pd.DataFrame({'image_id': only_index, 'label': prediction})
submit.to_csv('submit.csv', index=False)
I also have a doubt about predicting the class labels. Can someone tell me whether the method I am using to store the predicted class labels will work or not?
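In case it helps the discussion, a sketch of the alternative I have in mind (hypothetical: it assumes the logits are built from data_placeholder so that test_data can actually flow through the network):

    # predicted class per test image, taken from the model output rather than
    # from label_placeholder (which only ever holds ground-truth labels)
    test_logits = session.run(train_input, feed_dict={data_placeholder: test_data})
    predicted_labels = test_logits.argmax(axis=1)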
Will try these tips today. Any suggestion on how I should reshape the last layer correctly in order to avoid this error? –
For the last reshape operation, should I do what you did above (the one with [-1,25])? –
The suggestion is in the answer - you should be able to read the shapes of Pool4 and see what you need to reshape to. The last transformation is actually redundant; your tensor should already have that size beforehand, and if it does not, then something is wrong in the earlier lines. – lejlot
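Following this suggestion, a sketch of how the shapes could be inspected and the batch mismatch avoided (a hypothetical fix, reusing the names from the question):

    # inside layer_multiplication, the static shape of Pool4 can be read directly:
    #     print(Pool4.get_shape())   # expect (?, 1, 1, 20) for 28x28 inputs,
    #                                # so tf.reshape(Pool4, [-1, 20]) is consistent
    # and building the graph on the placeholder instead of the full train_data
    # array makes the logits' batch dimension follow whatever batch is fed:
    train_input = layer_multiplication(data_placeholder)
    # the logits are now [batch_size, 25] and match label_placeholder, so
    # logits_size=[3215,25] vs labels_size=[10,25] can no longer occur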