Access the model's training history in TFLearn?

I want to log the training and validation loss to CSV files in TFLearn, then reload them the way we do in Keras with the history object, so I can plot graphs. Please help me.
I'm not sure this will help, but you can save a trained model with this line: model.save('mnist.tflearn')
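For the CSV part of the question, TFLearn lets you hook into training with a callback. Below is a minimal sketch, assuming a TFLearn version where tflearn.callbacks.Callback exists and on_epoch_end receives a training_state object exposing epoch, global_loss and val_loss (field names taken from the TFLearn trainer; verify them against your installed version). The file name history.csv is just an example:

import csv
import tflearn

class CSVLoggerCallback(tflearn.callbacks.Callback):
    def __init__(self, csv_path):
        self.csv_path = csv_path
        # Write the header once, Keras-history style
        with open(self.csv_path, 'w') as f:
            csv.writer(f).writerow(['epoch', 'train_loss', 'val_loss'])

    def on_epoch_end(self, training_state):
        # Append one row per epoch with the current losses
        with open(self.csv_path, 'a') as f:
            csv.writer(f).writerow([training_state.epoch,
                                    training_state.global_loss,
                                    training_state.val_loss])

# Hypothetical usage during training, before saving:
# logger = CSVLoggerCallback('history.csv')
# model.fit(X, Y, n_epoch=10, validation_set=(test_x, test_y),
#           show_metric=True, callbacks=[logger])
# model.save('mnist.tflearn')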
Once the model is saved, you can reload it whenever you want. Here is an example that reloads the model and runs predictions:
from __future__ import division, print_function, absolute_import
import tflearn.datasets.mnist as mnist
import csv
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression
from tflearn.data_preprocessing import ImagePreprocessing
from tflearn.data_augmentation import ImageAugmentation
from tflearn.metrics import Accuracy
X, Y, test_x, test_y = mnist.load_data(one_hot=True)
shape = 28
X = X.reshape([-1, shape, shape, 1])
test_x = test_x.reshape([-1, shape, shape, 1])
###################################
# Image transformations
###################################
# normalisation of images
img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()
# Create extra synthetic training data by flipping & rotating images
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle=25.)
###################################
# Define network architecture
###################################
# Input is a 28x28 grayscale image (1 channel)
network = input_data(shape=[None, shape, shape, 1],
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)
# 1: Convolution layer with 32 filters (kernel size 2)
conv_1 = conv_2d(network, 32, 2, activation='relu', name='conv_1')
# 2: Max pooling layer
network = max_pool_2d(conv_1, 2)
# 3: Dropout layer to combat overfitting
network = dropout(network, 0.8)
# 4: Convolution layer with 64 filters
conv_2 = conv_2d(network, 64, 2, activation='relu', name='conv_2')
# 5: Max pooling layer
network = max_pool_2d(conv_2, 2)
# 6: Dropout layer to combat overfitting
network = dropout(network, 0.8)
# 7: Convolution layer with 64 filters
conv_3 = conv_2d(network, 64, 2, activation='relu', name='conv_3')
# 8: Max pooling layer
network = max_pool_2d(conv_3, 2)
# 9: Dropout layer to combat overfitting
network = dropout(network, 0.8)
# 10: Convolution layer with 128 filters
conv_4 = conv_2d(network, 128, 2, activation='relu', name='conv_4')
# 11: Max pooling layer
network = max_pool_2d(conv_4, 2)
# 12: Dropout layer to combat overfitting
network = dropout(network, 0.8)
# 13: Convolution layer with 256 filters
conv_5 = conv_2d(network, 256, 2, activation='relu', name='conv_5')
# 14: Max pooling layer
network = max_pool_2d(conv_5, 2)
# 15: Dropout layer to combat overfitting
network = dropout(network, 0.8)
# 16: Convolution layer with 256 filters
conv_6 = conv_2d(network, 256, 2, activation='relu', name='conv_6')
# 17: Max pooling layer
network = max_pool_2d(conv_6, 2)
# 18: Dropout layer to combat overfitting
network = dropout(network, 0.8)
# 19: Fully-connected 1024-node layer
network = fully_connected(network, 1024, activation='relu')
# 20: Fully-connected output layer with 10 classes
network = fully_connected(network, 10, activation='softmax')
# Configure how the network will be trained
acc = Accuracy(name="Accuracy")
network = regression(network, optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.0005, metric=acc)
# Wrap the network in a model object
model = tflearn.DNN(network)
model.load('mnist.tflearn')
# Run a prediction for each test image
for i in range(len(test_x)):
    im = [test_x[i]]
    pred = model.predict(im)
    print(pred)
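To plot the curves the way you would from a Keras history object, read the CSV back in (a sketch assuming matplotlib is installed and using the hypothetical history.csv written by the callback above):

import csv
import matplotlib.pyplot as plt

epochs, train_loss, val_loss = [], [], []
with open('history.csv') as f:
    for row in csv.DictReader(f):
        epochs.append(int(row['epoch']))
        train_loss.append(float(row['train_loss']))
        val_loss.append(float(row['val_loss']))

plt.plot(epochs, train_loss, label='train loss')
plt.plot(epochs, val_loss, label='validation loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()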