|
| 1 | +# -*- coding: utf-8 -*- |
| 2 | + |
| 3 | +""" Auto Encoder Example. |
| 4 | +Using an auto encoder on MNIST handwritten digits. |
| 5 | +References: |
| 6 | + Y. LeCun, L. Bottou, Y. Bengio, and P. Haffner. "Gradient-based |
| 7 | + learning applied to document recognition." Proceedings of the IEEE, |
| 8 | + 86(11):2278-2324, November 1998. |
| 9 | +Links: |
| 10 | + [MNIST Dataset] http://yann.lecun.com/exdb/mnist/ |
| 11 | +""" |
| 12 | +from __future__ import division, print_function, absolute_import |
| 13 | + |
| 14 | +import tensorflow as tf |
| 15 | +import numpy as np |
| 16 | +import matplotlib.pyplot as plt |
| 17 | + |
# Import MNIST data (downloaded to /tmp/data/ on first run)
import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Training parameters
learning_rate = 0.01   # RMSProp step size
training_epochs = 20   # full passes over the training set
batch_size = 256       # examples per gradient update
display_step = 1       # print the cost every N epochs
examples_to_show = 10  # reconstructions to plot after training

# Network Parameters
n_hidden_1 = 256  # 1st layer num features
n_hidden_2 = 128  # 2nd layer num features
n_input = 784  # MNIST data input (img shape: 28*28)

# tf Graph input (only pictures — an autoencoder needs no labels)
X = tf.placeholder("float", [None, n_input])

# Randomly-initialized weights; the decoder mirrors the encoder's sizes,
# giving the symmetric shape 784 -> 256 -> 128 -> 256 -> 784.
weights = {
    'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'decoder_h1': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
    'decoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_input])),
}
# One bias vector per layer, matching that layer's output width.
biases = {
    'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'decoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'decoder_b2': tf.Variable(tf.random_normal([n_input])),
}
| 49 | + |
# Building the encoder
def encoder(x):
    """Map an input batch to its latent code.

    x: float tensor of shape [batch, n_input].
    Returns a tensor of shape [batch, n_hidden_2] with values in (0, 1).
    """
    # Encoder hidden layer #1 with sigmoid activation
    # (original comment said "relu", but tf.nn.sigmoid is what runs)
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']), biases['encoder_b1']))
    # Encoder hidden layer #2 with sigmoid activation
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']), biases['encoder_b2']))
    return layer_2
| 57 | + |
# Building the decoder
def decoder(x):
    """Map a latent code back to image space.

    x: float tensor of shape [batch, n_hidden_2].
    Returns a tensor of shape [batch, n_input] with values in (0, 1).
    """
    # Decoder hidden layer #1 with sigmoid activation
    # (original comment said "Encoder ... relu"; this is the decoder, sigmoid)
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']), biases['decoder_b1']))
    # Decoder output layer with sigmoid activation, same width as the input
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']), biases['decoder_b2']))
    return layer_2
| 65 | + |
# Construct model: reconstruction = decoder(encoder(X))
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)

y_pred = decoder_op  # reconstructed images
y_true = X           # an autoencoder's targets are its own inputs

# Define loss and optimizer: minimize the mean squared reconstruction error
cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)

# Op that initializes all graph variables.
# tf.initialize_all_variables() is deprecated since TF 0.12; the
# replacement below is the supported TF 1.x equivalent.
init = tf.global_variables_initializer()
| 78 | + |
# Launch the graph: train the autoencoder, then visualize reconstructions.
with tf.Session() as sess:
    sess.run(init)
    # Number of whole batches per epoch (floor division; any remainder
    # examples are simply skipped each epoch)
    total_batch = mnist.train.num_examples // batch_size
    # Training cycle
    for epoch in range(training_epochs):
        # Loop over all batches; labels are unused, so discard them
        for i in range(total_batch):
            batch_xs, _ = mnist.train.next_batch(batch_size)
            # One optimization step on this batch, fetching the current cost
            _, cost_value = sess.run([optimizer, cost], feed_dict={X: batch_xs})
        # Display logs per epoch step (cost of the epoch's last batch)
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(cost_value))

    print("Optimization Finished!")

    # Applying encode and decode over the test set
    encode_decode = sess.run(y_pred, feed_dict={X: mnist.test.images[:examples_to_show]})
    # Compare original images (top row) with their reconstructions (bottom row)
    f, a = plt.subplots(2, 10, figsize=(10, 2))
    for i in range(examples_to_show):
        a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))
        a[1][i].imshow(np.reshape(encode_decode[i], (28, 28)))
    f.show()
    plt.draw()
    plt.waitforbuttonpress()
| 106 | + |
| 107 | +# # Regression, with mean square error |
| 108 | +# net = tflearn.regression(decoder, optimizer='adam', learning_rate=0.001, |
| 109 | +# loss='mean_square', metric=None) |
| 110 | +# |
| 111 | +# # Training the auto encoder |
| 112 | +# model = tflearn.DNN(net, tensorboard_verbose=0) |
| 113 | +# model.fit(X, X, n_epoch=10, validation_set=(testX, testX), |
| 114 | +# run_id="auto_encoder", batch_size=256) |
| 115 | +# |
| 116 | +# # Encoding X[0] for test |
| 117 | +# print("\nTest encoding of X[0]:") |
| 118 | +# # New model, re-using the same session, for weights sharing |
| 119 | +# encoding_model = tflearn.DNN(encoder, session=model.session) |
| 120 | +# print(encoding_model.predict([X[0]])) |
| 121 | +# |
| 122 | +# # Testing the image reconstruction on new data (test set) |
| 123 | +# print("\nVisualizing results after being encoded and decoded:") |
| 124 | +# testX = tflearn.data_utils.shuffle(testX)[0] |
| 125 | +# # Applying encode and decode over test set |
| 126 | +# encode_decode = model.predict(testX) |
| 127 | +# # Compare original images with their reconstructions |
| 128 | +# f, a = plt.subplots(2, 10, figsize=(10, 2)) |
| 129 | +# for i in range(10): |
| 130 | +# a[0][i].imshow(np.reshape(testX[i], (28, 28))) |
| 131 | +# a[1][i].imshow(np.reshape(encode_decode[i], (28, 28))) |
| 132 | +# f.show() |
| 133 | +# plt.draw() |
| 134 | +# plt.waitforbuttonpress() |
0 commit comments