'''
Loss Visualization with TensorFlow.
This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)

Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
# Make print a function so this script runs on both Python 2 and Python 3
# (the original used Python 2 print statements, which are a SyntaxError on 3).
from __future__ import print_function

import tensorflow as tf
import numpy

# Import MNIST data
import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Use Logistic Regression from our previous example

# Parameters
learning_rate = 0.01
training_epochs = 10
batch_size = 100
display_step = 1  # print the averaged loss every `display_step` epochs

# tf Graph Input
x = tf.placeholder("float", [None, 784], name='x')  # mnist data image of shape 28*28=784
y = tf.placeholder("float", [None, 10], name='y')   # 0-9 digits recognition => 10 classes

# Create model

# Set model weights (zero init is fine for plain logistic regression).
W = tf.Variable(tf.zeros([784, 10]), name="weights")
b = tf.Variable(tf.zeros([10]), name="bias")

# Construct model: a single affine layer followed by softmax.
activation = tf.nn.softmax(tf.matmul(x, W) + b)  # Softmax

# Minimize error using cross entropy, summed over the whole batch.
cost = -tf.reduce_sum(y*tf.log(activation))  # Cross entropy
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)  # Gradient Descent

# Initializing the variables
init = tf.initialize_all_variables()

# Create a summary to monitor cost function in TensorBoard.
tf.scalar_summary("loss", cost)

# Merge all summaries to a single operator so one sess.run() fetches them all.
merged_summary_op = tf.merge_all_summaries()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)

    # Set logs writer into folder /tmp/tensorflow_logs (point TensorBoard here).
    summary_writer = tf.train.SummaryWriter('/tmp/tensorflow_logs', graph_def=sess.graph_def)

    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Fit training using batch data
            sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
            # Compute average loss
            avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
            # Write logs at every iteration; global step = epoch*total_batch + i
            summary_str = sess.run(merged_summary_op, feed_dict={x: batch_xs, y: batch_ys})
            summary_writer.add_summary(summary_str, epoch*total_batch + i)
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))

    print("Optimization Finished!")

    # Test model: prediction is correct when argmax of softmax matches the label.
    correct_prediction = tf.equal(tf.argmax(activation, 1), tf.argmax(y, 1))
    # Calculate accuracy as the mean of the 0/1 correctness vector.
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

'''
Run the command line: tensorboard --logdir=/tmp/tensorflow_logs
Open http://localhost:6006/ into your web browser
'''