Skip to content

Commit 5c17428

Browse files
committed
update description
1 parent 148dc52 commit 5c17428

File tree

5 files changed

+590
-0
lines changed

5 files changed

+590
-0
lines changed

README.md

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,10 @@ Code examples for some popular machine learning algorithms, using TensorFlow lib
2121
### 4 - Multi GPU
2222
- Basic Operations on multi-GPU ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/4%20-%20Multi%20GPU/multigpu_basics.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/4%20-%20Multi%20GPU/multigpu_basics.py))
2323

24+
### 5 - User Interface (Tensorboard)
25+
- Graph Visualization ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/5%20-%20User%20Interface/graph_visualization.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/5%20-%20User%20Interface/graph_visualization.py))
26+
- Loss Visualization ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/5%20-%20User%20Interface/loss_visualization.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/5%20-%20User%20Interface/loss_visualization.py))
27+
2428
## Dependencies
2529
```
2630
tensorflow
Lines changed: 78 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,78 @@
1+
'''
2+
Graph Visualization with TensorFlow.
3+
This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
4+
5+
Author: Aymeric Damien
6+
Project: https://github.com/aymericdamien/TensorFlow-Examples/
7+
'''
8+
9+
import tensorflow as tf
10+
import numpy
11+
12+
# Import MINST data
13+
import input_data
14+
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
15+
16+
# Use Logistic Regression from our previous example
17+
18+
# Parameters
19+
learning_rate = 0.01
20+
training_epochs = 10
21+
batch_size = 100
22+
display_step = 1
23+
24+
# tf Graph Input
25+
x = tf.placeholder("float", [None, 784], name='x') # mnist data image of shape 28*28=784
26+
y = tf.placeholder("float", [None, 10], name='y') # 0-9 digits recognition => 10 classes
27+
28+
# Create model
29+
30+
# Set model weights
31+
W = tf.Variable(tf.zeros([784, 10]), name="weights")
32+
b = tf.Variable(tf.zeros([10]), name="bias")
33+
34+
# Construct model
35+
activation = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
36+
37+
# Minimize error using cross entropy
38+
cost = -tf.reduce_sum(y*tf.log(activation)) # Cross entropy
39+
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) # Gradient Descent
40+
41+
# Initializing the variables
42+
init = tf.initialize_all_variables()
43+
44+
# Launch the graph
45+
with tf.Session() as sess:
46+
sess.run(init)
47+
48+
# Set logs writer into folder /tmp/tensorflow_logs
49+
summary_writer = tf.train.SummaryWriter('/tmp/tensorflow_logs', graph_def=sess.graph_def)
50+
51+
# Training cycle
52+
for epoch in range(training_epochs):
53+
avg_cost = 0.
54+
total_batch = int(mnist.train.num_examples/batch_size)
55+
# Loop over all batches
56+
for i in range(total_batch):
57+
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
58+
# Fit training using batch data
59+
sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
60+
# Compute average loss
61+
avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
62+
# Display logs per epoch step
63+
if epoch % display_step == 0:
64+
print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
65+
66+
print "Optimization Finished!"
67+
68+
# Test model
69+
correct_prediction = tf.equal(tf.argmax(activation, 1), tf.argmax(y, 1))
70+
# Calculate accuracy
71+
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
72+
print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})
73+
74+
'''
75+
Run the command line: tensorboard --logdir=/tmp/tensorflow
76+
Open http://localhost:6006/ into your web browser
77+
'''
78+
Lines changed: 86 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,86 @@
1+
'''
2+
Loss Visualization with TensorFlow.
3+
This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
4+
5+
Author: Aymeric Damien
6+
Project: https://github.com/aymericdamien/TensorFlow-Examples/
7+
'''
8+
9+
import tensorflow as tf
10+
import numpy
11+
12+
# Import MINST data
13+
import input_data
14+
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
15+
16+
# Use Logistic Regression from our previous example
17+
18+
# Parameters
19+
learning_rate = 0.01
20+
training_epochs = 10
21+
batch_size = 100
22+
display_step = 1
23+
24+
# tf Graph Input
25+
x = tf.placeholder("float", [None, 784], name='x') # mnist data image of shape 28*28=784
26+
y = tf.placeholder("float", [None, 10], name='y') # 0-9 digits recognition => 10 classes
27+
28+
# Create model
29+
30+
# Set model weights
31+
W = tf.Variable(tf.zeros([784, 10]), name="weights")
32+
b = tf.Variable(tf.zeros([10]), name="bias")
33+
34+
# Construct model
35+
activation = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
36+
37+
# Minimize error using cross entropy
38+
cost = -tf.reduce_sum(y*tf.log(activation)) # Cross entropy
39+
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) # Gradient Descent
40+
41+
# Initializing the variables
42+
init = tf.initialize_all_variables()
43+
44+
# Create a summary to monitor cost function
45+
tf.scalar_summary("loss", cost)
46+
47+
# Merge all summaries to a single operator
48+
merged_summary_op = tf.merge_all_summaries()
49+
50+
# Launch the graph
51+
with tf.Session() as sess:
52+
sess.run(init)
53+
54+
# Set logs writer into folder /tmp/tensorflow_logs
55+
summary_writer = tf.train.SummaryWriter('/tmp/tensorflow_logs', graph_def=sess.graph_def)
56+
57+
# Training cycle
58+
for epoch in range(training_epochs):
59+
avg_cost = 0.
60+
total_batch = int(mnist.train.num_examples/batch_size)
61+
# Loop over all batches
62+
for i in range(total_batch):
63+
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
64+
# Fit training using batch data
65+
sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
66+
# Compute average loss
67+
avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
68+
# Write logs at every iteration
69+
summary_str = sess.run(merged_summary_op, feed_dict={x: batch_xs, y: batch_ys})
70+
summary_writer.add_summary(summary_str, epoch*total_batch + i)
71+
# Display logs per epoch step
72+
if epoch % display_step == 0:
73+
print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
74+
75+
print "Optimization Finished!"
76+
77+
# Test model
78+
correct_prediction = tf.equal(tf.argmax(activation, 1), tf.argmax(y, 1))
79+
# Calculate accuracy
80+
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
81+
print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})
82+
83+
'''
84+
Run the command line: tensorboard --logdir=/tmp/tensorflow
85+
Open http://localhost:6006/ into your web browser
86+
'''

notebooks/5 - User Interface/graph_visualization.ipynb

Lines changed: 226 additions & 0 deletions
Large diffs are not rendered by default.

notebooks/5 - User Interface/loss_visualization.ipynb

Lines changed: 196 additions & 0 deletions
Large diffs are not rendered by default.

0 commit comments

Comments
 (0)