Skip to content

Commit 69f2dc8

Browse files
committed
improve notebook display
1 parent 3f7255a commit 69f2dc8

File tree

9 files changed

+55
-29
lines changed

9 files changed

+55
-29
lines changed

examples/3 - Neural Networks/convolutional_network.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
'''
2-
A Multilayer Perceptron implementation example using TensorFlow library.
2+
A Convolutional Network implementation example using TensorFlow library.
33
This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
44
55
Author: Aymeric Damien

notebooks/1 - Introduction/basic_operations.ipynb

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -8,9 +8,9 @@
88
},
99
"outputs": [],
1010
"source": [
11-
"#Basic Operations example using TensorFlow library.\n",
12-
"#Author: Aymeric Damien\n",
13-
"#Project: https://github.com/aymericdamien/TensorFlow-Examples/"
11+
"# Basic Operations example using TensorFlow library.\n",
12+
"# Author: Aymeric Damien\n",
13+
"# Project: https://github.com/aymericdamien/TensorFlow-Examples/"
1414
]
1515
},
1616
{
@@ -192,8 +192,7 @@
192192
"# The output of the op is returned in 'result' as a numpy `ndarray` object.\n",
193193
"with tf.Session() as sess:\n",
194194
" result = sess.run(product)\n",
195-
" print result\n",
196-
" # ==> [[ 12.]]"
195+
" print result"
197196
]
198197
}
199198
],

notebooks/1 - Introduction/helloworld.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@
1919
},
2020
"outputs": [],
2121
"source": [
22-
"#Simple hello world using TensorFlow\n",
22+
"# Simple hello world using TensorFlow\n",
2323
"\n",
2424
"# Create a Constant op\n",
2525
"# The op is added as a node to the default graph.\n",

notebooks/2 - Basic Classifiers/linear_regression.ipynb

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -123,7 +123,7 @@
123123
},
124124
{
125125
"cell_type": "code",
126-
"execution_count": 10,
126+
"execution_count": 22,
127127
"metadata": {
128128
"collapsed": false
129129
},
@@ -189,11 +189,13 @@
189189
"\n",
190190
" #Display logs per epoch step\n",
191191
" if epoch % display_step == 0:\n",
192-
" print \"Epoch:\", '%04d' % (epoch+1), \"cost=\", \"{:.9f}\".format(sess.run(cost, feed_dict={X: train_X, Y:train_Y})), \\\n",
192+
" print \"Epoch:\", '%04d' % (epoch+1), \"cost=\", \\\n",
193+
" \"{:.9f}\".format(sess.run(cost, feed_dict={X: train_X, Y:train_Y})), \\\n",
193194
" \"W=\", sess.run(W), \"b=\", sess.run(b)\n",
194195
"\n",
195196
" print \"Optimization Finished!\"\n",
196-
" print \"cost=\", sess.run(cost, feed_dict={X: train_X, Y: train_Y}), \"W=\", sess.run(W), \"b=\", sess.run(b)\n",
197+
" print \"cost=\", sess.run(cost, feed_dict={X: train_X, Y: train_Y}), \\\n",
198+
" \"W=\", sess.run(W), \"b=\", sess.run(b)\n",
197199
"\n",
198200
" #Graphic display\n",
199201
" plt.plot(train_X, train_Y, 'ro', label='Original data')\n",
@@ -204,7 +206,7 @@
204206
},
205207
{
206208
"cell_type": "code",
207-
"execution_count": 18,
209+
"execution_count": 23,
208210
"metadata": {
209211
"collapsed": false
210212
},
@@ -216,7 +218,7 @@
216218
"<IPython.core.display.Image object>"
217219
]
218220
},
219-
"execution_count": 18,
221+
"execution_count": 23,
220222
"metadata": {},
221223
"output_type": "execute_result"
222224
}

notebooks/2 - Basic Classifiers/logistic_regression.ipynb

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,8 @@
99
"outputs": [],
1010
"source": [
1111
"# A logistic regression learning algorithm example using TensorFlow library.\n",
12-
"# This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)\n",
12+
"# This example is using the MNIST database of handwritten digits \n",
13+
"# (http://yann.lecun.com/exdb/mnist/)\n",
1314
"\n",
1415
"# Author: Aymeric Damien\n",
1516
"# Project: https://github.com/aymericdamien/TensorFlow-Examples/"
@@ -114,8 +115,10 @@
114115
"outputs": [],
115116
"source": [
116117
"# Minimize error using cross entropy\n",
117-
"cost = -tf.reduce_sum(y*tf.log(activation)) # Cross entropy\n",
118-
"optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) # Gradient Descent"
118+
"# Cross entropy\n",
119+
"cost = -tf.reduce_sum(y*tf.log(activation)) \n",
120+
"# Gradient Descent\n",
121+
"optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) "
119122
]
120123
},
121124
{

notebooks/2 - Basic Classifiers/nearest_neighbor.ipynb

Lines changed: 13 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,8 @@
99
"outputs": [],
1010
"source": [
1111
"# A nearest neighbor learning algorithm example using TensorFlow library.\n",
12-
"# This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)\n",
12+
"# This example is using the MNIST database of handwritten digits \n",
13+
"# (http://yann.lecun.com/exdb/mnist/)\n",
1314
"\n",
1415
"# Author: Aymeric Damien\n",
1516
"# Project: https://github.com/aymericdamien/TensorFlow-Examples/"
@@ -345,13 +346,23 @@
345346
" # Get nearest neighbor\n",
346347
" nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i,:]})\n",
347348
" # Get nearest neighbor class label and compare it to its true label\n",
348-
" print \"Test\", i, \"Prediction:\", np.argmax(Ytr[nn_index]), \"True Class:\", np.argmax(Yte[i])\n",
349+
" print \"Test\", i, \"Prediction:\", np.argmax(Ytr[nn_index]), \\\n",
350+
" \"True Class:\", np.argmax(Yte[i])\n",
349351
" # Calculate accuracy\n",
350352
" if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]):\n",
351353
" accuracy += 1./len(Xte)\n",
352354
" print \"Done!\"\n",
353355
" print \"Accuracy:\", accuracy"
354356
]
357+
},
358+
{
359+
"cell_type": "code",
360+
"execution_count": null,
361+
"metadata": {
362+
"collapsed": true
363+
},
364+
"outputs": [],
365+
"source": []
355366
}
356367
],
357368
"metadata": {

notebooks/3 - Neural Networks/convolutional_network.ipynb

Lines changed: 13 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -8,8 +8,9 @@
88
},
99
"outputs": [],
1010
"source": [
11-
"# A Multilayer Perceptron implementation example using TensorFlow library.\n",
12-
"# This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)\n",
11+
"# A Convolutional Network implementation example using TensorFlow library.\n",
12+
"# This example is using the MNIST database of handwritten digits\n",
13+
"# (http://yann.lecun.com/exdb/mnist/)\n",
1314
"\n",
1415
"# Author: Aymeric Damien\n",
1516
"# Project: https://github.com/aymericdamien/TensorFlow-Examples/"
@@ -103,7 +104,8 @@
103104
"source": [
104105
"# Create model\n",
105106
"def conv2d(img, w, b):\n",
106-
" return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(img, w, strides=[1, 1, 1, 1], padding='SAME'),b))\n",
107+
" return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(img, w, strides=[1, 1, 1, 1], \n",
108+
" padding='SAME'),b))\n",
107109
"\n",
108110
"def max_pool(img, k):\n",
109111
" return tf.nn.max_pool(img, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')\n",
@@ -149,10 +151,14 @@
149151
"source": [
150152
"# Store layers weight & bias\n",
151153
"weights = {\n",
152-
" 'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])), # 5x5 conv, 1 input, 32 outputs\n",
153-
" 'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])), # 5x5 conv, 32 inputs, 64 outputs\n",
154-
" 'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])), # fully connected, 7*7*64 inputs, 1024 outputs\n",
155-
" 'out': tf.Variable(tf.random_normal([1024, n_classes])) # 1024 inputs, 10 outputs (class prediction)\n",
154+
" # 5x5 conv, 1 input, 32 outputs\n",
155+
" 'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])), \n",
156+
" # 5x5 conv, 32 inputs, 64 outputs\n",
157+
" 'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])), \n",
158+
" # fully connected, 7*7*64 inputs, 1024 outputs\n",
159+
" 'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])), \n",
160+
" # 1024 inputs, 10 outputs (class prediction)\n",
161+
" 'out': tf.Variable(tf.random_normal([1024, n_classes])) \n",
156162
"}\n",
157163
"\n",
158164
"biases = {\n",

notebooks/3 - Neural Networks/multilayer_perceptron.ipynb

Lines changed: 10 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,8 @@
99
"outputs": [],
1010
"source": [
1111
"# A Multilayer Perceptron implementation example using TensorFlow library.\n",
12-
"# This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)\n",
12+
"# This example is using the MNIST database of handwritten digits\n",
13+
"# (http://yann.lecun.com/exdb/mnist/)\n",
1314
"\n",
1415
"# Author: Aymeric Damien\n",
1516
"# Project: https://github.com/aymericdamien/TensorFlow-Examples/"
@@ -103,8 +104,10 @@
103104
"source": [
104105
"# Create model\n",
105106
"def multilayer_perceptron(_X, _weights, _biases):\n",
106-
" layer_1 = tf.nn.relu(tf.add(tf.matmul(_X, _weights['h1']), _biases['b1'])) #Hidden layer with RELU activation\n",
107-
" layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, _weights['h2']), _biases['b2'])) #Hidden layer with RELU activation\n",
107+
" #Hidden layer with RELU activation\n",
108+
" layer_1 = tf.nn.relu(tf.add(tf.matmul(_X, _weights['h1']), _biases['b1'])) \n",
109+
" #Hidden layer with RELU activation\n",
110+
" layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, _weights['h2']), _biases['b2'])) \n",
108111
" return tf.matmul(layer_2, weights['out']) + biases['out']"
109112
]
110113
},
@@ -150,8 +153,10 @@
150153
"outputs": [],
151154
"source": [
152155
"# Define loss and optimizer\n",
153-
"cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y)) # Softmax loss\n",
154-
"optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Adam Optimizer"
156+
"# Softmax loss\n",
157+
"cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y)) \n",
158+
"# Adam Optimizer\n",
159+
"optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) "
155160
]
156161
},
157162
{

tensorflow_setup.md

Whitespace-only changes.

0 commit comments

Comments (0)