# Import MNIST data (via the input_data.py helper)
import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

import tensorflow as tf

# Parameters
learning_rate = 0.001
training_iters = 200000  # stop after this many training examples have been seen
batch_size = 64
display_step = 20

# Network Parameters
n_input = 784    # MNIST data input (28x28 images, flattened)
n_classes = 10   # MNIST total classes (digits 0-9)
dropout = 0.8    # probability to keep a unit during dropout

# Create model
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32)  # dropout (keep probability)

def conv2d(name, l_input, w, b):
    # Convolution (stride 1, SAME padding) followed by bias add and ReLU
    return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(l_input, w, strides=[1, 1, 1, 1], padding='SAME'), b), name=name)

def max_pool(name, l_input, k):
    # k x k max pooling with stride k
    return tf.nn.max_pool(l_input, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME', name=name)

def norm(name, l_input, lsize=4):
    # Local response normalization (AlexNet-style)
    return tf.nn.lrn(l_input, lsize, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name=name)

def conv_net(_X, _weights, _biases, _dropout):
    # Reshape the flat 784-vector back into a 28x28 grayscale image
    _X = tf.reshape(_X, shape=[-1, 28, 28, 1])

    # Convolution layer 1: 28x28x1 -> 28x28x64, pooled to 14x14x64
    conv1 = conv2d('conv1', _X, _weights['wc1'], _biases['bc1'])
    pool1 = max_pool('pool1', conv1, k=2)
    norm1 = norm('norm1', pool1, lsize=4)
    norm1 = tf.nn.dropout(norm1, _dropout)

    # Convolution layer 2: 14x14x64 -> 14x14x128, pooled to 7x7x128
    conv2 = conv2d('conv2', norm1, _weights['wc2'], _biases['bc2'])
    pool2 = max_pool('pool2', conv2, k=2)
    norm2 = norm('norm2', pool2, lsize=4)
    norm2 = tf.nn.dropout(norm2, _dropout)

    # Convolution layer 3: 7x7x128 -> 7x7x256, pooled to 4x4x256 (SAME padding rounds up)
    conv3 = conv2d('conv3', norm2, _weights['wc3'], _biases['bc3'])
    pool3 = max_pool('pool3', conv3, k=2)
    norm3 = norm('norm3', pool3, lsize=4)
    norm3 = tf.nn.dropout(norm3, _dropout)

    # Fully connected layers
    dense1 = tf.reshape(norm3, [-1, _weights['wd1'].get_shape().as_list()[0]])
    dense1 = tf.nn.relu(tf.matmul(dense1, _weights['wd1']) + _biases['bd1'], name='fc1')

    dense2 = tf.nn.relu(tf.matmul(dense1, _weights['wd2']) + _biases['bd2'], name='fc2')

    # Output: class scores (logits); softmax is applied inside the loss
    out = tf.matmul(dense2, _weights['out']) + _biases['out']
    return out

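# Weight shapes: convolution filters are [height, width, in_channels, out_channels].
# After three 2x2 max-pools the 28x28 input shrinks to 14x14 -> 7x7 -> 4x4
# (SAME padding rounds 7/2 up to 4), so the first dense layer takes 4*4*256 features.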
weights = {
    'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64])),
    'wc2': tf.Variable(tf.random_normal([3, 3, 64, 128])),
    'wc3': tf.Variable(tf.random_normal([3, 3, 128, 256])),
    'wd1': tf.Variable(tf.random_normal([4*4*256, 1024])),
    'wd2': tf.Variable(tf.random_normal([1024, 1024])),
    'out': tf.Variable(tf.random_normal([1024, 10]))
}

biases = {
    'bc1': tf.Variable(tf.random_normal([64])),
    'bc2': tf.Variable(tf.random_normal([128])),
    'bc3': tf.Variable(tf.random_normal([256])),
    'bd1': tf.Variable(tf.random_normal([1024])),
    'bd2': tf.Variable(tf.random_normal([1024])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

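# Loss and optimizer: the network outputs unscaled logits; softmax and cross-entropy
# are applied together in the loss, averaged over the batch, and minimized with Adam.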
pred = conv_net(x, weights, biases, keep_prob)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Train
init = tf.initialize_all_variables()
with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Keep training until the target number of examples has been seen
    while step * batch_size < training_iters:
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        # One optimization step (backprop) with dropout enabled
        sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
        if step % display_step == 0:
            # Report batch loss and accuracy with dropout disabled (keep_prob = 1)
            acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
            loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
            print "Iter " + str(step*batch_size) + ", Loss= " + "{:.6f}".format(loss) + ", Training Accuracy= " + "{:.5f}".format(acc)
        step += 1
    print "Optimization Finished!"
    # Accuracy on the first 256 MNIST test images
    print "Testing Accuracy:", sess.run(accuracy, feed_dict={x: mnist.test.images[:256], y: mnist.test.labels[:256], keep_prob: 1.})