""" Using a convolutional net on the MNIST dataset of handwritten digits
MNIST dataset: http://yann.lecun.com/exdb/mnist/
CS 20: "TensorFlow for Deep Learning Research"
cs20.stanford.edu
Chip Huyen ([email protected])
Lecture 07
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TensorFlow startup logging
import time

import tensorflow as tf

import utils

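# `utils` is the course's companion helper module; its contents are not shown
# here. This file assumes it provides utils.get_mnist_dataset(batch_size),
# returning a (train, test) pair of batched tf.data.Dataset objects yielding
# (image, one-hot label) tensors, and utils.safe_mkdir(path), which creates a
# directory if it does not already exist.
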
class ConvNet(object):
    def __init__(self):
        self.lr = 0.001
        self.batch_size = 128
        self.keep_prob = tf.constant(0.75)  # probability of *keeping* a unit
        self.gstep = tf.Variable(0, dtype=tf.int32,
                                 trainable=False, name='global_step')
        self.n_classes = 10
        self.skip_step = 20
        self.n_test = 10000
        # A plain Python bool would be frozen into the graph when build() runs,
        # so dropout could never switch between train and eval mode. A
        # placeholder with a default lets each sess.run call choose the mode.
        self.training = tf.placeholder_with_default(False, shape=(), name='training')

    def get_data(self):
        with tf.name_scope('data'):
            train_data, test_data = utils.get_mnist_dataset(self.batch_size)
            iterator = tf.data.Iterator.from_structure(train_data.output_types,
                                                       train_data.output_shapes)
            img, self.label = iterator.get_next()
            # reshape the images to NHWC format so they work with tf.nn.conv2d
            self.img = tf.reshape(img, shape=[-1, 28, 28, 1])

            self.train_init = iterator.make_initializer(train_data)  # initializer for train_data
            self.test_init = iterator.make_initializer(test_data)    # initializer for test_data

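    # The reinitializable iterator above is the standard TF 1.x way to point
    # one input pipeline at several datasets. A minimal sketch with toy data:
    #   ds_a = tf.data.Dataset.from_tensor_slices([1, 2, 3])
    #   ds_b = tf.data.Dataset.from_tensor_slices([10, 20])
    #   it = tf.data.Iterator.from_structure(ds_a.output_types, ds_a.output_shapes)
    #   x = it.get_next()
    #   sess.run(it.make_initializer(ds_a))  # x now yields 1, 2, 3
    #   sess.run(it.make_initializer(ds_b))  # x now yields 10, 20
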
    def inference(self):
        conv1 = tf.layers.conv2d(inputs=self.img,
                                 filters=32,
                                 kernel_size=[5, 5],
                                 padding='SAME',
                                 activation=tf.nn.relu,
                                 name='conv1')
        pool1 = tf.layers.max_pooling2d(inputs=conv1,
                                        pool_size=[2, 2],
                                        strides=2,
                                        name='pool1')

        conv2 = tf.layers.conv2d(inputs=pool1,
                                 filters=64,
                                 kernel_size=[5, 5],
                                 padding='SAME',
                                 activation=tf.nn.relu,
                                 name='conv2')
        pool2 = tf.layers.max_pooling2d(inputs=conv2,
                                        pool_size=[2, 2],
                                        strides=2,
                                        name='pool2')

        feature_dim = pool2.shape[1] * pool2.shape[2] * pool2.shape[3]
        pool2 = tf.reshape(pool2, [-1, feature_dim])
        fc = tf.layers.dense(pool2, 1024, activation=tf.nn.relu, name='fc')
        # tf.layers.dropout takes the *drop* rate, so convert from keep_prob
        dropout = tf.layers.dropout(fc,
                                    rate=1.0 - self.keep_prob,
                                    training=self.training,
                                    name='dropout')
        self.logits = tf.layers.dense(dropout, self.n_classes, name='logits')

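    # With 28x28 inputs, 'SAME' padding, and two stride-2 2x2 pools:
    # conv1/pool1 -> 14x14x32, conv2/pool2 -> 7x7x64, so
    # feature_dim = 7 * 7 * 64 = 3136 entering the fully connected layer.
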
    def loss(self):
        '''
        define loss function
        use softmax cross entropy with logits as the loss function
        compute mean cross entropy; softmax is applied internally
        '''
        with tf.name_scope('loss'):
            entropy = tf.nn.softmax_cross_entropy_with_logits(labels=self.label, logits=self.logits)
            self.loss = tf.reduce_mean(entropy, name='loss')

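    # For reference, the per-example quantity computed above is
    #   H(label, softmax(logits)) = -sum_j label_j * log(softmax(logits)_j),
    # which is why self.logits must be raw scores, not softmax outputs.
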
    def optimize(self):
        '''
        Define training op
        using the Adam optimizer to minimize the loss
        '''
        self.opt = tf.train.AdamOptimizer(self.lr).minimize(self.loss,
                                                            global_step=self.gstep)

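    # Passing global_step makes minimize() increment self.gstep once per
    # update, which the step counting and checkpoint naming below rely on.
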
    def summary(self):
        '''
        Create summaries to write on TensorBoard
        '''
        with tf.name_scope('summaries'):
            tf.summary.scalar('loss', self.loss)
            tf.summary.scalar('accuracy', self.accuracy)
            tf.summary.histogram('histogram loss', self.loss)
            self.summary_op = tf.summary.merge_all()

    def eval(self):
        '''
        Count the number of right predictions in a batch
        '''
        with tf.name_scope('predict'):
            preds = tf.nn.softmax(self.logits)
            correct_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(self.label, 1))
            self.accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32))

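    # Despite its name, self.accuracy is the *count* of correct predictions
    # in a batch (reduce_sum, not reduce_mean); eval_once divides the running
    # total by n_test. The softmax is not strictly needed before argmax,
    # since softmax preserves the ordering of the logits.
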
    def build(self):
        '''
        Build the computation graph
        '''
        self.get_data()
        self.inference()
        self.loss()   # note: this call rebinds self.loss from method to tensor
        self.optimize()
        self.eval()
        self.summary()

    def train_one_epoch(self, sess, saver, init, writer, epoch, step):
        start_time = time.time()
        sess.run(init)
        total_loss = 0
        n_batches = 0
        try:
            while True:
                # feed training=True so dropout is active during this epoch
                _, l, summaries = sess.run([self.opt, self.loss, self.summary_op],
                                           feed_dict={self.training: True})
                writer.add_summary(summaries, global_step=step)
                if (step + 1) % self.skip_step == 0:
                    print('Loss at step {0}: {1}'.format(step, l))
                step += 1
                total_loss += l
                n_batches += 1
        except tf.errors.OutOfRangeError:
            # the finite training dataset has been exhausted: end of epoch
            pass
        saver.save(sess, 'checkpoints/convnet_layers/mnist-convnet', step)
        print('Average loss at epoch {0}: {1}'.format(epoch, total_loss / n_batches))
        print('Took: {0} seconds'.format(time.time() - start_time))
        return step

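    # saver.save(sess, prefix, step) appends the step number to the
    # checkpoint name, producing files such as mnist-convnet-<step>.index
    # under checkpoints/convnet_layers/.
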
    def eval_once(self, sess, init, writer, epoch, step):
        start_time = time.time()
        sess.run(init)
        # no feed needed: self.training defaults to False, disabling dropout
        total_correct_preds = 0
        try:
            while True:
                accuracy_batch, summaries = sess.run([self.accuracy, self.summary_op])
                writer.add_summary(summaries, global_step=step)
                total_correct_preds += accuracy_batch
        except tf.errors.OutOfRangeError:
            pass

        print('Accuracy at epoch {0}: {1}'.format(epoch, total_correct_preds / self.n_test))
        print('Took: {0} seconds'.format(time.time() - start_time))

    def train(self, n_epochs):
        '''
        The train function alternates between training one epoch and evaluating
        '''
        utils.safe_mkdir('checkpoints')
        utils.safe_mkdir('checkpoints/convnet_layers')
        writer = tf.summary.FileWriter('./graphs/convnet_layers', tf.get_default_graph())

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver()
            ckpt = tf.train.get_checkpoint_state(os.path.dirname('checkpoints/convnet_layers/checkpoint'))
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)

            step = self.gstep.eval()

            for epoch in range(n_epochs):
                step = self.train_one_epoch(sess, saver, self.train_init, writer, epoch, step)
                self.eval_once(sess, self.test_init, writer, epoch, step)
        writer.close()

if __name__ == '__main__':
    model = ConvNet()
    model.build()
    model.train(n_epochs=15)
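
# After running this file, the summaries and checkpoints it writes can be
# inspected with TensorBoard (the path follows the FileWriter argument in
# train() above):
#   tensorboard --logdir ./graphs/convnet_layers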