""" Starter code for a simple regression example using eager execution.
Created by Akshay Agrawal ([email protected])
CS20: "TensorFlow for Deep Learning Research"
cs20.stanford.edu
Lecture 04
"""
import time

import tensorflow as tf
import tensorflow.contrib.eager as tfe
import matplotlib.pyplot as plt

import utils

DATA_FILE = 'data/birth_life_2010.txt'

# In order to use eager execution, `tfe.enable_eager_execution()` must be
# called at the very beginning of a TensorFlow program.
tfe.enable_eager_execution()
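
# Optional sanity check: in recent TF 1.x releases, `tf.executing_eagerly()`
# returns True once eager mode is on, e.g.:
#   assert tf.executing_eagerly()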

# Read the data into a dataset.
data, n_samples = utils.read_birth_life_data(DATA_FILE)
dataset = tf.data.Dataset.from_tensor_slices((data[:,0], data[:,1]))

# Create variables.
w = tfe.Variable(0.0)
b = tfe.Variable(0.0)

# Define the linear predictor.
def prediction(x):
  return x * w + b

# Define loss functions of the form: L(y, y_predicted)
def squared_loss(y, y_predicted):
  return (y - y_predicted) ** 2

def huber_loss(y, y_predicted, m=1.0):
  """Huber loss."""
  t = y - y_predicted
  # Note that enabling eager execution lets you use Python control flow and
  # specify dynamic TensorFlow computations. Contrast this implementation
  # to the graph-construction one found in `utils`, which uses `tf.cond`;
  # a sketch of that style follows below.
  return t ** 2 if tf.abs(t) <= m else m * (2 * tf.abs(t) - m)
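
# For contrast, a minimal sketch of the graph-style Huber loss built with
# `tf.cond`. This mirrors the `utils` implementation only in spirit; the
# function name and exact code here are illustrative.
def huber_loss_graph(y, y_predicted, m=1.0):
  """Huber loss expressed with `tf.cond` instead of Python control flow."""
  t = y - y_predicted
  return tf.cond(tf.abs(t) <= m,
                 lambda: t ** 2,
                 lambda: m * (2 * tf.abs(t) - m))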

def train(loss_fn):
  """Train a regression model evaluated using `loss_fn`."""
  print('Training; loss function: ' + loss_fn.__name__)
  optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)

  # Define the function through which to differentiate.
  def loss_for_example(x, y):
    return loss_fn(y, prediction(x))

  # `grad_fn(x_i, y_i)` returns (1) the value of `loss_for_example`
  # evaluated at `x_i`, `y_i` and (2) the gradients of any variables used in
  # calculating it.
  grad_fn = tfe.implicit_value_and_gradients(loss_for_example)
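  # For example (a sketch; real inputs come from `dataset`):
  #   value, grads_and_vars = grad_fn(tf.constant(1.0), tf.constant(2.0))
  # `value` is a scalar loss Tensor, and `grads_and_vars` is a list of
  # (gradient, variable) pairs in the format `apply_gradients` expects.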

  start = time.time()
  for epoch in range(100):
    total_loss = 0.0
    for x_i, y_i in tfe.Iterator(dataset):
      loss, gradients = grad_fn(x_i, y_i)
      # Take an optimization step and update variables.
      optimizer.apply_gradients(gradients)
      total_loss += loss
    if epoch % 10 == 0:
      print('Epoch {0}: {1}'.format(epoch, total_loss / n_samples))
  print('Took: %f seconds' % (time.time() - start))
  print('Eager execution exhibits significant overhead per operation. '
        'As you increase your batch size, the impact of the overhead will '
        'become less noticeable. Eager execution is under active development: '
        'expect performance to increase substantially in the near future!')

train(huber_loss)
plt.plot(data[:,0], data[:,1], 'bo')
# The `.numpy()` method of a tensor retrieves the NumPy array backing it.
# In future versions of eager, you won't need to call `.numpy()` and will
# instead, in most cases, be able to pass Tensors wherever NumPy arrays are
# expected.
plt.plot(data[:,0], data[:,0] * w.numpy() + b.numpy(), 'r',
         label="huber regression")
plt.legend()
plt.show()
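
# To compare loss functions, the same routine can be re-run with the squared
# loss (a sketch; reset the variables first so both runs start from zero):
#   w.assign(0.0)
#   b.assign(0.0)
#   train(squared_loss)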