""" Starter code for a simple regression example using eager execution.
Created by Akshay Agrawal ([email protected])
CS20: "TensorFlow for Deep Learning Research"
cs20.stanford.edu
Lecture 04
"""
import time

import tensorflow as tf
import tensorflow.contrib.eager as tfe
import matplotlib.pyplot as plt

import utils

DATA_FILE = 'data/birth_life_2010.txt'

# In order to use eager execution, `tfe.enable_eager_execution()` must be
# called at the very beginning of a TensorFlow program.
tfe.enable_eager_execution()
# Read the data into a dataset.
data, n_samples = utils.read_birth_life_data(DATA_FILE)
dataset = tf.data.Dataset.from_tensor_slices((data[:,0], data[:,1]))
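# Each element of `dataset` is a (birth_rate, life_expectancy) pair of scalar
# tensors; under eager execution the dataset can be iterated directly with
# `tfe.Iterator`, as done in `train` below.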
# Create weight and bias variables, initialized to 0.0.
# `tfe.Variable` constructs variables that work under eager execution.
w = tfe.Variable(0.0)
b = tfe.Variable(0.0)

# Define the linear predictor.
def prediction(x):
    return x * w + b

# Define loss functions of the form: L(y, y_predicted)
def squared_loss(y, y_predicted):
    return (y - y_predicted) ** 2

def huber_loss(y, y_predicted):
    """Huber loss with `m` set to `1.0`."""
    m = 1.0
    t = y - y_predicted
    # Eager execution lets us branch with ordinary Python control flow on
    # tensor values; no `tf.cond` is required. The quadratic branch matches
    # `squared_loss` for small residuals and grows linearly beyond `m`.
    return t ** 2 if tf.abs(t) <= m else m * (2 * tf.abs(t) - m)
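
# Quick illustrative check of the two objectives (values assume the
# definitions above): for a residual of 10.0, `squared_loss(10.0, 0.0)`
# evaluates to 100.0, while `huber_loss(10.0, 0.0)` evaluates to
# 1.0 * (2 * 10.0 - 1.0) = 19.0, i.e. the Huber loss penalizes outliers
# only linearly.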

def train(loss_fn):
    """Train a regression model evaluated using `loss_fn`."""
    print('Training; loss function: ' + loss_fn.__name__)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)

    # Define the function through which to differentiate: the per-example
    # loss as a function of the inputs, with `w` and `b` captured implicitly.
    def loss_for_example(x, y):
        return loss_fn(y, prediction(x))

    # Obtain a gradients function using `tfe.implicit_value_and_gradients`.
    grad_fn = tfe.implicit_value_and_gradients(loss_for_example)
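    # `grad_fn(x, y)` returns the value of `loss_for_example(x, y)` together
    # with a list of (gradient, variable) pairs for the variables the
    # computation touched (here `w` and `b`), in the format expected by
    # `optimizer.apply_gradients`.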

    start = time.time()
    for epoch in range(100):
        total_loss = 0.0
        for x_i, y_i in tfe.Iterator(dataset):
            # Compute the loss and gradient, and take an optimization step.
            loss, gradients = grad_fn(x_i, y_i)
            optimizer.apply_gradients(gradients)
            total_loss += loss
        if epoch % 10 == 0:
            print('Epoch {0}: {1}'.format(epoch, total_loss / n_samples))
    print('Took: %f seconds' % (time.time() - start))
    print('Eager execution exhibits significant overhead per operation. '
          'As you increase your batch size, the impact of the overhead will '
          'become less noticeable. Eager execution is under active development: '
          'expect performance to increase substantially in the near future!')

train(huber_loss)
plt.plot(data[:,0], data[:,1], 'bo')
# The `.numpy()` method of a tensor retrieves the NumPy array backing it.
# In future versions of eager, you won't need to call `.numpy()` and will
# instead be able to, in most cases, pass Tensors wherever NumPy arrays are
# expected.
plt.plot(data[:,0], data[:,0] * w.numpy() + b.numpy(), 'r',
         label="huber regression")
plt.legend()
plt.show()
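
# Possible extension (a sketch, using only the definitions above): reset the
# variables and train again with the squared loss, then overlay the second
# fit to compare the two objectives on the same plot.
#
#   w.assign(0.0)
#   b.assign(0.0)
#   train(squared_loss)
#   plt.plot(data[:,0], data[:,0] * w.numpy() + b.numpy(), 'g',
#            label="squared loss regression")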