Contact
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutSign UpSign In
| Download

📚 The CoCalc Library - books, templates and other resources

Views: 96145
License: OTHER
1
""" Simple linear regression example using placeholders.

Fits life expectancy (Y) as a linear function of birth rate (X),
Y_predicted = w * X + b, trained one sample at a time with SGD.

Created by Chip Huyen ([email protected])
CS20: "TensorFlow for Deep Learning Research"
cs20.stanford.edu
Lecture 03
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import time

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

import utils

DATA_FILE = 'data/birth_life_2010.txt'

# Step 1: read in data from the .txt file
# `data` is an (n_samples, 2) array of (birth_rate, life_expectancy) pairs.
data, n_samples = utils.read_birth_life_data(DATA_FILE)

# Step 2: create placeholders for X (birth rate) and Y (life expectancy)
# Both are scalars of type float32; values are fed one sample at a time.
X = tf.placeholder(tf.float32, name='X')
Y = tf.placeholder(tf.float32, name='Y')

# Step 3: create weight and bias, initialized to 0.0
# tf.get_variable (rather than tf.Variable) so the variables are
# registered by name and can be reused/shared across graph scopes.
w = tf.get_variable('weights', initializer=tf.constant(0.0))
b = tf.get_variable('bias', initializer=tf.constant(0.0))

# Step 4: build model to predict Y from X, w, and b
Y_predicted = w * X + b

# Step 5: use the square error as the loss function
loss = tf.square(Y - Y_predicted, name='loss')

# Step 6: gradient descent with learning rate of 0.001 to minimize loss
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)

start = time.time()

# Filewriter to write the model's graph to TensorBoard.
# Created BEFORE the session so the graph is recorded once, and closed
# after training (it must exist before writer.close() is reached).
writer = tf.summary.FileWriter('./graphs/linear_reg', tf.get_default_graph())

with tf.Session() as sess:
    # Step 7: initialize the necessary variables, in this case, w and b
    sess.run(tf.global_variables_initializer())

    # Step 8: train the model for 100 epochs of per-sample SGD
    for i in range(100):
        total_loss = 0
        for x, y in data:
            # Execute the train op and fetch the loss value for this sample.
            # NOTE: fetch into `loss_`, not `loss` -- rebinding `loss` would
            # clobber the graph tensor after the first step and break every
            # subsequent sess.run call.
            _, loss_ = sess.run([optimizer, loss], feed_dict={X: x, Y: y})
            total_loss += loss_

        print('Epoch {0}: {1}'.format(i, total_loss / n_samples))

    # Close the writer when done using it.
    writer.close()

    # Step 9: output the trained values of w and b (fetched inside the
    # session, while the variables still have live values).
    w_out, b_out = sess.run([w, b])

print('Took: %f seconds' % (time.time() - start))

# uncomment the following lines to see the plot
# plt.plot(data[:,0], data[:,1], 'bo', label='Real data')
# plt.plot(data[:,0], data[:,0] * w_out + b_out, 'r', label='Predicted data')
# plt.legend()
# plt.show()
95