TensorFlow
TensorFlow is a powerful compute-graph-based library for constructing high-performance neural networks in Python. Its particular strength is seamless GPU execution.
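As a quick sanity check that a GPU is actually being used, TensorFlow 1.x can log where each op is placed. The snippet below is a minimal sketch; the explicit /gpu:0 pin assumes the machine has at least one CUDA-capable GPU (omit the tf.device block to let TensorFlow place ops automatically).

import tensorflow as tf

# Pin a tiny computation to the first GPU (raises an error if none is present)
with tf.device('/gpu:0'):
    a = tf.constant([1.0, 2.0, 3.0], name='a')
    b = tf.constant([4.0, 5.0, 6.0], name='b')
    c = a * b

# log_device_placement prints the device each op was assigned to
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
    print(sess.run(c))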

Which Machines

Only Duke and Zoidberg are publicly accessible. For the other machines, you must either use Slurm (for the Borg cluster) or request a custom login from the sysadmin in charge of the machine.
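On the Borg cluster, jobs are typically submitted through Slurm's sbatch. The batch file below is only a minimal sketch: the job name and script name (linear_regression.py) are placeholders, and whether a GPU GRES is configured, and under what name, should be confirmed with the sysadmins.

#!/bin/bash
#SBATCH --job-name=tf-example
#SBATCH --gres=gpu:1          # request one GPU (assumes a GPU GRES is configured)
#SBATCH --time=00:10:00       # wall-clock limit for the job

python linear_regression.py

Submit it with sbatch and check its status with squeue.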

Example Program
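The script below is Aymeric Damien's linear regression example from the TensorFlow-Examples project: it fits a one-variable linear model to toy data with gradient descent, plots the fit, and then compares training and testing loss.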

'''
A linear regression learning algorithm example using TensorFlow library.
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''

from __future__ import print_function

import tensorflow as tf
import numpy
import matplotlib.pyplot as plt
rng = numpy.random

# Parameters
learning_rate = 0.01
training_epochs = 1000
display_step = 50

# Training Data
train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,
                         7.042,10.791,5.313,7.997,5.654,9.27,3.1])
train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,
                         2.827,3.465,1.65,2.904,2.42,2.94,1.3])
n_samples = train_X.shape[0]

# tf Graph Input
X = tf.placeholder("float")
Y = tf.placeholder("float")

# Set model weights
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")

# Construct a linear model
pred = tf.add(tf.multiply(X, W), b)

# Mean squared error
cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)
# Gradient descent
# Note, minimize() knows to modify W and b because Variable objects are trainable=True by default
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

# Start training
with tf.Session() as sess:

    # Run the initializer
    sess.run(init)

    # Fit all training data
    for epoch in range(training_epochs):
        for (x, y) in zip(train_X, train_Y):
            sess.run(optimizer, feed_dict={X: x, Y: y})

        # Display logs per epoch step
        if (epoch+1) % display_step == 0:
            c = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \
                "W=", sess.run(W), "b=", sess.run(b))

    print("Optimization Finished!")
    training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
    print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')

    # Graphic display
    plt.plot(train_X, train_Y, 'ro', label='Original data')
    plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
    plt.legend()
    plt.show()

    # Testing example, as requested (Issue #2)
    test_X = numpy.asarray([6.83, 4.668, 8.9, 7.91, 5.7, 8.7, 3.1, 2.1])
    test_Y = numpy.asarray([1.84, 2.273, 3.2, 2.831, 2.92, 3.24, 1.35, 1.03])

    print("Testing... (Mean square loss Comparison)")
    testing_cost = sess.run(
        tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * test_X.shape[0]),
        feed_dict={X: test_X, Y: test_Y})  # same function as cost above
    print("Testing cost=", testing_cost)
    print("Absolute mean square loss difference:", abs(
        training_cost - testing_cost))

    plt.plot(test_X, test_Y, 'bo', label='Testing data')
    plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
    plt.legend()
    plt.show()
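Note that plt.show() needs a display. When running on a remote machine over plain SSH, either enable X forwarding (ssh -X) or switch matplotlib to a non-interactive backend and save the figures instead, as in this minimal sketch:

import matplotlib
matplotlib.use('Agg')            # render without a display; call before importing pyplot
import matplotlib.pyplot as plt

# ... build the plots as above, then save instead of show:
plt.savefig('regression.png')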