import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

# Define input data: a line with a sinusoidal wiggle
X_data = np.arange(100)
y_data = X_data + 20 * np.sin(X_data / 10)

# Plot input data
plt.figure(1)
plt.scatter(X_data, y_data)

# Define data size and batch size
n_samples = 100
batch_size = 10

# TensorFlow is finicky about shapes, so resize to (n_samples, 1)
X_data = np.reshape(X_data, (n_samples, 1))
y_data = np.reshape(y_data, (n_samples, 1))

# Define placeholders for input minibatches
X = tf.placeholder(tf.float32, shape=(batch_size, 1))
y = tf.placeholder(tf.float32, shape=(batch_size, 1))

# Define linear regression model and cost function
W = tf.get_variable("weights", (1, 1), initializer=tf.constant_initializer(0.8))
b = tf.get_variable("bias", (1,), initializer=tf.constant_initializer(-5))
y_pred = tf.matmul(X, W) + b
# Squared error over the minibatch, normalized by the full dataset size
loss = tf.reduce_sum((y - y_pred) ** 2 / n_samples)

# Sample code to run minibatch gradient descent:

# Define optimizer operation
opt_operation = tf.train.AdamOptimizer().minimize(loss)

with tf.Session() as sess:
    # Initialize variables in the graph
    # (tf.initialize_all_variables is deprecated; use the global initializer)
    sess.run(tf.global_variables_initializer())

    # Gradient descent loop for 1000 steps
    for step in range(1000):
        # Select a random minibatch
        indices = np.random.choice(n_samples, batch_size)
        X_batch, y_batch = X_data[indices], y_data[indices]

        # Do one gradient descent step
        _, loss_val = sess.run([opt_operation, loss],
                               feed_dict={X: X_batch, y: y_batch})
        if step % 100 == 0:
            print("step %d, loss %.3f" % (step, loss_val))

        # Display results: redraw the data, the current minibatch,
        # and the current fitted line
        w_val, b_val = W.eval()[0, 0], b.eval()[0]
        plt.clf()
        plt.scatter(X_data, y_data)
        plt.scatter(X_batch, y_batch)
        plt.plot([0, 100], [b_val, 100 * w_val + b_val], 'r--')
        plt.draw()
        plt.pause(0.0001)

# Keep the final plot window open
plt.show()
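
# Note: the script above targets the TensorFlow 1.x graph API (tf.placeholder,
# tf.Session, tf.train.AdamOptimizer). A minimal sketch for running it under a
# TensorFlow 2.x install, assuming the tf.compat.v1 shim is available, is to
# swap the TensorFlow import at the top for these two lines and leave the rest
# unchanged:
#
#     import tensorflow.compat.v1 as tf
#     tf.disable_eager_execution()  # placeholders/sessions need graph mode

# Optional sanity check (not part of the training loop itself): compare the
# learned parameters against the closed-form least-squares fit.
# np.polyfit(x, y, 1) returns [slope, intercept] for a degree-1 fit.
slope, intercept = np.polyfit(X_data.ravel(), y_data.ravel(), 1)
print("learned fit:     W = %.3f, b = %.3f" % (w_val, b_val))
print("closed-form fit: W = %.3f, b = %.3f" % (slope, intercept))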