We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent a6142b6 · commit ee1340a — Copy full SHA for ee1340a
1 file changed
lab-03-2-minimizing_cost_gradient_update.py
@@ -17,7 +17,7 @@
17
hypothesis = X * W
18
19
# cost/loss function
20
-cost = tf.reduce_sum(tf.square(hypothesis - Y))
+cost = tf.reduce_mean(tf.square(hypothesis - Y))
21
22
# Minimize: Gradient Descent using derivative: W -= learning_rate * derivative
23
learning_rate = 0.1
0 commit comments