Why is my graph a crazy flickering monster?

I am trying to create a graph showing the correlation between mini batch accuracy and validation accuracy of a neural net. But instead, I have a crazy graph that is flickering at a super high frequency and is zoomed in on a very small portion of the graph.

Here is my code:

import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
import tensorflow as tf

num_nodes=1024
batch_size = 128
beta = 0.01


def animate(i):
    graph_data = open('NeuralNetData.txt','r').read()
    lines = graph_data.split('\n')
    xs = []
    ys = []
    for line in lines:
        if len(line) > 1:
            x, y = line.split(',')
            xs.append(x)
            ys.append(y)
    ax1.clear()
    ax1.plot(xs, ys,label='validation accuracy')
    ax1.legend(loc='lower right')
    ax1.set_ylabel("Accuracy(%)", fontsize=15)
    ax1.set_xlabel("Images Seen", fontsize=15)
    ax1.set_title("Neural Network Accuracy Data\nStochastic Gradient Descent", fontsize=10)
    plt.show()

def animate2(i):
    graph_data = open('NeuralNetData2.txt','r').read()
    lines = graph_data.split('\n')
    xs = []
    ys = []
    for line in lines:
        if len(line) > 1:
            x, y = line.split(',')
            xs.append(x)
            ys.append(y)
    ax1.plot(xs, ys, label='mini-batch accuracy')
    ax1.legend(loc='lower right')
    plt.tight_layout()
    plt.show()

style.use('fivethirtyeight')

#Creating Graph
fig = plt.figure(figsize=(50,50))
ax1 = fig.add_subplot(1,1,1)

#1 hidden layer using RELUs and trying regularization techniques

with graph.as_default():

    # Input data. For the training data, we use a placeholder that will be fed
    # at run time with a training minibatch.
    tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size))
    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    tf_valid_dataset = tf.constant(valid_dataset)
    tf_test_dataset = tf.constant(test_dataset)

    # Variables.
    weights_1 = tf.Variable(tf.truncated_normal([image_size * image_size, num_nodes]))
    biases_1 = tf.Variable(tf.zeros([num_nodes]))
    weights_2 = tf.Variable(tf.truncated_normal([num_nodes, num_labels]))
    biases_2 = tf.Variable(tf.zeros([num_labels]))

    # Training computation.
    logits_1 = tf.matmul(tf_train_dataset, weights_1) + biases_1
    relu_layer= tf.nn.relu(logits_1)
    logits_2 = tf.matmul(relu_layer, weights_2) + biases_2
    # Normal loss function
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits_2, labels=tf_train_labels))
    # Loss function with L2 Regularization with beta=0.01
    regularizers = tf.nn.l2_loss(weights_1) + tf.nn.l2_loss(weights_2)
    loss = tf.reduce_mean(loss + beta * regularizers)

    # Optimizer.
    optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

    # Predictions for the training
    train_prediction = tf.nn.softmax(logits_2)

    # Predictions for validation 
    logits_1 = tf.matmul(tf_valid_dataset, weights_1) + biases_1
    relu_layer= tf.nn.relu(logits_1)
    logits_2 = tf.matmul(relu_layer, weights_2) + biases_2

    valid_prediction = tf.nn.softmax(logits_2)

    # Predictions for test
    logits_1 = tf.matmul(tf_test_dataset, weights_1) + biases_1
    relu_layer= tf.nn.relu(logits_1)
    logits_2 = tf.matmul(relu_layer, weights_2) + biases_2

    test_prediction =  tf.nn.softmax(logits_2)

num_steps = 3001

open("NeuralNetData.txt","w").close()
open("NeuralNetData.txt","a+")
open("NeuralNetData2.txt","w+").close()
open("NeuralNetData2.txt","a+")

with tf.Session(graph=graph) as session:
    tf.global_variables_initializer().run()
    print("Initialized")
    for step in range(num_steps):
        f= open("NeuralNetData.txt","a")
        t= open("NeuralNetData2.txt","a")
        # Pick an offset within the training data, which has been randomized.
        # Note: we could use better randomization across epochs.
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        images_seen = step* batch_size
        # Generate a minibatch.
        batch_data = train_dataset[offset:(offset + batch_size), :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        # Prepare a dictionary telling the session where to feed the minibatch.
        # The key of the dictionary is the placeholder node of the graph to be fed,
        # and the value is the numpy array to feed to it.
        feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
        _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)
        if (images_seen % 1000 == 0):
            print("Minibatch loss at step {}: {}".format(step, l))
            print("Minibatch accuracy: {:.1f}".format(accuracy(predictions, batch_labels)))
            print("Validation accuracy: {:.1f}".format(accuracy(valid_prediction.eval(), valid_labels)))
            x=str(images_seen)
            y=str(accuracy(valid_prediction.eval(), valid_labels))
            f.write(x+','+y+'\n')
            f.close()
            r=str(accuracy(predictions, batch_labels))
            t.write(x+','+r+'\n')
            t.close()
            ani = animation.FuncAnimation(fig, animate, interval=1000)
            ani2 = animation.FuncAnimation(fig, animate2, interval=1000)
    print("Test accuracy: {:.1f}".format(accuracy(test_prediction.eval(), test_labels)))

1 Answer

First, don't call plt.show() inside an update function that is driven by FuncAnimation. Instead, it should probably be called exactly once, at the end of the script.
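
As a minimal sketch of that ordering (the plotted data below is a made-up placeholder, not the question's files, and is only there to show the structure):

import matplotlib.pyplot as plt
import matplotlib.animation as animation

fig, ax1 = plt.subplots()

def animate(i):
    # redraw the axes on every frame, but never call plt.show() here
    ax1.clear()
    ax1.plot(range(10), [(j * (i % 5)) % 7 for j in range(10)])  # placeholder data

ani = animation.FuncAnimation(fig, animate, interval=1000)
plt.show()  # called once, after the animation object exists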

Second, you are using two different FuncAnimations that operate on the same axes (ax1). One of them clears those axes, so the plot may be updated by one function while it is being cleared by the other; the result is close to chaos.

Third, you are creating a new pair of FuncAnimations every time the training loop logs a data point, instead of only one or two in total. Each of them operates on the same axes, so whatever chaos the previous point already produces is multiplied many times over.
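
Putting the three points together, here is a sketch of how the plotting side could look: one update function reads both of the question's data files, clears the axes once, draws both curves, and a single FuncAnimation is created outside the training loop. (The comma-separated "images seen, accuracy" file format is taken from the question; error handling is left out.)

import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style

style.use('fivethirtyeight')
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)

def read_xy(filename):
    # read the "x,y" lines written by the training loop into float lists
    xs, ys = [], []
    with open(filename) as fh:
        for line in fh:
            line = line.strip()
            if line:
                x, y = line.split(',')
                xs.append(float(x))
                ys.append(float(y))
    return xs, ys

def animate(i):
    # one update function handles both curves, so the axes are
    # cleared and redrawn exactly once per frame
    val_x, val_y = read_xy('NeuralNetData.txt')
    mb_x, mb_y = read_xy('NeuralNetData2.txt')
    ax1.clear()
    ax1.plot(val_x, val_y, label='validation accuracy')
    ax1.plot(mb_x, mb_y, label='mini-batch accuracy')
    ax1.legend(loc='lower right')
    ax1.set_xlabel("Images Seen", fontsize=15)
    ax1.set_ylabel("Accuracy(%)", fontsize=15)
    ax1.set_title("Neural Network Accuracy Data\nStochastic Gradient Descent", fontsize=10)

# create the animation exactly once, outside the training loop
ani = animation.FuncAnimation(fig, animate, interval=1000)
plt.show()

With the animation constructed once, the training loop only needs to keep appending lines to the two files; the update function picks up the new points on its next one-second tick.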



