
I am trying to create a graph that shows the correlation between mini-batch accuracy and validation accuracy for a neural network. Instead, I get a crazy graph that flickers at extremely high frequency and zooms in on a tiny portion of the plot. Why is my graph a crazy flickering monster?

Here is my code:

import matplotlib.pyplot as plt
from matplotlib import style, animation
import tensorflow as tf

# image_size, num_labels, the train/valid/test datasets and labels,
# the accuracy() helper and graph = tf.Graph() are defined elsewhere
# and are not shown here.

num_nodes = 1024
batch_size = 128
beta = 0.01


def animate(i):
    graph_data = open('NeuralNetData.txt', 'r').read()
    lines = graph_data.split('\n')
    xs = []
    ys = []
    for line in lines:
        if len(line) > 1:
            x, y = line.split(',')
            xs.append(x)
            ys.append(y)
    ax1.clear()
    ax1.plot(xs, ys, label='validation accuracy')
    ax1.legend(loc='lower right')
    ax1.set_ylabel("Accuracy(%)", fontsize=15)
    ax1.set_xlabel("Images Seen", fontsize=15)
    ax1.set_title("Neural Network Accuracy Data\nStochastic Gradient Descent", fontsize=10)
    plt.show()

def animate2(i):
    graph_data = open('NeuralNetData2.txt', 'r').read()
    lines = graph_data.split('\n')
    xs = []
    ys = []
    for line in lines:
        if len(line) > 1:
            x, y = line.split(',')
            xs.append(x)
            ys.append(y)
    ax1.plot(xs, ys, label='mini-batch accuracy')
    ax1.legend(loc='lower right')
    plt.tight_layout()
    plt.show()

style.use('fivethirtyeight') 

#Creating Graph 
fig = plt.figure(figsize=(50,50)) 
ax1 = fig.add_subplot(1,1,1) 

#1 hidden layer using RELUs and trying regularization techniques 

with graph.as_default(): 

    # Input data. For the training data, we use a placeholder that will be fed 
    # at run time with a training minibatch. 
    tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size)) 
    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels)) 
    tf_valid_dataset = tf.constant(valid_dataset) 
    tf_test_dataset = tf.constant(test_dataset) 

    # Variables. 
    weights_1 = tf.Variable(tf.truncated_normal([image_size * image_size, num_nodes])) 
    biases_1 = tf.Variable(tf.zeros([num_nodes])) 
    weights_2 = tf.Variable(tf.truncated_normal([num_nodes, num_labels])) 
    biases_2 = tf.Variable(tf.zeros([num_labels])) 

    # Training computation. 
    logits_1 = tf.matmul(tf_train_dataset, weights_1) + biases_1 
    relu_layer = tf.nn.relu(logits_1)
    logits_2 = tf.matmul(relu_layer, weights_2) + biases_2 
    # Normal loss function 
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits_2, labels=tf_train_labels)) 
    # Loss function with L2 Regularization with beta=0.01 
    regularizers = tf.nn.l2_loss(weights_1) + tf.nn.l2_loss(weights_2) 
    loss = tf.reduce_mean(loss + beta * regularizers) 

    # Optimizer. 
    optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss) 

    # Predictions for the training 
    train_prediction = tf.nn.softmax(logits_2) 

    # Predictions for validation 
    logits_1 = tf.matmul(tf_valid_dataset, weights_1) + biases_1 
    relu_layer = tf.nn.relu(logits_1)
    logits_2 = tf.matmul(relu_layer, weights_2) + biases_2 

    valid_prediction = tf.nn.softmax(logits_2) 

    # Predictions for test 
    logits_1 = tf.matmul(tf_test_dataset, weights_1) + biases_1 
    relu_layer = tf.nn.relu(logits_1)
    logits_2 = tf.matmul(relu_layer, weights_2) + biases_2 

    test_prediction = tf.nn.softmax(logits_2) 

num_steps = 3001 

open("NeuralNetData.txt","w").close() 
open("NeuralNetData.txt","a+") 
open("NeuralNetData2.txt","w+").close() 
open("NeuralNetData2.txt","a+") 

with tf.Session(graph=graph) as session:
    tf.global_variables_initializer().run()
    print("Initialized")
    for step in range(num_steps):
        f = open("NeuralNetData.txt", "a")
        t = open("NeuralNetData2.txt", "a")
        # Pick an offset within the training data, which has been randomized.
        # Note: we could use better randomization across epochs.
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        images_seen = step * batch_size
        # Generate a minibatch.
        batch_data = train_dataset[offset:(offset + batch_size), :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        # Prepare a dictionary telling the session where to feed the minibatch.
        # The key of the dictionary is the placeholder node of the graph to be fed,
        # and the value is the numpy array to feed to it.
        feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}
        _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)
        if (images_seen % 1000 == 0):
            print("Minibatch loss at step {}: {}".format(step, l))
            print("Minibatch accuracy: {:.1f}".format(accuracy(predictions, batch_labels)))
            print("Validation accuracy: {:.1f}".format(accuracy(valid_prediction.eval(), valid_labels)))
            x = str(images_seen)
            y = str(accuracy(valid_prediction.eval(), valid_labels))
            f.write(x + ',' + y + '\n')
            f.close()
            r = str(accuracy(predictions, batch_labels))
            t.write(x + ',' + r + '\n')
            t.close()
            ani = animation.FuncAnimation(fig, animate, interval=1000)
            ani2 = animation.FuncAnimation(fig, animate2, interval=1000)
    print("Test accuracy: {:.1f}".format(accuracy(test_prediction.eval(), test_labels)))

Assuming 'FuncAnimation(fig, animate, interval=1000)' is the part that produces the graph, do you know what that 'interval' parameter represents? That could be where the problem is. –


Use two image objects to display your graph: a back buffer and a front buffer. Draw to the back one, display the front one, swap front and back through a temporary image buffer, and repeat the whole process. – Sal
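In matplotlib you would not normally manage the two buffers by hand; the closest built-in idiom is to keep a single line artist, update its data each frame, and let FuncAnimation blit only the changed artist instead of clearing and redrawing the whole axes. Below is a minimal, self-contained sketch of that idea, independent of the question's code; the random values are just a stand-in for real accuracy numbers.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation

fig, ax = plt.subplots()
xs, ys = [], []
line, = ax.plot([], [], label='validation accuracy')   # one artist, reused every frame
ax.set_xlim(0, 100)                                     # fixed limits, since blitting skips autoscale
ax.set_ylim(0, 100)
ax.legend(loc='lower right')

def update(frame):
    # extend the data and update the existing line instead of clearing the axes
    xs.append(frame)
    ys.append(np.random.uniform(60, 95))                # stand-in for a real accuracy value
    line.set_data(xs, ys)
    return line,                                        # blit=True needs the changed artists returned

ani = animation.FuncAnimation(fig, update, frames=100, interval=1000, blit=True)
plt.show()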

Answer


First of all, do not call plt.show() inside the update functions that FuncAnimation calls. Instead, it should be called exactly once, at the end of the script. Second, it looks like you are using two different FuncAnimations that both work on the same axes (ax1), and one of them clears those axes. So what probably happens is that the axes get updated by one function while being cleared by the other - the result may be something close to chaos.

Third, you are creating 6002 FuncAnimations instead of just one or two. Every one of them will operate on the same axes, so if the above already produces chaos, this produces that chaos 6002 times.
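As a rough illustration of those three points, here is a minimal sketch of how the plotting side could be restructured: one callback (named animate_both here; read_xy is a helper invented just for this sketch) reads both files and redraws both curves, a single FuncAnimation is created once outside the training loop, and plt.show() is called exactly once at the end. The values are also converted to float so matplotlib treats them as numbers rather than strings. For the plot to update while training is still running, the training loop itself would have to run in a separate process or thread, which this sketch does not show.

import matplotlib.pyplot as plt
from matplotlib import style, animation

style.use('fivethirtyeight')
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)

def read_xy(filename):
    # parse the "x,y" lines written by the training loop into float lists
    xs, ys = [], []
    for line in open(filename).read().split('\n'):
        if len(line) > 1:
            x, y = line.split(',')
            xs.append(float(x))
            ys.append(float(y))
    return xs, ys

def animate_both(i):
    xs_v, ys_v = read_xy('NeuralNetData.txt')
    xs_m, ys_m = read_xy('NeuralNetData2.txt')
    ax1.clear()                                  # clear once, then draw both curves
    ax1.plot(xs_v, ys_v, label='validation accuracy')
    ax1.plot(xs_m, ys_m, label='mini-batch accuracy')
    ax1.legend(loc='lower right')
    ax1.set_xlabel("Images Seen", fontsize=15)
    ax1.set_ylabel("Accuracy(%)", fontsize=15)

# the training loop that writes the two files would go here (or run elsewhere)

ani = animation.FuncAnimation(fig, animate_both, interval=1000)   # one animation, created once
plt.show()                                                        # exactly once, at the end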


I still cannot get a stable graph for this problem. Where would you put plt.show()? Could you show me a modified program? Thanks everyone for the help! –


No, I cannot show you a modified program, because the code in this question is not a [mcve] that I could modify and test. – ImportanceOfBeingErnest