2017-08-31

Could someone please explain the following: is it possible for a TensorFlow graph to run outside of a session?

I created a simple convolutional neural network using TensorFlow. I am using a class, and the graph is created in the constructor. I then train the network using a train method that I wrote. I am also using queues and the feed-in mechanism. This is an excerpt from the code:

class Super_res:
    'Create a CNN model which augments the resolution of an image'
    # object initialization (python) - constructor
    def __init__(self, input, output, batch_size, record_size, weights, biases): # input (no. neurons), output (no. neurons), batch_size (batches to process before registering a delta), record_size
        print("Initializing object")
        self.input = input
        self.output = output
        self.batch_size = batch_size
        self.record_size = record_size
        self.weights = weights
        self.biases = biases

        # initialize data batch readers. Parameters: [path], record_size, batch_size
        self.data_batch = data_reader3.batch_generator([DATA_PATH_OPTICAL_TRAIN], self.record_size, self.batch_size) # train set
        self.data_batch_eval = data_reader3.batch_generator([DATA_PATH_EVAL], self.record_size, self.batch_size) # eval set

        # this returns a [batch_size, 2, n_input] tensor; the second dimension holds the low-res image and the GT high-res image, each a flat vector of n_input entries
        self.data1 = tf.placeholder_with_default(tf.transpose(self.data_batch, [1, 0, 2]), [2, batch_size, n_input]) # one for the optical image and another for the GT image, [batch_size, n_input] each

        self.keep_prob = tf.placeholder(tf.float32) # dropout (keep probability) - this placeholder can accept a tensor of arbitrary shape

        # create network model
        self.pred = self.cnn_model(self.data1[0], self.weights, self.biases) # self.data1[0] is the low-res data

    def train(self):
        #self.low_res = self.data1[0]
        #self.high_res = self.data1[1]

        # define loss and optimizer
        #self.cost = tf.reduce_mean(tf.pow(self.data1[1] - self.pred, 2))
        #self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)

        # Initializing the variables
        init = tf.global_variables_initializer()

        # Initialize session
        with tf.Session() as sess:
            sess.run(init)
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)
            step = 1
            print("Entering training")

            # Keep training until reach max iterations
            while step * batch_size < training_iters:
                #_, c = sess.run([self.optimizer, self.cost])
                conv_result = sess.run(self.pred)
                print(conv_result)
                #data2 = self.data1[0]
                #print(data2)

                if step % display_step == 0:
                    print("Step:", '%04d' % (step+1))
                    # "cost=", c)
                step = step + 1

            coord.request_stop()
            coord.join(threads)

When I run this code, I get the following error output:

Entering training 
Traceback (most recent call last): 
File "C:\Users\divin\Miniconda3\envs\tensorflow_gpu\lib\site-packages \tensorflow\python\client\session.py", line 1139, in _do_call 
return fn(*args) 
File "C:\Users\divin\Miniconda3\envs\tensorflow_gpu\lib\site-packages \tensorflow\python\client\session.py", line 1121, in _run_fn 
status, run_metadata) 
File "C:\Users\divin\Miniconda3\envs\tensorflow_gpu\lib\contextlib.py", line 66, in __exit__ 
next(self.gen) 
File "C:\Users\divin\Miniconda3\envs\tensorflow_gpu\lib\site-packages\tensorflow\python\framework\errors_impl.py", line 466, in raise_exception_on_not_ok_status 
pywrap_tensorflow.TF_GetCode(status)) 
tensorflow.python.framework.errors_impl.OutOfRangeError: RandomShuffleQueue '_2_shuffle_batch/random_shuffle_queue' is closed and has insufficient elements (requested 512, current size 0) 
    [[Node: shuffle_batch = QueueDequeueManyV2[component_types=[DT_FLOAT], timeout_ms=-1, _device="/job:localhost/replica:0/task:0/cpu:0"](shuffle_batch/random_shuffle_queue, shuffle_batch/n)]] 
    [[Node: shuffle_batch/_25 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/gpu:0", send_device="/job:localhost/replica:0/task:0/cpu:0", send_device_incarnation=1, tensor_name="edge_5_shuffle_batch", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/gpu:0"]()]] 

During handling of the above exception, another exception occurred: 

Traceback (most recent call last): 
File "super_res_class.py", line 137, in <module> 
p.train() 
File "super_res_class.py", line 106, in train 
conv_result = sess.run(self.pred) 
File "C:\Users\divin\Miniconda3\envs\tensorflow_gpu\lib\site-packages \tensorflow\python\client\session.py", line 789, in run 
run_metadata_ptr) 
File "C:\Users\divin\Miniconda3\envs\tensorflow_gpu\lib\site-packages \tensorflow\python\client\session.py", line 997, in _run 
feed_dict_string, options, run_metadata) 
File "C:\Users\divin\Miniconda3\envs\tensorflow_gpu\lib\site-packages \tensorflow\python\client\session.py", line 1132, in _do_run 
target_list, options, run_metadata) 
File "C:\Users\divin\Miniconda3\envs\tensorflow_gpu\lib\site-packages \tensorflow\python\client\session.py", line 1152, in _do_call 
raise type(e)(node_def, op, message) 
tensorflow.python.framework.errors_impl.OutOfRangeError: RandomShuffleQueue '_2_shuffle_batch/random_shuffle_queue' is closed and has insufficient elements (requested 512, current size 0) 
    [[Node: shuffle_batch = QueueDequeueManyV2[component_types=[DT_FLOAT], timeout_ms=-1, _device="/job:localhost/replica:0/task:0/cpu:0"](shuffle_batch/random_shuffle_queue, shuffle_batch/n)]] 
    [[Node: shuffle_batch/_25 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/gpu:0", send_device="/job:localhost/replica:0/task:0/cpu:0", send_device_incarnation=1, tensor_name="edge_5_shuffle_batch", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/gpu:0"]()]] 

Caused by op 'shuffle_batch', defined at: 
File "super_res_class.py", line 136, in <module> 
p = Super_res(1024,1024,512,record_size, weights, biases) # params (n_input, n_output, batch_size) 
File "super_res_class.py", line 50, in __init__ 
self.data_batch = data_reader3.batch_generator([DATA_PATH_OPTICAL_TRAIN],self.record_size, self.batch_size) # train set 
File "E:\google_drive\Doctorate\matlab code\Tensorflow\doctorate_CNN\dong_recreation\data_reader3.py", line 156, in batch_generator 
capacity=capacity, min_after_dequeue=min_after_dequeue) 
File "C:\Users\divin\Miniconda3\envs\tensorflow_gpu\lib\site-packages\tensorflow\python\training\input.py", line 1217, in shuffle_batch 
name=name) 
File "C:\Users\divin\Miniconda3\envs\tensorflow_gpu\lib\site-packages\tensorflow\python\training\input.py", line 788, in _shuffle_batch 
dequeued = queue.dequeue_many(batch_size, name=name) 
File "C:\Users\divin\Miniconda3\envs\tensorflow_gpu\lib\site-packages \tensorflow\python\ops\data_flow_ops.py", line 457, in dequeue_many 
self._queue_ref, n=n, component_types=self._dtypes, name=name) 
File "C:\Users\divin\Miniconda3\envs\tensorflow_gpu\lib\site-packages \tensorflow\python\ops\gen_data_flow_ops.py", line 946, in _queue_dequeue_many_v2 
timeout_ms=timeout_ms, name=name) 
File "C:\Users\divin\Miniconda3\envs\tensorflow_gpu\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 767, in apply_op 
op_def=op_def) 
File "C:\Users\divin\Miniconda3\envs\tensorflow_gpu\lib\site-packages\tensorflow\python\framework\ops.py", line 2506, in create_op 
original_op=self._default_original_op, op_def=op_def) 
File "C:\Users\divin\Miniconda3\envs\tensorflow_gpu\lib\site-packages\tensorflow\python\framework\ops.py", line 1269, in __init__ 
self._traceback = _extract_stack() 

OutOfRangeError (see above for traceback): RandomShuffleQueue '_2_shuffle_batch/random_shuffle_queue' is closed and has insufficient elements (requested 512, current size 0) 
    [[Node: shuffle_batch = QueueDequeueManyV2[component_types=[DT_FLOAT], timeout_ms=-1, _device="/job:localhost/replica:0/task:0/cpu:0"](shuffle_batch/random_shuffle_queue, shuffle_batch/n)]] 
    [[Node: shuffle_batch/_25 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/gpu:0", send_device="/job:localhost/replica:0/task:0/cpu:0", send_device_incarnation=1, tensor_name="edge_5_shuffle_batch", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/gpu:0"]()]] 


(tensorflow_gpu) E:\google_drive\Doctorate\matlab code\Tensorflow\doctorate_CNN\dong_recreation> 

When I remove the sess.run() from around the pred output, the code appears to run normally:

def train(self):
    #self.low_res = self.data1[0]
    #self.high_res = self.data1[1]

    # define loss and optimizer
    #self.cost = tf.reduce_mean(tf.pow(self.data1[1] - self.pred, 2))
    #self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)

    # Initializing the variables
    init = tf.global_variables_initializer()

    # Initialize session
    with tf.Session() as sess:
        sess.run(init)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        step = 1
        print("Entering training")

        # Keep training until reach max iterations
        while step * batch_size < training_iters:
            #_, c = sess.run([self.optimizer, self.cost])
            conv_result = self.pred
            print(conv_result)
            #data2 = self.data1[0]
            #print(data2)

            if step % display_step == 0:
                print("Step:", '%04d' % (step+1))
                # "cost=", c)
            step = step + 1

        coord.request_stop()
        coord.join(threads)

Could someone please explain this to me? Normally, the graph is only evaluated when it is run inside a session! What is going on here?

Answer


Simply writing conv_result = self.pred does not do anything - in fact, you need to call sess.run(self.pred) for it to actually be executed. The error you are getting is about something else in your model. As it says, your InputProducer has an empty queue. This cannot be diagnosed from the information you have provided, but I would search the site further for why your InputProducer is not being filled / has zero size.
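
As a minimal illustration of the difference (a standalone TF 1.x sketch, not code from the model in the question):

    import tensorflow as tf

    # build a tiny graph; nothing is computed at this point
    a = tf.constant(2.0)
    b = a * 3.0

    print(b)  # prints the symbolic Tensor, e.g. Tensor("mul:0", shape=(), dtype=float32)

    # only running the tensor inside a session actually evaluates the graph
    with tf.Session() as sess:
        print(sess.run(b))  # 6.0

This is also why the second version of train() appears to run: conv_result = self.pred never executes the dequeue op that feeds self.pred, so any problem in the input pipeline never surfaces in the main thread. The OutOfRangeError itself means the shuffle_batch queue was closed while still empty; since the data_reader3 code was not posted this is only a guess, but common causes in queue-based pipelines are the reader finding no records (wrong path or record_size), or, when the file-name producer is created with num_epochs set, forgetting to run tf.local_variables_initializer() in addition to tf.global_variables_initializer().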