TensorFlow question: when building a recurrent network with a for loop, can gradients only be taken with respect to variables inside the current loop iteration?

I want to compute, for the loss at each time step of an RNN, the gradients with respect to the RNN kernel and bias, so I modified the source code of `static_rnn`.

The problematic code looks like this:

    kernel = tf.Variable(initial_kernel, name='meta_rnn_kernel', trainable=False)
    bias = tf.Variable(initial_bias, name='meta_rnn_bias', trainable=False)
    for time, input_ in enumerate(inputs):
      if time > 0:
        varscope.reuse_variables()
      # pylint: disable=cell-var-from-loop
      call_cell = lambda: cell.meta_call(input_, state, kernel, bias)
      output = zero_output
      # pylint: enable=cell-var-from-loop
      if sequence_length is not None:
          if time == 0:
                (output, state) = _rnn_step(
                    time=time,
                    sequence_length=sequence_length,
                    min_sequence_length=min_sequence_length,
                    max_sequence_length=max_sequence_length,
                    zero_output=zero_output,
                    state=state,
                    call_cell=call_cell,
                    state_size=cell.state_size)
          else:
                _update_kernel_bias(meta_learner, initial_kernel, initial_bias, kernel, bias, y[:, time], yn[:, time],
                                    output, embedding_params, x_nonzero[:, time], batch_size, timestep, max_items_basket, max_items_all)
                (output, state) = _rnn_step(
                    time=time,
                    sequence_length=sequence_length,
                    min_sequence_length=min_sequence_length,
                    max_sequence_length=max_sequence_length,
                    zero_output=zero_output,
                    state=state,
                    call_cell=call_cell,
                    state_size=cell.state_size)

The `_update_kernel_bias` function is:

    def _update_kernel_bias(meta_learner, initial_kernel, initial_bias, kernel, bias, y, yn, rnn_output, embedding_params, x_nonzero, batch_size, timestep, max_items_basket, max_items_all):
        '''y is the output of the previous time step, [batch_size, max_items_basket];
        rnn_output is [batch_size, latent_dimension]; x_nonzero is [batch_size].
        Note: all of these inputs must already be sliced along the time axis.'''
        # def is_in_for_tensor(element, set):
        #   isIn = tf.constant(False, tf.bool)
        #   for i in range(int(set.shape[0])):
        #     isIn = isIn | tf.equal(element, set[i])
        #   return isIn

        # def generate_negative_samples_for_update(y, batch_size, timestep, max_items_basket, max_items_all):
        #     '''Randomly generate negative samples on the fly during training,
        #     used to compute the BPR loss.
        #     y: [batch_size, max_items_basket]
        #     returns: ndarray [batch_size, max_items_basket]'''
        #     ynlist = []
        #     for i in range(batch_size):
        #         temp = []
        #         for k in range(max_items_basket):
        #             negative_itemid = y[i][0]
        #             while negative_itemid in y[i]:
        #                 negative_itemid = random.randrange(0, max_items_all)
        #             temp += [negative_itemid]
        #         ynlist.append(temp)
        #     return np.array(ynlist)

        loss = tf.constant(0.0, dtype=tf.float32)
        ye = tf.nn.embedding_lookup(embedding_params, y)    # [batch_size, maxitems, ld]
        yne = tf.nn.embedding_lookup(embedding_params, yn)
        for i in range(batch_size):
            pos_scores = tf.reshape(tf.matmul(ye[i], tf.transpose(rnn_output[i:i+1])), (-1,))  # [maxitems]
            neg_scores = tf.reshape(tf.matmul(yne[i], tf.transpose(rnn_output[i:i+1])), (-1,))  # [maxitems]
            scores = pos_scores[:x_nonzero[i]] - neg_scores[:x_nonzero[i]]
            scores = -tf.log(tf.sigmoid(scores))  # [nonzero_nums]
            scores = tf.reduce_sum(scores)  # sums to 0 if the slice is entirely empty; otherwise behaves normally
            loss += scores
        KERNEL_SHAPE = kernel.shape
        BIAS_SHAPE = bias.shape
        loss_gradients = tf.gradients(loss, [kernel, bias])
        loss_gradients_k = tf.reshape(loss_gradients[0], shape=(-1,))
        loss_gradients_b = tf.reshape(loss_gradients[1], shape=(-1,))
        loss_gradients = tf.concat([loss_gradients_k, loss_gradients_b], axis=0)
        _kernel = tf.reshape(kernel, shape=(-1,))
        KERNEL_SIZE = _kernel.shape[0]
        print(KERNEL_SIZE)
        _bias = tf.reshape(bias, shape=(-1,))
        _initial_kernel = tf.reshape(initial_kernel, shape=(-1,))
        _initial_bias = tf.reshape(initial_bias, shape=(-1,))
        meta_learner_input = tf.concat([loss_gradients, _kernel, _bias, _initial_bias, _initial_kernel], axis=0)
        meta_learner_input = tf.reshape(meta_learner_input, shape=(1, -1))
        meta_learner_output = meta_learner(meta_learner_input)
        new_kernel = meta_learner_output[:, :KERNEL_SIZE]
        new_kernel = tf.reshape(new_kernel, shape=KERNEL_SHAPE)
        print("newkernel", new_kernel)
        new_bias = meta_learner_output[:, KERNEL_SIZE:]
        new_bias = tf.reshape(new_bias, shape=BIAS_SHAPE)
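        # Note: in TF1 graph mode, assign() returns an op that only takes effect
        # when it is run (or depended on); later reads of the variable are not
        # automatically connected to new_kernel / new_bias in the gradient graph.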
        kernel.assign(new_kernel)
        bias.assign(new_bias)

Running this produces the following error:

    Traceback (most recent call last):
      File "metaDREAM.py", line 163, in <module>
        train(lr=0.0001,batch_size=None,ewc_coefficient=1e-6,l2=1e-6,timestep=None,max_iter=10,pooling_type=None,variablescope='MetaTestA1',best_base_scope='TESTA4',bestepoch=1)
      File "metaDREAM.py", line 69, in train
        max_items_all,initial_state=init_state,time_major=True,sequence_length=xs)
      File "/home/likewise-open/SENSETIME/liupengcheng/Workspace/metaDream/test/myrnn_for_meta_testv2.py", line 126, in static_rnn_for_meta
        output,embedding_params,x_nonzero[:,time],batch_size,timestep,max_items_basket,max_items_all)
      File "/home/likewise-open/SENSETIME/liupengcheng/Workspace/metaDream/test/myrnn_for_meta_testv2.py", line 396, in _update_kernel_bias
        loss_gradients_k = tf.reshape(loss_gradients[0],shape=(-1,))
      File "/home/likewise-open/SENSETIME/liupengcheng/anaconda3/lib/python3.6/site-packages/tensorflow/python/ops/gen_array_ops.py", line 3938, in reshape
        "Reshape", tensor=tensor, shape=shape, name=name)
      File "/home/likewise-open/SENSETIME/liupengcheng/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 528, in _apply_op_helper
        (input_name, err))
    ValueError: Tried to convert 'tensor' to a tensor and failed. Error: None values not supported.

Analyzing this, the problem is not actually in the `reshape` line but in the line above it: `tf.gradients` returns None for the kernel, i.e., during backpropagation no dependency path can be found from `loss` back to `kernel`.
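
To make that concrete, here is a minimal sketch of my own (TF 1.x; `w` and `x` are toy placeholders, not names from this project) showing how `tf.gradients` signals a missing dependency path:

    import tensorflow as tf

    w = tf.Variable(1.0, name='w')
    x = tf.constant(2.0)

    loss_connected = w * x        # depends on w
    loss_disconnected = x * x     # no path back to w

    print(tf.gradients(loss_connected, [w]))     # [a gradient tensor]
    print(tf.gradients(loss_disconnected, [w]))  # [None]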

This is strange. I previously wrote a BPR loss on top of the normal `static_rnn` (and `dynamic_rnn` too), and the gradients came out fine in both cases. Why does it break once I pull things apart?
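
For comparison, the pattern that differentiated fine presumably looked roughly like this (a sketch with made-up shapes; `BasicRNNCell` and `reduce_sum` stand in for the real cell and BPR loss):

    import tensorflow as tf

    cell = tf.nn.rnn_cell.BasicRNNCell(num_units=8)
    inputs = [tf.random_normal([4, 8]) for _ in range(3)]  # 3 time steps, batch of 4

    outputs, state = tf.nn.static_rnn(cell, inputs, dtype=tf.float32)

    # any loss built from outputs differentiates w.r.t. the cell's kernel/bias,
    # because the graph path loss -> outputs -> kernel/bias is intact
    loss = tf.reduce_sum(outputs[-1])  # placeholder for the real BPR loss
    grads = tf.gradients(loss, tf.trainable_variables())
    print(grads)  # no None entries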

Then, just to see what would happen, I tried the following code and it runs without error:

    for time, input_ in enumerate(inputs):
      if time > 0:
        varscope.reuse_variables()
      # pylint: disable=cell-var-from-loop
      call_cell = lambda: cell.meta_call(input_, state, kernel, bias)
      output = zero_output
      # pylint: enable=cell-var-from-loop
      if sequence_length is not None:
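            # note: time from enumerate only takes values 0..len(inputs)-1,
            # so this condition is always true and the else branch never runs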
            if time != len(inputs):
                (output, state) = _rnn_step(
                    time=time,
                    sequence_length=sequence_length,
                    min_sequence_length=min_sequence_length,
                    max_sequence_length=max_sequence_length,
                    zero_output=zero_output,
                    state=state,
                    call_cell=call_cell,
                    state_size=cell.state_size)
                _update_kernel_bias(meta_learner, initial_kernel, initial_bias, kernel, bias, y[:, time], yn[:, time],
                                    output, embedding_params, x_nonzero[:, time], batch_size, timestep, max_items_basket, max_items_all)
            else:
                (output, state) = _rnn_step(
                    time=time,
                    sequence_length=sequence_length,
                    min_sequence_length=min_sequence_length,
                    max_sequence_length=max_sequence_length,
                    zero_output=zero_output,
                    state=state,
                    call_cell=call_cell,
                    state_size=cell.state_size)

In other words, taking gradients within a single loop iteration works, but once execution reaches the next iteration of the loop body it fails.

That makes no sense... writing a loop is really just code reuse. Say I have 3 time steps: these two pieces of code should unroll into exactly the same graph, so why does one of them fail?
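
For what it's worth, that intuition about unrolling is sound in TF 1.x graph mode: a Python for loop only repeats graph construction, so looped and hand-unrolled builds produce the same ops. A toy sketch of my own:

    import tensorflow as tf

    w = tf.Variable(1.0)

    # looped construction
    h = tf.constant(1.0)
    for _ in range(3):
        h = h * w

    # hand-unrolled construction: the same graph
    h2 = tf.constant(1.0)
    h2 = h2 * w
    h2 = h2 * w
    h2 = h2 * w

    print(tf.gradients(h, [w]))   # a gradient tensor, not None
    print(tf.gradients(h2, [w]))  # identical structure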

Writing this down, I think I see it... the time index was missing a -1 (it should slice y[:, time-1], not y[:, time]). Modified:

    for time, input_ in enumerate(inputs):
      if time > 0:
        varscope.reuse_variables()
      # pylint: disable=cell-var-from-loop
      call_cell = lambda: cell.meta_call(input_, state, kernel, bias)
      output = zero_output
      # pylint: enable=cell-var-from-loop
      if sequence_length is not None:
          if time == 0:
                (output, state) = _rnn_step(
                    time=time,
                    sequence_length=sequence_length,
                    min_sequence_length=min_sequence_length,
                    max_sequence_length=max_sequence_length,
                    zero_output=zero_output,
                    state=state,
                    call_cell=call_cell,
                    state_size=cell.state_size)
          else:
                _update_kernel_bias(meta_learner, initial_kernel, initial_bias, kernel, bias, y[:, time-1], yn[:, time-1],
                                    output, embedding_params, x_nonzero[:, time-1], batch_size, timestep, max_items_basket, max_items_all)
                (output, state) = _rnn_step(
                    time=time,
                    sequence_length=sequence_length,
                    min_sequence_length=min_sequence_length,
                    max_sequence_length=max_sequence_length,
                    zero_output=zero_output,
                    state=state,
                    call_cell=call_cell,
                    state_size=cell.state_size)

Still wrong, and it still feels unreasonable. Could the error be in `output`? Does the loop body somehow cut off backpropagation at `output`? I can't articulate why, though.
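
One mechanism that would fit this suspicion (my guess, not a confirmed diagnosis): `output = zero_output` at the top of each iteration rebinds the Python name to a constant tensor before `_update_kernel_bias` ever sees the previous step's result. A toy sketch:

    import tensorflow as tf

    w = tf.Variable(1.0)
    zero_output = tf.zeros([])

    output = zero_output
    for time in range(2):
        output = zero_output          # rebinds the name to a constant tensor
        if time > 0:
            # output no longer refers to the previous step's result,
            # so there is no path back to w
            print(tf.gradients(output, [w]))  # [None]
        output = output + w           # the actual step, which does depend on w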

Hoping some clever reader can answer my question.

posted @ 2018-12-25 15:53 大胖子球花