Python InvalidArgumentError: You must feed a value for placeholder tensor 'y' with dtype float and shape [?,10]

Hello, I'm using a CNN for the Street View House Numbers (SVHN) dataset.

When I try to run my code below to generate a summary that TensorBoard can read as graphs, the following error appears:

InvalidArgumentError: You must feed a value for placeholder tensor 'y' with dtype float and shape [?,10]
[[Node: y = Placeholder[dtype=DT_FLOAT, shape=[?,10], _device="/job:localhost/replica:0/task:0/device:CPU:0"]]]

I've tried to find out how to feed the placeholder tensor y, but I can't understand why I have to feed it another value when my labels have already been set by that function.

I couldn't find an answer anywhere. Can anyone help me? Thank you.
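
For reference, this is how I understand placeholder feeding is supposed to work (a minimal standalone sketch, not my actual code; the names here are made up for illustration):

import tensorflow as tf

# A placeholder holds no value of its own; it is only an input slot in the graph.
p = tf.placeholder(tf.float32, shape=[None, 10], name='p')
doubled = p * 2.0

with tf.Session() as sess:
    # Any sess.run() of an op that depends on 'p' must supply a value through
    # feed_dict, even if the same data already exists in a NumPy array elsewhere.
    result = sess.run(doubled, feed_dict={p: [[1.0] * 10]})
    print(result)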

My code:

    """Criando o modelo CNN para o conjunto das imagens do SVHN"""

TENSORBOARD_SUMMARIES_DIR = '/ArqAna/svhn_classifier_logs'

"""Carregando os dados"""
#Abrindo o arquivo
anadate = h5py.File('SVHN_dados.h5', 'r')

#Carregando o conjunto de treinamento, teste e validação
X_treino = anadate['X_treino'][:]
y_treino = anadate['y_treino'][:]
X_teste = anadate['X_teste'][:]
y_teste = anadate['y_teste'][:]
X_val = anadate['X_val'][:]
y_val = anadate['y_val'][:]

#Fecha o arquivo
anadate.close()

print('Conjunto de treinamento', X_treino.shape, y_treino.shape)
print('Conjunto de validação', X_val.shape, y_val.shape)
print('Conjunto de teste', X_teste.shape, y_teste.shape)

# Plot random images from the dataset
def plot_images(images, nrows, ncols, cls_true, cls_pred=None):
    # Initialize the subplot grid
    fig, axes = plt.subplots(nrows, ncols)

    # Randomly select nrows * ncols images
    rs = np.random.choice(images.shape[0], nrows*ncols)

    # For each axes object in the grid
    for i, ax in zip(rs, axes.flat):

        # No predictions passed: show only the true label
        if cls_pred is None:
            title = "True: {0}".format(np.argmax(cls_true[i]))

        # When predictions are passed, show labels + predictions
        else:
            title = "True: {0}, Pred: {1}".format(np.argmax(cls_true[i]), cls_pred[i])

        # Show the image
        ax.imshow(images[i,:,:,0], cmap='binary')

        # Annotate the image
        ax.set_title(title)

        # Hide the axis ticks
        ax.set_xticks([])
        ax.set_yticks([])

def prepare_log_dir():
    '''Clears the log files and creates new directories for
       the TensorBoard log file.'''
    if tf.gfile.Exists(TENSORBOARD_SUMMARIES_DIR):
        tf.gfile.DeleteRecursively(TENSORBOARD_SUMMARIES_DIR)
    tf.gfile.MakeDirs(TENSORBOARD_SUMMARIES_DIR)

# Yield successive mini-batches of (X, y)
def get_batch(X, y, batch_size=100):
    for i in np.arange(0, y.shape[0], batch_size):
        end = min(X.shape[0], i + batch_size)
        yield X[i:end], y[i:end]

# Placeholders for the inputs
comp = 32*32
x = tf.placeholder(tf.float32, shape = [None, 32, 32, 1], name='x')
y = tf.placeholder(tf.float32, shape = [None,10], name='y')
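# y will hold the one-hot float labels; tf.argmax below recovers the integer class index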
y_cls = tf.argmax(y, 1)

discard_rate = tf.placeholder(tf.float32, name='Discard_rate')
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'        

def cnn_model_fn(features):
  """Model function for the CNN."""
  # Input layer
  # SVHN images are 32x32 pixels with 1 color channel
  input_layer = tf.reshape(features, [-1, 32, 32, 1])

  # Convolutional layer #1
  # Applies 32 filters over 5x5 pixel regions with ReLU activation.
  # Padding preserves width and height (keeps the output from "shrinking").
  # Input Tensor Shape: [batch_size, 32, 32, 1]
  # Output Tensor Shape: [batch_size, 32, 32, 32]
  conv1 = tf.layers.conv2d(
      inputs=input_layer,
      filters=32,
      kernel_size=[5, 5],
      padding="same",
      activation=tf.nn.relu)

  # Pooling layer #1
  # First max pooling layer with a 2x2 filter and a stride of 2
  # Input Tensor Shape: [batch_size, 32, 32, 32]
  # Output Tensor Shape: [batch_size, 16, 16, 32]
  pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)

  # Convolutional layer #2
  # Computes 64 features using a 5x5 filter.
  # Padding preserves width and height (keeps the output from "shrinking").
  # Input Tensor Shape: [batch_size, 16, 16, 32]
  # Output Tensor Shape: [batch_size, 16, 16, 64]
  conv2 = tf.layers.conv2d(
      inputs=pool1,
      filters=64,
      kernel_size=[5, 5],
      padding="same",
      activation=tf.nn.relu)

  # Pooling layer #2
  # Second max pooling layer with a 2x2 filter and a stride of 2
  # Input Tensor Shape: [batch_size, 16, 16, 64]
  # Output Tensor Shape: [batch_size, 8, 8, 64]
  pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

  # Flatten the tensor into a batch of vectors
  # Input Tensor Shape: [batch_size, 8, 8, 64]
  # Output Tensor Shape: [batch_size, 8 * 8 * 64]
  pool2_flat = tf.reshape(pool2, [-1, 8 * 8 * 64])

  # Dense layer
  # Densely connected layer with 1024 neurons
  # Input Tensor Shape: [batch_size, 8 * 8 * 64]
  # Output Tensor Shape: [batch_size, 1024]
  dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)

  # Add a dropout operation; 'rate' is the fraction of elements dropped
  dropout = tf.layers.dropout(
      inputs=dense, rate=discard_rate)

  # Logits layer
  # Input Tensor Shape: [batch_size, 1024]
  # Output Tensor Shape: [batch_size, 10]
  logits = tf.layers.dense(inputs=dropout, units=10)

  return logits

max_epochs = 1
num_examples = X_treino.shape[0]

prepare_log_dir()

# Compute the prediction, optimization and accuracy
with tf.name_scope('Model_Prediction'):
    prediction = cnn_model_fn(x)
    tf.summary.scalar('Model_Prediction', prediction)
prediction_cls = tf.argmax(prediction, 1)

with tf.name_scope('loss'):
    #loss = tf.reduce_mean(tf.losses.softmax_cross_entropy(onehot_labels = y, logits = prediction))
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits= prediction,labels=y)) 
    tf.summary.scalar('loss', loss)

with tf.name_scope('Adam_optimizer'):
    optimizer = tf.train.AdamOptimizer().minimize(loss)

# Check whether the predicted class equals the true class for each image
correct_prediction = tf.equal(prediction_cls, y_cls)

# Cast the boolean predictions to float and compute the mean
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

t_summary = tf.summary.merge_all()

# Open the TensorFlow session and set up the summary writer
sess = tf.Session()
x = sess.graph.get_tensor_by_name('x:0')
summary_writer = tf.summary.FileWriter('/ArqAna/summary', sess.graph)
sess.run(tf.global_variables_initializer())

saver = tf.train.Saver()
pasta = 'ArqAna/'
if not os.path.exists(pasta):
    os.makedirs(pasta)
diret = os.path.join(pasta, 'ana_svhn')

val_summary = sess.run(t_summary)
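# Note: the line above evaluates t_summary without a feed_dict; t_summary comes
# from tf.summary.merge_all(), which collected the 'loss' summary, and 'loss'
# depends on the placeholders x, y and discard_rate.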

summary_writer.add_summary(val_summary)

summary_writer = tf.summary.FileWriter('/ArqAna/svhn_classifier_logs', sess.graph)

#saver.restore(sess=session, save_path=diret)

# Initialization
## Number of examples in each batch used to update the weights
batch_size = 100

# Fraction of neurons to discard (dropout rate) in training mode
discard_per = 0.7

#with tf.Session() as sess:
 #   sess.run(tf.global_variables_initializer())

# Training loop
treino_loss = []
valid_loss = []
for epoch in range(max_epochs):
    print('Training the network')
    epoch_loss = 0
    print()
    print('Epoch ', epoch+1, ': \n')
    step = 0

    ## Training over the mini-batches
    for (epoch_x, epoch_y) in get_batch(X_treino, y_treino, batch_size):
        _, treino_accu, c = sess.run([optimizer, accuracy, loss], feed_dict={x: epoch_x, y: epoch_y, discard_rate: discard_per})
        treino_loss.append(c)

        if step % 40 == 0:
            print("Step:", step, "\n", "\nMini-batch loss   : ", c)
            print('Mini-batch accuracy :', treino_accu*100.0, '%')

            ## Validating the predictions and summaries
            accu = 0.0
            for (epoch_x, epoch_y) in get_batch(X_val, y_val, 100):
                correct, _c = sess.run([correct_prediction, loss], feed_dict={x: epoch_x, y: epoch_y, discard_rate: 0.0})
                valid_loss.append(_c)
                accu += np.sum(correct)
            print('Validation accuracy :', accu*100.0/y_val.shape[0], '%')
            print()
        step = step + 1


    print('Epoch', epoch+1, 'of', max_epochs, 'completed')

## Testing the predictions and summaries
accu = 0.0
for (epoch_x, epoch_y) in get_batch(X_teste, y_teste, 100):
    correct = sess.run(correct_prediction, feed_dict={x: epoch_x, y: epoch_y, discard_rate: 0.0})
    accu += np.sum(correct)
print('Test accuracy :', accu*100.0/y_teste.shape[0], '%')
print()


# Save the ArqAna checkpoint file
saver.save(sess = sess, save_path = diret)

# Plot training images
plot_images(X_treino, 2, 4, y_treino);

# Evaluate the test data
teste_pred = []
for (epoch_x , epoch_y) in get_batch(X_teste, y_teste, 100):
    correct = sess.run([prediction_cls], feed_dict={x: epoch_x, y: epoch_y, discard_rate: 0.0})
    teste_pred.append((np.asarray(correct, dtype=int)).T)

# Convert the list of per-batch predictions into a single numpy array
def flatten(lists):
    results = []
    for numbers in lists:
        for x in numbers:
            results.append(x)
    return np.asarray(results)
flat_array = flatten(teste_pred)
flat_array = (flat_array.T)
flat_array = flat_array[0]
print(flat_array.shape)

# Plot the misclassified results
incorrect = flat_array != np.argmax(y_teste, axis=1)
images = X_teste[incorrect]
cls_true = y_teste[incorrect]
cls_pred = flat_array[incorrect]
plot_images(images, 2, 6, cls_true, cls_pred);

# Plot correctly classified results from a random sample of the test set
correct = np.invert(incorrect)
images = X_teste[correct]
cls_true = y_teste[correct]
cls_pred = flat_array[correct]
plot_images(images, 2, 6, cls_true, cls_pred);

# Plot the training and validation loss
#%matplotlib inline
plt.plot(treino_loss ,'r')
plt.plot(valid_loss, 'g')