0
I’m trying to implement a 3D facial recognition algorithm using convolutional neural networks (CNNs). I implemented one image generator for RGB images and another image generator for depth images (grayscale). Since I have two different inputs, I designed two CNN models, one with input_shape=(height, width, 3) and the other with input_shape=(height, width, 1). Independently, I can train each network with its respective image generator; however, after concatenating the two branches/models and merging both generators, I came across this warning and error:
WARNING:tensorflow:Model was constructed with shape (None, 400, 400, 1) for input KerasTensor(type_spec=TensorSpec(shape=(None, 400, 400, 1), dtype=tf.float32, name='Depth_Input_input'), name='Depth_Input_input', description="created by layer 'Depth_Input_input'"), but it was called on an input with incompatible shape (None, None)
"ValueError: Input 0 of layer Depth_Input is incompatible with the layer: expected min_ndim=4, found ndim=2. Full shape received: (None, None)"
What can I do to solve the problem? I appreciate any help.
My code is below:
# Input resolution shared by both modalities; flow_from_directory
# expects (rows, cols) for its target_size argument.
height, width = 400, 400
shape = (height, width)
# ########################### RGB ############################
# Convolutional feature extractor for the 3-channel colour stream:
# six conv/pool stages followed by a two-layer dense embedding head
# that ends in a 128-unit representation for the fusion step.
model_rgb = tf.keras.models.Sequential([
    Conv2D(filters=16, kernel_size=3, activation='relu',
           name="RGB_Input", input_shape=(height, width, 3)),
    MaxPooling2D(pool_size=2),
    Dropout(0.3),
    Conv2D(filters=32, kernel_size=3, activation='relu'),
    MaxPooling2D(pool_size=2),
    Conv2D(filters=32, kernel_size=3, activation='relu'),
    MaxPooling2D(pool_size=2),
    Conv2D(filters=64, kernel_size=3, activation='relu'),
    MaxPooling2D(pool_size=2),
    Conv2D(filters=64, kernel_size=3, activation='relu'),
    MaxPooling2D(pool_size=2),
    Conv2D(filters=128, kernel_size=3, activation='relu'),
    MaxPooling2D(pool_size=2),
    Flatten(),
    Dense(units=512, activation='relu'),
    Dropout(0.3),
    Dense(units=128, activation='relu'),
    Dropout(0.3),
])
# ########################### DEPTH ###########################
# Convolutional feature extractor for the single-channel depth stream.
# Mirrors the RGB branch but with one fewer filter doubling, and ends
# in the same 128-unit embedding so the two branches can be fused.
model_depth = tf.keras.models.Sequential([
    Conv2D(filters=16, kernel_size=3, activation='relu',
           name="Depth_Input", input_shape=(height, width, 1)),
    MaxPooling2D(pool_size=2),
    Dropout(0.3),
    Conv2D(filters=16, kernel_size=3, activation='relu'),
    MaxPooling2D(pool_size=2),
    Conv2D(filters=32, kernel_size=3, activation='relu'),
    MaxPooling2D(pool_size=2),
    Conv2D(filters=32, kernel_size=3, activation='relu'),
    MaxPooling2D(pool_size=2),
    Conv2D(filters=64, kernel_size=3, activation='relu'),
    MaxPooling2D(pool_size=2),
    Conv2D(filters=64, kernel_size=3, activation='relu'),
    MaxPooling2D(pool_size=2),
    Flatten(),
    Dense(units=512, activation='relu'),
    Dropout(0.3),
    Dense(units=128, activation='relu'),
    Dropout(0.3),
])
#### Concatenating branches ####
# Fuse the two 128-d branch embeddings and classify into 2 classes.
merge = Concatenate()([model_rgb.output, model_depth.output])
merged_out = Dense(units=16, activation='relu')(merge)
merged_out = Dense(units=2, activation='softmax')(merged_out)
# Two-input model: callers must feed [rgb_batch, depth_batch].
merged_model = Model([model_rgb.input, model_depth.input], merged_out)
merged_model.compile(optimizer=Adam(learning_rate=0.0001),
                     loss='categorical_crossentropy',
                     metrics=['accuracy'])
# gen_flow is an infinite Python generator, so Keras cannot infer the
# epoch length on its own; without steps_per_epoch the first epoch
# would never terminate. One epoch = one pass over the RGB iterator.
history_merged = merged_model.fit(gen_flow,
                                  steps_per_epoch=len(train_generator_rgb),
                                  epochs=70,
                                  shuffle=True,  # NOTE: ignored for generator input
                                  )
Code for image generators:
# Augmentation is applied to the training stream only; validation and
# test streams get the same 1/255 rescale but no random transforms.
_augmentation = dict(
    rotation_range=20,
    width_shift_range=0.4,
    height_shift_range=0.4,
    shear_range=0.4,
    zoom_range=0.4,
    horizontal_flip=True,
    fill_mode='nearest',
)
train_datagen = ImageDataGenerator(rescale=1./255, **_augmentation)
val_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
# ########################### RGB ###########################
# Both modalities share the target size and categorical label mode;
# only the directory, batch size, and colour mode differ per stream.
_flow_kwargs = dict(target_size=shape, class_mode='categorical')

print("RGB Generators: \n")
train_generator_rgb = train_datagen.flow_from_directory(
    directory=train_data_rgb, batch_size=16, **_flow_kwargs)
val_generator_rgb = val_datagen.flow_from_directory(
    directory=val_data_rgb, batch_size=12, **_flow_kwargs)
# ########################### --- ###########################
# ########################### DEPTH ###########################
print("\n\nDepth Generators: \n")
train_generator_depth = train_datagen.flow_from_directory(
    directory=train_data_depth, batch_size=16,
    color_mode="grayscale", **_flow_kwargs)
val_generator_depth = val_datagen.flow_from_directory(
    directory=val_data_depth, batch_size=12,
    color_mode="grayscale", **_flow_kwargs)
# ########################### ----- ###########################
# ########################### ----- ###########################
def gen_flow_for_two_inputs(X1, X2):
    """Yield ([rgb_batch, depth_batch], labels) tuples for the merged model.

    Draws one batch from each single-modality training iterator per step.
    The RGB labels are used as the training target, so the two directory
    iterators must stay in step for the pairing to be meaningful.

    X1/X2 are accepted for API compatibility but unused: the function
    reads the module-level train_generator_rgb/train_generator_depth.
    """
    # NOTE(review): the two flow_from_directory iterators shuffle
    # independently by default — create them with a shared `seed`
    # (or shuffle=False) so RGB/depth batches stay aligned. TODO confirm.
    while True:
        rgb_batch = next(train_generator_rgb)
        depth_batch = next(train_generator_depth)
        # BUG FIX: the original yielded depth_batch[1] (the depth *labels*,
        # shape (batch, n_classes) -> (None, None)) as the depth-image
        # input, which triggered the "expected min_ndim=4, found ndim=2"
        # ValueError. Each batch tuple is (images, labels): images are
        # element [0].
        yield [rgb_batch[0], depth_batch[0]], rgb_batch[1]
# Create generator
# Instantiate the combined two-input generator consumed by merged_model.fit.
# NOTE(review): the arguments are the directory *paths*, but the function
# ignores them and reads the module-level iterators directly — verify.
gen_flow = gen_flow_for_two_inputs(train_data_rgb, train_data_depth)