Cannot convert a symbolic Keras input/output to a numpy array error when subclassing tf.keras.Model

  keras, python, tensorflow

I’m trying to build an UNet type architecture with EfficientNet backbone from tensorflow.keras.applications, and I’m running into this error when fitting the model, even though I don’t use any numpy computations within the model definition.

TypeError: Cannot convert a symbolic Keras input/output to a numpy array. This error may indicate that you're trying to pass a symbolic value to a NumPy call, which is not supported. Or, you may be trying to pass Keras symbolic inputs/outputs to a TF API that does not register dispatching, preventing Keras from automatically converting the API call to a lambda layer in the Functional Model.

I suspect that the error comes from the way I defined the backbone, but the thing is, the model build works fine, and the number of parameters are correct.

def decoder_block(x, skip_layer, filters, kernel_size, dropout_rate=0.1, **kwargs):
    x = layers.Conv2DTranspose(filters=filters, kernel_size=kernel_size,
                               strides=(2, 2), padding="same", **s_config)(x)
    x = layers.Concatenate()([x, skip_layer])
    x = layers.Conv2D(filters=filters, kernel_size=kernel_size, strides=(1, 1),
                      padding="same", **s_config)(x)
    x = layers.BatchNormalization()(x)
    x = layers.AlphaDropout(rate=dropout_rate, seed=global_seed)(x)

    x = layers.Conv2D(filters=filters, kernel_size=kernel_size, strides=(1, 1),
                      padding="same", **s_config)(x)
    x = layers.BatchNormalization()(x)
    return x

class UNet(tf.keras.Model):
    def __init__(self,
                 kernel_size=(3, 3),
                 local_pooling_layer=layers.MaxPool2D((2, 2)),
        super().__init__(self, name="UNet", **kwargs)
        if model_size not in ["small", "base"]:
            model_size = "base"
            print("Model size input is not amongst {}. Model size is defaulted to base.".format(["small", "base"]))
        self.model_size = model_size
        self.num_classes = num_classes
        self.img_size = img_size
        self.filters_root = filters_root
        self.kernel_size = kernel_size
        self.backbone = backbone if backbone else get_unet_backbone(
            backbone_name="Efficient_net", pretrained_weights="imagenet")
        self.backbone.trainable = False  # we aren't training the encoder backbone
        self.dropout_rate = dropout_rate  # dropout is applied in each conv_block

    def call(self, input_layer):
        # set backbone encoder to take input
        x = self.backbone(input_layer)

        # encoder layers in order of top down (with input intake at the top)
        encoder_output_layer_names = [ for l in self.backbone.layers if "a_activation" in]
        encoder = [self.backbone.get_layer(name) for name in encoder_output_layer_names]
        if self.model_size == "small":
            encoder = encoder[:3] + encoder[-2:]

        # bridge connection
        bridge = encoder.pop(-1).output

        # decoder layers
        for idx in range(len(encoder) - 1, 0, -1):
            x = decoder_block(bridge,
                              filters=self.filters_root * 2 ** idx,

        # output layer
        outputs = layers.Conv2D(1, kernel_size=(1, 1), padding="same",

        return outputs

Source: Python Questions