Negative dimension size caused by subtracting 2 from 1 with padding added

  keras, python, tensorflow

This is my model-

class CNNHyperModel(HyperModel):
    def __init__(self, input_shape, num_classes):
        self.input_shape = input_shape
        self.num_classes = num_classes

    def build(self, hp):
        model = keras.Sequential()

  


        model.add(
            Conv1D(
                
                filters=hp.Int(
                    "neurons_1", min_value=20, max_value=400, step=20, default=200,),

                
                kernel_size=hp.Int("kernal_sizes_1", min_value=1, max_value=10, step=1, default=3,),
                strides=hp.Int("stride_sizes_1", min_value=1, max_value=10, step=1, default=3,),
                activation="relu",
                input_shape=(25,1),
                padding="same",
            )
        )

        model.add(
            Dropout(
                rate=hp.Float(
                    "dropout_1", min_value=0.0, max_value=0.5, default=0.25, step=0.05,
                )
            )
        )

        model.add(BatchNormalization())


        model.add(
            Conv1D(
                
                filters=hp.Int(
                    "neurons_2", min_value=20, max_value=400, step=20, default=200,),

                
                kernel_size=hp.Int("kernal_sizes_2", min_value=1, max_value=10, step=1, default=3,),
                strides=hp.Int("stride_sizes_2", min_value=1, max_value=10, step=1, default=3,),
                activation="relu",
                padding="same",
            )
        )

        model.add(
            Dropout(
                rate=hp.Float(
                    "dropout_2", min_value=0.0, max_value=0.5, default=0.25, step=0.05,
                )
            )
        )

        model.add(BatchNormalization())

        model.add(MaxPooling1D(pool_size=2))

        model.add(
            Dropout(
                rate=hp.Float(
                    "dropout_3", min_value=0.0, max_value=0.5, default=0.25, step=0.05,
                )
            )
        )

        model.add(Flatten())

        model.add(
            Dense(
                units=hp.Int(
                    "dense_units_1", min_value=20, max_value=400, step=20, default=100
                ),
                activation=hp.Choice(
                    "dense_activation",
                    values=["relu"],
                    default="relu",
                ),
            )
        )

        model.add(
            Dropout(
                rate=hp.Float(
                    "dropout_4", min_value=0.0, max_value=0.5, default=0.25, step=0.05,
                )
            )
        )

        model.add(
            Dense(
                units=hp.Int(
                    "dense_units_2", min_value=20, max_value=400, step=20, default=100
                ),
                activation=hp.Choice(
                    "dense_activation",
                    values=["relu"],
                    default="relu",
                ),
            )
        )

        model.add(
            Dropout(
                rate=hp.Float(
                    "dropout_4", min_value=0.0, max_value=0.5, default=0.25, step=0.05,
                )
            )
        )

        model.add(
            Dense(
                units=hp.Int(
                    "dense_units_3", min_value=20, max_value=400, step=20, default=100
                ),
                activation=hp.Choice(
                    "dense_activation",
                    values=["relu"],
                    default="relu",
                ),
            )
        )

        model.add(
            Dropout(
                rate=hp.Float(
                    "dropout_4", min_value=0.0, max_value=0.5, default=0.25, step=0.05,
                )
            )
        )

        model.add(Dense(1, activation="sigmoid"))



        model.compile(
            optimizer=keras.optimizers.Adam(
                hp.Float(
                    "learning_rate",
                    min_value=1e-6,
                    max_value=1e-2,
                    sampling="LOG",
                    default=1e-3,
                )
            ),
            loss="binary_crossentropy",
            metrics=[f1_m],
        )
        return model

The error I’m getting-

ValueError: in user code:    /usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/training.py:855 train_function  *        return step_function(self, iterator)    /usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/training.py:845 step_function  **        outputs = model.distribute_strategy.run(run_step, args=(data,))    /usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/distribute_lib.py:1285 run        return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)    /usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/distribute_lib.py:2833 call_for_each_replica        return self._call_for_each_replica(fn, args, kwargs)    /usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/distribute_lib.py:3608 _call_for_each_replica        return fn(*args, **kwargs)    /usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/training.py:838 run_step  **        outputs = model.train_step(data)    /usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/training.py:795 train_step        y_pred = self(x, training=True)    /usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/base_layer.py:1030 __call__        outputs = call_fn(inputs, *args, **kwargs)    /usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/sequential.py:394 call        outputs = layer(inputs, **kwargs)    /usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/base_layer.py:1030 __call__        outputs = call_fn(inputs, *args, **kwargs)    /usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/functional.py:1447 call        return getattr(self._module, self._method_name)(*args, **kwargs)    /usr/local/lib/python3.7/dist-packages/keras/engine/base_layer.py:1006 __call__        outputs = call_fn(inputs, *args, **kwargs)    /usr/local/lib/python3.7/dist-packages/keras/layers/pooling.py:73 call        data_format=self.data_format)    /usr/local/lib/python3.7/dist-packages/tensorflow/python/util/dispatch.py:206 wrapper        return target(*args, **kwargs)    /usr/local/lib/python3.7/dist-packages/keras/backend.py:5706 pool2d        x, pool_size, strides, padding=padding, data_format=tf_data_format)    /usr/local/lib/python3.7/dist-packages/tensorflow/python/util/dispatch.py:206 wrapper        return target(*args, **kwargs)    /usr/local/lib/python3.7/dist-packages/tensorflow/python/ops/nn_ops.py:4660 max_pool        name=name)    /usr/local/lib/python3.7/dist-packages/tensorflow/python/ops/gen_nn_ops.py:5344 max_pool        data_format=data_format, name=name)    /usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/op_def_library.py:750 _apply_op_helper        attrs=attr_protos, op_def=op_def)    /usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/func_graph.py:601 _create_op_internal        compute_device)    /usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py:3565 _create_op_internal        op_def=op_def)    /usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py:2042 __init__        control_input_ops, op_def)    /usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py:1883 _create_c_op        raise ValueError(str(e)) 




 ValueError: Negative dimension size caused by subtracting 2 from 1 for '{{node sequential/module_wrapper_4/max_pooling1d/MaxPool}} = MaxPool[T=DT_FLOAT, data_format="NHWC", explicit_paddings=[], ksize=[1, 2, 1, 1], padding="VALID", strides=[1, 2, 1, 1]](sequential/module_wrapper_4/max_pooling1d/ExpandDims)' with input shapes: [?,1,1,360]

This happened in the second epoch of my tuning-

enter image description here

Source: Python Questions

LEAVE A COMMENT