How to create a sklearn Pipeline that includes feature selection and KerasClassifier? Issue with input_dim changing during GridSearchCV

I have created a sklearn Pipeline that uses SelectPercentile(f_classif) for feature selection, piped into a KerasClassifier. The percentile used by SelectPercentile is a hyperparameter in the grid search. This means the input dimensions will vary during grid search, and I have been unsuccessful in getting the input_dim of the KerasClassifier to adapt accordingly.

I don't think there is a way to access the reduced data dimension being piped into the KerasClassifier within sklearn's GridSearchCV. Maybe there's a way to have a single hyperparameter shared between SelectPercentile and the KerasClassifier in the Pipeline (so that the percentile hyperparameter can determine input_dim)? I suppose a possible solution could be to build a custom classifier that wraps the two steps of the pipeline into a single step, so that the percentile hyperparameter can be shared; a rough sketch of that idea follows the code below.

So far, fitting consistently fails with variations of "ValueError: Error when checking input: expected dense_1_input to have shape (112,) but got array with shape (23,)".

import numpy as np
from keras.layers import Dense
from keras.models import Sequential

def create_baseline(input_dim=10, init='normal', activation_1='relu', activation_2='relu', optimizer='SGD'):
    # Create model
    model = Sequential()
    # NB: the input_dim argument is effectively ignored here; the first layer is
    # always sized to the full, pre-selection width of the global X_train
    model.add(Dense(50, input_dim=np.shape(X_train)[1], kernel_initializer=init, activation=activation_1))
    model.add(Dense(25, kernel_initializer=init, activation=activation_2))
    model.add(Dense(1, kernel_initializer=init, activation='sigmoid'))
    # Compile model
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=["accuracy"])
    return model

tuned_parameters = dict(
    anova__percentile=[20, 40, 60, 80],
    NN__optimizer=['SGD', 'Adam'],
    NN__init=['glorot_normal', 'glorot_uniform'],
    NN__activation_1=['relu', 'sigmoid'],
    NN__activation_2=['relu', 'sigmoid'],
    NN__batch_size=[32, 64, 128, 256]
)

from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.feature_selection import SelectPercentile, f_classif
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.pipeline import Pipeline

kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=2)
for train_indices, test_indices in kfold.split(data, labels):
    # Split data
    X_train = [data[idx] for idx in train_indices]
    y_train = [labels[idx] for idx in train_indices]
    X_test = [data[idx] for idx in test_indices]
    y_test = [labels[idx] for idx in test_indices]

    # Pipe feature selection and classifier together
    anova = SelectPercentile(f_classif)
    NN = KerasClassifier(build_fn=create_baseline, epochs=1000, verbose=0)
    clf = Pipeline([('anova', anova), ('NN', NN)])      

    # Train model
    clf = GridSearchCV(clf, tuned_parameters, scoring='balanced_accuracy', n_jobs=-1, cv=kfold)
    clf.fit(X_train, y_train)
    # Test model
    y_true, y_pred = y_test, clf.predict(X_test)
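
For concreteness, here is a rough sketch of the wrapper idea mentioned above. It is purely illustrative: AnovaKerasClassifier is a hypothetical name, and the sketch assumes create_baseline actually uses its input_dim argument rather than the global X_train.

from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.feature_selection import SelectPercentile, f_classif

class AnovaKerasClassifier(BaseEstimator, ClassifierMixin):
    """Hypothetical single-step wrapper: percentile drives both the
    feature selection and the network's input_dim."""
    def __init__(self, percentile=10, optimizer='SGD'):
        self.percentile = percentile
        self.optimizer = optimizer

    def fit(self, X, y):
        self.selector_ = SelectPercentile(f_classif, percentile=self.percentile)
        X_sel = self.selector_.fit_transform(X, y)
        # input_dim is known here: it is the width of the selected data
        self.clf_ = KerasClassifier(build_fn=create_baseline,
                                    input_dim=X_sel.shape[1],
                                    optimizer=self.optimizer,
                                    epochs=1000, verbose=0)
        self.clf_.fit(X_sel, y)
        return self

    def predict(self, X):
        return self.clf_.predict(self.selector_.transform(X))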

2 Answers

The solution I found was to declare a global variable holding the transformed X inside ANOVASelection's transform step, and then read that variable when defining input_dim in create_model. It is a hack, but it holds up under grid search: each worker runs the pipeline's transform and the subsequent model build in the same process, so the global stays consistent within a fit.

# Custom transformer that lets the classifier see the shape of the transformed X
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_selection import SelectPercentile, f_classif

class ANOVASelection(BaseEstimator, TransformerMixin):
    def __init__(self, percentile=10):
        self.percentile = percentile
        self.m = None
        self.X_new = None
        self.scores_ = None

    def fit(self, X, y):
        self.m = SelectPercentile(f_classif, percentile=self.percentile)
        self.m.fit(X, y)
        self.scores_ = self.m.scores_
        return self

    def transform(self, X):
        # Publish the transformed X as a module-level global so that
        # create_model can read its width when setting input_dim
        global X_new
        self.X_new = self.m.transform(X)
        X_new = self.X_new
        return self.X_new


# Define neural net architecture
import numpy as np
from keras import regularizers
from keras.backend import clear_session
from keras.layers import Dense
from keras.models import Sequential

def create_model(init='normal', activation_1='relu', activation_2='relu', optimizer='SGD', decay=0.1):
    clear_session()
    # Determine nodes in hidden layers (Huang et al., 2003)
    m = 1                  # number of output neurons
    N = np.shape(data)[0]  # number of samples (reads the global `data`)
    hn_1 = int(np.sqrt((m+2)*N) + 2*np.sqrt(N/(m+2)))
    hn_2 = int(m*np.sqrt(N/(m+2)))

    # SGD uses an L2 strength of decay/2, AdamW uses decay
    l2 = decay/2 if optimizer == 'SGD' else decay

    # Create layers; input_dim comes from the global X_new set by ANOVASelection
    model = Sequential()
    model.add(Dense(hn_1, input_dim=np.shape(X_new)[1], kernel_initializer=init,
                    kernel_regularizer=regularizers.l2(l2), activation=activation_1))
    model.add(Dense(hn_2, kernel_initializer=init, kernel_regularizer=regularizers.l2(l2),
                    activation=activation_2))
    model.add(Dense(1, kernel_initializer=init, activation='sigmoid'))

    # AdamW is a third-party optimizer (not part of core Keras)
    opt = AdamW() if optimizer == 'AdamW' else optimizer
    model.compile(loss='binary_crossentropy', optimizer=opt, metrics=["accuracy"])
    return model


tuned_parameters = dict(
    ANOVA__percentile=[20, 40, 60, 80],
    NN__optimizer=['SGD', 'AdamW'],
    NN__init=['glorot_normal', 'glorot_uniform'],
    NN__activation_1=['relu', 'sigmoid'],
    NN__activation_2=['relu', 'sigmoid'],
    NN__batch_size=[32, 64, 128, 256],
    NN__decay=[10.0**i for i in range(-10, 0) if i % 2 == 1]  # 1e-9, 1e-7, ..., 1e-1
)

import tempfile

from joblib import Memory
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=2)
for train_indices, test_indices in kfold.split(data, labels):
    # Ensure models from last iteration have been cleared.
    clear_session()

    # Cyclical learning rate (CyclicLR is a third-party Keras callback)
    clr = CyclicLR(mode='triangular', base_lr=0.001, max_lr=0.6, step_size=5)

    # Split data
    X_train = [data[idx] for idx in train_indices]
    y_train = [labels[idx] for idx in train_indices]
    X_test = [data[idx] for idx in test_indices]
    y_test = [labels[idx] for idx in test_indices]

    # Apply mean and variance center based on training fold
    scaler = StandardScaler().fit(X_train)
    X_train = scaler.transform(X_train)
    X_test = scaler.transform(X_test)

    # Memory handling: memoize f_classif so repeated ANOVA scoring hits a disk cache
    cachedir = tempfile.mkdtemp()
    mem = Memory(location=cachedir, verbose=0)
    f_classif = mem.cache(f_classif)

    # Build and train model
    ANOVA = ANOVASelection(percentile=5)
    NN = KerasClassifier(build_fn=create_model, epochs=1000, verbose=0)
    clf = Pipeline([('ANOVA', ANOVA), ('NN', NN)])
    clf = GridSearchCV(clf, tuned_parameters, scoring='balanced_accuracy', n_jobs=28, cv=kfold)
    clf.fit(X_train, y_train, NN__callbacks=[clr])

    # Test model
    y_true, y_pred = y_test, clf.predict(X_test)


One alternative solution, which worked for me, is to inherit from KerasClassifier and set the input_dim via set_params in the fit function, before calling super().fit(X, y). This works with scikit-learn 0.24.0 and Keras 2.4.3.

Here is a full example:

First, the inheriting class; this is the main addition compared to normal usage:

from keras.wrappers.scikit_learn import KerasClassifier

class InputDimPredictingKerasClassifier(KerasClassifier):
    def fit(self, X, y, **kwargs):
        # Infer input_dim from the (possibly feature-selected) training data
        super().set_params(**{"input_dim": X.shape[1]})
        return super().fit(X, y, **kwargs)

Normal usage, with the model then built via InputDimPredictingKerasClassifier:

import keras
from keras.layers import Dense
from keras.models import Sequential


def build_mlp(
    input_dim: int = 23,  # just a default value; overwritten in fit()
    output_dim: int = 6,
) -> Sequential:
    model = Sequential()
    model.add(keras.Input(shape=(input_dim,)))
    model.add(Dense(11, activation="relu"))
    model.add(Dense(output_dim, activation="softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="adam")
    return model


def get_mlp(num_of_classes: int) -> InputDimPredictingKerasClassifier:
    model = InputDimPredictingKerasClassifier(
        build_fn=build_mlp,
        output_dim=num_of_classes,
    )
    return model
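
For completeness, here is a rough sketch of how this could plug into the question's pipeline; pipe, param_grid, and search are illustrative names, and the grid is trimmed to the percentile parameter:

from sklearn.feature_selection import SelectPercentile, f_classif
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline

# input_dim no longer needs to appear in the grid:
# InputDimPredictingKerasClassifier infers it from whatever
# SelectPercentile passes through on each grid-search candidate.
pipe = Pipeline([
    ('anova', SelectPercentile(f_classif)),
    ('NN', get_mlp(num_of_classes=2)),
])
param_grid = {'anova__percentile': [20, 40, 60, 80]}
search = GridSearchCV(pipe, param_grid, scoring='balanced_accuracy', cv=5)
# search.fit(X_train, y_train)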


