Module imodelsx.kan.kan_sklearn

Source code
import torch
from sklearn.model_selection import train_test_split
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from sklearn.utils.multiclass import check_classification_targets
import numpy as np
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
from imodelsx.kan.kan_modules import KANModule, KANGAMModule
from sklearn.datasets import make_classification, make_regression
from sklearn.metrics import accuracy_score
from typing import List


class KAN(BaseEstimator):
    def __init__(self,
                 hidden_layer_size: int = 64,
                 hidden_layer_sizes: List[int] = None,
                 regularize_activation: float = 1.0, regularize_entropy: float = 1.0, regularize_ridge: float = 0.0,
                 device: str = 'cpu',
                 **kwargs):
        '''
        Params
        ------
        hidden_layer_size : int
            If int, number of neurons in the hidden layer (assumes single hidden layer)
        hidden_layer_sizes: List with length (n_layers - 2)
            The ith element represents the number of neurons in the ith hidden layer.
            If this is passed, will override hidden_layer_size
            e.g. [32, 64] would have a layer with 32 hidden units followed by a layer with 64 hidden units
            (input and output shape are inferred by the data passed)
        regularize_activation: float
            Activation regularization parameter
        regularize_entropy: float
            Entropy regularization parameter
        regularize_ridge: float
            Ridge regularization parameter (only applies to KANGAM)
        kwargs can be any of these more detailed KAN parameters
            grid_size=5,
            spline_order=3,
            scale_noise=0.1,
            scale_base=1.0,
            scale_spline=1.0,
            base_activation=torch.nn.SiLU,
            grid_eps=0.02,
            grid_range=[-1, 1],
        '''
        if hidden_layer_sizes is not None:
            self.hidden_layer_sizes = hidden_layer_sizes
        else:
            self.hidden_layer_sizes = [hidden_layer_size]
        self.device = device
        self.regularize_activation = regularize_activation
        self.regularize_entropy = regularize_entropy
        self.regularize_ridge = regularize_ridge
        self.kwargs = kwargs

    def fit(self, X, y, batch_size=512, lr=1e-3, weight_decay=1e-4, gamma=0.8):
        if isinstance(self, ClassifierMixin):
            check_classification_targets(y)
            self.classes_, y = np.unique(y, return_inverse=True)
            num_outputs = len(self.classes_)
            y = torch.tensor(y, dtype=torch.long)
        else:
            num_outputs = 1
            y = torch.tensor(y, dtype=torch.float32)
        X = torch.tensor(X, dtype=torch.float32)
        num_features = X.shape[1]

        if isinstance(self, (KANGAMClassifier, KANGAMRegressor)):
            self.model = KANGAMModule(
                num_features=num_features,
                layers_hidden=self.hidden_layer_sizes,
                n_classes=num_outputs,
                **self.kwargs
            ).to(self.device)
        else:
            self.model = KANModule(
                layers_hidden=[num_features] +
                self.hidden_layer_sizes + [num_outputs],
            ).to(self.device)

        X_train, X_tune, y_train, y_tune = train_test_split(
            X, y, test_size=0.2, random_state=42)

        dset_train = torch.utils.data.TensorDataset(X_train, y_train)
        dset_tune = torch.utils.data.TensorDataset(X_tune, y_tune)
        loader_train = DataLoader(
            dset_train, batch_size=batch_size, shuffle=True)
        loader_tune = DataLoader(
            dset_tune, batch_size=batch_size, shuffle=False)

        optimizer = optim.AdamW(self.model.parameters(),
                                lr=lr, weight_decay=weight_decay)
        scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=gamma)

        # Define loss
        if isinstance(self, ClassifierMixin):
            criterion = nn.CrossEntropyLoss()
        else:
            criterion = nn.MSELoss()
        tune_losses = []
        for epoch in tqdm(range(100)):
            self.model.train()
            for x, labs in loader_train:
                x = x.view(-1, num_features).to(self.device)
                optimizer.zero_grad()
                output = self.model(x).squeeze()
                loss = criterion(output, labs.to(self.device).squeeze())
                if isinstance(self, (KANGAMClassifier, KANGAMRegressor)):
                    loss += self.model.regularization_loss(
                        self.regularize_activation, self.regularize_entropy, self.regularize_ridge)
                else:
                    loss += self.model.regularization_loss(
                        self.regularize_activation, self.regularize_entropy)

                loss.backward()
                optimizer.step()

            # Validation
            self.model.eval()
            tune_loss = 0
            with torch.no_grad():
                for x, labs in loader_tune:
                    x = x.view(-1, num_features).to(self.device)
                    output = self.model(x).squeeze()
                    tune_loss += criterion(output,
                                           labs.to(self.device).squeeze()).item()
            tune_loss /= len(loader_tune)
            tune_losses.append(tune_loss)
            scheduler.step()

            # apply early stopping
            if len(tune_losses) > 3 and tune_losses[-1] > tune_losses[-2]:
                print("\tEarly stopping")
                return self

        return self

    @torch.no_grad()
    def predict(self, X):
        X = torch.tensor(X, dtype=torch.float32).to(self.device)
        output = self.model(X)
        if isinstance(self, ClassifierMixin):
            return self.classes_[output.argmax(dim=1).cpu().numpy()]
        else:
            return output.cpu().numpy()


class KANClassifier(KAN, ClassifierMixin):
    @torch.no_grad()
    def predict_proba(self, X):
        X = torch.tensor(X, dtype=torch.float32).to(self.device)
        output = self.model(X)
        return torch.nn.functional.softmax(output, dim=1).cpu().numpy()


class KANRegressor(KAN, RegressorMixin):
    pass


class KANGAMClassifier(KANClassifier):
    pass


class KANGAMRegressor(KANRegressor):
    pass


if __name__ == '__main__':
    # classification
    X, y = make_classification(n_samples=1000, n_features=8, n_informative=2)
    for m in [KANClassifier, KANGAMClassifier]:
        model = m(hidden_layer_size=64, device='cpu',
                  regularize_activation=1.0, regularize_entropy=1.0)
        model.fit(X, y)
        y_pred = model.predict(X)
        print('Test acc', accuracy_score(y, y_pred), m)

    # regression
    X, y = make_regression(n_samples=1000, n_features=8, n_informative=2)
    for m in [KANRegressor, KANGAMRegressor]:
        model = m(hidden_layer_size=64, device='cpu',
                  regularize_activation=1.0, regularize_entropy=1.0)
        model.fit(X, y)
        y_pred = model.predict(X)
        print('Test correlation', np.corrcoef(y, y_pred.flatten())[0, 1], m)

Classes

class KAN (hidden_layer_size: int = 64, hidden_layer_sizes: List[int] = None, regularize_activation: float = 1.0, regularize_entropy: float = 1.0, regularize_ridge: float = 0.0, device: str = 'cpu', **kwargs)

Base class for all estimators in scikit-learn.

Inheriting from this class provides default implementations of:

  • setting and getting parameters used by GridSearchCV and friends;
  • textual and HTML representation displayed in terminals and IDEs;
  • estimator serialization;
  • parameters validation;
  • data validation;
  • feature names validation.

Read more in the scikit-learn User Guide on rolling your own estimator.

Notes

All estimators should specify all the parameters that can be set at the class level in their __init__ as explicit keyword arguments (no *args or **kwargs).

Examples

>>> import numpy as np
>>> from sklearn.base import BaseEstimator
>>> class MyEstimator(BaseEstimator):
...     def __init__(self, *, param=1):
...         self.param = param
...     def fit(self, X, y=None):
...         self.is_fitted_ = True
...         return self
...     def predict(self, X):
...         return np.full(shape=X.shape[0], fill_value=self.param)
>>> estimator = MyEstimator(param=2)
>>> estimator.get_params()
{'param': 2}
>>> X = np.array([[1, 2], [2, 3], [3, 4]])
>>> y = np.array([1, 0, 1])
>>> estimator.fit(X, y).predict(X)
array([2, 2, 2])
>>> estimator.set_params(param=3).fit(X, y).predict(X)
array([3, 3, 3])

Params

hidden_layer_size : int
    Number of neurons in the hidden layer (assumes a single hidden layer).
hidden_layer_sizes : List[int], length (n_layers - 2)
    The ith element is the number of neurons in the ith hidden layer. If passed, this
    overrides hidden_layer_size, e.g. [32, 64] gives a layer with 32 hidden units followed
    by a layer with 64 hidden units (input and output sizes are inferred from the data).
regularize_activation : float
    Activation regularization parameter.
regularize_entropy : float
    Entropy regularization parameter.
regularize_ridge : float
    Ridge regularization parameter (only applies to KANGAM).
**kwargs
    Any of the more detailed KAN parameters: grid_size=5, spline_order=3, scale_noise=0.1,
    scale_base=1.0, scale_spline=1.0, base_activation=torch.nn.SiLU, grid_eps=0.02,
    grid_range=[-1, 1].
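
A minimal usage sketch (hypothetical layer sizes and synthetic data; the estimator follows the standard scikit-learn fit/predict interface shown in the source above):

from sklearn.datasets import make_classification
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from imodelsx.kan.kan_sklearn import KANClassifier

X, y = make_classification(n_samples=1000, n_features=8, n_informative=4)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# two hidden layers with 32 and 16 units; input/output sizes are inferred from the data
clf = KANClassifier(hidden_layer_sizes=[32, 16], device='cpu')
clf.fit(X_train, y_train)
print('test acc', accuracy_score(y_test, clf.predict(X_test)))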


Ancestors

  • sklearn.base.BaseEstimator
  • sklearn.utils._estimator_html_repr._HTMLDocumentationLinkMixin
  • sklearn.utils._metadata_requests._MetadataRequester

Subclasses

  • KANClassifier
  • KANRegressor

Methods

def fit(self, X, y, batch_size=512, lr=0.001, weight_decay=0.0001, gamma=0.8)
Source code
def fit(self, X, y, batch_size=512, lr=1e-3, weight_decay=1e-4, gamma=0.8):
    if isinstance(self, ClassifierMixin):
        check_classification_targets(y)
        self.classes_, y = np.unique(y, return_inverse=True)
        num_outputs = len(self.classes_)
        y = torch.tensor(y, dtype=torch.long)
    else:
        num_outputs = 1
        y = torch.tensor(y, dtype=torch.float32)
    X = torch.tensor(X, dtype=torch.float32)
    num_features = X.shape[1]

    if isinstance(self, (KANGAMClassifier, KANGAMRegressor)):
        self.model = KANGAMModule(
            num_features=num_features,
            layers_hidden=self.hidden_layer_sizes,
            n_classes=num_outputs,
            **self.kwargs
        ).to(self.device)
    else:
        self.model = KANModule(
            layers_hidden=[num_features] +
            self.hidden_layer_sizes + [num_outputs],
        ).to(self.device)

    X_train, X_tune, y_train, y_tune = train_test_split(
        X, y, test_size=0.2, random_state=42)

    dset_train = torch.utils.data.TensorDataset(X_train, y_train)
    dset_tune = torch.utils.data.TensorDataset(X_tune, y_tune)
    loader_train = DataLoader(
        dset_train, batch_size=batch_size, shuffle=True)
    loader_tune = DataLoader(
        dset_tune, batch_size=batch_size, shuffle=False)

    optimizer = optim.AdamW(self.model.parameters(),
                            lr=lr, weight_decay=weight_decay)
    scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=gamma)

    # Define loss
    if isinstance(self, ClassifierMixin):
        criterion = nn.CrossEntropyLoss()
    else:
        criterion = nn.MSELoss()
    tune_losses = []
    for epoch in tqdm(range(100)):
        self.model.train()
        for x, labs in loader_train:
            x = x.view(-1, num_features).to(self.device)
            optimizer.zero_grad()
            output = self.model(x).squeeze()
            loss = criterion(output, labs.to(self.device).squeeze())
            if isinstance(self, (KANGAMClassifier, KANGAMRegressor)):
                loss += self.model.regularization_loss(
                    self.regularize_activation, self.regularize_entropy, self.regularize_ridge)
            else:
                loss += self.model.regularization_loss(
                    self.regularize_activation, self.regularize_entropy)

            loss.backward()
            optimizer.step()

        # Validation
        self.model.eval()
        tune_loss = 0
        with torch.no_grad():
            for x, labs in loader_tune:
                x = x.view(-1, num_features).to(self.device)
                output = self.model(x).squeeze()
                tune_loss += criterion(output,
                                       labs.to(self.device).squeeze()).item()
        tune_loss /= len(loader_tune)
        tune_losses.append(tune_loss)
        scheduler.step()

        # apply early stopping
        if len(tune_losses) > 3 and tune_losses[-1] > tune_losses[-2]:
            print("\tEarly stopping")
            return self

    return self
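
fit holds out 20% of the training data as a tuning set, optimizes with AdamW under an exponentially decayed learning rate, and stops early once the tuning loss increases (after a few warm-up epochs). The optimization hyperparameters are plain fit arguments, so they can be adjusted per call; an illustrative sketch with arbitrary values, reusing X_train and y_train from the earlier example:

# larger batches, higher initial learning rate, slower per-epoch decay
model = KANClassifier(hidden_layer_size=64)
model.fit(X_train, y_train, batch_size=256, lr=5e-3, weight_decay=1e-5, gamma=0.9)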
def predict(self, X)
Source code
@torch.no_grad()
def predict(self, X):
    X = torch.tensor(X, dtype=torch.float32).to(self.device)
    output = self.model(X)
    if isinstance(self, ClassifierMixin):
        return self.classes_[output.argmax(dim=1).cpu().numpy()]
    else:
        return output.cpu().numpy()
def set_fit_request(self: KAN, *, batch_size: Union[bool, None, str] = '$UNCHANGED$', gamma: Union[bool, None, str] = '$UNCHANGED$', lr: Union[bool, None, str] = '$UNCHANGED$', weight_decay: Union[bool, None, str] = '$UNCHANGED$') -> KAN

Request metadata passed to the fit method.

Note that this method is only relevant if enable_metadata_routing=True (see sklearn.set_config). Please see the scikit-learn User Guide on metadata routing for how the routing mechanism works.

The options for each parameter are:

  • True: metadata is requested, and passed to fit if provided. The request is ignored if metadata is not provided.

  • False: metadata is not requested and the meta-estimator will not pass it to fit.

  • None: metadata is not requested, and the meta-estimator will raise an error if the user provides it.

  • str: metadata should be passed to the meta-estimator with this given alias instead of the original name.

The default (sklearn.utils.metadata_routing.UNCHANGED) retains the existing request. This allows you to change the request for some parameters and not others.

Added in version: 1.3

Note

This method is only relevant if this estimator is used as a sub-estimator of a meta-estimator, e.g. inside a sklearn.pipeline.Pipeline. Otherwise it has no effect.

Parameters

batch_size : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for batch_size parameter in fit.
gamma : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for gamma parameter in fit.
lr : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for lr parameter in fit.
weight_decay : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for weight_decay parameter in fit.

Returns

self : object
The updated object.
Source code
def func(**kw):
    """Updates the request for provided parameters

    This docstring is overwritten below.
    See REQUESTER_DOC for expected functionality
    """
    if not _routing_enabled():
        raise RuntimeError(
            "This method is only available when metadata routing is enabled."
            " You can enable it using"
            " sklearn.set_config(enable_metadata_routing=True)."
        )

    if self.validate_keys and (set(kw) - set(self.keys)):
        raise TypeError(
            f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
            f" are: {set(self.keys)}"
        )

    requests = instance._get_metadata_request()
    method_metadata_request = getattr(requests, self.name)

    for prop, alias in kw.items():
        if alias is not UNCHANGED:
            method_metadata_request.add_request(param=prop, alias=alias)
    instance._metadata_request = requests

    return instance
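
A sketch of how the request is used in practice (assumes scikit-learn >= 1.4 with metadata routing enabled; the lr value is arbitrary):

import sklearn
from sklearn.model_selection import cross_validate
from sklearn.datasets import make_classification
from imodelsx.kan.kan_sklearn import KANClassifier

sklearn.set_config(enable_metadata_routing=True)

X, y = make_classification(n_samples=500, n_features=8)
# request that a user-supplied `lr` be forwarded to fit by meta-estimators
model = KANClassifier(hidden_layer_size=16).set_fit_request(lr=True)
results = cross_validate(model, X, y, cv=3, params={'lr': 1e-2})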
class KANClassifier (hidden_layer_size: int = 64, hidden_layer_sizes: List[int] = None, regularize_activation: float = 1.0, regularize_entropy: float = 1.0, regularize_ridge: float = 0.0, device: str = 'cpu', **kwargs)


Constructor parameters are identical to KAN (documented above). On top of the shared fit/predict interface, KANClassifier adds predict_proba and predicts class labels with a cross-entropy loss.

Source code
class KANClassifier(KAN, ClassifierMixin):
    @torch.no_grad()
    def predict_proba(self, X):
        X = torch.tensor(X, dtype=torch.float32).to(self.device)
        output = self.model(X)
        return torch.nn.functional.softmax(output, dim=1).cpu().numpy()

Ancestors

  • KAN
  • sklearn.base.BaseEstimator
  • sklearn.utils._estimator_html_repr._HTMLDocumentationLinkMixin
  • sklearn.utils._metadata_requests._MetadataRequester
  • sklearn.base.ClassifierMixin

Subclasses

  • KANGAMClassifier

Methods

def predict_proba(self, X)
Source code
@torch.no_grad()
def predict_proba(self, X):
    X = torch.tensor(X, dtype=torch.float32).to(self.device)
    output = self.model(X)
    return torch.nn.functional.softmax(output, dim=1).cpu().numpy()
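
For reference, a small self-contained sketch on synthetic data: predict_proba returns one softmax-normalized row per sample, with columns ordered as in classes_.

from sklearn.datasets import make_classification
from imodelsx.kan.kan_sklearn import KANClassifier

X, y = make_classification(n_samples=300, n_features=8, n_informative=3)
clf = KANClassifier(hidden_layer_size=32).fit(X, y)

proba = clf.predict_proba(X[:5])   # shape (5, n_classes); each row sums to ~1
print(proba.argmax(axis=1))        # column indices into clf.classes_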
def set_score_request(self: KANClassifier, *, sample_weight: Union[bool, None, str] = '$UNCHANGED$') -> KANClassifier

Request metadata passed to the score method.

Note that this method is only relevant if enable_metadata_routing=True (see sklearn.set_config). Please see the scikit-learn User Guide on metadata routing for how the routing mechanism works.

The options for each parameter are:

  • True: metadata is requested, and passed to score if provided. The request is ignored if metadata is not provided.

  • False: metadata is not requested and the meta-estimator will not pass it to score.

  • None: metadata is not requested, and the meta-estimator will raise an error if the user provides it.

  • str: metadata should be passed to the meta-estimator with this given alias instead of the original name.

The default (sklearn.utils.metadata_routing.UNCHANGED) retains the existing request. This allows you to change the request for some parameters and not others.

Added in version: 1.3

Note

This method is only relevant if this estimator is used as a sub-estimator of a meta-estimator, e.g. inside a sklearn.pipeline.Pipeline. Otherwise it has no effect.

Parameters

sample_weight : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for sample_weight parameter in score.

Returns

self : object
The updated object.

Inherited members

class KANGAMClassifier (hidden_layer_size: int = 64, hidden_layer_sizes: List[int] = None, regularize_activation: float = 1.0, regularize_entropy: float = 1.0, regularize_ridge: float = 0.0, device: str = 'cpu', **kwargs)


Constructor parameters are identical to KAN (documented above). This GAM-style variant is backed by KANGAMModule rather than KANModule, and it is the variant to which regularize_ridge applies.

Source code
class KANGAMClassifier(KANClassifier):
    pass

Ancestors

  • KANClassifier
  • KAN
  • sklearn.base.BaseEstimator
  • sklearn.utils._estimator_html_repr._HTMLDocumentationLinkMixin
  • sklearn.utils._metadata_requests._MetadataRequester
  • sklearn.base.ClassifierMixin

Inherited members

class KANGAMRegressor (hidden_layer_size: int = 64, hidden_layer_sizes: List[int] = None, regularize_activation: float = 1.0, regularize_entropy: float = 1.0, regularize_ridge: float = 0.0, device: str = 'cpu', **kwargs)


Constructor parameters are identical to KAN (documented above). The regression counterpart of KANGAMClassifier: backed by KANGAMModule, with regularize_ridge applied during training.

Source code
class KANGAMRegressor(KANRegressor):
    pass

Ancestors

  • KANRegressor
  • KAN
  • sklearn.base.BaseEstimator
  • sklearn.utils._estimator_html_repr._HTMLDocumentationLinkMixin
  • sklearn.utils._metadata_requests._MetadataRequester
  • sklearn.base.RegressorMixin

Inherited members

class KANRegressor (hidden_layer_size: int = 64, hidden_layer_sizes: List[int] = None, regularize_activation: float = 1.0, regularize_entropy: float = 1.0, regularize_ridge: float = 0.0, device: str = 'cpu', **kwargs)


Constructor parameters are identical to KAN (documented above). Predictions are real-valued outputs trained with a mean-squared-error loss.

Source code
class KANRegressor(KAN, RegressorMixin):
    pass

Ancestors

  • KAN
  • sklearn.base.BaseEstimator
  • sklearn.utils._estimator_html_repr._HTMLDocumentationLinkMixin
  • sklearn.utils._metadata_requests._MetadataRequester
  • sklearn.base.RegressorMixin

Subclasses

  • KANGAMRegressor

Methods

def set_score_request(self: KANRegressor, *, sample_weight: Union[bool, None, str] = '$UNCHANGED$') -> KANRegressor

See KANClassifier.set_score_request above; the parameters and behavior are identical.

Inherited members