Module imodelsx.kan.kan_sklearn
Classes
class KAN (hidden_layer_size: int = 64,
hidden_layer_sizes: List[int] = None,
regularize_activation: float = 1.0,
regularize_entropy: float = 1.0,
regularize_ridge: float = 0.0,
test_size=0.2,
random_state=42,
shuffle=True,
device: str = 'cpu',
**kwargs)
Source code (the fit and predict methods are reproduced in full under Methods below):

class KAN(BaseEstimator):
    def __init__(self, hidden_layer_size: int = 64, hidden_layer_sizes: List[int] = None,
                 regularize_activation: float = 1.0, regularize_entropy: float = 1.0,
                 regularize_ridge: float = 0.0, test_size=0.2, random_state=42,
                 shuffle=True, device: str = 'cpu', **kwargs):
        '''
        Params
        ------
        hidden_layer_size : int
            If int, number of neurons in the hidden layer (assumes a single hidden layer)
        hidden_layer_sizes : List with length (n_layers - 2)
            The ith element represents the number of neurons in the ith hidden layer.
            If this is passed, it overrides hidden_layer_size,
            e.g. [32, 64] gives a layer with 32 hidden units followed by a layer with
            64 hidden units (input and output shapes are inferred from the data passed).
        regularize_activation : float
            Activation regularization parameter
        regularize_entropy : float
            Entropy regularization parameter
        regularize_ridge : float
            Ridge regularization parameter (only applies to KANGAM)
        kwargs can be any of these more detailed KAN parameters:
            grid_size=5, spline_order=3, scale_noise=0.1, scale_base=1.0,
            scale_spline=1.0, base_activation=torch.nn.SiLU, grid_eps=0.02,
            grid_range=[-1, 1]
        '''
        if hidden_layer_sizes is not None:
            self.hidden_layer_sizes = hidden_layer_sizes
        else:
            self.hidden_layer_sizes = [hidden_layer_size]
        self.device = device
        self.regularize_activation = regularize_activation
        self.regularize_entropy = regularize_entropy
        self.regularize_ridge = regularize_ridge
        # store the train/tune split settings so fit() can use them
        self.test_size = test_size
        self.random_state = random_state
        self.shuffle = shuffle
        self.kwargs = kwargs
Base class for all estimators in scikit-learn.

Inheriting from this class provides default implementations of:

- setting and getting parameters used by GridSearchCV and friends;
- textual and HTML representation displayed in terminals and IDEs;
- estimator serialization;
- parameters validation;
- data validation;
- feature names validation.

Read more in the :ref:`User Guide <rolling_your_own_estimator>`.

Notes

All estimators should specify all the parameters that can be set at the class level in their __init__ as explicit keyword arguments (no *args or **kwargs).

Examples

>>> import numpy as np
>>> from sklearn.base import BaseEstimator
>>> class MyEstimator(BaseEstimator):
...     def __init__(self, *, param=1):
...         self.param = param
...     def fit(self, X, y=None):
...         self.is_fitted_ = True
...         return self
...     def predict(self, X):
...         return np.full(shape=X.shape[0], fill_value=self.param)
>>> estimator = MyEstimator(param=2)
>>> estimator.get_params()
{'param': 2}
>>> X = np.array([[1, 2], [2, 3], [3, 4]])
>>> y = np.array([1, 0, 1])
>>> estimator.fit(X, y).predict(X)
array([2, 2, 2])
>>> estimator.set_params(param=3).fit(X, y).predict(X)
array([3, 3, 3])
Params

hidden_layer_size : int
    If int, number of neurons in the hidden layer (assumes a single hidden layer)
hidden_layer_sizes : List with length (n_layers - 2)
    The ith element represents the number of neurons in the ith hidden layer.
    If this is passed, it overrides hidden_layer_size, e.g. [32, 64] gives a layer
    with 32 hidden units followed by a layer with 64 hidden units
    (input and output shapes are inferred from the data passed).
regularize_activation : float
    Activation regularization parameter
regularize_entropy : float
    Entropy regularization parameter
regularize_ridge : float
    Ridge regularization parameter (only applies to KANGAM)
kwargs can be any of these more detailed KAN parameters:
    grid_size=5, spline_order=3, scale_noise=0.1, scale_base=1.0,
    scale_spline=1.0, base_activation=torch.nn.SiLU, grid_eps=0.02,
    grid_range=[-1, 1]
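A minimal usage sketch of how hidden_layer_size and hidden_layer_sizes relate; the import path is this module, but the data and layer sizes are illustrative, not from the source:

import numpy as np
from imodelsx.kan.kan_sklearn import KANClassifier

# toy data: 200 samples, 4 features, 2 classes (illustrative only)
X = np.random.randn(200, 4)
y = (X[:, 0] + X[:, 1] > 0).astype(int)

# a single hidden layer with 64 units (the default behavior)
clf = KANClassifier(hidden_layer_size=64, device='cpu')

# two hidden layers, 32 then 64 units; this overrides hidden_layer_size
clf_deep = KANClassifier(hidden_layer_sizes=[32, 64], device='cpu')

clf.fit(X, y)
preds = clf.predict(X)   # array of class labels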
Ancestors
- sklearn.base.BaseEstimator
- sklearn.utils._repr_html.base.ReprHTMLMixin
- sklearn.utils._repr_html.base._HTMLDocumentationLinkMixin
- sklearn.utils._metadata_requests._MetadataRequester
Subclasses
- KANClassifier
- KANRegressor
Methods
def fit(self, X, y, batch_size=512, lr=0.001, weight_decay=0.0001, gamma=0.8)
Source code:

def fit(self, X, y, batch_size=512, lr=1e-3, weight_decay=1e-4, gamma=0.8):
    if isinstance(self, ClassifierMixin):
        check_classification_targets(y)
        self.classes_, y = np.unique(y, return_inverse=True)
        num_outputs = len(self.classes_)
        y = torch.tensor(y, dtype=torch.long)
    else:
        num_outputs = 1
        y = torch.tensor(y, dtype=torch.float32)
    X = torch.tensor(X, dtype=torch.float32)
    num_features = X.shape[1]

    # build the underlying torch module (GAM variant for the KANGAM estimators)
    if isinstance(self, (KANGAMClassifier, KANGAMRegressor)):
        self.model = KANGAMModule(
            num_features=num_features,
            layers_hidden=self.hidden_layer_sizes,
            n_classes=num_outputs,
            **self.kwargs
        ).to(self.device)
    else:
        self.model = KANModule(
            layers_hidden=[num_features] + self.hidden_layer_sizes + [num_outputs],
        ).to(self.device)

    # hold out a tuning split for early stopping (settings stored in __init__)
    X_train, X_tune, y_train, y_tune = train_test_split(
        X, y, test_size=self.test_size, random_state=self.random_state, shuffle=self.shuffle)
    dset_train = torch.utils.data.TensorDataset(X_train, y_train)
    dset_tune = torch.utils.data.TensorDataset(X_tune, y_tune)
    loader_train = DataLoader(dset_train, batch_size=batch_size, shuffle=True)
    loader_tune = DataLoader(dset_tune, batch_size=batch_size, shuffle=False)

    optimizer = optim.AdamW(self.model.parameters(), lr=lr, weight_decay=weight_decay)
    scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=gamma)

    # Define loss
    if isinstance(self, ClassifierMixin):
        criterion = nn.CrossEntropyLoss()
    else:
        criterion = nn.MSELoss()

    tune_losses = []
    for epoch in tqdm(range(100)):
        self.model.train()
        for x, labs in loader_train:
            x = x.view(-1, num_features).to(self.device)
            optimizer.zero_grad()
            output = self.model(x).squeeze()
            loss = criterion(output, labs.to(self.device).squeeze())
            if isinstance(self, (KANGAMClassifier, KANGAMRegressor)):
                loss += self.model.regularization_loss(
                    self.regularize_activation, self.regularize_entropy, self.regularize_ridge)
            else:
                loss += self.model.regularization_loss(
                    self.regularize_activation, self.regularize_entropy)
            loss.backward()
            optimizer.step()

        # Validation
        self.model.eval()
        tune_loss = 0
        with torch.no_grad():
            for x, labs in loader_tune:
                x = x.view(-1, num_features).to(self.device)
                output = self.model(x).squeeze()
                tune_loss += criterion(output, labs.to(self.device).squeeze()).item()
        tune_loss /= len(loader_tune)
        tune_losses.append(tune_loss)
        scheduler.step()

        # apply early stopping
        if len(tune_losses) > 3 and tune_losses[-1] > tune_losses[-2]:
            print("\tEarly stopping")
            return self
    return self
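For reference, a hedged sketch of passing the optional fit hyperparameters; the data and the specific values are illustrative, not recommendations from the source:

import numpy as np
from imodelsx.kan.kan_sklearn import KANRegressor

X = np.random.randn(500, 3)
y = X[:, 0] ** 2 + np.sin(X[:, 1])   # illustrative regression target

reg = KANRegressor(hidden_layer_size=32, device='cpu')
# batch_size: minibatch size for the train/tune DataLoaders
# lr, weight_decay: AdamW learning rate and L2 penalty
# gamma: per-epoch decay factor for the ExponentialLR scheduler
reg.fit(X, y, batch_size=128, lr=5e-3, weight_decay=1e-5, gamma=0.9)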
def predict(self, X)
Source code:

@torch.no_grad()
def predict(self, X):
    X = torch.tensor(X, dtype=torch.float32).to(self.device)
    output = self.model(X)
    if isinstance(self, ClassifierMixin):
        return self.classes_[output.argmax(dim=1).cpu().numpy()]
    else:
        return output.cpu().numpy()
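The return type depends on the mixin: classifiers map the argmax of the network output back to the original labels stored in self.classes_, while regressors return the raw network output as a numpy array. A short sketch (data illustrative):

import numpy as np
from imodelsx.kan.kan_sklearn import KANClassifier, KANRegressor

X = np.random.randn(300, 5)
y_class = (X[:, 0] > 0).astype(int)
y_float = 2.0 * X[:, 0]

clf = KANClassifier(device='cpu').fit(X, y_class)
clf.predict(X)   # original class labels, looked up via self.classes_

reg = KANRegressor(device='cpu').fit(X, y_float)
reg.predict(X)   # raw float outputs of the network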
def set_fit_request(self: KAN,
*,
batch_size: bool | str | None = '$UNCHANGED$',
gamma: bool | str | None = '$UNCHANGED$',
lr: bool | str | None = '$UNCHANGED$',
weight_decay: bool | str | None = '$UNCHANGED$') ‑> KAN
Source code (the generic request-updating closure that scikit-learn generates for set_*_request methods):

def func(*args, **kw):
    """Updates the `_metadata_request` attribute of the consumer (`instance`)
    for the parameters provided as `**kw`.

    This docstring is overwritten below.
    See REQUESTER_DOC for expected functionality.
    """
    if not _routing_enabled():
        raise RuntimeError(
            "This method is only available when metadata routing is enabled."
            " You can enable it using"
            " sklearn.set_config(enable_metadata_routing=True)."
        )

    if self.validate_keys and (set(kw) - set(self.keys)):
        raise TypeError(
            f"Unexpected args: {set(kw) - set(self.keys)} in {self.name}. "
            f"Accepted arguments are: {set(self.keys)}"
        )

    # This makes it possible to use the decorated method as an unbound method,
    # for instance when monkeypatching.
    # https://github.com/scikit-learn/scikit-learn/issues/28632
    if instance is None:
        _instance = args[0]
        args = args[1:]
    else:
        _instance = instance

    # Replicating python's behavior when positional args are given other than
    # `self`, and `self` is only allowed if this method is unbound.
    if args:
        raise TypeError(
            f"set_{self.name}_request() takes 0 positional argument but"
            f" {len(args)} were given"
        )

    requests = _instance._get_metadata_request()
    method_metadata_request = getattr(requests, self.name)

    for prop, alias in kw.items():
        if alias is not UNCHANGED:
            method_metadata_request.add_request(param=prop, alias=alias)
    _instance._metadata_request = requests

    return _instance
Configure whether metadata should be requested to be passed to the fit method.

Note that this method is only relevant when this estimator is used as a sub-estimator within a :term:`meta-estimator` and metadata routing is enabled with ``enable_metadata_routing=True`` (see :func:`sklearn.set_config`). Please check the :ref:`User Guide <metadata_routing>` on how the routing mechanism works.

The options for each parameter are:

- ``True``: metadata is requested, and passed to ``fit`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``fit``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.

The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the existing request. This allows you to change the request for some parameters and not others.

Added in version 1.3.

Parameters
----------
batch_size : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
    Metadata routing for ``batch_size`` parameter in ``fit``.
gamma : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
    Metadata routing for ``gamma`` parameter in ``fit``.
lr : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
    Metadata routing for ``lr`` parameter in ``fit``.
weight_decay : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
    Metadata routing for ``weight_decay`` parameter in ``fit``.

Returns
-------
self : object
    The updated object.
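A hedged sketch of when set_fit_request matters: routing fit-time hyperparameters through a meta-estimator such as cross_validate. This assumes scikit-learn >= 1.4 (for the params argument of cross_validate) with metadata routing enabled; the data and values are illustrative:

import numpy as np
import sklearn
from sklearn.model_selection import cross_validate
from imodelsx.kan.kan_sklearn import KANClassifier

sklearn.set_config(enable_metadata_routing=True)

X = np.random.randn(200, 4)
y = (X[:, 0] > 0).astype(int)

# request that lr and batch_size be routed to fit
clf = KANClassifier(hidden_layer_size=16, device='cpu').set_fit_request(lr=True, batch_size=True)

# cross_validate forwards the requested metadata to each fold's fit call
results = cross_validate(clf, X, y, cv=3, params={'lr': 1e-2, 'batch_size': 64})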
class KANClassifier (hidden_layer_size: int = 64,
hidden_layer_sizes: List[int] = None,
regularize_activation: float = 1.0,
regularize_entropy: float = 1.0,
regularize_ridge: float = 0.0,
test_size=0.2,
random_state=42,
shuffle=True,
device: str = 'cpu',
**kwargs)
Source code:

class KANClassifier(KAN, ClassifierMixin):
    @torch.no_grad()
    def predict_proba(self, X):
        X = torch.tensor(X, dtype=torch.float32).to(self.device)
        output = self.model(X)
        return torch.nn.functional.softmax(output, dim=1).cpu().numpy()
Ancestors
- KAN
- sklearn.base.BaseEstimator
- sklearn.utils._repr_html.base.ReprHTMLMixin
- sklearn.utils._repr_html.base._HTMLDocumentationLinkMixin
- sklearn.utils._metadata_requests._MetadataRequester
- sklearn.base.ClassifierMixin
Subclasses
- KANGAMClassifier
Methods
def predict_proba(self, X)
Source code:

@torch.no_grad()
def predict_proba(self, X):
    X = torch.tensor(X, dtype=torch.float32).to(self.device)
    output = self.model(X)
    return torch.nn.functional.softmax(output, dim=1).cpu().numpy()
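predict_proba applies a softmax to the network's class logits, so each row sums to 1 and column i corresponds to self.classes_[i]. A short sketch (data illustrative):

import numpy as np
from imodelsx.kan.kan_sklearn import KANClassifier

X = np.random.randn(200, 4)
y = np.random.randint(0, 3, size=200)   # 3 classes, illustrative

clf = KANClassifier(hidden_layer_size=16, device='cpu').fit(X, y)
proba = clf.predict_proba(X)                  # shape (n_samples, 3), rows sum to 1
labels = clf.classes_[proba.argmax(axis=1)]   # equivalent to clf.predict(X)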
def set_score_request(self: KANClassifier,
*,
sample_weight: bool | str | None = '$UNCHANGED$') ‑> KANClassifier
Configure whether metadata should be requested to be passed to the score method.

Note that this method is only relevant when this estimator is used as a sub-estimator within a :term:`meta-estimator` and metadata routing is enabled with ``enable_metadata_routing=True`` (see :func:`sklearn.set_config`). Please check the :ref:`User Guide <metadata_routing>` on how the routing mechanism works.

The options for each parameter are:

- ``True``: metadata is requested, and passed to ``score`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``score``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.

The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the existing request. This allows you to change the request for some parameters and not others.

Added in version 1.3.

Parameters
----------
sample_weight : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
    Metadata routing for ``sample_weight`` parameter in ``score``.

Returns
-------
self : object
    The updated object.
class KANGAMClassifier (hidden_layer_size: int = 64,
hidden_layer_sizes: List[int] = None,
regularize_activation: float = 1.0,
regularize_entropy: float = 1.0,
regularize_ridge: float = 0.0,
test_size=0.2,
random_state=42,
shuffle=True,
device: str = 'cpu',
**kwargs)
Source code:

class KANGAMClassifier(KANClassifier):
    pass
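KANGAMClassifier changes only which module fit() builds: it constructs a KANGAMModule (the GAM-style variant) instead of a plain KANModule, and regularize_ridge enters its regularization loss. A hedged construction sketch (parameter values are illustrative):

from imodelsx.kan.kan_sklearn import KANGAMClassifier

gam_clf = KANGAMClassifier(
    hidden_layer_size=64,
    regularize_activation=1.0,
    regularize_entropy=1.0,
    regularize_ridge=1.0,   # only meaningful for the KANGAM estimators
    device='cpu',
)
# gam_clf.fit(X, y) and gam_clf.predict_proba(X) behave exactly as for KANClassifier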
Ancestors
- KANClassifier
- KAN
- sklearn.base.BaseEstimator
- sklearn.utils._repr_html.base.ReprHTMLMixin
- sklearn.utils._repr_html.base._HTMLDocumentationLinkMixin
- sklearn.utils._metadata_requests._MetadataRequester
- sklearn.base.ClassifierMixin
class KANGAMRegressor (hidden_layer_size: int = 64,
hidden_layer_sizes: List[int] = None,
regularize_activation: float = 1.0,
regularize_entropy: float = 1.0,
regularize_ridge: float = 0.0,
test_size=0.2,
random_state=42,
shuffle=True,
device: str = 'cpu',
**kwargs)
Source code:

class KANGAMRegressor(KANRegressor):
    pass
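KANGAMRegressor pairs the same GAM-style KANGAMModule with the regression training path (MSE loss, single output unit). A minimal sketch on synthetic data (illustrative):

import numpy as np
from imodelsx.kan.kan_sklearn import KANGAMRegressor

X = np.random.randn(400, 3)
y = np.sin(X[:, 0]) + X[:, 1] ** 2   # illustrative additive target

gam_reg = KANGAMRegressor(hidden_layer_size=32, regularize_ridge=1.0, device='cpu')
gam_reg.fit(X, y)
y_hat = gam_reg.predict(X)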
Ancestors
- KANRegressor
- KAN
- sklearn.base.BaseEstimator
- sklearn.utils._repr_html.base.ReprHTMLMixin
- sklearn.utils._repr_html.base._HTMLDocumentationLinkMixin
- sklearn.utils._metadata_requests._MetadataRequester
- sklearn.base.RegressorMixin
class KANRegressor (hidden_layer_size: int = 64,
hidden_layer_sizes: List[int] = None,
regularize_activation: float = 1.0,
regularize_entropy: float = 1.0,
regularize_ridge: float = 0.0,
test_size=0.2,
random_state=42,
shuffle=True,
device: str = 'cpu',
**kwargs)
Source code:

class KANRegressor(KAN, RegressorMixin):
    pass
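KANRegressor is the plain-KAN regression estimator: fit() trains with nn.MSELoss and a single output unit, and score() comes from sklearn's RegressorMixin (R^2). A minimal sketch (data illustrative):

import numpy as np
from imodelsx.kan.kan_sklearn import KANRegressor

X = np.random.randn(500, 4)
y = X[:, 0] - 2 * X[:, 2]   # illustrative target

reg = KANRegressor(hidden_layer_sizes=[32, 32], device='cpu')
reg.fit(X, y, lr=1e-3)
print(reg.score(X, y))      # R^2 via RegressorMixin.score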
Ancestors
- KAN
- sklearn.base.BaseEstimator
- sklearn.utils._repr_html.base.ReprHTMLMixin
- sklearn.utils._repr_html.base._HTMLDocumentationLinkMixin
- sklearn.utils._metadata_requests._MetadataRequester
- sklearn.base.RegressorMixin
Subclasses
- KANGAMRegressor
Methods
def set_score_request(self: KANRegressor,
*,
sample_weight: bool | str | None = '$UNCHANGED$') ‑> KANRegressor
Configure whether metadata should be requested to be passed to the score method.

Note that this method is only relevant when this estimator is used as a sub-estimator within a :term:`meta-estimator` and metadata routing is enabled with ``enable_metadata_routing=True`` (see :func:`sklearn.set_config`). Please check the :ref:`User Guide <metadata_routing>` on how the routing mechanism works.

The options for each parameter are:

- ``True``: metadata is requested, and passed to ``score`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``score``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.

The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the existing request. This allows you to change the request for some parameters and not others.

Added in version 1.3.

Parameters
----------
sample_weight : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
    Metadata routing for ``sample_weight`` parameter in ``score``.

Returns
-------
self : object
    The updated object.