Expand source code
import numpy as np
from typing import List

from imodels.rule_set.rule_fit import RuleFit
from imodels.util.score import score_linear
from sklearn.base import ClassifierMixin, RegressorMixin, BaseEstimator

from .util import extract_ensemble


class StableLinear(RuleFit):
    def __init__(self,
                 weak_learners: List[BaseEstimator],
                 max_complexity: int,
                 min_mult: int = 1,
                 penalty='l1',
                 n_estimators=100,
                 tree_size=4,
                 sample_fract='default',
                 max_rules=30,
                 memory_par=0.01,
                 tree_generator=None,
                 lin_trim_quantile=0.025,
                 lin_standardise=True,
                 exp_rand_tree_size=True,
                 include_linear=False,
                 alpha=None,
                 random_state=None):
        super().__init__(n_estimators,
                         tree_size,
                         sample_fract,
                         max_rules,
                         memory_par,
                         tree_generator,
                         lin_trim_quantile,
                         lin_standardise,
                         exp_rand_tree_size,
                         include_linear,
                         alpha,
                         random_state)
        self.max_complexity = max_complexity
        self.weak_learners = weak_learners
        self.penalty = penalty
        self.min_mult = min_mult

    def fit(self, X, y=None, feature_names=None):
        super().fit(X, y, feature_names=feature_names)
        return self

    def _extract_rules(self, X, y) -> List[str]:
        return extract_ensemble(self.weak_learners, X, y, self.min_mult)

    def _score_rules(self, X, y, rules):
        X_concat = np.zeros([X.shape[0], 0])

        # standardise linear variables if requested (for regression model only)
        if self.include_linear:
            # standard deviation and mean of winsorized features
            self.winsorizer.train(X)
            winsorized_X = self.winsorizer.trim(X)
            self.stddev = np.std(winsorized_X, axis=0)
            self.mean = np.mean(winsorized_X, axis=0)
            if self.lin_standardise:
                self.friedscale.train(X)
                X_regn = self.friedscale.scale(X)
            else:
                X_regn = X.copy()
            X_concat = np.concatenate((X_concat, X_regn), axis=1)

        X_rules = self.transform(X, rules)
        if X_rules.shape[0] > 0:
            X_concat = np.concatenate((X_concat, X_rules), axis=1)

        # no rules fit and self.include_linear == False
        if X_concat.shape[1] == 0:
            return [], [], 0

        return score_linear(X_concat, y, rules,
                            alpha=self.alpha,
                            penalty=self.penalty,
                            prediction_task=self.prediction_task,
                            max_rules=self.max_rules,
                            random_state=self.random_state)


class StableLinearRegressor(StableLinear, RegressorMixin):
    def _init_prediction_task(self):
        self.prediction_task = 'regression'


class StableLinearClassifier(StableLinear, ClassifierMixin):
    def _init_prediction_task(self):
        self.prediction_task = 'classification'
Classes
class StableLinear (weak_learners: List[sklearn.base.BaseEstimator], max_complexity: int, min_mult: int = 1, penalty='l1', n_estimators=100, tree_size=4, sample_fract='default', max_rules=30, memory_par=0.01, tree_generator=None, lin_trim_quantile=0.025, lin_standardise=True, exp_rand_tree_size=True, include_linear=False, alpha=None, random_state=None)
RuleFit class. Rather than using this class directly, you should use RuleFitRegressor or RuleFitClassifier.
Parameters
tree_size : Number of terminal nodes in generated trees. If exp_rand_tree_size=True, this will be the mean number of terminal nodes.
sample_fract : Fraction of randomly chosen training observations used to produce each tree. FP 2004 (Sec. 2).
max_rules : Total number of terms included in the final model (both linear and rules). The approximate total number of candidate rules generated for fitting is also based on this; the actual number of candidate rules will usually be lower due to duplicates.
memory_par : Scale multiplier (shrinkage factor) applied to each new tree when sequentially induced. FP 2004 (Sec. 2).
lin_standardise : If True, the linear terms will be standardised as per Friedman Sec. 3.2 by multiplying the winsorised variable by 0.4/stdev.
lin_trim_quantile : If lin_standardise is True, this quantile will be used to trim linear terms before standardisation.
exp_rand_tree_size : If True, each boosted tree will have a different maximum number of terminal nodes, based on an exponential distribution about tree_size. (Friedman Sec. 3.3)
include_linear : Include linear terms as opposed to only rules.
alpha : Regularization strength; will override the max_rules parameter.
cv : Whether to use cross-validation scores to select the regularization strength, i.e. the final regularization value out of all that satisfy max_rules. If False, the least regularization possible is used.
random_state : Integer to initialise random objects and provide repeatability.
tree_generator : Optional: this object will be used as provided to generate the rules. This will override almost all the other properties above. Must be GradientBoostingRegressor(), GradientBoostingClassifier(), or RandomForestRegressor().
Attributes
rule_ensemble : RuleEnsemble
    The rule ensemble.
feature_names : list of strings, optional (default=None)
    The names of the features (columns).
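The docstring above recommends the task-specific subclasses, so a minimal usage sketch might look like the following. This is illustrative only: the dataset, the choice of RuleFitRegressor weak learners, the top-level import path, and the min_mult reading (a rule must be mined by at least that many weak learners) are assumptions, not taken from this page.

    import numpy as np
    from sklearn.datasets import make_regression
    from imodels import RuleFitRegressor, StableLinearRegressor  # import path assumed

    X, y = make_regression(n_samples=200, n_features=5, noise=1.0, random_state=0)

    # Weak learners whose rules are pooled by _extract_rules / extract_ensemble;
    # they are passed unfitted here, since extract_ensemble receives X and y.
    weak_learners = [RuleFitRegressor(max_rules=10, random_state=seed)
                     for seed in range(3)]

    model = StableLinearRegressor(weak_learners=weak_learners,
                                  max_complexity=10,  # required positional argument
                                  min_mult=2,         # assumed: keep rules found by >= 2 learners
                                  penalty='l1',
                                  random_state=0)
    model.fit(X, y)
    preds = model.predict(X)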
Ancestors
- RuleFit
- sklearn.base.BaseEstimator
- sklearn.utils._estimator_html_repr._HTMLDocumentationLinkMixin
- sklearn.utils._metadata_requests._MetadataRequester
- sklearn.base.TransformerMixin
- sklearn.utils._set_output._SetOutputMixin
- RuleSet
Subclasses
- StableLinearClassifier
- StableLinearRegressor
Inherited members
class StableLinearClassifier (weak_learners: List[sklearn.base.BaseEstimator], max_complexity: int, min_mult: int = 1, penalty='l1', n_estimators=100, tree_size=4, sample_fract='default', max_rules=30, memory_par=0.01, tree_generator=None, lin_trim_quantile=0.025, lin_standardise=True, exp_rand_tree_size=True, include_linear=False, alpha=None, random_state=None)
RuleFit class. Rather than using this class directly, you should use RuleFitRegressor or RuleFitClassifier.
Parameters
tree_size : Number of terminal nodes in generated trees. If exp_rand_tree_size=True, this will be the mean number of terminal nodes.
sample_fract : Fraction of randomly chosen training observations used to produce each tree. FP 2004 (Sec. 2).
max_rules : Total number of terms included in the final model (both linear and rules). The approximate total number of candidate rules generated for fitting is also based on this; the actual number of candidate rules will usually be lower due to duplicates.
memory_par : Scale multiplier (shrinkage factor) applied to each new tree when sequentially induced. FP 2004 (Sec. 2).
lin_standardise : If True, the linear terms will be standardised as per Friedman Sec. 3.2 by multiplying the winsorised variable by 0.4/stdev.
lin_trim_quantile : If lin_standardise is True, this quantile will be used to trim linear terms before standardisation.
exp_rand_tree_size : If True, each boosted tree will have a different maximum number of terminal nodes, based on an exponential distribution about tree_size. (Friedman Sec. 3.3)
include_linear : Include linear terms as opposed to only rules.
alpha : Regularization strength; will override the max_rules parameter.
cv : Whether to use cross-validation scores to select the regularization strength, i.e. the final regularization value out of all that satisfy max_rules. If False, the least regularization possible is used.
random_state : Integer to initialise random objects and provide repeatability.
tree_generator : Optional: this object will be used as provided to generate the rules. This will override almost all the other properties above. Must be GradientBoostingRegressor(), GradientBoostingClassifier(), or RandomForestRegressor().
Attributes
rule_ensemble : RuleEnsemble
    The rule ensemble.
feature_names : list of strings, optional (default=None)
    The names of the features (columns).
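For the classification variant, a similarly hedged sketch (dataset and settings are illustrative; predict_proba is assumed to be available through RuleFit's classification path):

    from sklearn.datasets import make_classification
    from imodels import RuleFitClassifier, StableLinearClassifier  # import path assumed

    X, y = make_classification(n_samples=200, n_features=5, random_state=0)
    weak_learners = [RuleFitClassifier(max_rules=10) for _ in range(3)]

    clf = StableLinearClassifier(weak_learners=weak_learners, max_complexity=10)
    clf.fit(X, y)
    probs = clf.predict_proba(X)  # class probabilities from the underlying linear model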
Ancestors
- StableLinear
- RuleFit
- sklearn.base.BaseEstimator
- sklearn.utils._estimator_html_repr._HTMLDocumentationLinkMixin
- sklearn.utils._metadata_requests._MetadataRequester
- sklearn.base.TransformerMixin
- sklearn.utils._set_output._SetOutputMixin
- RuleSet
- sklearn.base.ClassifierMixin
Methods
def set_score_request(self: StableLinearClassifier, *, sample_weight: Union[bool, ForwardRef(None), str] = '$UNCHANGED$') ‑> StableLinearClassifier
Request metadata passed to the score method.

Note that this method is only relevant if enable_metadata_routing=True (see sklearn.set_config). Please see the User Guide on metadata routing for how the routing mechanism works.

The options for each parameter are:
- True: metadata is requested, and passed to score if provided. The request is ignored if metadata is not provided.
- False: metadata is not requested and the meta-estimator will not pass it to score.
- None: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- str: metadata should be passed to the meta-estimator with this given alias instead of the original name.

The default (sklearn.utils.metadata_routing.UNCHANGED) retains the existing request. This allows you to change the request for some parameters and not others.

Added in version 1.3.

Note: this method is only relevant if this estimator is used as a sub-estimator of a meta-estimator, e.g. used inside a sklearn.pipeline.Pipeline. Otherwise it has no effect.

Parameters
sample_weight : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
    Metadata routing for the sample_weight parameter in score.

Returns
self : object
    The updated object.
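As a concrete illustration of the routing described above, the sketch below requests sample_weight for scoring during cross-validation. It assumes a recent scikit-learn (metadata routing and the params argument of cross_validate) and reuses the illustrative weak-learner setup; none of it is taken from this page.

    import numpy as np
    import sklearn
    from sklearn.datasets import make_classification
    from sklearn.model_selection import cross_validate
    from imodels import RuleFitClassifier, StableLinearClassifier  # import path assumed

    sklearn.set_config(enable_metadata_routing=True)

    X, y = make_classification(n_samples=200, random_state=0)
    sample_weight = np.ones(len(y))  # illustrative weights

    clf = StableLinearClassifier(weak_learners=[RuleFitClassifier(max_rules=10)],
                                 max_complexity=10)
    clf = clf.set_score_request(sample_weight=True)  # route weights to score() only

    # cross_validate forwards sample_weight to the scorer; fit() does not receive
    # it, since no fit request was set (and fit here takes no sample_weight).
    results = cross_validate(clf, X, y, params={"sample_weight": sample_weight})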
Expand source code
def func(*args, **kw):
    """Updates the request for provided parameters

    This docstring is overwritten below.
    See REQUESTER_DOC for expected functionality
    """
    if not _routing_enabled():
        raise RuntimeError(
            "This method is only available when metadata routing is enabled."
            " You can enable it using"
            " sklearn.set_config(enable_metadata_routing=True)."
        )

    if self.validate_keys and (set(kw) - set(self.keys)):
        raise TypeError(
            f"Unexpected args: {set(kw) - set(self.keys)} in {self.name}. "
            f"Accepted arguments are: {set(self.keys)}"
        )

    # This makes it possible to use the decorated method as an unbound method,
    # for instance when monkeypatching.
    # https://github.com/scikit-learn/scikit-learn/issues/28632
    if instance is None:
        _instance = args[0]
        args = args[1:]
    else:
        _instance = instance

    # Replicating python's behavior when positional args are given other than
    # `self`, and `self` is only allowed if this method is unbound.
    if args:
        raise TypeError(
            f"set_{self.name}_request() takes 0 positional argument but"
            f" {len(args)} were given"
        )

    requests = _instance._get_metadata_request()
    method_metadata_request = getattr(requests, self.name)

    for prop, alias in kw.items():
        if alias is not UNCHANGED:
            method_metadata_request.add_request(param=prop, alias=alias)
    _instance._metadata_request = requests

    return _instance
Inherited members
class StableLinearRegressor (weak_learners: List[sklearn.base.BaseEstimator], max_complexity: int, min_mult: int = 1, penalty='l1', n_estimators=100, tree_size=4, sample_fract='default', max_rules=30, memory_par=0.01, tree_generator=None, lin_trim_quantile=0.025, lin_standardise=True, exp_rand_tree_size=True, include_linear=False, alpha=None, random_state=None)
RuleFit class. Rather than using this class directly, you should use RuleFitRegressor or RuleFitClassifier.
Parameters
tree_size : Number of terminal nodes in generated trees. If exp_rand_tree_size=True, this will be the mean number of terminal nodes.
sample_fract : Fraction of randomly chosen training observations used to produce each tree. FP 2004 (Sec. 2).
max_rules : Total number of terms included in the final model (both linear and rules). The approximate total number of candidate rules generated for fitting is also based on this; the actual number of candidate rules will usually be lower due to duplicates.
memory_par : Scale multiplier (shrinkage factor) applied to each new tree when sequentially induced. FP 2004 (Sec. 2).
lin_standardise : If True, the linear terms will be standardised as per Friedman Sec. 3.2 by multiplying the winsorised variable by 0.4/stdev.
lin_trim_quantile : If lin_standardise is True, this quantile will be used to trim linear terms before standardisation.
exp_rand_tree_size : If True, each boosted tree will have a different maximum number of terminal nodes, based on an exponential distribution about tree_size. (Friedman Sec. 3.3)
include_linear : Include linear terms as opposed to only rules.
alpha : Regularization strength; will override the max_rules parameter.
cv : Whether to use cross-validation scores to select the regularization strength, i.e. the final regularization value out of all that satisfy max_rules. If False, the least regularization possible is used.
random_state : Integer to initialise random objects and provide repeatability.
tree_generator : Optional: this object will be used as provided to generate the rules. This will override almost all the other properties above. Must be GradientBoostingRegressor(), GradientBoostingClassifier(), or RandomForestRegressor().
Attributes
rule_ensemble : RuleEnsemble
    The rule ensemble.
feature_names : list of strings, optional (default=None)
    The names of the features (columns).
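Since _score_rules (source above) winsorises the raw features and optionally applies Friedman's 0.4/stdev scaling before appending them to the rule matrix, turning that path on is just a matter of constructor flags. A sketch with illustrative values (import path assumed):

    from sklearn.datasets import make_regression
    from imodels import RuleFitRegressor, StableLinearRegressor  # import path assumed

    X, y = make_regression(n_samples=200, n_features=5, random_state=0)
    weak_learners = [RuleFitRegressor(max_rules=10) for _ in range(3)]

    reg = StableLinearRegressor(weak_learners=weak_learners,
                                max_complexity=10,
                                include_linear=True,      # append linear terms to the rule features
                                lin_standardise=True,     # scale winsorised features by 0.4/stdev
                                lin_trim_quantile=0.025)  # winsorisation quantile
    reg.fit(X, y)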
Ancestors
- StableLinear
- RuleFit
- sklearn.base.BaseEstimator
- sklearn.utils._estimator_html_repr._HTMLDocumentationLinkMixin
- sklearn.utils._metadata_requests._MetadataRequester
- sklearn.base.TransformerMixin
- sklearn.utils._set_output._SetOutputMixin
- RuleSet
- sklearn.base.RegressorMixin
Methods
def set_score_request(self: StableLinearRegressor, *, sample_weight: Union[bool, ForwardRef(None), str] = '$UNCHANGED$') ‑> StableLinearRegressor
Request metadata passed to the score method.

Note that this method is only relevant if enable_metadata_routing=True (see sklearn.set_config). Please see the User Guide on metadata routing for how the routing mechanism works.

The options for each parameter are:
- True: metadata is requested, and passed to score if provided. The request is ignored if metadata is not provided.
- False: metadata is not requested and the meta-estimator will not pass it to score.
- None: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- str: metadata should be passed to the meta-estimator with this given alias instead of the original name.

The default (sklearn.utils.metadata_routing.UNCHANGED) retains the existing request. This allows you to change the request for some parameters and not others.

Added in version 1.3.

Note: this method is only relevant if this estimator is used as a sub-estimator of a meta-estimator, e.g. used inside a sklearn.pipeline.Pipeline. Otherwise it has no effect.

Parameters
sample_weight : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
    Metadata routing for the sample_weight parameter in score.

Returns
self : object
    The updated object.
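The str option above aliases the metadata key. A brief sketch of that, again assuming a recent scikit-learn and an assumed import path:

    import sklearn
    from sklearn.datasets import make_regression
    from imodels import RuleFitRegressor, StableLinearRegressor  # import path assumed

    sklearn.set_config(enable_metadata_routing=True)

    X, y = make_regression(n_samples=100, random_state=0)
    reg = StableLinearRegressor(weak_learners=[RuleFitRegressor(max_rules=10)],
                                max_complexity=10)

    # A meta-estimator will now look for "eval_weights" in its params and
    # deliver that value to score() as sample_weight.
    reg = reg.set_score_request(sample_weight="eval_weights")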
Inherited members