The oneR algorithm returns a rule list that splits on only one (usually continuous) feature. It works by building a greedy rule list using only one feature at a time, and then returning the rule list with the highest training accuracy.
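
A minimal usage sketch on synthetic data (the toy dataset and the feature names 'f0'…'f2' are made up for illustration):

import numpy as np
from imodels import OneRClassifier

X = np.random.rand(100, 3)                   # toy data: y depends only on the first feature
y = (X[:, 0] > 0.5).astype(int)

m = OneRClassifier(max_depth=3)
m.fit(X, y, feature_names=['f0', 'f1', 'f2'])
print(np.mean(m.predict(X) == y))            # training accuracy of the selected rule list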

Source code
'''The oneR algorithm returns a rule list that splits on only one (usually continuous) feature.
It works by building a greedy rule list using only one feature at a time, and then returning
the rule list with the highest training accuracy.
'''

import math
import numpy as np
from copy import deepcopy
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from sklearn.utils.multiclass import check_classification_targets, unique_labels
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted

from imodels import GreedyRuleListClassifier
from imodels.rule_list.rule_list import RuleList
from imodels.util.arguments import check_fit_arguments


class OneRClassifier(GreedyRuleListClassifier):
    def __init__(self, max_depth=5, class_weight=None, criterion='gini'):
        self.max_depth = max_depth
        self.feature_names_ = None
        self.class_weight = class_weight
        self.criterion = criterion
        self._estimator_type = 'classifier'

    def fit(self, X, y, feature_names=None):
        """Fit oneR: fit a greedy rule list on each feature individually,
        then keep the rule list with the highest training accuracy.
        """
        X, y, feature_names = check_fit_arguments(self, X, y, feature_names)

        ms = []
        accs = np.zeros(X.shape[1])
        for col_idx in range(X.shape[1]):
            x = X[:, col_idx].reshape(-1, 1)
            m = GreedyRuleListClassifier(max_depth=self.max_depth, class_weight=self.class_weight,
                                         criterion=self.criterion)
            feat_names_single = [self.feature_names_[col_idx]]
            m.fit(x, y, feature_names=feat_names_single)
            accs[col_idx] = np.mean(m.predict(x) == y)
            ms.append(m)
            # print('acc', feat_names_single[0], f'{accs[col_idx]:0.2f}')
        col_idx_best = np.argmax(accs)
        self.rules_ = ms[col_idx_best].rules_
        self.complexity_ = len(self.rules_)

        # the rules were fit on a single column, so map index_col back to the original column index
        for rule in self.rules_:
            if 'index_col' in rule:
                rule['index_col'] += col_idx_best
        self.depth = len(self.rules_)
        return self
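
For intuition, the core selection loop above can be sketched in isolation. The following illustrative re-implementation swaps the greedy rule list for a plain decision tree, so it mirrors the idea but not the exact imodels behavior:

import numpy as np
from sklearn.tree import DecisionTreeClassifier

def one_r_sketch(X, y, max_depth=5):
    """Fit a shallow model on each feature alone; keep the most accurate one."""
    best_acc, best_col, best_model = -1.0, None, None
    for col in range(X.shape[1]):
        x = X[:, col].reshape(-1, 1)                      # single-feature view of X
        m = DecisionTreeClassifier(max_depth=max_depth).fit(x, y)
        acc = m.score(x, y)                               # training accuracy, as in the loop above
        if acc > best_acc:
            best_acc, best_col, best_model = acc, col, m
    return best_col, best_model                           # winning feature and its model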

Classes

class OneRClassifier (max_depth=5, class_weight=None, criterion='gini')

The following description is inherited from sklearn.base.BaseEstimator, the base class for all estimators in scikit-learn.

Inheriting from this class provides default implementations of:

  • setting and getting parameters used by GridSearchCV and friends;
  • textual and HTML representation displayed in terminals and IDEs;
  • estimator serialization;
  • parameters validation;
  • data validation;
  • feature names validation.

Read more in the User Guide (rolling_your_own_estimator).

Notes

All estimators should specify all the parameters that can be set at the class level in their __init__ as explicit keyword arguments (no *args or **kwargs).

Examples

>>> import numpy as np
>>> from sklearn.base import BaseEstimator
>>> class MyEstimator(BaseEstimator):
...     def __init__(self, *, param=1):
...         self.param = param
...     def fit(self, X, y=None):
...         self.is_fitted_ = True
...         return self
...     def predict(self, X):
...         return np.full(shape=X.shape[0], fill_value=self.param)
>>> estimator = MyEstimator(param=2)
>>> estimator.get_params()
{'param': 2}
>>> X = np.array([[1, 2], [2, 3], [3, 4]])
>>> y = np.array([1, 0, 1])
>>> estimator.fit(X, y).predict(X)
array([2, 2, 2])
>>> estimator.set_params(param=3).fit(X, y).predict(X)
array([3, 3, 3])
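
OneRClassifier inherits this machinery through GreedyRuleListClassifier, so, for example, its constructor parameters round-trip through get_params() and it can be tuned with GridSearchCV:

>>> from imodels import OneRClassifier
>>> OneRClassifier(max_depth=3, criterion='entropy').get_params()
{'class_weight': None, 'criterion': 'entropy', 'max_depth': 3}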

Params

max_depth
    Maximum depth the list can achieve
criterion : str
    Criterion used to split: 'gini', 'entropy', or 'log_loss'


Ancestors

  • GreedyRuleListClassifier
  • sklearn.base.BaseEstimator
  • sklearn.utils._estimator_html_repr._HTMLDocumentationLinkMixin
  • sklearn.utils._metadata_requests._MetadataRequester
  • RuleList
  • sklearn.base.ClassifierMixin

Methods

def fit(self, X, y, feature_names=None)

Fit oneR: fit a greedy rule list on each feature individually, then keep the rule list with the highest training accuracy.
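
After a fit, the selected rule list is available through the fitted attributes set in the source above (rules_, complexity_, depth); a sketch, reusing toy data as in the first example:

import numpy as np
from imodels import OneRClassifier

X = np.random.rand(100, 3)                    # synthetic data, for illustration only
y = (X[:, 0] > 0.5).astype(int)
m = OneRClassifier(max_depth=3).fit(X, y, feature_names=['f0', 'f1', 'f2'])
for rule in m.rules_:                          # list of dicts, one per split in the rule list
    print(rule)                                # 'index_col', where present, indexes the full X
print(m.complexity_, m.depth)                  # both equal len(m.rules_), per the source above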

def set_fit_request(self: OneRClassifier, *, feature_names: Union[bool, None, str] = '$UNCHANGED$') -> OneRClassifier

Request metadata passed to the fit method.

Note that this method is only relevant if enable_metadata_routing=True (see sklearn.set_config). Please see the User Guide (metadata_routing) for how the routing mechanism works.

The options for each parameter are:

  • True: metadata is requested, and passed to fit if provided. The request is ignored if metadata is not provided.

  • False: metadata is not requested and the meta-estimator will not pass it to fit.

  • None: metadata is not requested, and the meta-estimator will raise an error if the user provides it.

  • str: metadata should be passed to the meta-estimator with this given alias instead of the original name.

The default (sklearn.utils.metadata_routing.UNCHANGED) retains the existing request. This allows you to change the request for some parameters and not others.

Added in version: 1.3

Note

This method is only relevant if this estimator is used as a sub-estimator of a meta-estimator, e.g. used inside a sklearn.pipeline.Pipeline. Otherwise it has no effect.

Parameters

feature_names : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for feature_names parameter in fit.

Returns

self : object
The updated object.
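
A hedged sketch of typical usage (routing must be enabled globally first; the pipeline step name 'oner' is arbitrary):

import sklearn
from sklearn.pipeline import Pipeline
from imodels import OneRClassifier

sklearn.set_config(enable_metadata_routing=True)
clf = OneRClassifier().set_fit_request(feature_names=True)   # request feature_names in fit
pipe = Pipeline([('oner', clf)])
# pipe.fit(X, y, feature_names=[...]) would now route feature_names to OneRClassifier.fit
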
Source code (the generic request-updating wrapper generated by scikit-learn's metadata-request machinery):
def func(*args, **kw):
    """Updates the request for provided parameters

    This docstring is overwritten below.
    See REQUESTER_DOC for expected functionality
    """
    if not _routing_enabled():
        raise RuntimeError(
            "This method is only available when metadata routing is enabled."
            " You can enable it using"
            " sklearn.set_config(enable_metadata_routing=True)."
        )

    if self.validate_keys and (set(kw) - set(self.keys)):
        raise TypeError(
            f"Unexpected args: {set(kw) - set(self.keys)} in {self.name}. "
            f"Accepted arguments are: {set(self.keys)}"
        )

    # This makes it possible to use the decorated method as an unbound method,
    # for instance when monkeypatching.
    # https://github.com/scikit-learn/scikit-learn/issues/28632
    if instance is None:
        _instance = args[0]
        args = args[1:]
    else:
        _instance = instance

    # Replicating python's behavior when positional args are given other than
    # `self`, and `self` is only allowed if this method is unbound.
    if args:
        raise TypeError(
            f"set_{self.name}_request() takes 0 positional argument but"
            f" {len(args)} were given"
        )

    requests = _instance._get_metadata_request()
    method_metadata_request = getattr(requests, self.name)

    for prop, alias in kw.items():
        if alias is not UNCHANGED:
            method_metadata_request.add_request(param=prop, alias=alias)
    _instance._metadata_request = requests

    return _instance

Inherited members