How to apply a polynomial transformation to a subset of features in scikit-learn

Scikit-learn's PolynomialFeatures makes it easy to create polynomial features.

Here is a simple example:

import numpy as np
import pandas as pd
from sklearn.preprocessing import PolynomialFeatures

# Example data:
X = np.arange(6).reshape(3, 2)


# Works fine
poly = PolynomialFeatures(2)
pd.DataFrame(poly.fit_transform(X))

   0  1  2   3   4   5
0  1  0  1   0   0   1
1  1  2  3   4   6   9
2  1  4  5  16  20  25
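
(The six output columns are the degree ≤ 2 polynomial terms of the two inputs: 1, x0, x1, x0^2, x0*x1, x1^2.)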

Question: Is there any way to apply the polynomial transformation only to a specified list of columns?

eg.

# Make a DataFrame from the previous array
X2 = pd.DataFrame(X)

# The categorical feature will be handled
# by a one-hot encoder in another feature generation step
X2['animal'] = ['dog', 'dog', 'cat']

# Don't try to poly transform the animal column
poly2 = PolynomialFeatures(2, cols=[1,2]) # <-- ("cols" not an actual param)

# desired outcome:
pd.DataFrame(poly2.fit_transform(X2))
   0  1  2   3   4   5   'animal'
0  1  0  1   0   0   1   'dog'
1  1  2  3   4   6   9   'dog'
2  1  4  5  16  20  25   'cat'

This would be especially useful when using Pipeline to chain together a long series of feature generation and model training steps.

One option would be to write your own transformer (Michelle Fulwood has a great example), but I figured someone must have stumbled over this use case before.
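
Update: for anyone landing here on scikit-learn 0.20 or newer, sklearn.compose.ColumnTransformer covers exactly this use case. A minimal sketch against the X2 above:

import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import PolynomialFeatures

X2 = pd.DataFrame(np.arange(6).reshape(3, 2))
X2['animal'] = ['dog', 'dog', 'cat']

# Apply PolynomialFeatures to the first two (numeric) columns only;
# remainder='passthrough' leaves 'animal' untouched.
ct = ColumnTransformer([('poly', PolynomialFeatures(2), [0, 1])],
                       remainder='passthrough')
print(ct.fit_transform(X2))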

+4
3 answers

The accepted answer below gets the cookie for the idea, but as posted it didn't run for me, so here is a working version. (The changes are flagged in the code comments.)

First, ColumnExtractor needs to inherit from BaseEstimator and TransformerMixin so that it plays nicely with the rest of sklearn (cloning, get_params, and so on).

Second, I use pd.get_dummies for the categorical column rather than LabelEncoder plus OneHotEncoder inside the pipeline: LabelEncoder.fit() takes y, not X, so it cannot be used as a pipeline step; get_dummies sidesteps the problem.

Enjoy.

import pandas as pd
import numpy as np
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline
from sklearn.base import BaseEstimator, TransformerMixin

X = pd.DataFrame({'cat': ['a', 'b', 'c'], 'n1': [1, 2, 3], 'n2':[5, 7, 9] })

   cat  n1  n2
0   a   1   5
1   b   2   7
2   c   3   9

# original version had class ColumnExtractor(object)
# estimators need to inherit from these classes to play nicely with others
class ColumnExtractor(BaseEstimator, TransformerMixin):
    def __init__(self, columns=None):
        self.columns = columns
    def fit(self, X, y=None):
        return self
    def transform(self, X):
        X_cols = X[self.columns]
        return X_cols

# Using pandas get_dummies to make the pipeline a bit simpler by
# avoiding OneHotEncoder and LabelEncoder.
# Build the pipeline from a FeatureUnion that processes the
# numerical and one-hot encoded columns separately.
# FeatureUnion puts them back together when it's done.
pipe2nvars = Pipeline([
    ('features', FeatureUnion([('num', 
                                Pipeline([('extract', 
                                           ColumnExtractor(columns=['n1', 'n2'])),
                                          ('poly', 
                                           PolynomialFeatures())  ])),
                               ('cat_var', 
                                ColumnExtractor(columns=['cat_b','cat_c']))])
    )])    

# now show it working...
for p in range(1, 4):
    pipe2nvars.set_params(features__num__poly__degree=p)
    res = pipe2nvars.fit_transform(pd.get_dummies(X, drop_first=True))
    print('polynomial degree: {}; shape: {}'.format(p, res.shape))
    print(res)

polynomial degree: 1; shape: (3, 5)
[[1. 1. 5. 0. 0.]
 [1. 2. 7. 1. 0.]
 [1. 3. 9. 0. 1.]]
polynomial degree: 2; shape: (3, 8)
[[ 1.  1.  5.  1.  5. 25.  0.  0.]
 [ 1.  2.  7.  4. 14. 49.  1.  0.]
 [ 1.  3.  9.  9. 27. 81.  0.  1.]]
polynomial degree: 3; shape: (3, 12)
[[  1.   1.   5.   1.   5.  25.   1.   5.  25. 125.   0.   0.]
 [  1.   2.   7.   4.  14.  49.   8.  28.  98. 343.   1.   0.]
 [  1.   3.   9.   9.  27.  81.  27.  81. 243. 729.   0.   1.]]
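
Because the degree is addressable as a pipeline parameter, you can also hand it to a grid search instead of the manual loop above. A minimal sketch, assuming a hypothetical regression target and a larger made-up frame (Xbig/ybig are invented here) so cross-validation has enough rows:

from sklearn.linear_model import LinearRegression
from sklearn.model_selection import GridSearchCV

# Hypothetical toy data, larger than the 3-row example so CV can split it
rng = np.random.RandomState(0)
Xbig = pd.DataFrame({'cat': rng.choice(['a', 'b', 'c'], 30),
                     'n1': rng.uniform(size=30),
                     'n2': rng.uniform(size=30)})
ybig = Xbig['n1'] * Xbig['n2'] + rng.normal(scale=0.01, size=30)

# Reuse the FeatureUnion from above and bolt a regressor on the end
model = Pipeline([('features', pipe2nvars.named_steps['features']),
                  ('reg', LinearRegression())])
grid = GridSearchCV(model,
                    param_grid={'features__num__poly__degree': [1, 2, 3]},
                    cv=3)
grid.fit(pd.get_dummies(Xbig, drop_first=True), ybig)
print(grid.best_params_)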
+1

PolynomialFeatures, like many other transformers in sklearn, does not have a parameter that specifies which column(s) of the data to apply to, so it is not straightforward to put it in a pipeline and expect it to work.

A more general way to do this is to use FeatureUnion and give each set of columns in your dataframe its own sub-pipeline.

Something like this:

import pandas as pd
from sklearn.pipeline import FeatureUnion, Pipeline
from sklearn.preprocessing import PolynomialFeatures, OneHotEncoder, LabelEncoder
from sklearn.linear_model import LogisticRegression


X = pd.DataFrame({'cat_var': ['a', 'b', 'c'], 'num_var': [1, 2, 3]})


class ColumnExtractor(object):
    def __init__(self, columns=None):
        self.columns = columns

    def fit(self, X, y=None):
        return self

    def transform(self, X):
        X_cols = X[self.columns]
        return X_cols


pipeline = Pipeline([
    ('features', FeatureUnion([
        ('num_var', Pipeline([
            ('extract', ColumnExtractor(columns=['num_var'])),
            ('poly', PolynomialFeatures(degree=2))
        ])),
        ('cat_var', Pipeline([
            ('extract', ColumnExtractor(columns=['cat_var'])),
            ('le', LabelEncoder()),
            ('ohe', OneHotEncoder()),
        ]))
    ])),
    ('estimator', LogisticRegression())
])
+5

Yes, there is: sklearn-pandas.

This should work (there is probably a more elegant solution, but I can't check it right now):

from sklearn.preprocessing import PolynomialFeatures
from sklearn_pandas import DataFrameMapper

X2.columns = ['col0', 'col1', 'animal']

mapper = DataFrameMapper([
    # Select the numeric columns as a list so they reach
    # PolynomialFeatures as a 2D array (and cross terms are generated)
    (['col0', 'col1'], PolynomialFeatures(2)),
    # Pass the categorical column through untouched
    ('animal', None)])

X3 = mapper.fit_transform(X2)
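
If you'd rather get a DataFrame back than a numpy array, DataFrameMapper takes a df_out flag (in reasonably recent versions of sklearn-pandas):

mapper = DataFrameMapper([
    (['col0', 'col1'], PolynomialFeatures(2)),
    ('animal', None)
], df_out=True)  # return a pandas DataFrame with generated column names

X3 = mapper.fit_transform(X2)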
+3