import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder,StandardScaler
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.linear_model import LinearRegression
df = pd.DataFrame({'brand': ['aaaa', 'asdfasdf', 'sadfds', 'NaN'],
                   'category': ['asdf', 'asfa', 'asdfas', 'as'],
                   'num1': [1, 1, 0, 0],
                   'target': [0.2, 0.11, 1.34, 1.123]})
train_continuous_cols = df.select_dtypes(include=["int64","float64"]).columns.tolist()
train_categorical_cols = df.select_dtypes(include=["object"]).columns.tolist()
preprocess = make_column_transformer(
    (StandardScaler(), train_continuous_cols),
    (OneHotEncoder(), train_categorical_cols)
)
df = preprocess.fit_transform(df)
Just trying to get all the feature names:
preprocess.get_feature_names()
Getting this error:
Transformer standardscaler (type StandardScaler) does not provide get_feature_names
How can I fix this? The examples online use a Pipeline, which I am trying to avoid.
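For reference: on scikit-learn 1.0 and later this is built in, since ColumnTransformer, StandardScaler and OneHotEncoder all implement get_feature_names_out(); a minimal sketch assuming such a version and the fitted preprocess from the snippet above:

# Minimal sketch, assuming scikit-learn >= 1.0, where get_feature_names_out()
# replaces get_feature_names(); `preprocess` is the fitted transformer from above.
print(preprocess.get_feature_names_out())
# e.g. ['standardscaler__num1', 'standardscaler__target', 'onehotencoder__brand_NaN', ...]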
The following re-implementation of ColumnTransformer returns a pandas DataFrame. Note that it should only be used if you feed a pandas DataFrame into your pipeline.
All credit goes to Johannes Haupt, who provided a get_feature_names() that is resilient to transformers that don't have this method (see the blog post Extracting Column Names from the ColumnTransformer). I commented out the warnings, because I did not want them, and also the prepending of the transformation step to the column names; but it is easy to un-comment these as you like.
#import warnings
import numpy as np
import pandas as pd
import sklearn
import sklearn.pipeline
from sklearn.compose import ColumnTransformer

class ColumnTransformerWithNames(ColumnTransformer):

    def get_feature_names(column_transformer):
        """Get feature names from all transformers.

        Returns
        -------
        feature_names : list of strings
            Names of the features produced by transform.
        """
        # Remove the internal helper function
        #check_is_fitted(column_transformer)

        # Turn lookup into function for better handling with pipeline later
        def get_names(trans):
            # >> Original get_feature_names() method
            if trans == 'drop' or (
                    hasattr(column, '__len__') and not len(column)):
                return []
            if trans == 'passthrough':
                if hasattr(column_transformer, '_df_columns'):
                    if ((not isinstance(column, slice))
                            and all(isinstance(col, str) for col in column)):
                        return column
                    else:
                        return column_transformer._df_columns[column]
                else:
                    indices = np.arange(column_transformer._n_features)
                    return ['x%d' % i for i in indices[column]]
            if not hasattr(trans, 'get_feature_names'):
                # >>> Change: Return input column names if no method available
                # Turn error into a warning
                # warnings.warn("Transformer %s (type %s) does not "
                #               "provide get_feature_names. "
                #               "Will return input column names if available"
                #               % (str(name), type(trans).__name__))
                # For transformers without a get_feature_names method, use the input
                # names to the column transformer
                if column is None:
                    return []
                else:
                    return [#name + "__" +
                            f for f in column]

            return [#name + "__" +
                    f for f in trans.get_feature_names()]

        ### Start of processing
        feature_names = []

        # Allow transformers to be pipelines. Pipeline steps are named differently, so preprocessing is needed
        if type(column_transformer) == sklearn.pipeline.Pipeline:
            l_transformers = [(name, trans, None, None)
                              for step, name, trans in column_transformer._iter()]
        else:
            # For column transformers, follow the original method
            l_transformers = list(column_transformer._iter(fitted=True))

        for name, trans, column, _ in l_transformers:
            if type(trans) == sklearn.pipeline.Pipeline:
                # Recursive call on pipeline
                _names = column_transformer.get_feature_names(trans)
                # if pipeline has no transformer that returns names
                if len(_names) == 0:
                    _names = [#name + "__" +
                              f for f in column]
                feature_names.extend(_names)
            else:
                feature_names.extend(get_names(trans))

        return feature_names

    def transform(self, X):
        indices = X.index.values.tolist()
        original_columns = X.columns.values.tolist()
        X_mat = super().transform(X)
        new_cols = self.get_feature_names()
        # Note: .toarray() assumes the stacked output is a sparse matrix
        new_X = pd.DataFrame(X_mat.toarray(), index=indices, columns=new_cols)
        return new_X

    def fit_transform(self, X, y=None):
        super().fit_transform(X, y)
        return self.transform(X)
Then you can replace the calls to ColumnTransformer with ColumnTransformerWithNames. The output is a DataFrame, and this step now has a working get_feature_names().
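A minimal usage sketch of the class above, assuming the df and the two column lists from the question and an older scikit-learn release in which the private _iter(fitted=True) call used above still works (the step names 'scale' and 'onehot' are arbitrary):

# Minimal usage sketch; 'scale' and 'onehot' are illustrative step names.
preprocess = ColumnTransformerWithNames(
    transformers=[
        ('scale', StandardScaler(), train_continuous_cols),
        ('onehot', OneHotEncoder(), train_categorical_cols),
    ],
    # transform() above calls .toarray(), so keep the stacked output sparse
    sparse_threshold=1.0)

df_named = preprocess.fit_transform(df)   # a pandas DataFrame, not a numpy array
print(list(df_named.columns))             # scaler columns keep their input names,
                                          # encoder columns come from OneHotEncoder.get_feature_names()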
I assume that you are looking for a way to access the result of the transformer, which yields a numpy array.
ColumnTransformer has an attribute called transformers_:
From the documentation:
transformers_ : list
    The collection of fitted transformers as tuples of (name, fitted_transformer, column). `fitted_transformer` can be an estimator, 'drop', or 'passthrough'. In case there were no columns selected, this will be the unfitted transformer. If there are remaining columns, the final element is a tuple of the form: ('remainder', transformer, remaining_columns) corresponding to the ``remainder`` parameter. If there are remaining columns, then ``len(transformers_)==len(transformers)+1``, otherwise ``len(transformers_)==len(transformers)``.
So unfortunately this only provides information about the transformers themselves and the columns they are applied to, but not about the location of the resulting data, except for the following:
Note: The order of the columns in the transformed feature matrix follows the order of how the columns are specified in the transformers list.
So we know that the order of the output columns is the same as the order in which the columns are specified in the transformers list. In addition, we also know how many columns each of our transformer steps produces, since a StandardScaler() yields the same number of columns as the original data and an OneHotEncoder() yields a number of columns equal to the number of categories.
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder,StandardScaler
from sklearn.compose import ColumnTransformer, make_column_transformer
df = pd.DataFrame({'brand': ['aaaa', 'asdfasdf', 'sadfds', 'NaN'],
                   'category': ['asdf', 'asfa', 'asdfas', 'asd'],
                   'num1': [1, 1, 0, 0],
                   'target': [0.2, 0.11, 1.34, 1.123]})
train_continuous_cols = df.select_dtypes(include=["int64","float64"]).columns.tolist()
train_categorical_cols = df.select_dtypes(include=["object"]).columns.tolist()
# get n_categories for categorical features
n_categories = [df[x].nunique() for x in train_categorical_cols]
preprocess = make_column_transformer(
    (StandardScaler(), train_continuous_cols),
    (OneHotEncoder(), train_categorical_cols)
)
preprocessed_df = preprocess.fit_transform(df)
# the scaler yields 1 column per input column
indexes_scaler = list(range(0,len(train_continuous_cols)))
# the encoder yields a number of columns equal to the number of categories in the data
cum_index_encoder = [0] + list(np.cumsum(n_categories))
# the encoder indexes come after the scaler indexes
start_index_encoder = indexes_scaler[-1]+1
indexes_encoder = [x + start_index_encoder for x in cum_index_encoder]
# get both lower and upper bound of each index range
index_pairs = zip(indexes_encoder[:-1], indexes_encoder[1:])
This yields the following output:
print('Transformed {} continuous cols resulting in a df with shape:'.format(len(train_continuous_cols)))
print(preprocessed_df[:, indexes_scaler].shape)
Transformed 2 continuous cols resulting in a df with shape:
(4, 2)
for column, (start_id, end_id) in zip(train_categorical_cols, index_pairs):
print('Transformed column {} resulted in a df with shape:'.format(column))
print(preprocessed_df[:, start_id:end_id].shape)
Run Code Online (Sandbox Code Playgroud)
Transformed column brand resulted in a df with shape:
(4, 4)
Transformed column category resulted in a df with shape:
(4, 4)
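The same bookkeeping can be turned into actual column names by reading the learned categories off the fitted encoder; a minimal sketch assuming the fitted preprocess from above (make_column_transformer names the fitted steps 'standardscaler' and 'onehotencoder', and OneHotEncoder stores its learned categories in categories_):

# Minimal sketch, assuming the fitted `preprocess` and the column lists from above.
encoder = preprocess.named_transformers_['onehotencoder']

# Scaled columns keep their input names; each encoded column is '<input column>_<category>'.
feature_names = list(train_continuous_cols)
for col, cats in zip(train_categorical_cols, encoder.categories_):
    feature_names.extend('{}_{}'.format(col, cat) for cat in cats)

print(feature_names)
# e.g. ['num1', 'target', 'brand_NaN', 'brand_aaaa', ..., 'category_asd', ...]

# Wrap the transformed matrix with those names (densify first if it came back sparse)
dense = preprocessed_df.toarray() if hasattr(preprocessed_df, 'toarray') else preprocessed_df
labeled_df = pd.DataFrame(dense, columns=feature_names)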