In data analysis and machine learning projects, missing data is a common problem. Handling missing values is an important step, because it can significantly affect model performance. This article explores several commonly used imputation techniques: filling with the constant 0, mean imputation, k-nearest-neighbors imputation, and iterative imputation. Two datasets are used for the experiments: the diabetes dataset and the California housing dataset.
The diabetes dataset contains 442 samples with 10 feature variables each; the target is to predict disease progression. The California housing dataset is larger, with 20,640 samples and 8 feature variables; the target is to predict the median house value of California districts. Since neither dataset contains missing values of its own, some values are removed artificially to create versions with synthetic missing data. The performance of a RandomForestRegressor on the complete original datasets is then compared with its performance on the artificially corrupted datasets after applying the different imputation techniques.
First, the two datasets need to be loaded. The diabetes dataset ships with scikit-learn, while the California housing dataset has to be downloaded. To speed up the computation, only the first 300 samples are used, but the full datasets could be used as well.
import numpy as np
from sklearn.datasets import fetch_california_housing, load_diabetes
rng = np.random.RandomState(42)
X_diabetes, y_diabetes = load_diabetes(return_X_y=True)
X_california, y_california = fetch_california_housing(return_X_y=True)
X_california = X_california[:300]
y_california = y_california[:300]
X_diabetes = X_diabetes[:300]
y_diabetes = y_diabetes[:300]
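As a quick sanity check (optional), the shapes of the subsets actually used can be printed:
print(X_diabetes.shape)    # (300, 10)
print(X_california.shape)  # (300, 8)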
Next, define a function that adds missing values to the datasets. Missing values are introduced in 75% of the rows, with one feature set to NaN in each affected row.
def add_missing_values(X_full, y_full):
    n_samples, n_features = X_full.shape

    # 75% of the rows receive exactly one missing value
    missing_rate = 0.75
    n_missing_samples = int(n_samples * missing_rate)

    missing_samples = np.zeros(n_samples, dtype=bool)
    missing_samples[:n_missing_samples] = True
    rng.shuffle(missing_samples)

    # pick one feature per affected row and set it to NaN
    missing_features = rng.randint(0, n_features, n_missing_samples)
    X_missing = X_full.copy()
    X_missing[missing_samples, missing_features] = np.nan
    y_missing = y_full.copy()

    return X_missing, y_missing
X_miss_california, y_miss_california = add_missing_values(X_california, y_california)
X_miss_diabetes, y_miss_diabetes = add_missing_values(X_diabetes, y_diabetes)
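As a rough check (optional), 75% of the rows in each corrupted matrix should now contain a NaN, each in a single feature column:
print(np.isnan(X_miss_diabetes).any(axis=1).mean())    # 0.75
print(np.isnan(X_miss_california).any(axis=1).mean())  # 0.75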
Now, write a function that scores the results obtained with differently imputed data. Each imputer is examined separately.
rng = np.random.RandomState(0)
from sklearn.ensemble import RandomForestRegressor
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer, KNNImputer, SimpleImputer
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
N_SPLITS = 4
regressor = RandomForestRegressor(random_state=0)
Besides filling in the missing values, the imputers also have an add_indicator parameter, which appends binary columns marking where values were missing; this missingness pattern may itself carry some information.
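To make the effect of add_indicator concrete, here is a small standalone illustration on a toy array (not part of the benchmark; the array is made up for demonstration): the imputer appends one binary indicator column for every feature that contained missing values during fit.
import numpy as np
from sklearn.impute import SimpleImputer

X_toy = np.array([[1.0, 2.0], [np.nan, 3.0], [4.0, np.nan]])
imp = SimpleImputer(strategy="mean", add_indicator=True)
print(imp.fit_transform(X_toy))
# Two imputed feature columns followed by two missing-indicator columns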
def get_scores_for_imputer(imputer, X_missing, y_missing):
    estimator = make_pipeline(imputer, regressor)
    impute_scores = cross_val_score(
        estimator, X_missing, y_missing, scoring="neg_mean_squared_error", cv=N_SPLITS
    )
    return impute_scores
First, estimate the score on the original, complete data.
def get_full_score(X_full, y_full):
    full_scores = cross_val_score(
        regressor, X_full, y_full, scoring="neg_mean_squared_error", cv=N_SPLITS
    )
    return full_scores.mean(), full_scores.std()
Now, estimate the score on the data where missing values are replaced by 0.
def get_impute_zero_score(X_missing, y_missing):
    imputer = SimpleImputer(
        missing_values=np.nan, add_indicator=True, strategy="constant", fill_value=0
    )
    zero_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing)
    return zero_impute_scores.mean(), zero_impute_scores.std()
Next, estimate the score when missing values are imputed with k-nearest neighbors.
def get_impute_knn_score(X_missing, y_missing):
    imputer = KNNImputer(missing_values=np.nan, add_indicator=True)
    knn_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing)
    return knn_impute_scores.mean(), knn_impute_scores.std()
Then, estimate the score when missing values are imputed with the feature mean.
def get_impute_mean(X_missing, y_missing):
    imputer = SimpleImputer(missing_values=np.nan, strategy="mean", add_indicator=True)
    mean_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing)
    return mean_impute_scores.mean(), mean_impute_scores.std()
Finally, estimate the score when missing values are imputed iteratively, where each feature with missing values is modeled as a function of the other features.
def get_impute_iterative(X_missing, y_missing):
    imputer = IterativeImputer(
        missing_values=np.nan, add_indicator=True, random_state=0,
        n_nearest_features=3, max_iter=1, sample_posterior=True,
    )
    iterative_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing)
    return iterative_impute_scores.mean(), iterative_impute_scores.std()
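The plotting code below expects the scores to have been collected into the arrays mses_diabetes, stds_diabetes, mses_california and stds_california, together with matching labels in x_labels. A minimal assembly sketch, assuming the ordering full data / zero / mean / KNN / iterative (the label wording is an assumption), is:
x_labels = [
    "Full data",
    "Zero imputation",
    "Mean imputation",
    "KNN imputation",
    "Iterative imputation",
]

mses_diabetes = np.zeros(5)
stds_diabetes = np.zeros(5)
mses_california = np.zeros(5)
stds_california = np.zeros(5)

mses_diabetes[0], stds_diabetes[0] = get_full_score(X_diabetes, y_diabetes)
mses_california[0], stds_california[0] = get_full_score(X_california, y_california)

mses_diabetes[1], stds_diabetes[1] = get_impute_zero_score(X_miss_diabetes, y_miss_diabetes)
mses_california[1], stds_california[1] = get_impute_zero_score(X_miss_california, y_miss_california)

mses_diabetes[2], stds_diabetes[2] = get_impute_mean(X_miss_diabetes, y_miss_diabetes)
mses_california[2], stds_california[2] = get_impute_mean(X_miss_california, y_miss_california)

mses_diabetes[3], stds_diabetes[3] = get_impute_knn_score(X_miss_diabetes, y_miss_diabetes)
mses_california[3], stds_california[3] = get_impute_knn_score(X_miss_california, y_miss_california)

mses_diabetes[4], stds_diabetes[4] = get_impute_iterative(X_miss_diabetes, y_miss_diabetes)
mses_california[4], stds_california[4] = get_impute_iterative(X_miss_california, y_miss_california)

# cross_val_score used neg_mean_squared_error, so negate to obtain MSE values
mses_diabetes = mses_diabetes * -1
mses_california = mses_california * -1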
Finally, the results are visualized.
import matplotlib.pyplot as plt
n_bars = len(mses_diabetes)
xval = np.arange(n_bars)
colors = ["r", "g", "b", "orange", "black"]
plt.figure(figsize=(12, 6))
ax1 = plt.subplot(121)
for j in xval:
    ax1.barh(j, mses_diabetes[j], xerr=stds_diabetes[j], color=colors[j], alpha=0.6, align="center")
ax1.set_title("Imputation Techniques with Diabetes Data")
ax1.set_xlim(left=np.min(mses_diabetes)*0.9, right=np.max(mses_diabetes)*1.1)
ax1.set_yticks(xval)
ax1.set_xlabel("MSE")
ax1.invert_yaxis()
ax1.set_yticklabels(x_labels)
ax2 = plt.subplot(122)
for j in xval:
    ax2.barh(j, mses_california[j], xerr=stds_california[j], color=colors[j], alpha=0.6, align="center")
ax2.set_title("Imputation Techniques with California Data")
ax2.set_yticks(xval)
ax2.set_xlabel("MSE")
ax2.invert_yaxis()
ax2.set_yticklabels([""]*n_bars)
plt.show()