kaggle Partial_Dependence_Plots
# Partial dependence plots
# They show how changing a single feature affects the model's final predictions.
# First fit a model, then take one row and repeatedly vary one feature to see how the prediction changes.
# A single row is not representative on its own,
# so repeat this for every row and average the results (a small sketch of this follows).
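# A minimal, self-contained sketch of that idea on synthetic data (the demo_* names are
# just for illustration): sweep one feature over a grid, force every row to that grid
# value, predict, and average the predictions at each grid point.
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor

rng = np.random.RandomState(0)
demo_X = rng.uniform(size=(200, 3))                          # 200 rows, 3 features
demo_y = 2 * demo_X[:, 0] + rng.normal(scale=0.1, size=200)  # target driven mostly by feature 0
demo_model = GradientBoostingRegressor().fit(demo_X, demo_y)

feature_idx = 0
grid = np.linspace(demo_X[:, feature_idx].min(), demo_X[:, feature_idx].max(), 10)
manual_pdp = []
for value in grid:
    X_mod = demo_X.copy()
    X_mod[:, feature_idx] = value                            # set the feature to this value for all rows
    manual_pdp.append(demo_model.predict(X_mod).mean())      # average prediction over all rows
# manual_pdp now traces how the average prediction changes as feature 0 changes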
import pandas as pd
from sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier
from sklearn.ensemble.partial_dependence import partial_dependence, plot_partial_dependence
from sklearn.preprocessing import Imputer   # pre-0.20 scikit-learn API, same vintage as the partial_dependence import above
import matplotlib.pyplot as plt

train_path = r"C:\Users\cbattle\Desktop\train.csv"
test_path = r"C:\Users\cbattle\Desktop\test.csv"
out_path = r"C:\Users\cbattle\Desktop\out.csv"

cols_to_use = ['YearBuilt', 'GrLivArea', 'TotRmsAbvGrd']  # module level so the plotting call below can see it

def get_some_data():
    data = pd.read_csv(train_path)
    y = data.SalePrice
    X = data[cols_to_use]
    my_imputer = Imputer()                   # fill missing values (column means by default)
    imputed_X = my_imputer.fit_transform(X)
    return imputed_X, y

X, y = get_some_data()
my_model = GradientBoostingRegressor()
my_model.fit(X, y)

# plot partial dependence for YearBuilt (index 0) and TotRmsAbvGrd (index 2)
my_plots = plot_partial_dependence(my_model, features=[0, 2], X=X,
                                   feature_names=cols_to_use, grid_resolution=10)
plt.show()
# print('ok')

# There is a function called partial_dependence to get the raw data making up this plot,
# rather than making the visual plot itself. This is useful if you want to control how it
# is visualized using a plotting package like Seaborn. With moderate effort, you could
# make much nicer looking plots.
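# A minimal sketch of that approach (one possible way, not the only one): reuse the fitted
# my_model / X from above and the partial_dependence import already made, pull out the raw
# grid values and averaged predictions, and plot them manually instead of using the built-in plot.
pdp, grid_axes = partial_dependence(my_model, [0], X=X, grid_resolution=10)
# for a regressor, pdp has shape (1, n_grid_points); grid_axes[0] holds the YearBuilt grid values
plt.plot(grid_axes[0], pdp[0])
plt.xlabel(cols_to_use[0])
plt.ylabel('partial dependence')
plt.show()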