"""
Created on Wed Aug 23 13:36:42 2017
@author: Administrator
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_score
from xgboost import XGBRegressor
# Load the Kaggle House Prices data; the first column (Id) becomes the index
train_df = pd.read_excel("train.xls", index_col=0)
test_df = pd.read_excel("test.xls", index_col=0)
# Train on log1p(SalePrice) so errors are measured on a log scale; invert with expm1 later
y_train = np.log1p(train_df.pop('SalePrice'))
# Stack train and test so all preprocessing is applied to both consistently
all_df = pd.concat((train_df, test_df), axis=0)
# Per-column means of the numeric features, used to impute missing values
mean_cols = all_df.mean(numeric_only=True)
print(mean_cols.head(10))
# One-hot encode the categorical columns so the feature matrix is purely numeric
all_dummy_df = pd.get_dummies(all_df)
all_dummy_df = all_dummy_df.fillna(mean_cols)
print(all_dummy_df.isnull().sum().sum())
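# Optional check (assumption, not in the original script): after one-hot encoding
# every column should be numeric, which is what XGBoost expects.
print(all_dummy_df.dtypes.value_counts())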
# Standardize the numeric columns to zero mean and unit variance
numeric_cols = all_df.columns[all_df.dtypes != 'object']
print(numeric_cols)
numeric_col_means = all_dummy_df.loc[:, numeric_cols].mean()
numeric_col_std = all_dummy_df.loc[:, numeric_cols].std()
all_dummy_df.loc[:, numeric_cols] = (all_dummy_df.loc[:, numeric_cols] - numeric_col_means) / numeric_col_std
# Split the preprocessed frame back into train/test using the original row indices
dummy_train_df = all_dummy_df.loc[train_df.index]
dummy_test_df = all_dummy_df.loc[test_df.index]
print(dummy_train_df.shape, dummy_test_df.shape)
X_train = dummy_train_df.values
X_test = dummy_test_df.values
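# Optional sanity check (assumption, not in the original script): train and test
# must share the exact same column layout before fitting and predicting.
assert dummy_train_df.columns.equals(dummy_test_df.columns)
print(X_train.shape, X_test.shape)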
# Cross-validate XGBoost over a small max_depth grid
params = [1, 2, 3, 4, 5, 6]
test_scores = []
for param in params:
    clf = XGBRegressor(max_depth=param)
    # RMSE on the log1p target; sklearn negates MSE, so flip the sign before the sqrt
    test_score = np.sqrt(-cross_val_score(clf, X_train, y_train, cv=100, scoring='neg_mean_squared_error'))
    test_scores.append(np.mean(test_score))
plt.plot(params, test_scores)
plt.title('max_depth vs CV Error')
plt.show()
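# Optional addition (assumption, not in the original script): pick the depth with
# the lowest cross-validated RMSE programmatically instead of reading the plot;
# best_depth is a name introduced here for illustration.
best_depth = params[int(np.argmin(test_scores))]
print('best max_depth from CV:', best_depth)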
# Refit on the full training set; note that max_depth=50 is far outside the
# swept 1-6 range, so the sweep's best depth would normally be used here
xgb = XGBRegressor(max_depth=50)
xgb.fit(X_train, y_train)
# Undo the log1p transform to get predictions back on the SalePrice scale
y_final = np.expm1(xgb.predict(X_test))
submission_df = pd.DataFrame(data={'Id': test_df.index, 'SalePrice': y_final})
print(submission_df)
submission_df.to_csv('submission_xgboosting.csv', columns=['Id', 'SalePrice'], index=False)
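# Optional check (assumption, not in the original script): reload the submission
# and confirm it has one row per test house and no missing predictions;
# check_df is a name introduced here for illustration.
check_df = pd.read_csv('submission_xgboosting.csv')
print(check_df.shape, check_df['SalePrice'].isnull().sum())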