A Python Implementation of K-Nearest Neighbors (KNN)
# Read a CSV file and report the mean root-mean-square error (RMSE)
# over 5-fold cross-validation.
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MinMaxScaler


def rmse_metric(actual, predicted):
    """Root-mean-square error between the true and predicted labels."""
    sum_err = 0.0
    for i in range(len(actual)):
        err = predicted[i] - actual[i]
        sum_err += err ** 2
    mean_err = sum_err / len(actual)  # mean squared error
    return np.sqrt(mean_err)


if __name__ == '__main__':
    # Load the data: the last column is the label, the rest are features
    # (all feature columns are expected to be numeric).
    dataset = np.array(pd.read_csv("abalone.csv", sep=',', header=None))
    data, label = dataset[:, :-1], dataset[:, -1]

    # Scale every feature to [0, 1] so that no single feature dominates
    # the distance computation used by KNN.
    scaler = MinMaxScaler()
    data = scaler.fit_transform(data)

    # 5-fold cross-validation.
    k_cross = KFold(n_splits=5, random_state=0, shuffle=True)
    scores = np.array([])
    index = 0
    for train_index, test_index in k_cross.split(data):
        train_data, train_label = data[train_index, :], label[train_index]
        test_data, test_label = data[test_index, :], label[test_index]

        # KNN with k = 5 neighbors.
        model = KNeighborsClassifier(n_neighbors=5)
        model.fit(train_data, train_label)
        pred = model.predict(test_data)

        score = rmse_metric(test_label, pred)
        scores = np.append(scores, score)
        print('score[{}] = {}'.format(index, score))
        index += 1

    print('mean_rmse = {}'.format(np.mean(scores)))
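Since the abalone label (the number of rings) is ordinal and the evaluation metric is RMSE, the same experiment is often framed as regression rather than classification. Below is a minimal alternative sketch, assuming the same preprocessed abalone.csv with numeric features and the label in the last column: it swaps in KNeighborsRegressor and wraps the scaler in a Pipeline so that MinMaxScaler is re-fit on each training fold instead of on the full dataset, which avoids leaking test-fold statistics into training. The file name and k = 5 are carried over from the script above; the 'neg_root_mean_squared_error' scoring string requires a reasonably recent scikit-learn.

# Alternative sketch: KNN regression with scaling done inside each fold.
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold, cross_val_score
from sklearn.neighbors import KNeighborsRegressor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler

dataset = np.array(pd.read_csv("abalone.csv", sep=',', header=None))
data, label = dataset[:, :-1], dataset[:, -1]

# The pipeline re-fits the scaler on the training part of every fold,
# so no statistics from the test fold leak into training.
model = make_pipeline(MinMaxScaler(), KNeighborsRegressor(n_neighbors=5))

k_cross = KFold(n_splits=5, random_state=0, shuffle=True)
# cross_val_score returns negative RMSE values with this scoring string.
neg_rmse = cross_val_score(model, data, label, cv=k_cross,
                           scoring='neg_root_mean_squared_error')
print('mean_rmse = {}'.format(-neg_rmse.mean()))

Whether the classifier or the regressor gives a lower RMSE depends on the data; the regressor averages the neighbors' ring counts, while the classifier picks the most common one.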
Author: tiansz
Original post: https://www.cnblogs.com/tiansz/p/16319618.html
License: This work is licensed under the Creative Commons Attribution-NonCommercial-NoDerivatives 2.5 China Mainland license.