Implementation of the KNN (k-nearest-neighbours) algorithm

Knn.h

#pragma once

// K-nearest-neighbours classifier over a dense numeric dataset.
// The dataset is an m x n matrix: the first n-1 columns are features,
// the last column is the class label. Features are standardized
// (z-scored) in place at construction time.
class Knn
{
private:
 double** trainingDataset;   // m rows of n doubles; NOT owned (caller allocates/frees)
 double* arithmeticMean;     // per-feature mean, length n-1; owned, freed in dtor
 double* standardDeviation;  // per-feature std deviation, length n-1; owned, freed in dtor
 int m, n;                   // m = number of rows, n = columns (features + 1 label)

 // Standardize one row of n-1 features in place using the stored mean/sd.
 void RescaleDistance(double* row);
 // Apply RescaleDistance to every training row.
 void RescaleTrainingDataset();
 // Fill arithmeticMean from the training data.
 void ComputeArithmeticMean();
 // Fill standardDeviation from the training data (requires mean first).
 void ComputeStandardDeviation();

 // Euclidean distance between two rows over the n-1 feature columns.
 double Distance(double* x, double* y);
public:
 // Takes (but does not own) the dataset; standardizes it in place.
 Knn(double** trainingDataset, int m, int n);
 ~Knn();
 // Classify `test` by majority label of the k nearest training rows;
 // also writes the predicted label into test[n-1].
 double Vote(double* test, int k);
};

 

Knn.cpp

 

#include "Knn.h"
#include <cmath>
#include <map>

using namespace std;

// Store the dataset (non-owning) and its dimensions, then compute the
// per-feature statistics and standardize the training rows in place.
// Order matters: the mean must exist before the standard deviation,
// and both must exist before rescaling.
Knn::Knn(double** trainingDataset, int m, int n)
 : trainingDataset(trainingDataset), m(m), n(n)
{
 ComputeArithmeticMean();
 ComputeStandardDeviation();
 RescaleTrainingDataset();
}

// Compute the column-wise arithmetic mean of the n-1 feature columns
// (the last column is the label and is excluded).
void Knn::ComputeArithmeticMean()
{
 arithmeticMean = new double[n - 1];

 for(int i = 0; i < n - 1; i++)
 {
  double sum = 0;
  for(int j = 0; j < m; j++)
  {
   sum += trainingDataset[j][i];
  }

  // BUG FIX: the sum runs over the m rows, so the divisor must be m,
  // not the column count n as in the original code.
  arithmeticMean[i] = sum / m;
 }
}

// Compute the column-wise (population) standard deviation of the n-1
// feature columns. Requires arithmeticMean to be filled in first.
void Knn::ComputeStandardDeviation()
{
 standardDeviation = new double[n - 1];

 for(int i = 0; i < n - 1; i++)
 {
  double sum = 0;
  for(int j = 0; j < m; j++)
  {
   double diff = trainingDataset[j][i] - arithmeticMean[i];
   sum += diff * diff;
  }

  // BUG FIX: the squared deviations are summed over the m rows, so the
  // divisor must be m, not the column count n as in the original code.
  standardDeviation[i] = sqrt(sum / m);
 }
}

// Standardize (z-score) the n-1 feature values of `row` in place using
// the stored per-column mean and standard deviation.
void Knn::RescaleDistance(double* row)
{
 for(int i = 0; i < n - 1; i++)
 {
  // ROBUSTNESS FIX: a constant feature column has sd == 0; the original
  // divided by zero here (inf/NaN). Map such features to 0 instead,
  // which is the conventional z-score for a constant column.
  if(standardDeviation[i] != 0)
   row[i] = (row[i] - arithmeticMean[i]) / standardDeviation[i];
  else
   row[i] = 0;
 }
}

// Standardize every training row in place, one row at a time.
void Knn::RescaleTrainingDataset()
{
 for(int row = 0; row < m; row++)
  RescaleDistance(trainingDataset[row]);
}

// Release the statistics arrays allocated in ComputeArithmeticMean /
// ComputeStandardDeviation. trainingDataset is NOT freed here: it is
// allocated and owned by the caller (see main).
Knn::~Knn()
{
 delete[] arithmeticMean;
 delete[] standardDeviation;
}

// Euclidean distance between two rows, computed over the n-1 feature
// columns only (the trailing label column is excluded).
double Knn::Distance(double* x, double* y)
{
 double sumOfSquares = 0;
 for(int i = 0; i < n - 1; i++)
 {
  double diff = x[i] - y[i];
  sumOfSquares += diff * diff;
 }
 return sqrt(sumOfSquares);
}

double Knn::Vote(double* test, int k)
{
 RescaleDistance(test);

 double distance;

 map::iterator max;

 map mins;

 for(int i = 0; i < m; i++)
 {
  distance = Distance(test, trainingDataset[i]);
  if(mins.size() < k)
   mins.insert(map::value_type(i, distance));
  else
  {
   max = mins.begin();
   for(map::iterator it = mins.begin(); it != mins.end(); it++)
   {
    if(it->second > max->second)
     max = it;
   }
   if(distance < max->second)
   {
    mins.erase(max);
    mins.insert(map::value_type(i, distance));
   }
  }
 }

 map votes;
 double temp;

 for(map::iterator it = mins.begin(); it != mins.end(); it++)
 {
  temp = trainingDataset[it->first][n-1];
  map::iterator voteIt = votes.find(temp);
  if(voteIt != votes.end())
   voteIt->second ++;
  else
   votes.insert(map::value_type(temp, 1));
 }

 map::iterator maxVote = votes.begin();

 for(map::iterator it = votes.begin(); it != votes.end(); it++)
 {
  if(it->second > maxVote->second)
   maxVote = it;
 }

 test[n-1] = maxVote->first;

 return maxVote->first;
}

 

main.cpp

 

#include <iostream>
#include "Knn.h"

using namespace std;

int main(const int& argc, const char* argv[])
{
 double** train = new double* [14];
 for(int i = 0; i < 14; i ++)
  train[i] = new double[5];
 double trainArray[14][5] =
 {
  {0, 0, 0, 0, 0},
  {0, 0, 0, 1, 0},
  {1, 0, 0, 0, 1},
  {2, 1, 0, 0, 1},
  {2, 2, 1, 0, 1},
  {2, 2, 1, 1, 0},
  {1, 2, 1, 1, 1},
  {0, 1, 0, 0, 0},
  {0, 2, 1, 0, 1},
  {2, 1, 1, 0, 1},
  {0, 1, 1, 1, 1},
  {1, 1, 0, 1, 1},
  {1, 0, 1, 0, 1},
  {2, 1, 0, 1, 0}
 };

 for(int i = 0; i < 14; i ++)
  for(int j = 0; j < 5; j ++)
   train[i][j] = trainArray[i][j];

 Knn knn(train, 14, 5);

 double test[5] = {2, 2, 0, 1, 0};
 cout<

posted on   小橋流水  阅读(161)  评论(0编辑  收藏  举报

编辑推荐:
· 如何编写易于单元测试的代码
· 10年+ .NET Coder 心语,封装的思维:从隐藏、稳定开始理解其本质意义
· .NET Core 中如何实现缓存的预热?
· 从 HTTP 原因短语缺失研究 HTTP/2 和 HTTP/3 的设计差异
· AI与.NET技术实操系列:向量存储与相似性搜索在 .NET 中的实现
阅读排行:
· 周边上新:园子的第一款马克杯温暖上架
· Open-Sora 2.0 重磅开源!
· 分享 3 个 .NET 开源的文件压缩处理库,助力快速实现文件压缩解压功能!
· Ollama——大语言模型本地部署的极速利器
· [AI/GPT/综述] AI Agent的设计模式综述

导航

统计

点击右上角即可分享
微信分享提示