Perceptron Algorithm in C++

We can estimate the weight values for our training data using stochastic gradient descent.

Stochastic gradient descent requires two parameters:

  • Learning Rate: Used to limit the amount each weight is corrected each time it is updated.
  • Epochs: The number of times to run through the training data while updating the weights.

These, along with the training data, will be the arguments to the function.
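As a minimal sketch, these map directly onto the signature of the training routine defined later in this post (trainW, with the weights passed by reference so the function can fit them in place):

#include <vector>

// rows of training data, weights to fit, learning rate, number of epochs
template <typename DataType, typename WeightType>
void trainW(std::vector<std::vector<DataType> > &vv, std::vector<WeightType> &weights,
            double l_rate, int n_epoch);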

There are 3 loops we need to perform in the function:

  1. Loop over each epoch.
  2. Loop over each row in the training data for an epoch.
  3. Loop over each weight and update it for a row in an epoch.

As you can see, we update each weight for each row in the training data, each epoch.
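Stripped of the printing and bookkeeping, the three loops look like this (a structural sketch only; sgdSketch and predictRow are hypothetical names, and the full implementation below uses sign() instead):

#include <vector>

// Sketch of the three nested SGD loops; assumes each row holds the bias,
// the features, and the expected output in its last column.
double predictRow(const std::vector<float> &row, const std::vector<float> &w) {
    double s = 0.0;
    for (size_t k = 0; k < w.size(); ++k) s += row[k] * w[k];   // weighted sum
    return s >= 0.0 ? 1.0 : 0.0;                                // step activation
}

void sgdSketch(std::vector<std::vector<float> > &vv, std::vector<float> &weights,
               double l_rate, int n_epoch) {
    for (int epoch = 0; epoch < n_epoch; ++epoch) {             // 1. each epoch
        for (size_t row = 0; row < vv.size(); ++row) {          // 2. each row
            double error = vv[row].back() - predictRow(vv[row], weights);
            for (size_t k = 0; k < weights.size(); ++k) {       // 3. each weight
                weights[k] += l_rate * error * vv[row][k];
            }
        }
    }
}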

Training stops when either:

  the iteration error falls below a user-specified error threshold, or

  a predetermined number of iterations (epochs) has been completed.
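The implementation below uses only the second criterion (a fixed n_epoch). As a minimal sketch of the first, the epoch loop can break out early once the summed error for a pass drops below a threshold; trainOneEpoch and error_threshold here are hypothetical, not part of the code below:

#include <iostream>
#include <vector>

// Hypothetical helper: performs one full pass over the data, updates the
// weights, and returns that pass's summed squared error.
double trainOneEpoch(std::vector<std::vector<float> > &vv,
                     std::vector<float> &weights, double l_rate);

void trainWithThreshold(std::vector<std::vector<float> > &vv, std::vector<float> &weights,
                        double l_rate, int n_epoch, double error_threshold) {
    for (int epoch = 0; epoch < n_epoch; ++epoch) {
        double sum_error = trainOneEpoch(vv, weights, l_rate);
        if (sum_error < error_threshold) {       // criterion 1: error small enough
            std::cout << "converged at epoch " << epoch << std::endl;
            break;
        }
    }                                            // criterion 2: n_epoch passes done
}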

Weights are updated based on the error the model made. The error is calculated as the difference between the expected output value and the prediction made with the candidate weights.

Notice that learning only occurs when an error is made; otherwise the weights are left unchanged.
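In code, the update for a single row reduces to a few lines (a sketch; updateRow is a hypothetical helper restating the inner loop of trainW):

#include <vector>

// Move each weight toward the input, scaled by the learning rate and the
// error; when error == 0 every weight stays exactly where it was.
void updateRow(std::vector<float> &weights, const std::vector<float> &row,
               double expected, double predicted, double l_rate) {
    double error = expected - predicted;         // expected output minus prediction
    for (size_t k = 0; k < weights.size(); ++k) {
        weights[k] += l_rate * error * row[k];
    }
}

The complete program is below.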

#include <iostream>
#include <string>
#include <fstream>
#include <sstream>
#include <vector>
#include <cmath>

// Prediction: compute the weighted sum of the inputs and apply a step
// activation, returning 1.0 if the sum is non-negative and 0.0 otherwise.
template <typename DataType, typename WeightType>
double sign(const std::vector<DataType> &data, const std::vector<WeightType> &weights) {
    double result = 0.0;

    for (size_t i = 0; i < weights.size(); ++i) {
        result += data.at(i) * weights.at(i);
    }

    return result >= 0.0 ? 1.0 : 0.0;
}

// Train the weights with stochastic gradient descent: for every row in every
// epoch, predict, compute the error, and nudge each weight by
// l_rate * error * input.
template <typename DataType, typename WeightType>
void trainW(std::vector<std::vector<DataType> > &vv, std::vector<WeightType> &weights, double l_rate, int n_epoch) {
    std::vector<DataType> v_data;

    // start from zero weights; any values the caller set are overwritten
    for (size_t i = 0; i < weights.size(); ++i) {
        weights.at(i) = 0.0;
    }

    for (int epoch = 0; epoch < n_epoch; ++epoch) {
        double sum_error = 0.0;

        for (size_t j = 0; j < vv.size(); ++j) {
            // copy the input columns (bias + features); the label stays in the last column
            v_data.clear();
            for (size_t k = 0; k < weights.size(); ++k) {
                v_data.push_back(vv[j][k]);
            }

            // echo the row being processed
            for (typename std::vector<DataType>::iterator it = v_data.begin(); it != v_data.end(); ++it) {
                std::cout << *it << " ";
            }
            std::cout << std::endl;

            double prediction = sign(v_data, weights);
            double error = vv[j].back() - prediction;
            std::cout << "expected: " << vv[j].back() << " prediction: " << prediction << " error: " << error << std::endl;

            sum_error += error * error;

            // perceptron update rule: w_k += l_rate * error * x_k
            for (size_t k = 0; k < weights.size(); ++k) {
                weights.at(k) += l_rate * error * vv[j][k];
            }
        }
        std::cout << "epoch = " << epoch << " error = " << sum_error << std::endl;
    }

    // print the learned weights
    for (size_t i = 0; i < weights.size(); ++i) {
        std::cout << weights.at(i) << " ";
    }
    std::cout << std::endl;
}

// Predict each row with the trained weights and append the prediction
// to the row as a new last column.
template <typename DataType, typename WeightType>
void predictTestData(std::vector<std::vector<DataType> > &vv, const std::vector<WeightType> &weights) {
    std::vector<DataType> v_data;

    for (size_t i = 0; i < vv.size(); ++i) {
        v_data.clear();
        for (size_t j = 0; j < weights.size(); ++j) {
            v_data.push_back(vv[i][j]);
        }

        double signResult = sign(v_data, weights);
        vv[i].push_back(signResult);
    }
}

// Print the number of rows followed by every row of the data set.
template <typename DataType>
void DisplayData(const std::vector<std::vector<DataType> > &vv) {
    std::cout << "the number of data: " << vv.size() << std::endl;

    for (size_t i = 0; i < vv.size(); ++i) {
        for (size_t j = 0; j < vv[i].size(); ++j) {
            std::cout << vv[i][j] << " ";
        }
        std::cout << std::endl;
    }
}

int main() {
    std::ifstream infile_feat("PLA.txt");
    if (!infile_feat) {
        std::cerr << "could not open PLA.txt" << std::endl;
        return 1;
    }

    std::string feature;
    float feat_onePoint;
    std::vector<float> lines;
    std::vector<std::vector<float> > lines_feat;

    // three weights: bias + two features (trainW re-initializes them to zero)
    std::vector<float> v_weights;
    v_weights.push_back(-0.1);
    v_weights.push_back(0.206);
    v_weights.push_back(-0.234);

    // read the file line by line; each row gets a leading 1 for the bias term
    while (std::getline(infile_feat, feature)) {
        if (feature.empty())
            continue;   // skip blank lines instead of stopping at the first one
        std::stringstream stringin(feature);
        lines.clear();

        lines.push_back(1);
        while (stringin >> feat_onePoint) {
            lines.push_back(feat_onePoint);
        }
        lines_feat.push_back(lines);
    }

    infile_feat.close();

    std::cout<<"display train data: "<<std::endl;

    DisplayData(lines_feat);

    double l_rate=0.1;

    int n_epoch=5;

    trainW(lines_feat, v_weights, l_rate, n_epoch);

    //predictTestData(lines_feat, v_weights);

    //std::cout<<"the predicted: "<<std::endl;
    //DisplayData(lines_feat);

    return 0;
}
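For reference, the program expects a file named PLA.txt in the working directory. Given three weights (the bias plus two features) and the label read from each row's last column, every line should hold two feature values followed by a 0/1 label, for example (hypothetical values):

2.7 2.5 0
1.4 2.3 0
3.3 4.4 0
7.6 2.7 1
5.3 2.0 1
6.9 1.7 1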
