Classification learning####
There are many different classification algorithms, for example:
- knn
- naive bayes
- regression
- dnn

For fitting functions, sklearn.linear_model handles the linear case and sklearn.preprocessing the nonlinear case; a sketch combining the two follows below.
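A minimal sketch of that pairing, assuming the nonlinear case is handled with PolynomialFeatures (the toy data and the degree-2 choice are illustrative assumptions, not from the notes):

```python
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

# toy 1-D data exactly following y = x^2 (illustrative only)
X = np.arange(10).reshape(-1, 1)
y = (X.ravel() ** 2).astype(float)

# linear function: fit y = w*x + b directly
lin = LinearRegression().fit(X, y)

# nonlinear function: expand x into [1, x, x^2], then fit a linear model on that
poly = PolynomialFeatures(degree=2)
lin_poly = LinearRegression().fit(poly.fit_transform(X), y)

print(lin.predict([[10]]))                       # a straight line extrapolates poorly here
print(lin_poly.predict(poly.transform([[10]])))  # close to 100
```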
Classification##
Basic classification models###
knn####
sklearn.neighbors.KNeighborsClassifier
- n_neighbors: the number of nearest neighbors k to use
- weights: how the k selected points are weighted when they vote on the class (uniform, distance)
- algorithm: the method used to find the nearest points (ball_tree, kd_tree, brute, auto); a sketch setting these parameters follows the example below
...
- Example:
from sklearn.neighbors import KNeighborsClassifier
X = [[0], [1], [2], [3]]             # toy training samples (illustrative values)
y = [0, 0, 1, 1]                     # their class labels
neigh = KNeighborsClassifier(n_neighbors=3)
neigh.fit(X, y)
print(neigh.predict([[1.1]]))        # -> [0]
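The parameters listed above can also be set explicitly when constructing the classifier. A small sketch on the same toy data (the particular values n_neighbors=3, weights='distance', algorithm='kd_tree' are arbitrary choices for illustration):

```python
from sklearn.neighbors import KNeighborsClassifier

X = [[0], [1], [2], [3]]
y = [0, 0, 1, 1]

# distance weighting makes closer neighbors count more in the vote;
# kd_tree selects the k-d tree algorithm for the neighbor search
knn = KNeighborsClassifier(n_neighbors=3, weights='distance', algorithm='kd_tree')
knn.fit(X, y)
print(knn.predict([[1.1]]))   # still [0] on this toy data
```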
Decision trees####
Constructs a classification decision tree; a small example follows the parameter list below.
sklearn.tree.DecisionTreeClassifier
- criterion: the criterion used to choose the splitting attribute; pass gini for the Gini index or entropy for information gain
- max_features: how many features to consider when looking for the best split at a node; it can be a fixed number, a percentage, or other settings, and defaults to all features
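Mirroring the knn example above, a minimal sketch of fitting DecisionTreeClassifier on toy data (the data values and the criterion='entropy' choice are illustrative assumptions):

```python
from sklearn.tree import DecisionTreeClassifier

X = [[0, 0], [1, 1], [2, 2], [3, 3]]   # toy training samples (illustrative values)
y = [0, 0, 1, 1]

# split on information gain; max_features=None means all features are considered
clf = DecisionTreeClassifier(criterion='entropy', max_features=None)
clf.fit(X, y)
print(clf.predict([[2.5, 2.5]]))       # -> [1]
```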
Human posture prediction###
This dataset is quite skewed, and the results are not very good.
# -*- coding: utf-8 -*-
"""
Created on Wed May 24 17:41:04 2017
@author: sfzyk
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.preprocessing import Imputer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
import os
def load_dataset(feature_paths, label_paths):
    feature = np.ndarray(shape=(0, 41))
    label = np.ndarray(shape=(0, 1))
    for file in feature_paths:
        # read comma-separated feature data with no header; '?' marks a missing value
        df = pd.read_table(file, delimiter=',', na_values='?', header=None)
        # imp is an Imputer instance used to preprocess the data:
        # missing entries are filled with the column mean
        imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
        imp.fit(df)
        df = imp.transform(df)
        # append this file's features
        feature = np.concatenate((feature, df))
    for file in label_paths:
        df = pd.read_table(file, header=None)
        label = np.concatenate((label, df))
    label = np.ravel(label)
    return feature, label
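Note that sklearn.preprocessing.Imputer was removed in later scikit-learn releases; on a current install the same mean imputation can be written with sklearn.impute.SimpleImputer, roughly as in this standalone sketch (the example array is mine):

```python
import numpy as np
from sklearn.impute import SimpleImputer

# two columns with one missing entry each (illustrative data)
data = np.array([[1.0, np.nan],
                 [3.0, 4.0],
                 [np.nan, 8.0]])

# fill each NaN with the mean of its column (2.0 and 6.0 here)
imp = SimpleImputer(missing_values=np.nan, strategy='mean')
print(imp.fit_transform(data))
```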
if __name__ == '__main__':
    os.chdir(r"D:\mechine_learning\mooc_data\bodydata")
    feature_paths = ['A.feature', 'B.feature', 'C.feature', 'D.feature', 'E.feature']
    #label_paths=['A.label','B.label','C.label']
    #x_train,y_train=load_dataset(feature_paths[:2],label_paths[:2])
    #x_test,y_test=load_dataset(feature_paths[2:],label_paths[2:])
    label_paths = ['A.label', 'B.label', 'C.label', 'D.label', 'E.label']
    # train on files A-D, test on file E
    x_train, y_train = load_dataset(feature_paths[:4], label_paths[:4])
    x_test, y_test = load_dataset(feature_paths[4:], label_paths[4:])
    # test_size=0.0: the split is used here only to shuffle the training data
    x_train, x_, y_train, y_ = train_test_split(x_train, y_train, test_size=0.0)
    '''
    mask=np.random.permutation(x_test.shape[0])[0:10000]
    x_test=x_test[mask,:]
    y_test=y_test[mask]
    x_train,y_train=x_train[0:100000,:],y_train[0:100000]
    '''
    '''
    print("Start training knn")
    knn = KNeighborsClassifier(algorithm='kd_tree')
    knn.fit(x_train, y_train)
    print("begin predict")
    answer_knn = knn.predict(x_test)
    print("knn done")

    print("Start training decision tree")
    DT = DecisionTreeClassifier()
    DT.fit(x_train, y_train)
    print("begin predict")
    answer_DT = DT.predict(x_test)
    print("DT done")

    print("Start training naive bayes")
    nb = GaussianNB()
    nb.fit(x_train, y_train)
    print("begin predict")
    answer_nb = nb.predict(x_test)
    print("naive bayes done!")

    print("knn")
    print(classification_report(y_test, answer_knn))
    print("DT")
    print(classification_report(y_test, answer_DT))
    print("nb")
    print(classification_report(y_test, answer_nb))
    '''
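The "skewed" remark at the top of this section presumably refers to class imbalance in the posture labels. Continuing from the script above (so y_train is the array returned by load_dataset; this check is my addition, not course code), the distribution can be inspected like this:

```python
import numpy as np

# count the samples per posture label; a very uneven distribution is what
# drags down the per-class precision/recall in classification_report
labels, counts = np.unique(y_train, return_counts=True)
for lab, cnt in zip(labels, counts):
    print(int(lab), cnt)
```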
Stock rise/fall prediction###
This is another task where the predictive power is marginal:
even when only predicting whether a single day rises or falls, the accuracy is only about 53%.
# -*- coding: utf-8 -*-
"""
Created on Fri May 26 18:59:17 2017
@author: sfzyk
"""
import pandas as pd
import numpy as np
from sklearn import svm
from sklearn import cross_validation
import os

os.chdir(r"D:\mechine_learning\mooc_data\stock")
data = pd.read_csv("000777.csv", encoding='gbk', parse_dates=[0], index_col=0)
data.sort_index(0, ascending=True, inplace=True)

dayfeature = 150                  # use the previous 150 trading days as features
featurenum = 5 * dayfeature       # 5 columns per day: close, low, high, open, volume
x = np.zeros((data.shape[0] - dayfeature, featurenum + 1))
y = np.zeros((data.shape[0] - dayfeature))

for i in range(0, data.shape[0] - dayfeature):
    # flatten the 150-day window of ['收盘价','最低价','最高价','开盘价','成交量']
    # (close, low, high, open, volume) into one feature row
    x[i, 0:featurenum] = np.array(
        data[i:i + dayfeature][['收盘价', '最低价', '最高价', '开盘价', '成交量']]
    ).reshape((1, featurenum))
    # the last feature is the opening price of the day being predicted
    x[i, featurenum] = data.ix[i + dayfeature]['开盘价']

for i in range(0, data.shape[0] - dayfeature):
    # label: 1 if that day closes at or above its open (rise), 0 otherwise (fall)
    if data.ix[i + dayfeature]['收盘价'] >= data.ix[i + dayfeature]['开盘价']:
        y[i] = 1
    else:
        y[i] = 0

clf = svm.SVC(kernel='rbf')
result = []
for i in range(5):
    # 5 random 80/20 splits; record the test accuracy of each
    x_train, x_test, y_train, y_test = cross_validation.train_test_split(x, y, test_size=0.2)
    clf.fit(x_train, y_train)
    result.append(np.mean(y_test == clf.predict(x_test)))
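The original script stops before reporting anything; the ~53% figure quoted above corresponds to the mean of result, so a line like the following (my addition) prints it. Note also that on current scikit-learn releases train_test_split lives in sklearn.model_selection rather than the old sklearn.cross_validation module.

```python
# average test accuracy over the 5 random 80/20 splits (roughly 0.53 per the note above)
print("svm classifier mean accuracy:", np.mean(result))
```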