# fj911  (stray text from the page scrape — not valid Python, commented out)

# -*- coding: utf-8 -*-
"""
Created on Thu Aug 24 15:14:07 2017

@author: Administrator
"""

import pymongo
from pymongo import MongoClient
import numpy as np
import pandas as  pd
from pandas import DataFrame,Series
from numpy import row_stack,column_stack
from dateutil.parser import parse
from matplotlib.pylab import date2num
import random
from collections import Counter



# City/region filter (Shanghai / Yangpu) and the short-list of residential
# districts whose coordinates we need.
cy_rg=["上海","杨浦"]
dirtic_list=["大运盛城","凤城三村","凤城六村",
             "凤城花园","东方名园"]

# Import longitude/latitude and convert the coordinates into a DataFrame.

# NOTE(review): hard-coded internal MongoDB host — assumes the LAN is reachable.
client1 = MongoClient('192.168.0.136',27017)
db1 = client1.fangjia
seaweed1 = db1.seaweed

#print(seaweed.find_one({"city":"上海","region":"浦东","name":"康桥半岛二期"},{"lat2":1,"lng2":1}))
'''
cy_rg=["上海","杨浦"]
dirtic_list=["凤城四村","凤城三村","大运盛城",
             "新时代富嘉花园","新时代花园"]
'''


# Query the district documents for the chosen city/region and project only
# the fields needed downstream (coordinates + identifying metadata).
query1 = {"status":0,"cat":"district","city":cy_rg[0],"region":cy_rg[1], "name":{"$in":dirtic_list}}
fields1 = {"lat2":1,"lng2":1, "city":1,"region":1,"cat":1,"name":1}

lct= list()
for s in seaweed1.find(query1, fields1):
    lct.append(s)

lf=DataFrame(lct)

le=lf

# Re-index by district name so coordinates can be looked up by name later.
le.index=le['name']

# lr: per-district longitude/latitude table, indexed by district name.
lr=le[['lng2','lat2']]


# Import the listings data from the company's database.
client = MongoClient('192.168.10.88',27777)
db = client.fangjia
seawater = db.seawater
seawater.find_one()   # connectivity smoke-check; result is discarded

# Query the for-sale listings in the chosen districts dated after 2017-08-15.
query = {"city":cy_rg[0],"cat":"sell","region":cy_rg[1],
         "district_name":{"$in":dirtic_list},


         "p_date":{"$gt":20170815}}


# NOTE(review): Collection.count() is deprecated in modern pymongo
# (use count_documents instead).
lt= seawater.count(query)
print(lt)
pos = list()
# Collect the matching documents into a list of dicts.
# NOTE(review): .limit(lt-1) silently drops one matching document —
# looks unintentional; confirm before reuse.
for s in seawater.find(query).limit(lt-1):
    pos.append(s)

# Convert the list of dicts to a DataFrame.
data=DataFrame(pos)

'''
p1=pd.DataFrame(Series(pos[1]))  

for i in range(1,42):
    s=pos[i]
    p2=pd.DataFrame(Series(s))   
    p1 = pd.concat([p1,p2],axis=1)

'''



data.to_excel('data.xls')   # snapshot the raw pull for inspection

# Features to extract for modelling.
choose_class=['total_price','area','height','room','build_date',
             'direction','hall','toilet','fitment','district_name','p_date']

dc=data[choose_class]

# Add two columns for longitude and latitude.
# NOTE(review): the 0 / 1 defaults look like arbitrary placeholders that the
# matching loop below overwrites for known districts — confirm.
dc['lng2']=0
dc['lat2']=1


# Copy each district's coordinates onto its listings by matching names.
# NOTE(review): O(rows x districts) scan using chained assignment
# (dc['lng2'][i]=...) — works on pandas of this era but warns/fails on
# modern copy-on-write pandas; a .map() on district_name would be safer.
for i in range(dc.shape[0]):

    for j in range(lr.shape[0]):

        if dc['district_name'][i]==lr.index[j]:

            dc['lng2'][i]=lr['lng2'][j]
            dc['lat2'][i]=lr['lat2'][j]

# Convert 'total_price' to a unit price (price per unit area) and store it
# back into the 'total_price' column.
mean_price=dc['total_price']/dc['area']

dc['total_price']=mean_price # 'total_price' now holds the unit price

'''
#这段代码用于把时间转化成一个连续的数,至于是否有效有待观察
####################
h=dc['p_date']
for i in range(1,len(h)):   
    a=int(h[i])    
    b=str(a)    
    c=parse(b)        
    e = date2num(c)    
    h[i]=e 

dc['p_date']=h
###################
'''
dc.to_excel('dc.xls')



def turn_str_to_number(dc):
    """Convert the 'p_date' column from integer-like dates (e.g. 20170815)
    to matplotlib date numbers, in place.

    Parameters:
        dc -- DataFrame with a 'p_date' column of int-like values.

    Returns the same DataFrame (mutated).

    NOTE(review): iteration starts at position 1, so the first element of
    'p_date' is left unconverted — kept as-is because downstream code drops
    row label 0; confirm before reusing on other data.
    """
    h=dc['p_date']
    for i in range(1,len(h)):
        # int() normalises float-typed values like 20170815.0 before parsing
        a=int(h[i])
        h[i]=date2num(parse(str(a)))
    # write the converted series back once, after the loop
    # (the original re-assigned dc['p_date'] on every iteration)
    dc['p_date']=h
    return dc

dc=turn_str_to_number(dc)   # listing dates -> matplotlib date numbers



'''
for i in dc['direction'].index:

    if ('南' in str(dc['direction'][i])) :
        dc['direction'][i]=0
    elif('透' in str(dc['direction'][i])):

        dc['direction'][i]=1
    else:

        dc['direction'][i]=2

'''

def op_direc(dc):
    """Encode the free-text 'direction' column into a small ordinal, in place.

    Mapping: value contains '南' (south-facing) -> 0; otherwise contains
    '透' (through-ventilated) -> 1; anything else -> 2.

    Parameters:
        dc -- DataFrame with a 'direction' column.

    Returns the same DataFrame (mutated).
    """
    for i in dc['direction'].index:
        text = str(dc['direction'][i])
        # .loc instead of chained assignment (dc['direction'][i]=...),
        # which can silently write to a copy on modern pandas
        if '南' in text:
            dc.loc[i, 'direction'] = 0
        elif '透' in text:
            dc.loc[i, 'direction'] = 1
        else:
            dc.loc[i, 'direction'] = 2
    return dc

dc= op_direc(dc)   # 'direction' text -> ordinal codes 0/1/2


def op_fitmen(dc):
    """Encode the 'fitment' (decoration) column in place: 0 for luxury
    ('豪') or fine ('精') decoration, 1 for everything else.

    Parameters:
        dc -- DataFrame with a 'fitment' column.

    Returns the same DataFrame (mutated).

    Bug fix: the original tested ('豪' or '精') in s, which evaluates to
    '豪' in s only (the parenthesised `or` short-circuits to '豪'), so
    fine-decoration ('精') rows were mis-labelled 1.
    """
    for i in dc['fitment'].index:
        text = str(dc['fitment'][i])
        if '豪' in text or '精' in text:
            dc.loc[i, 'fitment'] = 0
        else:
            dc.loc[i, 'fitment'] = 1
    return dc


dc = op_fitmen(dc)   # 'fitment' text -> 0/1 decoration category


'''       
for i in dc['fitment'].index:
    if ('豪' or '精') in str(dc['fitment'][i]) :
        dc['fitment'][i]=0

    else :
        dc['fitment'][i]=1

'''



'''

dc=dc.fillna({'height':dc['height'].mean(),
              'room':dc['room'].mean(),
              'toilet':dc['toilet'].mean(),
              'hall':dc['hall'].mean(),
              })

'''

# Forward-fill remaining missing values from the previous row.
# NOTE(review): fillna(method=...) is deprecated on modern pandas
# (use DataFrame.ffill).
dc=dc.fillna(method='ffill')

# Extract the numeric date from a string, e.g. '1988年' -> 1988.


import re
def numbertake(stnum):
    """Return all digit characters of *stnum* concatenated, as an int.

    e.g. '1988年' -> 1988;  2005 -> 2005.

    Raises ValueError if *stnum* contains no digits at all.
    """
    # raw string: the original "\D" relied on Python passing unknown
    # escapes through; r"\D" is the explicit, warning-free spelling
    digits = re.sub(r"\D", "", str(stnum))
    return int(digits)

al=list(dc['build_date'].values)
# Backstop: fill any NaNs left after the forward-fill with the most common
# build_date value.
dc=dc.fillna(Counter(al).most_common(1)[0][0])

# Reduce each build_date to its digits, e.g. '1988年' -> 1988.
# NOTE(review): slice-assignment through chained indexing — warns on modern
# pandas; .loc would be the safe spelling.
for i in range(dc.shape[0]):
    dc['build_date'][i:i+1]=numbertake(dc['build_date'][i:i+1].values[0])




ds=dc.drop('district_name',axis=1)   # keep only numeric model features

'''
def take_number(dc):
    import re
    def numbertake(stnum):

        lg = str(stnum)
        lgttcc = re.sub("\D", "", lg)
        return int(lgttcc)

    al=list(dc['build_date'].values)
    dc=dc.fillna(Counter(al).most_common(1)[0][0])
    for i in range(dc.shape[0]):
        dc['build_date'][i:i+1]=numbertake(dc['build_date'][i:i+1].values[0])
    return dc

dc=take_number(dc)
'''
# Drop row label 0: turn_str_to_number never converts the first element of
# 'p_date' (its loop starts at 1), so that row is unusable.
ds = ds.drop([0],axis=0)




def isolate_point(dc, districts=None):
    """Collect the row labels of per-district price outliers.

    For each district, rows whose unit price falls outside
    mean ± 1.645*std (a ~90% two-sided normal band) are flagged.

    Parameters:
        dc        -- DataFrame with 'district_name' and 'total_price' columns.
        districts -- iterable of district names to scan; defaults to the
                     module-level dirtic_list (backward compatible).

    Returns: list of index labels of the outlier rows.

    Bug fix: the original `return` sat inside the outer for-loop, so only
    the FIRST district was ever scanned (cf. the commented-out reference
    implementation below, which scans them all).
    """
    if districts is None:
        districts = dirtic_list
    outliers = list()
    for name in districts:
        group = dc[dc['district_name'] == name]
        prices = group['total_price'].values
        lo = prices.mean() - 1.645 * prices.std()
        hi = prices.mean() + 1.645 * prices.std()
        for label in group['total_price'].index:
            price = group['total_price'][label]
            if price < lo or price > hi:
                outliers.append(label)
    return outliers

lll=isolate_point(dc)   # index labels of per-district price outliers

'''
#####################################################################
#取出离群点的索引
lll=list()
for j in dirtic_list:
    fg=dc[dc['district_name']==j]
    hh=fg['total_price'].values
    hmean=hh.mean()
    hstd=hh.std()
    lg=list(fg['total_price'].index)


    for i in lg:
        if (fg['total_price'][i]<(hmean-1.645*hstd))or(fg['total_price'][i]>(hmean+1.645*hstd)):

            lll.append(i)



######################################################################
'''

# Remove the per-district price outliers found above.
data_all = ds.drop(lll,axis=0)

#data_all = ds.drop([0],axis=0)

data_all.to_excel('data_all.xls')   # snapshot the cleaned modelling table

#sample_number=data_all.shape[0]



#kk=int(0.08 *sample_number)


def divdata(dvt):
    """Randomly pick ~8% of *dvt*'s rows to serve as the test set.

    Parameters:
        dvt -- DataFrame whose index labels are sampled.

    Returns: list of index labels (sampling is with replacement, so
    duplicates are possible — dropping duplicates is harmless downstream).

    Bug fix: the original drew positions with random.randint(1, n), which
    (a) can return n itself and raise IndexError on listall[n], and
    (b) can never select position 0.  randrange(n) draws uniformly from
    the full valid range 0..n-1.
    """
    listall = list(dvt.index)
    kk = int(0.08 * len(listall))
    positions = [random.randrange(len(listall)) for _ in range(kk)]
    test_label = [listall[x] for x in positions]
    return test_label



#test_label=[random.randint(1,sample_number) for _ in range(kk)]

# Labels of the rows held out as the test set (~8% of data_all).
test_label=divdata(data_all)


data_train= data_all.drop(test_label,axis=0)
#data_train.to_excel('data_train.xls')

# Column-wise extrema of the TRAINING set; also reused to scale the test set.
data_max = data_train.max()
data_min = data_train.min()

def normolize_data(data_train, d_min=None, d_max=None):
    """Min-max scale *data_train* column-wise into roughly [0, 1).

    The +0.2 in the denominator keeps the scaled maximum strictly below 1
    (the network's sigmoid output never reaches 1) and avoids division by
    zero on constant columns.

    Parameters:
        data_train   -- DataFrame/Series to scale.
        d_min, d_max -- extrema to scale against; default to the
                        module-level data_min / data_max computed from the
                        training set (preserves the original call shape).

    Returns the scaled data (new object; input is not mutated).
    """
    if d_min is None:
        d_min = data_min
    if d_max is None:
        d_max = data_max
    return (data_train - d_min) / (d_max - d_min + 0.2)

'''
data_max = data_train.max()
data_min = data_train.min()

data_train1 = (data_train-data_min)/(data_max-data_min+0.2) #数据标准化

#knife=int(0.95*(data_train.shape[0]))#用于切割数据80%用于训练,20%用于计算
'''

data_train1=normolize_data(data_train)

# Column 0 is total_price (the regression target); columns 1..11 are the
# features.  (The original comments had the two roles swapped.)
# NOTE(review): as_matrix() was removed in pandas 1.0 — use .values/.to_numpy.
x_train = data_train1.iloc[:,1:12].as_matrix() # training features
y_train = data_train1.iloc[:,0:1].as_matrix() # training target (unit price)



from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation

# NOTE(review): Keras 1.x API throughout — `output_dim` and `nb_epoch`
# were renamed `units` and `epochs` in Keras 2.
model = Sequential() # build the model
model.add(Dense(input_dim = 11, output_dim = 48)) # input layer -> hidden layer
model.add(Activation('relu')) # ReLU activation

model.add(Dense(input_dim = 48, output_dim = 100)) # hidden -> hidden
model.add(Activation('relu')) # ReLU activation


model.add(Dense(input_dim = 100, output_dim = 50)) # hidden -> hidden
model.add(Activation('relu')) # ReLU activation

model.add(Dense(input_dim = 50, output_dim = 36)) # hidden -> hidden
model.add(Activation('relu')) # ReLU activation

model.add(Dense(input_dim = 36, output_dim = 12)) # hidden -> hidden
model.add(Activation('relu')) # ReLU activation
model.add(Dense(input_dim = 12, output_dim = 12)) # hidden -> hidden
model.add(Activation('relu')) # ReLU activation


model.add(Dense(input_dim = 12, output_dim = 1)) # hidden -> output layer
model.add(Activation('sigmoid')) # sigmoid keeps output in (0,1), matching the scaled target
# Compile the model: mean-squared-error loss, Adam optimiser.
# (The original comment claimed binary_crossentropy; the code uses MSE.)
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(x_train, y_train, nb_epoch = 10, batch_size = 2) # train the model
model.save_weights('net.model') # persist the trained weights


# Pull the held-out rows back out of data_all by label.
# NOTE(review): DataFrame.ix was deprecated in pandas 0.20 and removed in
# 1.0 — use .loc on modern pandas.
test=data_all.ix[test_label,:]

test.to_excel('test.xls')

#test_max = test.max()
#test_min = test.min()
# = (test-data_min)/(data_max-data_min+0.2) 

# Scale the test set with the TRAINING extrema (data_min/data_max).
data_test=normolize_data(test)

x_test = data_test.iloc[:,1:12].as_matrix()
y_test = data_test.iloc[:,0:1].as_matrix()


# Undo the min-max scaling.  r is (n,1) and broadcasts against the
# per-column extrema vectors, producing an (n, n_cols) array; only
# column 0 (total_price) is meaningful and is extracted below.
r = (model.predict(x_test))
rt=r*(data_max.values-data_min.values+0.2)+data_min.values
#print(rt.round(2))

#################################
p=rt[:,0:1].flatten()   # predicted unit prices, back in original units





#如果小区有房价等数据,则进行如下后处理,即控制房价相对于小区均价的波动幅度
'''
jk=dc.drop([0],axis=0)

cx=list(test.index)
p_dmean_ratio=list()
#p_dmean_ratio=list(range(len(cx)))
for j in range(len(cx)):

    pk=jk[jk['district_name']==jk['district_name'][cx[j]]]

    dmean=pk['total_price'].values.mean()

    pmn=p[j]/dmean

    p_dmean_ratio.append(pmn)

    if (pmn>1.19) or(pmn<0.81):
        p[j]=dmean
'''        

def wave_move(dc,p,test):
    """Post-process predictions: clamp any prediction deviating more than
    ±19% from its district's mean unit price back to that mean.

    Parameters:
        dc   -- DataFrame with 'district_name' and 'total_price' (unit price)
                columns; row label 0 is dropped first (presumably because
                its date was never converted — confirm).
        p    -- 1-D array of predicted unit prices, mutated in place;
                position j corresponds to test.index[j].
        test -- DataFrame whose index labels map p's positions to dc rows.

    Returns p (the same, possibly modified, array).

    Note: the original also accumulated the prediction/mean ratios into an
    unused local list; that dead code is removed here.
    """
    known = dc.drop([0], axis=0)
    labels = list(test.index)
    for j in range(len(labels)):
        district = known['district_name'][labels[j]]
        same_district = known[known['district_name'] == district]
        dmean = same_district['total_price'].values.mean()
        ratio = p[j] / dmean
        if ratio > 1.19 or ratio < 0.81:
            p[j] = dmean
    return p



#如果没有小区房源信息,则进行如下算法,对小区进行多维聚类。

'''

#计算任意两条房源的欧氏距离

ppd=data_all.values

p0=ppd[0]
a=list()
for i in range(1,ppd.shape[0]):
    for j in range(i+1,ppd.shape[0]):
        X=np.vstack([ppd[i],ppd[j]])
        d2=pdist(X,'minkowski',p=2)
        a.append(list(d2))
print(max(a))

'''        
from scipy.spatial.distance import pdist
################################
def distence(data_all):
    """Pairwise Euclidean (Minkowski, p=2) distances between rows.

    Parameters:
        data_all -- DataFrame of numeric rows.

    Returns: list of single-element lists, one per unordered row pair,
    matching pdist's output for each stacked 2-row array.

    Note: the original bound an unused local p0 = rows[0]; removed here.
    NOTE(review): the scan starts at row position 1, so row 0 never enters
    any pair — this mirrors the commented-out prototype above; confirm
    whether the exclusion is intentional before reuse.
    """
    rows = data_all.values
    n = rows.shape[0]
    dists = list()
    for i in range(1, n):
        for j in range(i + 1, n):
            pair = np.vstack([rows[i], rows[j]])
            dists.append(list(pdist(pair, 'minkowski', p=2)))
    return dists

# Pairwise distances between the normalised training rows; reused by the
# second script below for neighbour lookups.
dist=distence(data_train1)



# Clamp predictions that stray more than ±19% from their district mean.
p=wave_move(dc,p,test)

predict=np.array([p]).T   # column vector of final predictions
'''
realvalue= test.iloc[:,0:1].as_matrix()

error=abs((predict-realvalue)/realvalue)*100


pro=(np.array([p_dmean_ratio]).T-1)*100


gek=column_stack((predict,realvalue,error,pro))

#geek=DataFrame(gek,columns=['predict','realvalue','error'])
geek=DataFrame(gek,columns=['predict','realvalue','error','p_dmean_ratio'],
               index=test.index)



test_and_geek=pd.concat([test,geek],axis=1)

output_label=['total_price', 'area', 'height', 'room', 'direction', 'hall',
              'toilet','fitment', 'p_date',  'predict', 'realvalue', 'error','p_dmean_ratio']


tg=test_and_geek[output_label]

output_label1=['mean_price', 'area', 'height', 'room', 'direction', 'hall',
              'toilet','fitment', 'p_date',  'predict', 'realvalue', 'error','p_dmean_ratio']

tg.columns=output_label1

tg.to_excel('tg.xls')

print(tg)

print('平均计算误差:','%.2f'%error.mean(),'%')
'''



# -*- coding: utf-8 -*-
"""
Created on Mon Sep 11 16:22:27 2017

@author: Administrator
"""

# NOTE(review): from here on a SECOND script has been pasted into this same
# file.  It re-imports numpy/pandas under the same aliases, re-binds
# dirtic_list, and relies on functions and globals defined above
# (op_direc, op_fitmen, turn_str_to_number, normolize_data, dist, ...).
import numpy as np
import pandas as pd 

dirtic_list=["大运盛城","凤城三村","凤城六村",
             "凤城花园","东方名园"]

# Reload a feature table from disk — presumably an edited copy of the
# dc.xls exported above; confirm where dc1.xls comes from.
lgttcc=pd.read_excel('dc1.xls')

'''
inner=lgttcc.loc[[0],:]
outer=lgttcc.loc[[0],:]

for i in range(lgttcc.shape[0]):
    if lgttcc['district_name'][i] in dirtic_list:
        inner=pd.concat([inner,lgttcc['district_name'][i]])

    else:
        outer=pd.concat([outer,lgttcc['district_name'][i]])

'''






# Re-apply the same categorical encodings to the reloaded table.
lg1= op_direc(lgttcc)     # 'direction' text -> 0/1/2
lg2 = op_fitmen(lg1)      # 'fitment' text -> 0/1

lg3=lg2.fillna(method='ffill')   # forward-fill missing values

dc=lg3   # NOTE(review): rebinds dc, shadowing the first script's table
import re
def numbertake(stnum):
    """Return all digit characters of *stnum* concatenated, as an int.

    e.g. '1988年' -> 1988;  2005 -> 2005.
    Raises ValueError if *stnum* contains no digits at all.

    NOTE(review): duplicate of the numbertake defined earlier in this file.
    """
    # raw string: the original "\D" relied on Python passing unknown
    # escapes through; r"\D" is the explicit, warning-free spelling
    digits = re.sub(r"\D", "", str(stnum))
    return int(digits)

al=list(dc['build_date'].values)
# Fill remaining NaNs with the most common build_date value.
dc=dc.fillna(Counter(al).most_common(1)[0][0])

# Reduce each build_date to its digits, e.g. '1988年' -> 1988.
for i in range(dc.shape[0]):
    dc['build_date'][i:i+1]=numbertake(dc['build_date'][i:i+1].values[0])


dc1=turn_str_to_number(dc)   # p_date -> matplotlib date numbers

# Seed both partitions with row 0 so pd.concat always has a frame to append
# to; the seed rows are dropped again just below.
inner=dc1.loc[[0],:]
outer=dc1.loc[[0],:]

# Split rows into modelled districts (inner) vs unknown districts (outer).
# NOTE(review): pd.concat inside a loop is quadratic; collecting labels and
# concatenating once would be linear.
for i in range(dc1.shape[0]):
    if dc1['district_name'][i] in dirtic_list:
        inner=pd.concat([inner,dc1.loc[[i],:]])

    else:
        outer=pd.concat([outer,dc1.loc[[i],:]])


#dc1=dc.drop('district_name',axis=1)

inner = inner.drop([0],axis=0)
outer= outer.drop([0],axis=0)

outer=outer.drop('district_name',axis=1)

outer_normal=normolize_data(outer)

dist_np=np.array(dist)

dist_mean=dist_np.mean()
dist_std=dist_np.std()

data_train1

b=list()   # column-0 (scaled price) values of training rows near each outer row

# For every row from an unknown district, collect column 0 of each training
# row lying within Minkowski(p=2) distance 0.5 of it.
# NOTE(review): brute-force O(n_outer x n_train) scan; the commented branch
# below shows an alternative mean±std distance band.
for i in range(outer_normal.shape[0]):
    for j in range(data_train1.shape[0]):
        X=np.vstack([outer_normal.values[i],data_train1.values[j]])
        d2=pdist(X,'minkowski',p=2)

        if (d2<0.5):
        #if (d2<dist_mean+0.1*dist_std) and (d2>dist_mean-0.1*dist_std):

           b.append(data_train1.values[j][0])


























# posted @ 2022-08-19 22:59  luoganttcc  阅读(15)  评论(0编辑  收藏  举报
# (blog footer from the page scrape — not Python, commented out)