Linear regression (GPU version)
# Standalone-Keras imports (replaced below with the tf.keras equivalents):
# import keras
# import numpy as np
# import matplotlib.pyplot as plt
# from keras.models import Sequential
# from keras.layers import Dense   # fully connected layer

import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense   # fully connected (Dense) layer
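Since this is the GPU version, it can help to confirm that TensorFlow actually sees a GPU before training; a minimal check, assuming TensorFlow 2.x, might look like this:

import tensorflow as tf

# List the GPUs visible to TensorFlow; an empty list means training will run on the CPU.
gpus = tf.config.list_physical_devices('GPU')
print('GPUs available:', gpus)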
# Generate 100 random x values and build noisy targets around y = 0.1*x + 0.2
x_data = np.random.rand(100)
noise = np.random.normal(0, 0.01, x_data.shape)
y_data = x_data * 0.1 + 0.2 + noise

# Visualize the raw data
plt.scatter(x_data, y_data)
plt.show()

model = Sequential()
# model = multi_gpu_model(model, 1)  # wrap the model for multi-GPU training (second argument = number of GPUs)
model.add(Dense(units=1, input_dim=1))

# sgd: stochastic gradient descent; mse: mean squared error
model.compile(optimizer='sgd', loss='mse')

for step in range(5000):
    cost = model.train_on_batch(x_data, y_data)
    if step % 500 == 0:
        print('cost:', cost)

# The single Dense layer's weights are the fitted slope and intercept
W, b = model.layers[0].get_weights()
print('w:', W, 'b:', b)

# Plot the fitted line over the data
y_pred = model.predict(x_data)
plt.scatter(x_data, y_data)
plt.plot(x_data, y_pred, 'r', lw=2)
plt.show()
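The commented-out multi_gpu_model call has been deprecated and removed in recent TensorFlow releases; a minimal sketch of the same idea with tf.distribute.MirroredStrategy, assuming TensorFlow 2.x and the same synthetic data as above, could look like this:

import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# Same synthetic data as above
x_data = np.random.rand(100)
y_data = x_data * 0.1 + 0.2 + np.random.normal(0, 0.01, x_data.shape)

# MirroredStrategy replicates the model on every visible GPU and
# averages the gradients across replicas on each step.
strategy = tf.distribute.MirroredStrategy()

with strategy.scope():
    model = Sequential([Dense(units=1, input_dim=1)])
    model.compile(optimizer='sgd', loss='mse')

# fit() handles batching and distribution across the replicas
model.fit(x_data, y_data, epochs=500, batch_size=100, verbose=0)
print(model.layers[0].get_weights())

With a single GPU (or none at all) MirroredStrategy simply falls back to one replica, so the sketch runs unchanged on any machine.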