wsdream: a small Keras/TensorFlow script that trains an embedding-plus-CNN model on WSDream dataset #2 (142 users × 4500 services × 64 time slices) to predict web-service response time (RT) and throughput (TP).

import os
import urllib.request
import zipfile
from pprint import pprint
import numpy as np
import tensorflow as tf
import keras as k

# Configure the TF1-compat session: let GPU memory grow on demand, optionally
# restrict the visible devices, and fix the graph-level random seed.
def set_session(device_count=None, seed=0):
    gpu_options = tf.compat.v1.GPUOptions(allow_growth=True)
    if device_count is not None:
        config = tf.compat.v1.ConfigProto(gpu_options=gpu_options, device_count=device_count)
    else:
        config = tf.compat.v1.ConfigProto(gpu_options=gpu_options)
    sess = tf.compat.v1.Session(config=config)
    tf.compat.v1.keras.backend.set_session(sess)
    tf.compat.v1.set_random_seed(seed)
    return sess
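
# Usage (illustrative): set_session(device_count={"GPU": 0}) forces CPU-only
# execution, while set_session() leaves all devices visible. It is called once
# below, before any model is built.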

# Split an (N, k) index matrix into a list of k column arrays, one per model input.
def transform(idxs):
    return [idxs[:, i] for i in range(idxs.shape[1])]
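
# Example (illustrative): transform(np.array([[0, 1, 2], [3, 4, 5]])) returns
# [array([0, 3]), array([1, 4]), array([2, 5])], i.e. one column per input
# head of the multi-input model below.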

# Build the multi-input model: one embedding per ID column, the embeddings
# stacked into a (rank, len(shape), 1) feature map, then two Conv2D layers
# and a small dense head producing a single non-negative prediction.
def create_wsdream(shape, rank, nc):
    inputs = [k.Input(shape=(1,), dtype="int32") for i in range(len(shape))]
    embeds = [k.layers.Embedding(output_dim=rank, input_dim=shape[i])(inputs[i]) for i in range(len(shape))]
    x = k.layers.Concatenate(axis=1)(embeds)
    x = k.layers.Reshape(target_shape=(rank, len(shape), 1))(x)
    x = k.layers.Conv2D(
        nc,
        kernel_size=(1, len(shape)),
        activation='relu',
        padding='valid'
    )(x)
    x = k.layers.Conv2D(
        nc, 
        kernel_size=(rank, 1), 
        activation="relu", 
        padding="valid"
    )(x)
    x = k.layers.Flatten()(x)
    x = k.layers.Dense(nc, activation="relu")(x)
    outputs = k.layers.Dense(1, activation='relu')(x)
    model = k.Model(inputs=inputs, outputs=outputs)
    return model
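
# Shape walk-through (illustrative) for shapes=[142, 4500, 64], rank=20, nc=20:
#   3 x Embedding          -> (batch, 1, 20); Concatenate axis=1 -> (batch, 3, 20)
#   Reshape                -> (batch, 20, 3, 1)
#   Conv2D, kernel (1, 3)  -> (batch, 20, 1, 20)
#   Conv2D, kernel (20, 1) -> (batch, 1, 1, 20)
#   Flatten/Dense/Dense    -> (batch, 20) -> (batch, 20) -> (batch, 1)
# Note that the row-major Reshape interleaves the three embeddings across the
# rank axis rather than placing one embedding per column; that is how the
# original script is written, so it is kept as-is here.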

# Mean absolute percentage error (MAPE) as a Keras metric; |y_true| is clipped
# below at `threshold` so near-zero targets do not blow up the ratio.
def mape_keras(y_true, y_pred, threshold=0.1):
    v = k.backend.clip(k.backend.abs(y_true), threshold, None)
    diff = k.backend.abs((y_true-y_pred)/v)
    return 100.0 * k.backend.mean(diff, axis=-1)

# Mean absolute error (MAE).
def mae(y_true, y_pred):
    return np.mean(np.abs(y_pred-y_true))

# Root mean squared error (RMSE).
def rmse(y_true, y_pred):
    return np.sqrt(np.mean(np.square(y_pred-y_true)))

# Mean absolute percentage error (MAPE), NumPy version.
def mape(y_true, y_pred, threshold=0.1):
    v = np.clip(np.abs(y_true), threshold, None)
    diff = np.abs((y_true-y_pred)/v)
    return 100.0 * np.mean(diff, axis=-1).mean()
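
# Toy sanity check (illustrative):
#   y_true = np.array([1.0, 2.0, 4.0]); y_pred = np.array([1.5, 2.0, 3.0])
#   mae(y_true, y_pred)  -> mean(0.5, 0.0, 1.0)         = 0.5
#   rmse(y_true, y_pred) -> sqrt(mean(0.25, 0.0, 1.0))  ~ 0.645
#   mape(y_true, y_pred) -> 100 * mean(0.5, 0.0, 0.25)  = 25.0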


# Predict on x and report RMSE / MAE against y.
def get_metrics(model, x, y, batch_size=1024):
    yp = model.predict(x, batch_size=batch_size, verbose=1).flatten()
    print('-------------------------')
    print('predictions:', yp)
    print('-------------------------')
    return {
        'rmse': float(rmse(y, yp)),
        # 'mape': float(mape(y, yp)),
        'mae': float(mae(y, yp))
    }

if __name__ == '__main__':
    # Download and unpack WSDream dataset #2 if it is not already present
    data_folder = 'dataset2'
    if not os.path.exists(data_folder):
        url = 'https://zenodo.org/record/1133476/files/wsdream_dataset2.zip?download=1'
        filedata = urllib.request.urlopen(url)

        with open('wsdream_dataset2.zip', 'wb') as f:
            f.write(filedata.read())

        with zipfile.ZipFile('wsdream_dataset2.zip', 'r') as f:
            f.extractall('.')

    # Load the data
    # Columns: user ID | service ID | time slice ID | response time (s)
    # [142, 4500, 64, 20001]
    rt_data = np.loadtxt(os.path.join(data_folder, "rtdata.txt"))
    # print('rt_data:',rt_data)
    print(rt_data.shape)
    # Columns: user ID | service ID | time slice ID | throughput (kbps)
    # [142, 4500, 64, 20001]
    tp_data = np.loadtxt(os.path.join(data_folder, "tpdata.txt"))
    print(tp_data.shape)
    # Hyperparameters
    lr = 1e-4
    # embedding dimension
    rank = 20
    # number of convolution filters (kept equal to the embedding rank)
    nc = rank
    epochs = 50
    batch_size = 256
    seed = 3
    verbose = 1

    # {"GPU": 0} hides the GPU, so training runs on CPU only
    set_session(device_count={"GPU": 0}, seed=seed)

    # 142 users, 4500 services, 64 time slices
    shapes = [142, 4500, 64]
    # (equivalently: shapes = [len(np.unique(c)) for c in transform(rt_data[:, :3])])

    print('----------train_rt-----------')
    model_rt = create_wsdream(shapes, rank, nc)
    model_rt.compile(k.optimizers.legacy.Adam(learning_rate=lr), loss='mse', metrics=['mae'])
    # Drop column 3 (the target), keeping the three ID columns
    pre_column_rt = np.delete(rt_data, 3, axis=1)
    hists_rt = model_rt.fit(
        x=transform(pre_column_rt),
        y=rt_data[:, 3],
        verbose=verbose,
        epochs=epochs,
        batch_size=batch_size,
        validation_split=0.1,
        callbacks=[k.callbacks.EarlyStopping(
            monitor='val_loss',
            patience=10,
            restore_best_weights=True
        )]
    )
    print('----------train_tp-----------')
    model_tp = create_wsdream(shapes, rank, nc)
    # Fresh optimizer per model: reusing a single Adam instance would carry its
    # step count and slot state over from the RT training run.
    model_tp.compile(k.optimizers.legacy.Adam(learning_rate=lr), loss='mse', metrics=['mae'])
    pre_column_tp = np.delete(tp_data, 3, axis=1)
    hists_tp = model_tp.fit(
        x=transform(pre_column_tp),
        y=tp_data[:, 3],
        verbose=verbose,
        epochs=epochs,
        batch_size=batch_size,
        validation_split=0.1,
        callbacks=[k.callbacks.EarlyStopping(
            monitor='loss',  # training loss, unlike the RT model's 'val_loss'
            patience=10,
            restore_best_weights=True
        )]
    )
    rt_info = get_metrics(model_rt, transform(pre_column_rt), rt_data[:, 3])
    tp_info = get_metrics(model_tp, transform(pre_column_tp), tp_data[:, 3])

    pprint({'rt_train': rt_info, 'tp_train': tp_info})
D:\develop\Anaconda3\python.exe D:\develop\code\pythoncode\tensor-demo1\wsdream.py 
(30287611, 4)
(30287611, 4)
2023-10-03 08:43:28.892113: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: SSE SSE2 SSE3 SSE4.1 SSE4.2 AVX AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
----------train_rt-----------
Epoch 1/50
106480/106480 [==============================] - 500s 5ms/step - loss: 12.1776 - mae: 1.7193 - val_loss: 12.4426 - val_mae: 1.7890
Epoch 2/50
106480/106480 [==============================] - 510s 5ms/step - loss: 9.4647 - mae: 1.4349 - val_loss: 12.3537 - val_mae: 1.9970
Epoch 3/50
106480/106480 [==============================] - 514s 5ms/step - loss: 8.8917 - mae: 1.3718 - val_loss: 12.8839 - val_mae: 2.2814
Epoch 4/50
106480/106480 [==============================] - 510s 5ms/step - loss: 8.6131 - mae: 1.3361 - val_loss: 12.7478 - val_mae: 2.2493
Epoch 5/50
106480/106480 [==============================] - 510s 5ms/step - loss: 8.4365 - mae: 1.3117 - val_loss: 13.3040 - val_mae: 2.4263
Epoch 6/50
106480/106480 [==============================] - 510s 5ms/step - loss: 8.2760 - mae: 1.2938 - val_loss: 14.6136 - val_mae: 2.7383
Epoch 7/50
106480/106480 [==============================] - 510s 5ms/step - loss: 8.1510 - mae: 1.2811 - val_loss: 14.8537 - val_mae: 2.7743
Epoch 8/50
106480/106480 [==============================] - 508s 5ms/step - loss: 8.0628 - mae: 1.2720 - val_loss: 15.9273 - val_mae: 2.9850
Epoch 9/50
106480/106480 [==============================] - 511s 5ms/step - loss: 8.0047 - mae: 1.2656 - val_loss: 15.5684 - val_mae: 2.9007
Epoch 10/50
106480/106480 [==============================] - 518s 5ms/step - loss: 7.9656 - mae: 1.2609 - val_loss: 16.0655 - val_mae: 2.9968
Epoch 11/50
106480/106480 [==============================] - 505s 5ms/step - loss: 7.9320 - mae: 1.2561 - val_loss: 15.8049 - val_mae: 2.9165
Epoch 12/50
106480/106480 [==============================] - 482s 5ms/step - loss: 7.9004 - mae: 1.2510 - val_loss: 15.4173 - val_mae: 2.8242
----------train_tp-----------
Epoch 1/50
106480/106480 [==============================] - 481s 4ms/step - loss: 1378.5265 - mae: 7.1852 - val_loss: 939.6042 - val_mae: 7.1062
Epoch 2/50
106480/106480 [==============================] - 480s 5ms/step - loss: 1271.7971 - mae: 6.7428 - val_loss: 1208.8379 - val_mae: 8.0226
Epoch 3/50
106480/106480 [==============================] - 482s 5ms/step - loss: 1215.8990 - mae: 6.4609 - val_loss: 1725.6975 - val_mae: 9.9964
Epoch 4/50
106480/106480 [==============================] - 480s 5ms/step - loss: 1148.8403 - mae: 6.3008 - val_loss: 2202.8054 - val_mae: 11.5921
Epoch 5/50
106480/106480 [==============================] - 483s 5ms/step - loss: 1022.9125 - mae: 5.9842 - val_loss: 3699.5852 - val_mae: 18.3614
Epoch 6/50
106480/106480 [==============================] - 484s 5ms/step - loss: 898.2462 - mae: 5.6059 - val_loss: 6434.6084 - val_mae: 29.0876
Epoch 7/50
106480/106480 [==============================] - 485s 5ms/step - loss: 778.8353 - mae: 5.3944 - val_loss: 9899.2461 - val_mae: 39.7254
Epoch 8/50
106480/106480 [==============================] - 482s 5ms/step - loss: 692.7132 - mae: 5.3191 - val_loss: 13453.2861 - val_mae: 48.3555
Epoch 9/50
106480/106480 [==============================] - 483s 5ms/step - loss: 656.1313 - mae: 5.2378 - val_loss: 17241.1367 - val_mae: 58.6533
Epoch 10/50
106480/106480 [==============================] - 490s 5ms/step - loss: 637.2543 - mae: 5.1963 - val_loss: 20967.4062 - val_mae: 68.2803
Epoch 11/50
106480/106480 [==============================] - 490s 5ms/step - loss: 626.0726 - mae: 5.1027 - val_loss: 24201.2812 - val_mae: 75.7040
Epoch 12/50
106480/106480 [==============================] - 489s 5ms/step - loss: 616.8686 - mae: 4.9631 - val_loss: 28294.9785 - val_mae: 87.6363
Epoch 13/50
106480/106480 [==============================] - 483s 5ms/step - loss: 609.8871 - mae: 4.9258 - val_loss: 34260.2852 - val_mae: 104.3545
Epoch 14/50
106480/106480 [==============================] - 478s 4ms/step - loss: 603.8393 - mae: 4.9058 - val_loss: 38412.8359 - val_mae: 114.8700
Epoch 15/50
106480/106480 [==============================] - 478s 4ms/step - loss: 598.2858 - mae: 4.8920 - val_loss: 42226.1484 - val_mae: 123.3122
Epoch 16/50
106480/106480 [==============================] - 477s 4ms/step - loss: 593.5031 - mae: 4.8814 - val_loss: 47046.7461 - val_mae: 133.9262
Epoch 17/50
106480/106480 [==============================] - 478s 4ms/step - loss: 587.7360 - mae: 4.8734 - val_loss: 48991.6758 - val_mae: 137.9644
Epoch 18/50
106480/106480 [==============================] - 484s 5ms/step - loss: 582.4016 - mae: 4.8572 - val_loss: 55302.9648 - val_mae: 153.0013
Epoch 19/50
106480/106480 [==============================] - 486s 5ms/step - loss: 575.1342 - mae: 4.8370 - val_loss: 59622.3984 - val_mae: 163.4787
Epoch 20/50
106480/106480 [==============================] - 486s 5ms/step - loss: 569.3209 - mae: 4.8102 - val_loss: 65997.8203 - val_mae: 177.3383
Epoch 21/50
106480/106480 [==============================] - 486s 5ms/step - loss: 563.6873 - mae: 4.7921 - val_loss: 66985.4375 - val_mae: 177.8791
Epoch 22/50
106480/106480 [==============================] - 492s 5ms/step - loss: 559.0012 - mae: 4.7772 - val_loss: 75053.5547 - val_mae: 193.5834
Epoch 23/50
106480/106480 [==============================] - 491s 5ms/step - loss: 554.1259 - mae: 4.7647 - val_loss: 78339.0000 - val_mae: 198.9039
Epoch 24/50
106480/106480 [==============================] - 478s 4ms/step - loss: 549.8729 - mae: 4.7524 - val_loss: 85172.2109 - val_mae: 209.6865
Epoch 25/50
106480/106480 [==============================] - 477s 4ms/step - loss: 545.7138 - mae: 4.7380 - val_loss: 88718.6641 - val_mae: 213.1269
Epoch 26/50
106480/106480 [==============================] - 479s 5ms/step - loss: 542.6530 - mae: 4.7231 - val_loss: 93529.7344 - val_mae: 220.5187
Epoch 27/50
106480/106480 [==============================] - 479s 5ms/step - loss: 539.4590 - mae: 4.7078 - val_loss: 99834.0703 - val_mae: 229.7221
Epoch 28/50
106480/106480 [==============================] - 485s 5ms/step - loss: 536.5281 - mae: 4.7022 - val_loss: 97984.0391 - val_mae: 223.5083
Epoch 29/50
106480/106480 [==============================] - 482s 5ms/step - loss: 532.8864 - mae: 4.6476 - val_loss: 98402.1719 - val_mae: 222.2281
Epoch 30/50
106480/106480 [==============================] - 483s 5ms/step - loss: 530.5935 - mae: 4.5944 - val_loss: 94052.8125 - val_mae: 211.1614
Epoch 31/50
106480/106480 [==============================] - 488s 5ms/step - loss: 528.2220 - mae: 4.5799 - val_loss: 94506.7578 - val_mae: 209.9658
Epoch 32/50
106480/106480 [==============================] - 487s 5ms/step - loss: 524.6554 - mae: 4.5661 - val_loss: 97446.9219 - val_mae: 212.7048
Epoch 33/50
106480/106480 [==============================] - 486s 5ms/step - loss: 521.7175 - mae: 4.5544 - val_loss: 91423.6797 - val_mae: 199.9731
Epoch 34/50
106480/106480 [==============================] - 489s 5ms/step - loss: 518.9676 - mae: 4.5405 - val_loss: 96669.5625 - val_mae: 206.9002
Epoch 35/50
106480/106480 [==============================] - 491s 5ms/step - loss: 516.8112 - mae: 4.5358 - val_loss: 102512.6797 - val_mae: 214.2163
Epoch 36/50
106480/106480 [==============================] - 481s 5ms/step - loss: 515.1762 - mae: 4.5365 - val_loss: 99282.8750 - val_mae: 207.6818
Epoch 37/50
106480/106480 [==============================] - 484s 5ms/step - loss: 513.0922 - mae: 4.5355 - val_loss: 98533.6328 - val_mae: 204.6014
Epoch 38/50
106480/106480 [==============================] - 490s 5ms/step - loss: 510.4915 - mae: 4.5359 - val_loss: 95473.1562 - val_mae: 197.7470
Epoch 39/50
106480/106480 [==============================] - 482s 5ms/step - loss: 509.0364 - mae: 4.5318 - val_loss: 96084.5703 - val_mae: 197.4163
Epoch 40/50
106480/106480 [==============================] - 481s 5ms/step - loss: 506.4710 - mae: 4.5296 - val_loss: 91405.6484 - val_mae: 187.5800
Epoch 41/50
106480/106480 [==============================] - 483s 5ms/step - loss: 503.6175 - mae: 4.5216 - val_loss: 94783.6328 - val_mae: 191.8085
Epoch 42/50
106480/106480 [==============================] - 482s 5ms/step - loss: 500.3831 - mae: 4.5173 - val_loss: 90646.5312 - val_mae: 183.2338
Epoch 43/50
106480/106480 [==============================] - 480s 5ms/step - loss: 496.8581 - mae: 4.5133 - val_loss: 92157.2812 - val_mae: 184.7237
Epoch 44/50
106480/106480 [==============================] - 480s 5ms/step - loss: 493.1195 - mae: 4.5072 - val_loss: 89734.6641 - val_mae: 178.5845
Epoch 45/50
106480/106480 [==============================] - 485s 5ms/step - loss: 489.0894 - mae: 4.4981 - val_loss: 87790.2188 - val_mae: 172.9886
Epoch 46/50
106480/106480 [==============================] - 481s 5ms/step - loss: 486.2367 - mae: 4.4893 - val_loss: 90552.3438 - val_mae: 175.4971
Epoch 47/50
106480/106480 [==============================] - 485s 5ms/step - loss: 482.6807 - mae: 4.4810 - val_loss: 87289.1250 - val_mae: 168.4117
Epoch 48/50
106480/106480 [==============================] - 483s 5ms/step - loss: 480.0930 - mae: 4.4716 - val_loss: 87140.7734 - val_mae: 164.4452
Epoch 49/50
106480/106480 [==============================] - 479s 5ms/step - loss: 477.1743 - mae: 4.4598 - val_loss: 91186.8047 - val_mae: 170.3880
Epoch 50/50
106480/106480 [==============================] - 483s 5ms/step - loss: 474.6982 - mae: 4.4499 - val_loss: 91041.9766 - val_mae: 167.2914
29578/29578 [==============================] - 88s 3ms/step
-------------------------
predictions: [3.194442  1.6483551 1.5518781 ... 1.3785235 0.        0.6708013]
-------------------------
29578/29578 [==============================] - 95s 3ms/step
-------------------------
predictions: [  0.           0.5321417    0.96816015 ... 337.11823    386.74475
   0.        ]
-------------------------
{'rt_train': {'mae': 1.432953561166149,
              'rmse': 3.066253760583022},
 'tp_train': {'mae': 20.70587543913108,
              'rmse': 97.60345532124401}}

Process finished with exit code 0
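
Note that the metrics above are computed on the same data the models were fitted on (hence the rt_train / tp_train keys), and the TP run's exploding val_loss points to heavy overfitting. Below is a minimal sketch of a held-out evaluation for the RT model, reusing the helpers from the script above; the 90/10 split and the variable names are illustrative, not part of the original script.

# Hypothetical held-out evaluation, assuming the transform / create_wsdream /
# get_metrics helpers and the hyperparameters defined in the script above.
rng = np.random.default_rng(seed)
idx = rng.permutation(len(rt_data))
cut = int(0.9 * len(idx))
train, test = rt_data[idx[:cut]], rt_data[idx[cut:]]
model = create_wsdream(shapes, rank, nc)
model.compile(k.optimizers.legacy.Adam(learning_rate=lr), loss='mse', metrics=['mae'])
model.fit(x=transform(np.delete(train, 3, axis=1)), y=train[:, 3],
          epochs=epochs, batch_size=batch_size, validation_split=0.1,
          callbacks=[k.callbacks.EarlyStopping(monitor='val_loss', patience=10,
                                               restore_best_weights=True)])
pprint(get_metrics(model, transform(np.delete(test, 3, axis=1)), test[:, 3]))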

 
