dga model train and test code

A character-level CNN (TFLearn) that classifies domain names as DGA-generated or benign. It expects two input files: dga_360_sorted.txt (one DGA domain per line) and top-1m.csv (benign domains in "rank,domain" form).

 

# -*- coding: utf-8 -*-

import os
import pickle
import tflearn

from tflearn.data_utils import to_categorical, pad_sequences
from tflearn.layers.core import dropout, fully_connected
from tflearn.layers.conv import conv_1d, max_pool_1d
from tflearn.layers.estimator import regression
from tflearn.layers.normalization import batch_normalization
from sklearn.model_selection import train_test_split
 
 
def get_cnn_model(max_len, volcab_size=None):
    """Build a character-level 1-D CNN for binary DGA/benign classification."""
    if volcab_size is None:
        volcab_size = 10240000  # oversized fallback; pass the real vocabulary size when known
 
    # Building convolutional network
    network = tflearn.input_data(shape=[None, max_len], name='input')
    network = tflearn.embedding(network, input_dim=volcab_size, output_dim=32)
 
    network = conv_1d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = conv_1d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
 
    network = batch_normalization(network)
    network = fully_connected(network, 64, activation='relu')
    network = dropout(network, 0.5)
 
    network = fully_connected(network, 2, activation='softmax')
    sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
    network = regression(network, optimizer=sgd, loss='categorical_crossentropy')
 
    model = tflearn.DNN(network, tensorboard_verbose=0)
    return model
 
 
def get_data_from(file_name):
    ans = []
    with open(file_name) as f:
        for line in f:
            domain_name = line.strip()
            ans.append(domain_name)
    return ans
 
 
def get_local_data():
    # dga_360_sorted.txt holds the malicious (DGA) domains, top-1m.csv the benign ones
    black_data = get_data_from(file_name="dga_360_sorted.txt")
    # top-1m.csv rows look like "rank,domain"; keep only the domain column
    white_data = [line.split(",")[-1] for line in get_data_from(file_name="top-1m.csv")]
    return black_data, white_data
 
 
def get_data():
    black_x, white_x = get_local_data()
    black_y, white_y = [1]*len(black_x), [0]*len(white_x)
 
    X = black_x + white_x
    labels = black_y + white_y
 
    # Map each character to an integer id; 0 is reserved for padding/unknown
    valid_chars = {x: idx + 1 for idx, x in enumerate(set(''.join(X)))}
 
    max_features = len(valid_chars) + 1
    print("max_features:", max_features)
    maxlen = max(len(x) for x in X)
    print("max_len:", maxlen)
    maxlen = min(maxlen, 256)  # cap sequence length at 256 characters
 
    # Convert characters to int and pad
    X = [[valid_chars[y] for y in x] for x in X]
    X = pad_sequences(X, maxlen=maxlen, value=0.)
 
    # One-hot encode the labels
    Y = to_categorical(labels, nb_classes=2)

    # Persist the vocabulary and padding parameters for reuse at test time
    volcab_file = "volcab.pkl"
    with open(volcab_file, 'wb') as output:
        data = {"valid_chars": valid_chars, "max_len": maxlen, "volcab_size": max_features}
        pickle.dump(data, output)
 
    return X, Y, maxlen, max_features
 
 
def train_model():
    X, Y, max_len, volcab_size = get_data()
 
    print("X len:", len(X), "Y len:", len(Y))
    trainX, testX, trainY, testY = train_test_split(X, Y, test_size=0.2, random_state=42)
    print(trainX[:1])
    print(trainY[:1])
    print(testX[-1:])
    print(testY[-1:])
 
    model = get_cnn_model(max_len, volcab_size)
    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True, batch_size=1024)
    
    # Save the trained model, then reload it as a persistence sanity check
    filename = 'finalized_model.tflearn'
    model.save(filename)

    model.load(filename)
    print("Review predictions for 3 test samples:")
    result = model.predict(testX[0:3])
    print(result)
 
 
def test_model():
    # Restore the vocabulary and padding settings saved at training time
    volcab_file = "volcab.pkl"
    assert os.path.exists(volcab_file)
    with open(volcab_file, 'rb') as pkl_file:
        data = pickle.load(pkl_file)
    valid_chars, max_document_length, max_features = data["valid_chars"], data["max_len"], data["volcab_size"]

    print("max_features:", max_features)
    print("max_len:", max_document_length)

    cnn_model = get_cnn_model(max_document_length, max_features)
    filename = 'finalized_model.tflearn'
    cnn_model.load(filename)
    print("predict domains:")
    bls = []  # domains flagged as DGA

    with open("dga_360_sorted.txt") as f:
    # with open("todo.txt") as f:
        lines = f.readlines()
        print("domain_list len:", len(lines))
        cnt = 1000  # predict in batches of 1000 domains
        for i in range(0, len(lines), cnt):
            domain_list = [line.strip() for line in lines[i:i+cnt]]

            # Convert characters to ints (unknown chars map to 0) and pad
            X = [[valid_chars[c] if c in valid_chars else 0 for c in domain] for domain in domain_list]
            X = pad_sequences(X, maxlen=max_document_length, value=0.)

            result = cnn_model.predict(X)
            for j, domain in enumerate(domain_list):
                if result[j][1] > 0.5:  # raise the threshold (e.g. to 0.95) for higher precision
                    print(domain, result[j][1])
                    bls.append(domain)
        print(len(bls), "dga found!")
 
 
if __name__ == "__main__":
    print("train model...")
    train_model()
    print("test model...")
    test_model()
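
For quick spot checks outside of test_model, here is a minimal sketch. It assumes volcab.pkl and finalized_model.tflearn already exist from a train_model() run, and that it runs in a fresh process so the TFLearn graph is clean; the sample domains below are illustrative only.

def predict_domains(domains):
    # Restore the vocabulary and padding settings saved by get_data()
    with open("volcab.pkl", "rb") as f:
        data = pickle.load(f)
    valid_chars, max_len, volcab_size = data["valid_chars"], data["max_len"], data["volcab_size"]

    model = get_cnn_model(max_len, volcab_size)
    model.load("finalized_model.tflearn")

    # Same preprocessing as training: chars to ints (unknown -> 0), then pad
    X = [[valid_chars.get(c, 0) for c in d] for d in domains]
    X = pad_sequences(X, maxlen=max_len, value=0.)
    for d, p in zip(domains, model.predict(X)):
        print(d, "DGA score:", p[1])

# Illustrative domains only; the second one merely looks DGA-like
predict_domains(["google.com", "xjpakmdcfuqe.biz"])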

 
