Deep Learning: Python Implementations of Selected Data Augmentation Methods

Data augmentation strategies:
    Shared imports assumed by all code snippets below:
    import math
    import random
    import cv2
    import numpy as np
    import torchvision
    1 Online mode -- applied during training
        Random crop (fully random; four corners + center)  crop
        def random_crop(img, scale=[0.8, 1.0], ratio=[3. / 4., 4. / 3.], resize_w=100, resize_h=100):
            """
            随机裁剪
            :param img:
            :param scale: 缩放
            :param ratio:
            :param resize_w:
            :param resize_h:
            :return:
            """
            aspect_ratio = math.sqrt(np.random.uniform(*ratio))
            w = 1. * aspect_ratio
            h = 1. / aspect_ratio
            src_h, src_w = img.shape[:2]
 
            bound = min((float(src_w) / src_h) / (w ** 2),
                        (float(src_h) / src_w) / (h ** 2))
            scale_max = min(scale[1], bound)
            scale_min = min(scale[0], bound)
 
            target_area = src_h * src_w * np.random.uniform(scale_min,
                                                            scale_max)
            target_size = math.sqrt(target_area)
            w = int(target_size * w)
            h = int(target_size * h)
 
            i = np.random.randint(0, src_w - w + 1)
            j = np.random.randint(0, src_h - h + 1)
 
            img = img[j:j + h, i:i + w]
            img = cv2.resize(img, (resize_w, resize_h))
            return img
 
 
        def rule_crop(img, box_ratio=(3. / 4, 3. / 4), location_type='LT', resize_w=100, resize_h=100):
            """
            按照一定规则进行裁剪, 直接在原图尺寸上操作,不对原图进行
            :param img:
            :param box_ratio: 剪切的 比例:  (宽度上的比例, 高度上的比例)
            :param location_type: 具体在=哪个位置: 以下其中一个:
                    LR : 左上角
                    RT : 右上角
                    LB : 左下角
                    RB : 右下角
                    CC : 中心
            :param resize_w: 输出图的width
            :param resize_h: 输出图的height
            :return:
            """
            assert location_type in ('LT', 'RT', 'LB', 'RB', 'CC'), 'must have a location .'
            is_gray = False
            if len(img.shape) == 3:
                h, w, c = img.shape
            elif len(img.shape) == 2:
                h, w = img.shape
                is_gray = True
 
            crop_w, crop_h = int(w * box_ratio[0]), int(h * box_ratio[1])
            crop_img = np.zeros([10, 10])
            if location_type == 'LT':
                crop_img = img[:crop_h, :crop_w, :] if not is_gray else img[:crop_h, :crop_w]
            elif location_type == 'RT':
                crop_img = img[:crop_h, w - crop_w:, :] if not is_gray else img[:crop_h, w - crop_w:]
            elif location_type == 'LB':
                crop_img = img[h - crop_h:, :crop_w, :] if not is_gray else img[h - crop_h:, :crop_w]
            elif location_type == 'RB':
                crop_img = img[h - crop_h:, w - crop_w:, :] if not is_gray else img[h - crop_h:, w - crop_w:]
            elif location_type == 'CC':
                start_h = (h - crop_h) // 2
                start_w = (w - crop_w) // 2
                crop_img = img[start_h:start_h + crop_h, start_w:start_w + crop_w, :] if not is_gray else img[
                                                                                                          start_h:start_h + crop_h,
                                                                                                          start_w:start_w + crop_w]
 
            resize = cv2.resize(crop_img, (resize_w, resize_h))
            return resize
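        A possible usage sketch for the "four corners + center" strategy mentioned above (the image path and crop ratio are illustrative, not from the original post):
        img = cv2.imread('demo.jpg')  # hypothetical example image
        five_crops = [rule_crop(img, box_ratio=(3. / 4, 3. / 4), location_type=loc, resize_w=100, resize_h=100)
                      for loc in ('LT', 'RT', 'LB', 'RB', 'CC')]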
        Horizontal flip  flip
        def random_flip(img, mode=1):
            """
            随机翻转
            :param img:
            :param model: 1=水平翻转 / 0=垂直 / -1=水平垂直
            :return:
            """
            assert mode in (0, 1, -1), "mode is not right"
            flip = np.random.choice(2) * 2 - 1  # -1 / 1
            if mode == 1:
                img = img[:, ::flip, :]
            elif mode == 0:
                img = img[::flip, :, :]
            elif mode == -1:
                img = img[::flip, ::flip, :]
 
            return img
 
 
        def flip(img, mode=1):
            """
            翻转
            :param img:
            :param mode: 1=水平翻转 / 0=垂直 / -1=水平垂直
            :return:
            """
            assert mode in (0, 1, -1), "mode is not right"
            return cv2.flip(img, flipCode=mode)
        Random sharpening (USM / unsharp masking)
        def random_USM(img, gamma=0.):
            """
            USM锐化增强算法可以去除一些细小的干扰细节和图像噪声,比一般直接使用卷积锐化算子得到的图像更可靠。
                output = 原图像−w∗高斯滤波(原图像)/(1−w)
                其中w为上面所述的系数,取值范围为0.1~0.9,一般取0.6。
            :param img:
            :param gamma:
            :return:
            """
            blur = cv2.GaussianBlur(img, (0, 0), 25)
            img_sharp = cv2.addWeighted(img, 1.5, blur, -0.3, gamma)
            return img_sharp
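        A minimal sketch that follows the docstring formula literally, with a configurable weight w; the fixed weights 1.5 / -0.3 above are an empirical variant, and this helper is an assumption rather than part of the original code:
        def usm_sharpen(img, w=0.6, sigma=25):
            # output = (src - w * GaussianBlur(src)) / (1 - w)
            blur = cv2.GaussianBlur(img, (0, 0), sigma)
            # addWeighted computes 1/(1-w) * img + (-w/(1-w)) * blur, i.e. exactly the formula above
            return cv2.addWeighted(img, 1.0 / (1 - w), blur, -w / (1 - w), 0)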
    2 Offline mode
        2.1 Random perturbations
            Noise (Gaussian, custom)  noise
            def random_noise(img, rand_range=(3, 20)):
                """
                随机噪声
                :param img:
                :param rand_range: (min, max)
                :return:
                """
                img = np.asarray(img, np.float)
                sigma = random.randint(*rand_range)
                nosie = np.random.normal(0, sigma, size=img.shape)
                img += nosie
                img = np.uint8(np.clip(img, 0, 255))
                return img
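            The heading above also mentions "custom" noise; a minimal salt-and-pepper sketch as one possible custom variant (an assumption, not from the original post):
            def salt_pepper_noise(img, prob=0.01):
                """Set a random fraction of pixels to black (pepper) or white (salt)."""
                out = img.copy()
                mask = np.random.rand(*img.shape[:2])
                out[mask < prob / 2] = 0        # pepper
                out[mask > 1 - prob / 2] = 255  # salt
                return out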
 
            Filtering (Gaussian, smoothing, mean, median, max/min, bilateral, guided, motion); a motion-blur sketch is given after the random filter below
            # Overview of the different filters: https://blog.csdn.net/hellocsz/article/details/80727972
            def gaussianBlur(img, ks=(7, 7), stdev=1.5):
                """
                Gaussian blur: smooths the image and removes sharp noise
                :param img: input image
                :param ks: kernel size (odd width and height)
                :param stdev: standard deviation
                :return: blurred image
                """
                return cv2.GaussianBlur(img, ks, stdev)
            # Random filtering
            def random_blur(img, ksize=(3, 3)):
                """
                Apply a randomly chosen blur filter
                :param img: input image
                :param ksize: kernel size (odd width and height)
                :return: blurred image
                """
                blur_type = random.choice(['gaussian', 'median', 'bilateral', 'mean', 'box'])
                if blur_type == 'gaussian':
                    # Gaussian blur: smoother than mean filtering, preserves edges better
                    img_blur = cv2.GaussianBlur(img, ksize, 0)
                elif blur_type == 'median':
                    # Median filter: preserves edges better than mean filtering, though large kernels still blur them; effective against salt-and-pepper noise
                    img_blur = cv2.medianBlur(img, ksize[0])
                elif blur_type == 'bilateral':
                    # Bilateral filter: non-linear, keeps more high-frequency detail; good for low-frequency noise, does not remove impulse noise
                    img_blur = cv2.bilateralFilter(img, ksize[0], 75, 75)
                elif blur_type == 'mean':
                    # Mean filter: removes noise but also many details, leaving the image blurry
                    img_blur = cv2.blur(img, ksize)
                else:
                    # Box filter
                    img_blur = cv2.boxFilter(img, -1, ksize)
                return img_blur
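            The filter list above also mentions motion blur; a minimal sketch using a rotated line kernel (the kernel construction is an assumption, not from the original post):
            def motion_blur(img, degree=10, angle=45):
                # Build a line kernel of length `degree` rotated by `angle`, normalize it, then convolve
                kernel = np.diag(np.ones(degree, dtype=np.float32))
                M = cv2.getRotationMatrix2D((degree / 2, degree / 2), angle, 1)
                kernel = cv2.warpAffine(kernel, M, (degree, degree))
                kernel = kernel / kernel.sum()
                return cv2.filter2D(img, -1, kernel)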
            # Histogram equalization
            def equalize_hist(img):
                """
                Histogram equalization: converts to grayscale, equalizes, then expands back to 3 channels
                (the original colors are discarded)
                :param img: RGB image
                :return: equalized image replicated to 3 channels
                """
                gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
                hist = cv2.equalizeHist(gray)
                rgb = cv2.cvtColor(hist, cv2.COLOR_GRAY2RGB)
                return rgb
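            The version above equalizes a grayscale copy and discards the colors; a common color-preserving alternative (a sketch, not from the original post) equalizes only the luma channel in YCrCb space:
            def equalize_hist_color(img):
                # Equalize only the Y (luma) channel so the colors are preserved
                ycrcb = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
                ycrcb[:, :, 0] = cv2.equalizeHist(ycrcb[:, :, 0])
                return cv2.cvtColor(ycrcb, cv2.COLOR_YCrCb2RGB)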
        2.2 Transforms
            Rotation  rotate
            def rotate(img, angle, scale=1.0):
                """
                旋转
                :param img:
                :param angle: 旋转角度, >0 表示逆时针,
                :param scale:
                :return:
                """
                height, width = img.shape[:2# 获取图像的高和宽
                center = (width / 2, height / 2# 取图像的中点
 
                M = cv2.getRotationMatrix2D(center, angle, scale)  # 获得图像绕着某一点的旋转矩阵
                # cv2.warpAffine()的第二个参数是变换矩阵,第三个参数是输出图像的大小
                rotated = cv2.warpAffine(img, M, (height, width))
                return rotated
 
 
            def random_rotate(img, angle_range=(-10, 10)):
                """
                随机旋转
                :param img:
                :param angle_range:  旋转角度范围 (min,max)   >0 表示逆时针,
                :return:
                """
                height, width = img.shape[:2# 获取图像的高和宽
                center = (width / 2, height / 2# 取图像的中点
                angle = random.randrange(*angle_range, 1)
                M = cv2.getRotationMatrix2D(center, angle, 1.0# 获得图像绕着某一点的旋转矩阵
                # cv2.warpAffine()的第二个参数是变换矩阵,第三个参数是输出图像的大小
                rotated = cv2.warpAffine(img, M, (height, width))
                return rotated
            Translation  shift
            def shift(img, x_offset, y_offset):
                """
                偏移,向右 向下
                :param img:
                :param x_offset:  >0表示向右偏移px, <0表示向左
                :param y_offset:  >0表示向下偏移px, <0表示向上
                :return:
                """
                h, w, _ = img.shape
                M = np.array([[1, 0, x_offset], [0, 1, y_offset]], dtype=np.float)
                return cv2.warpAffine(img, M, (w, h))
            Skew (shear)  skew
            ...
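            The skew entry above is only a placeholder; a minimal shear sketch via an affine matrix (the parameterization is an assumption, not from the original post):
            def skew(img, shear_x=0.2, shear_y=0.0):
                """
                Shear the image horizontally and/or vertically
                :param shear_x: horizontal shear factor
                :param shear_y: vertical shear factor
                """
                h, w = img.shape[:2]
                M = np.float32([[1, shear_x, 0], [shear_y, 1, 0]])
                new_w = int(w + abs(shear_x) * h)
                new_h = int(h + abs(shear_y) * w)
                return cv2.warpAffine(img, M, (new_w, new_h))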
            Scaling  scale
            def resize_img(img, resize_w, resize_h):
                height, width = img.shape[:2]  # image height and width
                return cv2.resize(img, (resize_w, resize_h), interpolation=cv2.INTER_CUBIC)
            RGB/BGR->HSV
            def rgb2hsv_py(r, g, b):
                # from https://blog.csdn.net/weixin_43360384/article/details/84871521
                r, g, b = r/255.0, g/255.0, b/255.0
                mx = max(r, g, b)
                mn = min(r, g, b)
                m = mx-mn
                if mx == mn:
                    h = 0
                elif mx == r:
                    if g >= b:
                        h = ((g-b)/m)*60
                    else:
                        h = ((g-b)/m)*60 + 360
                elif mx == g:
                    h = ((b-r)/m)*60 + 120
                elif mx == b:
                    h = ((r-g)/m)*60 + 240
                if mx == 0:
                    s = 0
                else:
                    s = m/mx
                v = mx
                return h, s, v
            def rgb2hsv_cv(img):
                # from https://blog.csdn.net/qq_38332453/article/details/89258058
                h = img.shape[0]
                w = img.shape[1]
                H = np.zeros((h,w),np.float32)
                S = np.zeros((h, w), np.float32)
                V = np.zeros((h, w), np.float32)
                r,g,b = cv2.split(img)
                r, g, b = r/255.0, g/255.0, b/255.0
                for i in range(0, h):
                    for j in range(0, w):
                        mx = max((b[i, j], g[i, j], r[i, j]))
                        mn = min((b[i, j], g[i, j], r[i, j]))
                        V[i, j] = mx
                        if V[i, j] == 0:
                            S[i, j] = 0
                        else:
                            S[i, j] = (V[i, j] - mn) / V[i, j]
                        if mx == mn:
                            H[i, j] = 0
                        elif V[i, j] == r[i, j]:
                            if g[i, j] >= b[i, j]:
                                H[i, j] = (60 * ((g[i, j]) - b[i, j]) / (V[i, j] - mn))
                            else:
                                H[i, j] = (60 * ((g[i, j]) - b[i, j]) / (V[i, j] - mn))+360
                        elif V[i, j] == g[i, j]:
                            H[i, j] = 60 * ((b[i, j]) - r[i, j]) / (V[i, j] - mn) + 120
                        elif V[i, j] == b[i, j]:
                            H[i, j] = 60 * ((r[i, j]) - g[i, j]) / (V[i, j] - mn) + 240
                        H[i,j] = H[i,j] / 2
                return H, S, V
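            In practice this conversion is done with a single vectorized OpenCV call; for an image loaded with cv2.imread (BGR channel order) that would be:
            hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  # for 8-bit images H is in [0, 180), S and V in [0, 255]
            h, s, v = cv2.split(hsv)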
            Image overlay and blending
            def addWeight(src1, alpha, src2, beta, gamma):
                """
                g (x) = (1 − α)f0 (x) + αf1 (x)   #a→(0,1)不同的a值可以实现不同的效果
                dst = src1 * alpha + src2 * beta + gamma
                :param src1: img1
                :param alpha:
                :param src2: img2
                :param beta:
                :param gamma:
                :return:
                """
                assert src1.shap == src2.shape
                return cv2.addWeighted(src1, alpha, src2, beta, gamma)
            Color jitter (brightness / hue / saturation / contrast)  color jitter
            def adjust_contrast_bright(img, contrast=1.2, brightness=100):
                """
                调整亮度与对比度
                dst = img * contrast + brightness
                :param img:
                :param contrast: 对比度   越大越亮
                :param brightness: 亮度  0~100
                :return:
                """
                # 像素值会超过0-255, 因此需要截断
                return np.uint8(np.clip((contrast * img + brightness), 0, 255))
            def pytorch_color_jitter(img, brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1):
                # Build a torchvision ColorJitter transform with example jitter ranges and apply it (img: PIL Image or tensor)
                return torchvision.transforms.ColorJitter(brightness, contrast, saturation, hue)(img)
            # Gamma transform
            def gamma_transform(img, gamma=1.0):
                """
                https://blog.csdn.net/zfjBIT/article/details/85113946
                Gamma correction enhances an image by lifting dark details: a non-linear mapping moves the
                linear exposure response closer to the response of the human eye, correcting images that are
                washed out (over-exposed) or too dark (under-exposed).
                :param img: input image
                :param gamma:
                    # e.g. gamma = random.random() * random.choice([0.5, 1, 3, 5])
                    > 1 darkens the image
                    < 1 brightens (washes out) the image
                :return: corrected image
                """
                assert 0 < gamma < 25.
                # Normalize to [0, 1], raise to the power gamma, then scale back to [0, 255]
                gamma_table = [np.power(x / 255.0, gamma) * 255.0 for x in range(256)]
                gamma_table = np.round(np.array(gamma_table)).astype(np.uint8)
                # The mapping is applied with OpenCV's look-up-table function
                return cv2.LUT(img, gamma_table)
            # Mixup: blend pairs of images and their labels
            def mixup(batch_x, batch_y, alpha):
                """
                Returns mixed inputs, pairs of targets, and lambda
                :param batch_x:
                :param batch_y:
                :param alpha:
                :return:
                """
                if alpha > 0:
                    lam = np.random.beta(alpha, alpha)
                else:
                    lam = 1
 
                batch_size = batch_x.shape[0]
                index = [i for i in range(batch_size)]
                random.shuffle(index)
 
                mixed_x = lam * batch_x + (1 - lam) * batch_x[index, :]
                y_a, y_b = batch_y, batch_y[index]
                return mixed_x, y_a, y_b, lam
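            A typical training-step usage of the mixed batch returned above; model, criterion, batch_x and batch_y are assumed to be defined elsewhere (illustrative names only):
            mixed_x, y_a, y_b, lam = mixup(batch_x, batch_y, alpha=0.2)
            pred = model(mixed_x)
            loss = lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)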
            3D geometric transforms
            ...
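            The 3D entry above is also only a placeholder; a random perspective warp is a common stand-in for small viewpoint changes (this sketch and its parameterization are assumptions, not from the original post):
            def random_perspective(img, max_offset=0.1):
                # Jitter the four corners by up to max_offset of the image size and warp accordingly
                h, w = img.shape[:2]
                src = np.float32([[0, 0], [w - 1, 0], [0, h - 1], [w - 1, h - 1]])
                jitter = np.random.uniform(-max_offset, max_offset, size=(4, 2)) * [w, h]
                dst = (src + jitter).astype(np.float32)
                M = cv2.getPerspectiveTransform(src, dst)
                return cv2.warpPerspective(img, M, (w, h))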
             
  

  
