Summer Vacation, Week 6

This week I mainly studied code for digit recognition and captcha recognition, built on the TensorFlow framework in a Python environment. The details are as follows:

1. Digit recognition:

# coding=utf-8
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the MNIST dataset with one-hot encoded labels.
mnist = input_data.read_data_sets('MNIST', one_hot=True)

# Softmax regression: one fully connected layer from 784 pixels to 10 classes.
weights = tf.Variable(tf.zeros([784, 10]))
biases = tf.Variable(tf.zeros([10]))
x = tf.placeholder("float", [None, 784])
y = tf.nn.softmax(tf.matmul(x, weights) + biases)
y_real = tf.placeholder("float", [None, 10])

# Cross-entropy loss, minimized with plain gradient descent.
cross_entropy = -tf.reduce_sum(y_real * tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_real: batch_ys})

    if i % 100 == 0:
        # Evaluate accuracy on the full test set every 100 steps.
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_real, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_real: mnist.test.labels}))
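As a quick follow-up check (my own sketch, not from the original post), the trained softmax model above can classify a single test image; `single_image` and `pred` are names introduced here only for illustration:

single_image = mnist.test.images[0:1]                     # shape (1, 784)
pred = sess.run(tf.argmax(y, 1), feed_dict={x: single_image})
print('predicted digit:', pred[0])
print('true digit:', mnist.test.labels[0:1].argmax())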

2. Captcha recognition

Captcha generation:

from captcha.image import ImageCaptcha
import os
import random
import sys

# Captchas are built from the ten digits only.
number = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']

def random_captcha_text(char_set=number, captcha_size=4):
    # Draw `captcha_size` random characters from the character set.
    captcha_text = []
    for i in range(captcha_size):
        c = random.choice(char_set)
        captcha_text.append(c)
    return captcha_text

def gen_captcha_text_and_image():
    # Render the random text to a PNG named after its label, e.g. ./image/1234.png.
    image = ImageCaptcha()
    captcha_text = ''.join(random_captcha_text())
    image.write(captcha_text, './image/' + captcha_text + '.png')

num = 10000
if __name__ == '__main__':
    if not os.path.exists('./image'):
        os.makedirs('./image')
    for i in range(num):
        gen_captcha_text_and_image()
        sys.stdout.write('\r>> creating images %d/%d' % (i + 1, num))
        sys.stdout.flush()
    sys.stdout.write('\n')
    sys.stdout.flush()
    print('done generating captchas')
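A small sanity check (my own sketch, not part of the original script): because each file is named after its text, duplicate captcha strings overwrite each other, so ./image/ usually ends up with somewhat fewer than 10000 unique files. The snippet below counts the files and prints one sample's label and size:

import os
from PIL import Image

files = [f for f in os.listdir('./image') if f.endswith('.png')]
print('generated %d unique images' % len(files))
sample = Image.open(os.path.join('./image', files[0]))
print('sample label:', files[0][:4], 'image size:', sample.size)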

Generating the TFRecord files:

import tensorflow as tf
import numpy as np
from PIL import Image
import os
import random
import sys

_NUM_TEST = 500          # number of images held out for the test split
_RANDOM_SEED = 0
DATASET_DIR = './image/'
TFRECORD_DIR = './image/tfr/'

def _dataset_exists(dataset_dir):
    # Both split files must already exist for the dataset to count as converted.
    for split_name in ['train', 'test']:
        output_filename = os.path.join(dataset_dir, split_name + '.tfrecords')
        if not tf.gfile.Exists(output_filename):
            return False
    return True

def _get_filenames_and_classes(dataset_dir):
    # Collect the paths of all generated captcha images.
    photo_filenames = []
    for filename in os.listdir(dataset_dir):
        if filename.endswith('.png'):
            path = os.path.join(dataset_dir, filename)
            photo_filenames.append(path)
    return photo_filenames

def int64_feature(values):
    if not isinstance(values, (tuple, list)):
        values = [values]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))

def bytes_feature(values):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))

def image_to_tfexample(image_data, label0, label1, label2, label3):
    # One Example per captcha: the raw image bytes plus the four digit labels.
    return tf.train.Example(features=tf.train.Features(feature={
        'image': bytes_feature(image_data),
        'label0': int64_feature(label0),
        'label1': int64_feature(label1),
        'label2': int64_feature(label2),
        'label3': int64_feature(label3),
    }))

def _convert_dataset(split_name, filenames, dataset_dir):
    assert split_name in ['train', 'test']
    output_filename = os.path.join(TFRECORD_DIR, split_name + '.tfrecords')
    with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
        for i, filename in enumerate(filenames):
            try:
                sys.stdout.write('\r>> converting image %d / %d' % (i + 1, len(filenames)))
                sys.stdout.flush()
                # Resize to 224x224, convert to grayscale, and serialize to raw bytes.
                image_data = Image.open(filename)
                image_data = image_data.resize((224, 224))
                image_data = np.array(image_data.convert('L'))
                image_data = image_data.tobytes()
                # The four labels are the first four characters of the filename.
                labels = os.path.basename(filename)[0:4]
                num_labels = []
                for j in range(4):
                    num_labels.append(int(labels[j]))
                example = image_to_tfexample(image_data, num_labels[0], num_labels[1],
                                             num_labels[2], num_labels[3])
                tfrecord_writer.write(example.SerializeToString())
            except IOError as e:
                print('\ncould not read:', filename)
                print('error:', e)
    sys.stdout.write('\n')
    sys.stdout.flush()

if _dataset_exists(TFRECORD_DIR):
    print('tfrecord files already exist')
else:
    if not os.path.exists(TFRECORD_DIR):
        os.makedirs(TFRECORD_DIR)
    photo_filenames = _get_filenames_and_classes(DATASET_DIR)
    # Shuffle deterministically, then split into training and test sets.
    random.seed(_RANDOM_SEED)
    random.shuffle(photo_filenames)
    training_filenames = photo_filenames[_NUM_TEST:]
    testing_filenames = photo_filenames[:_NUM_TEST]
    _convert_dataset('train', training_filenames, DATASET_DIR)
    _convert_dataset('test', testing_filenames, DATASET_DIR)
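To verify the conversion (again a sketch of my own, not from the original post), one record can be read back from train.tfrecords and its fields inspected; the path below assumes the TFRECORD_DIR used above:

import tensorflow as tf

record_iter = tf.python_io.tf_record_iterator('./image/tfr/train.tfrecords')
example = tf.train.Example()
example.ParseFromString(next(record_iter))
feature = example.features.feature
labels = [int(feature['label%d' % k].int64_list.value[0]) for k in range(4)]
print('labels:', labels)
print('image bytes:', len(feature['image'].bytes_list.value[0]))   # 224 * 224 = 50176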

Captcha recognition:

import os
import tensorflow as tf
from PIL import Image
import numpy as np
from nets import nets_factory
import matplotlib.pyplot as plt

CHAR_NUM = 10            # ten possible digits per character
IMAGE_HEIGHT = 60        # original captcha size, before resizing to 224x224
IMAGE_WIDTH = 160
BATCH_SIZE = 1
TFRECORD_FILE = './image/tfr/test.tfrecords'   # test split generated above

x = tf.placeholder(tf.float32, [None, 224, 224])

def read_and_decode(filename):
    # Read one serialized Example from the TFRecord queue and parse its features.
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example,
                                       features={'image': tf.FixedLenFeature([], tf.string),
                                                 'label0': tf.FixedLenFeature([], tf.int64),
                                                 'label1': tf.FixedLenFeature([], tf.int64),
                                                 'label2': tf.FixedLenFeature([], tf.int64),
                                                 'label3': tf.FixedLenFeature([], tf.int64)})
    image = tf.decode_raw(features['image'], tf.uint8)
    # Keep an unprocessed grayscale copy for display.
    image_raw = tf.reshape(image, [224, 224])
    # Normalize the network input from [0, 255] to [-1, 1].
    image = tf.reshape(image, [224, 224])
    image = tf.cast(image, tf.float32) / 255.0
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    label0 = tf.cast(features['label0'], tf.int32)
    label1 = tf.cast(features['label1'], tf.int32)
    label2 = tf.cast(features['label2'], tf.int32)
    label3 = tf.cast(features['label3'], tf.int32)
    return image, image_raw, label0, label1, label2, label3

image, image_raw, label0, label1, label2, label3 = read_and_decode(TFRECORD_FILE)
image_batch, image_raw_batch, label_batch0, label_batch1, label_batch2, label_batch3 = tf.train.shuffle_batch(
    [image, image_raw, label0, label1, label2, label3],
    batch_size=BATCH_SIZE,
    capacity=53, min_after_dequeue=50,
    num_threads=1)

# Note: this assumes an alexnet_v2 modified to output four sets of logits, one per
# captcha character; the stock slim alexnet_v2 returns a single logits tensor.
train_network_fn = nets_factory.get_network_fn(
    'alexnet_v2',
    num_classes=CHAR_NUM,
    weight_decay=0.0005,
    is_training=False)

with tf.Session() as sess:
    X = tf.reshape(x, [BATCH_SIZE, 224, 224, 1])
    logits0, logits1, logits2, logits3, end_points = train_network_fn(X)
    # The predicted class for each character is the argmax over the 10 digits.
    prediction0 = tf.argmax(tf.reshape(logits0, [-1, CHAR_NUM]), 1)
    prediction1 = tf.argmax(tf.reshape(logits1, [-1, CHAR_NUM]), 1)
    prediction2 = tf.argmax(tf.reshape(logits2, [-1, CHAR_NUM]), 1)
    prediction3 = tf.argmax(tf.reshape(logits3, [-1, CHAR_NUM]), 1)

    sess.run(tf.global_variables_initializer())
    # Restore the weights saved by the training script.
    saver = tf.train.Saver()
    saver.restore(sess, './ckpt/crack_captcha-10000.ckpt')

    # Start the input queue threads.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    for i in range(5):
        b_image, b_image_raw, b_label0, b_label1, b_label2, b_label3 = sess.run(
            [image_batch, image_raw_batch, label_batch0, label_batch1, label_batch2, label_batch3])
        # Show the raw grayscale captcha.
        img = Image.fromarray(b_image_raw[0], 'L')
        plt.imshow(img, cmap='gray')
        plt.axis('off')
        plt.show()
        print('label:', b_label0, b_label1, b_label2, b_label3)
        label0, label1, label2, label3 = sess.run([prediction0, prediction1, prediction2, prediction3],
                                                  feed_dict={x: b_image})
        print('predict:', label0, label1, label2, label3)

    coord.request_stop()
    coord.join(threads)
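Finally, as a rough check of my own (not in the original code), the last sample's predicted digits can be compared with its ground-truth labels character by character; `predicted`, `truth`, and `matches` are names introduced here for illustration:

predicted = [label0[0], label1[0], label2[0], label3[0]]
truth = [b_label0[0], b_label1[0], b_label2[0], b_label3[0]]
matches = sum(int(p == t) for p, t in zip(predicted, truth))
print('%d / 4 characters predicted correctly on the last sample' % matches)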

 

posted @ 2019-08-18 20:13  或者活着