Part-of-Speech Tagging with an Averaged Perceptron

1. AveragedPerceptron.py

from collections import defaultdict
import logging
import pickle
import random


class AveragedPerceptron(object):
    '''An averaged perceptron, as implemented by Matthew Honnibal.
    See more implementation details here:
        http://honnibal.wordpress.com/2013/09/11/a-good-part-of-speechpos-tagger-in-about-200-lines-of-python/
    '''

    def __init__(self):
        # Each feature gets its own weight vector, so weights is a dict-of-dicts
        self.weights = {}
        self.classes = set()
        # Accumulated weight totals, used to compute the averaged weights.
        # defaultdict(int) returns 0 for every missing key.
        self._totals = defaultdict(int)
        # The last time the feature was changed, for the averaging. Also
        # keyed by feature/class tuples
        # (tstamps is short for timestamps)
        self._tstamps = defaultdict(int)  # value of i at the last update of this weight
        # Number of instances seen
        self.i = 0
        self.iii = 0  # for my own testing

    def predict(self, features):
        '''Dot-product the features and current weights and return the best label.'''
        scores = defaultdict(float)  # missing labels default to 0.0
        for feat, value in features.items():
            if feat not in self.weights or value == 0:
                continue
            weights = self.weights[feat]
            for label, weight in weights.items():
                scores[label] += value * weight  # accumulate each label's score across features
        # Do a secondary alphabetic sort, for stability
        return max(self.classes, key=lambda label: (scores[label], label))  # highest-scoring tag; ties go to the alphabetically later label

    def update(self, truth, guess, features):
        '''Update the feature weights.'''

        def upd_feat(c, f, w, v):
            param = (f, c)
            self._totals[param] += (self.i - self._tstamps[param]) * w  # accumulate (current i - i at this weight's last update) * weight
            self._tstamps[param] = self.i  # remember the value of i when this weight was updated
            self.weights[f][c] = w + v  # update the weight

        self.i += 1  # each word yields one feature dict, so i grows by 1 per word processed
        if truth == guess:
            return None
        for f in features:  # for every feature, adjust the weights of both the correct tag and the guessed tag
            weights = self.weights.setdefault(f, {})  # like get, but also inserts the default into the dict when the key is missing
            upd_feat(truth, f, weights.get(truth, 0.0), 1.0)
            upd_feat(guess, f, weights.get(guess, 0.0), -1.0)

            '''The rest of this loop is my own debugging output'''
            f_tmp = "bias"
            cla = "zz1_np1@\n"
            param_tmp = (f_tmp, cla)
            if f == f_tmp:
                tmp = self.weights.get(f_tmp, None)
                logging.info(self._totals[("bias", "zz1_np1@\n")])
                if tmp is not None:
                    logging.info(self.weights.get(f_tmp).get(cla))
                else:
                    logging.info(None)
                logging.info("*******************************" + str(self.i) + "****" + str(self._tstamps[param_tmp]))
                self.iii += 1
        return None

    def average_weights(self):
        '''Average weights from all iterations.'''
        for feat, weights in self.weights.items():
            new_feat_weights = {}
            for clas, weight in weights.items():
                param = (feat, clas)
                total = self._totals[param]
                total += (self.i - self._tstamps[param]) * weight
                averaged = round(total / float(self.i), 3)
                if averaged:
                    new_feat_weights[clas] = averaged  # keep only non-zero averaged weights
            self.weights[feat] = new_feat_weights
        return None

    def save(self, path):
        '''Save the pickled model weights.'''
        return pickle.dump(dict(self.weights), open(path, 'wb'))  # pickle needs a binary file handle

    def load(self, path):
        '''Load the pickled model weights.'''
        self.weights = pickle.load(open(path, 'rb'))
        return None


def train(nr_iter, examples):
    '''Return an averaged perceptron model trained on ``examples`` for
    ``nr_iter`` iterations.
    '''
    model = AveragedPerceptron()
    model.classes = {class_ for _, class_ in examples}  # predict() needs the label set
    for i in range(nr_iter):
        random.shuffle(examples)
        for features, class_ in examples:
            guess = model.predict(features)  # predict() already returns the best label, not a score dict
            model.update(class_, guess, features)  # update() returns early (after i += 1) when the guess is correct
    model.average_weights()
    return model
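
To make the lazy-averaging bookkeeping above concrete, here is a minimal sketch (my own toy example, not from the original post) that trains the class on a few hand-made feature dicts and prints the result. The feature names and labels are invented for illustration; note how _totals is only touched when a weight changes, with (self.i - self._tstamps[param]) * w back-filling all the iterations during which the weight sat unchanged.

from AveragedPerceptron import AveragedPerceptron, train

# Toy examples: each is a (features, label) pair.
examples = [
    ({'bias': 1, 'i suffix ing': 1}, 'VBG'),
    ({'bias': 1, 'i suffix dog': 1}, 'NN'),
    ({'bias': 1, 'i suffix ing': 1}, 'VBG'),
    ({'bias': 1, 'i suffix cat': 1}, 'NN'),
]

model = train(5, examples)
print(model.predict({'bias': 1, 'i suffix ing': 1}))  # expected: VBG
print(model.weights)  # averaged weights, rounded to 3 decimal places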

2. PerceptronTagger.py

from __future__ import absolute_import
import os
import random
from collections import defaultdict
import pickle
import logging

from AveragedPerceptron import AveragedPerceptron

PICKLE = "data/trontagger-0.1.0.pickle"


class PerceptronTagger():
    '''Greedy Averaged Perceptron tagger, as implemented by Matthew Honnibal.
    See more implementation details here:
        http://honnibal.wordpress.com/2013/09/11/a-good-part-of-speechpos-tagger-in-about-200-lines-of-python/
    :param load: Load the pickled model upon instantiation.
    '''

    START = ['-START-', '-START2-']
    END = ['-END-', '-END2-']
    AP_MODEL_LOC = os.path.join(os.path.dirname(__file__), PICKLE)

    def __init__(self, load=True):
        self.model = AveragedPerceptron()
        self.tagdict = {}
        self.classes = set()
        if load:
            self.load(self.AP_MODEL_LOC)

    def tag(self, corpus):
        '''Tags a string `corpus`.'''
        # Assume untokenized corpus has \n between sentences and ' ' between words
        s_split = lambda t: t.split('\n')
        w_split = lambda s: s.split()

        def split_sents(corpus):
            for s in s_split(corpus):
                yield w_split(s)

        tokens = []
        for words in split_sents(corpus):
            prev, prev2 = self.START  # reset the tag history at each sentence boundary, as in train()
            context = self.START + [self._normalize(w) for w in words] + self.END
            for i, word in enumerate(words):
                tag = self.tagdict.get(word)
                if not tag:
                    features = self._get_features(i, word, context, prev, prev2)
                    tag = self.model.predict(features)
                tokens.append((word, tag))
                prev2 = prev
                prev = tag
        return tokens

    def train(self, sentences, save_loc=None, nr_iter=5):
        '''Train a model from sentences, and save it at ``save_loc``. ``nr_iter``
        controls the number of Perceptron training iterations.
        :param sentences: A list of (words, tags) tuples.
        :param save_loc: If not ``None``, saves a pickled model in this location.
        :param nr_iter: Number of training iterations.
        '''
        self._make_tagdict(sentences)
        self.model.classes = self.classes
        for iter_ in range(nr_iter):
            c = 0  # number of correct predictions
            n = 0  # total number of words seen
            for words, tags in sentences:
                prev, prev2 = self.START
                context = self.START + [self._normalize(w) for w in words] \
                          + self.END
                for i, word in enumerate(words):
                    guess = self.tagdict.get(word)
                    if not guess:  # fall back to the model only when word is not in tagdict
                        feats = self._get_features(i, word, context, prev, prev2)
                        guess = self.model.predict(feats)
                        # the first argument is the human-annotated (gold) tag, the second is the predicted tag
                        self.model.update(tags[i], guess, feats)
                    prev2 = prev
                    prev = guess
                    c += guess == tags[i]
                    n += 1
            random.shuffle(sentences)  # reshuffle the training sentences between iterations
            logging.info("Iter {0}: {1}/{2}={3}".format(iter_, c, n, _pc(c, n)))
        self.model.average_weights()
        # Pickle as a binary file
        if save_loc is not None:
            pickle.dump((self.model.weights, self.tagdict, self.classes),
                        open(save_loc, 'wb'), -1)
        return None

    def load(self, loc):
        '''Load a pickled model.'''
        try:
            w_td_c = pickle.load(open(loc, 'rb'))
        except IOError:
            msg = ("Missing trontagger.pickle file.")
            raise IOError(msg)
        self.model.weights, self.tagdict, self.classes = w_td_c
        self.model.classes = self.classes
        return None

    def _normalize(self, word):
        '''Normalization used in pre-processing.
        - All words are lower cased
        - Any four-digit number is represented as !YEAR
        - Other tokens starting with a digit are represented as !DIGITS
        :rtype: str
        '''
        if '-' in word and word[0] != '-':
            return '!HYPHEN'
        elif word.isdigit() and len(word) == 4:
            return '!YEAR'
        elif word[0].isdigit():
            return '!DIGITS'
        else:
            return word.lower()

    def _get_features(self, i, word, context, prev, prev2):
        '''Map tokens into a feature representation, implemented as a
        {hashable: float} dict. If the features change, a new model must be
        trained.
        '''

        def add(name, *args):
            features[' '.join((name,) + tuple(args))] += 1

        i += len(self.START)
        features = defaultdict(int)
        # It's useful to have a constant feature, which acts sort of like a prior
        add('bias')
        add('i suffix', word[-3:])
        add('i pref1', word[0])
        add('i-1 tag', prev)
        add('i-2 tag', prev2)
        add('i tag+i-2 tag', prev, prev2)
        add('i word', context[i])
        add('i-1 tag+i word', prev, context[i])
        add('i-1 word', context[i - 1])
        add('i-1 suffix', context[i - 1][-3:])
        add('i-2 word', context[i - 2])
        add('i+1 word', context[i + 1])
        add('i+1 suffix', context[i + 1][-3:])
        add('i+2 word', context[i + 2])
        return features

    def _make_tagdict(self, sentences):  # build a word->tag dict for frequent, unambiguous words, e.g. {'good': 'adj', 'man': 'n', ...}
        '''Make a tag dictionary for single-tag words.'''
        counts = defaultdict(lambda: defaultdict(int))
        for words, tags in sentences:  # words is e.g. ['good', 'man'], tags is e.g. ['adj', 'n']
            for word, tag in zip(words, tags):
                counts[word][tag] += 1
                self.classes.add(tag)  # collect every tag label into a set
        freq_thresh = 20
        ambiguity_thresh = 0.97
        for word, tag_freqs in counts.items():
            tag, mode = max(tag_freqs.items(), key=lambda item: item[1])
            n = sum(tag_freqs.values())
            # Don't add rare words to the tag dictionary
            # Only add quite unambiguous words
            if n >= freq_thresh and (float(mode) / n) >= ambiguity_thresh:
                self.tagdict[word] = tag


def _pc(n, d):
    return (float(n) / d) * 100


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    tagger = PerceptronTagger(False)
    try:
        tagger.load(PICKLE)
        print(tagger.tag('how are you ?'))
        logging.info('Start testing...')
        right = 0.0
        total = 0.0
        sentence = ([], [])
        for line in open('data/test.txt'):
            params = line.split()
            if len(params) != 2: continue
            sentence[0].append(params[0])
            sentence[1].append(params[1])
            if params[0] == '.':
                text = ''
                words = sentence[0]
                tags = sentence[1]
                for i, word in enumerate(words):
                    text += word
                    if i < len(words) - 1: text += ' '  # no space after the final token
                outputs = tagger.tag(text)
                assert len(tags) == len(outputs)
                total += len(tags)
                for o, t in zip(outputs, tags):
                    if o[1].strip() == t: right += 1
                sentence = ([], [])
        logging.info("Precision : %f", right / total)
    except IOError:
        logging.info('Reading corpus...')
        training_data = []
        sentence = ([], [])
        for line in open('data/train.txt'):
            params = line.strip().split('\t')  # strip the newline so tags don't end with '\n'
            if len(params) != 2: continue
            sentence[0].append(params[0])
            sentence[1].append(params[1])
            if params[0] == '.':
                training_data.append(sentence)
                sentence = ([], [])
        logging.info('training corpus size : %d', len(training_data))
        logging.info('Start training...')
        tagger.train(training_data, save_loc=PICKLE)
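
Finally, to see what the feature templates in _get_features actually produce, here is a small sketch (again my own illustration; the 'DT' value passed as prev is just an assumed previous tag). It builds the padded context the same way tag() and train() do, then prints the feature dict for a single token:

from PerceptronTagger import PerceptronTagger

tagger = PerceptronTagger(load=False)  # skip loading a pickled model
words = ['the', 'quick', 'brown', 'fox']
context = tagger.START + [tagger._normalize(w) for w in words] + tagger.END
features = tagger._get_features(1, 'quick', context, prev='DT', prev2='-START2-')
for name, value in sorted(features.items()):
    print(name, value)
# prints e.g. 'bias', 'i word quick', 'i-1 tag DT', 'i suffix ick',
# 'i-1 word the', 'i+1 word brown', ... each with count 1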
