import sys
try:
    from collections.abc import MutableMapping
except ImportError:  # Python 2
    from collections import MutableMapping

from .platform import PY3, IRONPYTHON
from .robottypes import is_dict_like, is_unicode

def normalize(string, ignore=(), caseless=True, spaceless=True):
"""Normalizes given string according to given spec.
By default string is turned to lower case and all whitespace is removed.
Additional characters can be removed by giving them in ``ignore`` list.
"""
empty = u'' if is_unicode(string) else b''
if PY3 and isinstance(ignore, bytes):
# Iterating bytes in Python3 yields integers.
ignore = [bytes([i]) for i in ignore]
if spaceless:
string = empty.join(string.split())
if caseless:
string = lower(string)
ignore = [lower(i) for i in ignore]
    # The two ifs below are only minor performance optimizations.
if ignore:
for ign in ignore:
if ign in string:
string = string.replace(ign, empty)
return string
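

# Illustrative examples of `normalize` (kept as comments so that nothing is
# executed on import; the results follow from the implementation above):
#
#     normalize('Foo Bar')                  # -> 'foobar'
#     normalize('Foo Bar', caseless=False)  # -> 'FooBar'
#     normalize('Foo Bar', ignore=['o'])    # -> 'fbar'

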
# Workaround for a `lower()` bug in IronPython < 2.7.5:
# http://ironpython.codeplex.com/workitem/33133
if IRONPYTHON and sys.version_info < (2, 7, 5):
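    # Prepending an ASCII character and stripping it afterwards works around
    # the `lower()` bug in the affected IronPython versions (see the issue
    # linked above).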
def lower(string):
return ('A' + string).lower()[1:]
else:
    def lower(string):
        return string.lower()


class NormalizedDict(MutableMapping):
"""Custom dictionary implementation automatically normalizing keys."""
def __init__(self, initial=None, ignore=(), caseless=True, spaceless=True):
"""Initialized with possible initial value and normalizing spec.
Initial values can be either a dictionary or an iterable of name/value
pairs. In the latter case items are added in the given order.
Normalizing spec has exact same semantics as with the :func:`normalize`
function.
"""
self._data = {}
self._keys = {}
self._normalize = lambda s: normalize(s, ignore, caseless, spaceless)
if initial:
self._add_initial(initial)
def _add_initial(self, initial):
items = initial.items() if hasattr(initial, 'items') else initial
for key, value in items:
self[key] = value
def __getitem__(self, key):
return self._data[self._normalize(key)]
def __setitem__(self, key, value):
norm_key = self._normalize(key)
self._data[norm_key] = value
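        # Remember the key originally used for this normalized form so that
        # iteration and `__str__` can show it; re-assignments keep the first
        # seen key.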
self._keys.setdefault(norm_key, key)
def __delitem__(self, key):
norm_key = self._normalize(key)
del self._data[norm_key]
del self._keys[norm_key]
def __iter__(self):
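        # Yield the original keys, ordered by their normalized form.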
return (self._keys[norm_key] for norm_key in sorted(self._keys))
def __len__(self):
return len(self._data)
def __str__(self):
return '{%s}' % ', '.join('%r: %r' % (key, self[key]) for key in self)
def __eq__(self, other):
if not is_dict_like(other):
return False
if not isinstance(other, NormalizedDict):
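            # Normalize the other mapping's keys (with the default spec) so
            # that the comparison is based on normalized keys on both sides.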
other = NormalizedDict(other)
return self._data == other._data
def __ne__(self, other):
return not self == other
def copy(self):
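        # Copy the internal dicts directly instead of re-inserting items so
        # that keys do not need to be normalized again.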
copy = NormalizedDict()
copy._data = self._data.copy()
copy._keys = self._keys.copy()
copy._normalize = self._normalize
return copy
    # Speed-ups: the following methods are faster than the default
    # implementations inherited from MutableMapping.
def __contains__(self, key):
return self._normalize(key) in self._data
def clear(self):
self._data.clear()
self._keys.clear()
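

# Illustrative usage of `NormalizedDict` (kept as comments so that nothing is
# executed on import; the results follow from the implementation above):
#
#     d = NormalizedDict({'Foo Bar': 1})
#     d['FOOBAR']            # -> 1, lookups are normalized
#     'f o o   b a r' in d   # -> True
#     list(d)                # -> ['Foo Bar'], original keys are preserved
#     d['foo bar'] = 2       # re-assigns the same normalized slot
#     len(d)                 # -> 1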