diff --git a/v1/app.py b/v1/app.py
index 61a21ba..c8612bb 100755
--- a/v1/app.py
+++ b/v1/app.py
@@ -5,12 +5,16 @@
 from flask import Flask, jsonify, make_response, request, url_for, abort
 from flask_httpauth import HTTPBasicAuth
 from json import loads, dumps
+import requests
 
 from werkzeug.security import generate_password_hash, \
     check_password_hash
 
 from diskusage import diskUsage
 from uptime import timeSinceBoot
+from cpuTemp import getCpuTemp
+
+from plexMovies import getSpecificMovieInfo
 
 app = Flask(__name__, static_url_path = "")
 auth = HTTPBasicAuth()
@@ -22,6 +26,8 @@ users = {
     "test": "test"
 }
 
+tmdbBaseURL = "https://api.themoviedb.org/3/"
+
 # Flask function for checking password sent with http request
 # @auth.verify_password
 # def verify_password(email, password):
@@ -64,10 +70,10 @@ def bad_request(error):
 @app.route('/api/v1/disks', methods=['GET'])
 @auth.login_required
 def get_diskUsage():
-    try:
-        returningDiskUsage = diskUsage(request.args.get('dir'))
+    returningDiskUsage = diskUsage(request.args.get('dir'))
+    if returningDiskUsage is not None:
         return jsonify(returningDiskUsage)
-    except:
+    else:
         abort(404)
@@ -79,6 +85,53 @@ def get_uptimes():
     except:
         abort(404)
 
+@app.route('/api/v1/temps', methods=['GET'])
+def get_temps():
+    cpuTemp = getCpuTemp()
+    if cpuTemp is not None:
+        return jsonify({"Avg cpu temp": cpuTemp})
+    else:
+        return jsonify({"Error": "Temp reading not supported for host machine."})
+
+# TODO PLEX
+# Search, watching, +photo
+@app.route('/api/v1/plex/request', methods=['GET'])
+def get_movieRequest():
+    if request.args.get("query") is not None:
+        requestType = "search/multi?"
+        requestAPI = "api_key=" + "9fa154f5355c37a1b9b57ac06e7d6712"
+        requestQuery = "&query=" + str(request.args.get('query'))
+        requestLanguage = "&language=en-US"
+
+        url = tmdbBaseURL + requestType + requestAPI + requestQuery + requestLanguage
+        # url = "https://api.themoviedb.org/3/search/multi?include_adult=false&query=home%20alone&language=en-US&api_key=9fa154f5355c37a1b9b57ac06e7d6712"
+
+        response = requests.get(url)
+
+        print(response.text)
+        return response.text
+
+    else:
+        return jsonify({"Error": "Query not defined."})
+
+@app.route('/api/v1/plex/movies', methods=['GET'])
+@auth.login_required
+def getPlexMovies():
+    title = request.args.get('title')
+
+    movieInfo = getSpecificMovieInfo(title)
+    if movieInfo is not None:
+        return jsonify(movieInfo)
+
+    abort(500)
+
+@app.route('/api/v1/plex/watchings', methods=['GET'])
+@auth.login_required
+def getPlexWatchings():
+    r = requests.get('http://10.0.0.41:32400/status/sessions')
+
+    return r.text
+
 @app.route('/api/v1/uptimes/duration', methods=['GET'])
 @auth.login_required
@@ -101,4 +153,4 @@ def get_uptimesLoad():
 
 if __name__ == '__main__':
-    app.run(host='0.0.0.0', port=63588)
+    app.run(port=63590, debug=True)
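As a quick smoke test of the endpoints added above, something like the
following can be run against the dev server (a sketch, not part of the
diff: the localhost base URL follows from the app.run() call above, and
the test/test credentials from the users dict; both are assumptions to
adjust for a real deployment):

    import requests

    BASE = "http://localhost:63590/api/v1"

    # /temps requires no auth and returns the average core temperature.
    print(requests.get(BASE + "/temps").json())

    # /plex/request proxies a TMDb multi-search; "query" is required.
    print(requests.get(BASE + "/plex/request", params={"query": "home alone"}).text)

    # /plex/movies and /plex/watchings sit behind HTTP basic auth.
    print(requests.get(BASE + "/plex/movies", params={"title": "Home Alone"},
                       auth=("test", "test")).json())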
diff --git a/v1/cpu.py b/v1/cpu.py
deleted file mode 100755
index 39673ba..0000000
--- a/v1/cpu.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import linuxcpureader
-
-def main():
-    cpu = linuxcpureader.LinuxCpuTemperatureReader()
-    print(cpu)
-    print(cpu.get_reader())
-    print(', '.join("%s: %s" % item for item in cpu.items()))
-
-main()
diff --git a/v1/cpuTemp.py b/v1/cpuTemp.py
index 0c38b25..d2fcb82 100755
--- a/v1/cpuTemp.py
+++ b/v1/cpuTemp.py
@@ -1,15 +1,35 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-# @Author: KevinMidboe
-# @Date:   2017-01-28 13:56:48
-# @Last Modified by:   KevinMidboe
-# @Last Modified time: 2017-01-28 13:58:35
+import psutil
 
-from pyspectator.processor import Cpu
-from time import sleep
+def getCpuTemp():
+    # Check if sensors_temperatures exists on this psutil/OS combination
+    try:
+        cpu = psutil.sensors_temperatures()
+    except AttributeError:
+        error = "'sensors_temperatures' is not supported in this version of psutil or your OS."
+        print(error)
+        return None
 
-cpu = Cpu(monitoring_latency=1)
-with cpu:
-    for _ in range(8):
-        cpu.load, cpu.temperature
-        sleep(1.1)
+    # List of current temperature readings, one per core.
+    curCpuTemps = []
+    # Iterate through every 'coretemp' reading; machines without a
+    # coretemp sensor fall through to the empty-list branch below.
+    for temp in cpu.get('coretemp', []):
+        curCpuTemps.append(temp[1])           # Append current temp to list
+        print(temp[0] + ': ' + str(temp[1]))  # Print reading per core
+
+    # Make sure curCpuTemps is non-empty so we don't
+    # divide by zero on an empty list.
+    if len(curCpuTemps) > 0:
+        # Compute the average of curCpuTemps
+        avgCpuTemps = sum(curCpuTemps) / len(curCpuTemps)
+        print("Avg: " + str(avgCpuTemps))
+        return avgCpuTemps
+    else:
+        print("Couldn't get cpu temp. (division by zero)")
+        return None
+
+
+if __name__ == "__main__":
+    print(getCpuTemp())
\ No newline at end of file
diff --git a/v1/flask/lib/python3.4/__future__.py b/v1/flask/lib/python3.4/__future__.py
deleted file mode 120000
index f12a5d2..0000000
--- a/v1/flask/lib/python3.4/__future__.py
+++ /dev/null
@@ -1 +0,0 @@
-/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/__future__.py
\ No newline at end of file
diff --git a/v1/flask/lib/python3.4/__future__.py b/v1/flask/lib/python3.4/__future__.py
new file mode 100644
index 0000000..3b2d5ec
--- /dev/null
+++ b/v1/flask/lib/python3.4/__future__.py
@@ -0,0 +1,134 @@
+"""Record of phased-in incompatible language changes.
+ +Each line is of the form: + + FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease "," + CompilerFlag ")" + +where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples +of the same form as sys.version_info: + + (PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int + PY_MINOR_VERSION, # the 1; an int + PY_MICRO_VERSION, # the 0; an int + PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string + PY_RELEASE_SERIAL # the 3; an int + ) + +OptionalRelease records the first release in which + + from __future__ import FeatureName + +was accepted. + +In the case of MandatoryReleases that have not yet occurred, +MandatoryRelease predicts the release in which the feature will become part +of the language. + +Else MandatoryRelease records when the feature became part of the language; +in releases at or after that, modules no longer need + + from __future__ import FeatureName + +to use the feature in question, but may continue to use such imports. + +MandatoryRelease may also be None, meaning that a planned feature got +dropped. + +Instances of class _Feature have two corresponding methods, +.getOptionalRelease() and .getMandatoryRelease(). + +CompilerFlag is the (bitfield) flag that should be passed in the fourth +argument to the builtin function compile() to enable the feature in +dynamically compiled code. This flag is stored in the .compiler_flag +attribute on _Future instances. These values must match the appropriate +#defines of CO_xxx flags in Include/compile.h. + +No feature line is ever to be deleted from this file. +""" + +all_feature_names = [ + "nested_scopes", + "generators", + "division", + "absolute_import", + "with_statement", + "print_function", + "unicode_literals", + "barry_as_FLUFL", +] + +__all__ = ["all_feature_names"] + all_feature_names + +# The CO_xxx symbols are defined here under the same names used by +# compile.h, so that an editor search will find them here. However, +# they're not exported in __all__, because they don't really belong to +# this module. +CO_NESTED = 0x0010 # nested_scopes +CO_GENERATOR_ALLOWED = 0 # generators (obsolete, was 0x1000) +CO_FUTURE_DIVISION = 0x2000 # division +CO_FUTURE_ABSOLUTE_IMPORT = 0x4000 # perform absolute imports by default +CO_FUTURE_WITH_STATEMENT = 0x8000 # with statement +CO_FUTURE_PRINT_FUNCTION = 0x10000 # print function +CO_FUTURE_UNICODE_LITERALS = 0x20000 # unicode string literals +CO_FUTURE_BARRY_AS_BDFL = 0x40000 + +class _Feature: + def __init__(self, optionalRelease, mandatoryRelease, compiler_flag): + self.optional = optionalRelease + self.mandatory = mandatoryRelease + self.compiler_flag = compiler_flag + + def getOptionalRelease(self): + """Return first release in which this feature was recognized. + + This is a 5-tuple, of the same form as sys.version_info. + """ + + return self.optional + + def getMandatoryRelease(self): + """Return release in which this feature will become mandatory. + + This is a 5-tuple, of the same form as sys.version_info, or, if + the feature was dropped, is None. 
+ """ + + return self.mandatory + + def __repr__(self): + return "_Feature" + repr((self.optional, + self.mandatory, + self.compiler_flag)) + +nested_scopes = _Feature((2, 1, 0, "beta", 1), + (2, 2, 0, "alpha", 0), + CO_NESTED) + +generators = _Feature((2, 2, 0, "alpha", 1), + (2, 3, 0, "final", 0), + CO_GENERATOR_ALLOWED) + +division = _Feature((2, 2, 0, "alpha", 2), + (3, 0, 0, "alpha", 0), + CO_FUTURE_DIVISION) + +absolute_import = _Feature((2, 5, 0, "alpha", 1), + (3, 0, 0, "alpha", 0), + CO_FUTURE_ABSOLUTE_IMPORT) + +with_statement = _Feature((2, 5, 0, "alpha", 1), + (2, 6, 0, "alpha", 0), + CO_FUTURE_WITH_STATEMENT) + +print_function = _Feature((2, 6, 0, "alpha", 2), + (3, 0, 0, "alpha", 0), + CO_FUTURE_PRINT_FUNCTION) + +unicode_literals = _Feature((2, 6, 0, "alpha", 2), + (3, 0, 0, "alpha", 0), + CO_FUTURE_UNICODE_LITERALS) + +barry_as_FLUFL = _Feature((3, 1, 0, "alpha", 2), + (3, 9, 0, "alpha", 0), + CO_FUTURE_BARRY_AS_BDFL) diff --git a/v1/flask/lib/python3.4/_bootlocale.py b/v1/flask/lib/python3.4/_bootlocale.py deleted file mode 120000 index 7786fe6..0000000 --- a/v1/flask/lib/python3.4/_bootlocale.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/_bootlocale.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/_bootlocale.py b/v1/flask/lib/python3.4/_bootlocale.py new file mode 100644 index 0000000..4bccac1 --- /dev/null +++ b/v1/flask/lib/python3.4/_bootlocale.py @@ -0,0 +1,34 @@ +"""A minimal subset of the locale module used at interpreter startup +(imported by the _io module), in order to reduce startup time. + +Don't import directly from third-party code; use the `locale` module instead! +""" + +import sys +import _locale + +if sys.platform.startswith("win"): + def getpreferredencoding(do_setlocale=True): + return _locale._getdefaultlocale()[1] +else: + try: + _locale.CODESET + except AttributeError: + def getpreferredencoding(do_setlocale=True): + # This path for legacy systems needs the more complex + # getdefaultlocale() function, import the full locale module. + import locale + return locale.getpreferredencoding(do_setlocale) + else: + def getpreferredencoding(do_setlocale=True): + assert not do_setlocale + result = _locale.nl_langinfo(_locale.CODESET) + if not result and sys.platform == 'darwin': + # nl_langinfo can return an empty string + # when the setting has an invalid value. + # Default to UTF-8 in that case because + # UTF-8 is the default charset on OSX and + # returning nothing will crash the + # interpreter. + result = 'UTF-8' + return result diff --git a/v1/flask/lib/python3.4/_collections_abc.py b/v1/flask/lib/python3.4/_collections_abc.py deleted file mode 120000 index 28537a9..0000000 --- a/v1/flask/lib/python3.4/_collections_abc.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/_collections_abc.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/_collections_abc.py b/v1/flask/lib/python3.4/_collections_abc.py new file mode 100644 index 0000000..33b59ab --- /dev/null +++ b/v1/flask/lib/python3.4/_collections_abc.py @@ -0,0 +1,748 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Abstract Base Classes (ABCs) for collections, according to PEP 3119. + +Unit tests are in test_collections. 
+""" + +from abc import ABCMeta, abstractmethod +import sys + +__all__ = ["Hashable", "Iterable", "Iterator", + "Sized", "Container", "Callable", + "Set", "MutableSet", + "Mapping", "MutableMapping", + "MappingView", "KeysView", "ItemsView", "ValuesView", + "Sequence", "MutableSequence", + "ByteString", + ] + +# This module has been renamed from collections.abc to _collections_abc to +# speed up interpreter startup. Some of the types such as MutableMapping are +# required early but collections module imports a lot of other modules. +# See issue #19218 +__name__ = "collections.abc" + +# Private list of types that we want to register with the various ABCs +# so that they will pass tests like: +# it = iter(somebytearray) +# assert isinstance(it, Iterable) +# Note: in other implementations, these types many not be distinct +# and they make have their own implementation specific types that +# are not included on this list. +bytes_iterator = type(iter(b'')) +bytearray_iterator = type(iter(bytearray())) +#callable_iterator = ??? +dict_keyiterator = type(iter({}.keys())) +dict_valueiterator = type(iter({}.values())) +dict_itemiterator = type(iter({}.items())) +list_iterator = type(iter([])) +list_reverseiterator = type(iter(reversed([]))) +range_iterator = type(iter(range(0))) +set_iterator = type(iter(set())) +str_iterator = type(iter("")) +tuple_iterator = type(iter(())) +zip_iterator = type(iter(zip())) +## views ## +dict_keys = type({}.keys()) +dict_values = type({}.values()) +dict_items = type({}.items()) +## misc ## +mappingproxy = type(type.__dict__) + + +### ONE-TRICK PONIES ### + +class Hashable(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __hash__(self): + return 0 + + @classmethod + def __subclasshook__(cls, C): + if cls is Hashable: + for B in C.__mro__: + if "__hash__" in B.__dict__: + if B.__dict__["__hash__"]: + return True + break + return NotImplemented + + +class Iterable(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __iter__(self): + while False: + yield None + + @classmethod + def __subclasshook__(cls, C): + if cls is Iterable: + if any("__iter__" in B.__dict__ for B in C.__mro__): + return True + return NotImplemented + + +class Iterator(Iterable): + + __slots__ = () + + @abstractmethod + def __next__(self): + 'Return the next item from the iterator. 
When exhausted, raise StopIteration' + raise StopIteration + + def __iter__(self): + return self + + @classmethod + def __subclasshook__(cls, C): + if cls is Iterator: + if (any("__next__" in B.__dict__ for B in C.__mro__) and + any("__iter__" in B.__dict__ for B in C.__mro__)): + return True + return NotImplemented + +Iterator.register(bytes_iterator) +Iterator.register(bytearray_iterator) +#Iterator.register(callable_iterator) +Iterator.register(dict_keyiterator) +Iterator.register(dict_valueiterator) +Iterator.register(dict_itemiterator) +Iterator.register(list_iterator) +Iterator.register(list_reverseiterator) +Iterator.register(range_iterator) +Iterator.register(set_iterator) +Iterator.register(str_iterator) +Iterator.register(tuple_iterator) +Iterator.register(zip_iterator) + +class Sized(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __len__(self): + return 0 + + @classmethod + def __subclasshook__(cls, C): + if cls is Sized: + if any("__len__" in B.__dict__ for B in C.__mro__): + return True + return NotImplemented + + +class Container(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __contains__(self, x): + return False + + @classmethod + def __subclasshook__(cls, C): + if cls is Container: + if any("__contains__" in B.__dict__ for B in C.__mro__): + return True + return NotImplemented + + +class Callable(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __call__(self, *args, **kwds): + return False + + @classmethod + def __subclasshook__(cls, C): + if cls is Callable: + if any("__call__" in B.__dict__ for B in C.__mro__): + return True + return NotImplemented + + +### SETS ### + + +class Set(Sized, Iterable, Container): + + """A set is a finite, iterable container. + + This class provides concrete generic implementations of all + methods except for __contains__, __iter__ and __len__. + + To override the comparisons (presumably for speed, as the + semantics are fixed), redefine __le__ and __ge__, + then the other operations will automatically follow suit. + """ + + __slots__ = () + + def __le__(self, other): + if not isinstance(other, Set): + return NotImplemented + if len(self) > len(other): + return False + for elem in self: + if elem not in other: + return False + return True + + def __lt__(self, other): + if not isinstance(other, Set): + return NotImplemented + return len(self) < len(other) and self.__le__(other) + + def __gt__(self, other): + if not isinstance(other, Set): + return NotImplemented + return len(self) > len(other) and self.__ge__(other) + + def __ge__(self, other): + if not isinstance(other, Set): + return NotImplemented + if len(self) < len(other): + return False + for elem in other: + if elem not in self: + return False + return True + + def __eq__(self, other): + if not isinstance(other, Set): + return NotImplemented + return len(self) == len(other) and self.__le__(other) + + @classmethod + def _from_iterable(cls, it): + '''Construct an instance of the class from any iterable input. + + Must override this method if the class constructor signature + does not accept an iterable for an input. + ''' + return cls(it) + + def __and__(self, other): + if not isinstance(other, Iterable): + return NotImplemented + return self._from_iterable(value for value in other if value in self) + + __rand__ = __and__ + + def isdisjoint(self, other): + 'Return True if two sets have a null intersection.' 
+ for value in other: + if value in self: + return False + return True + + def __or__(self, other): + if not isinstance(other, Iterable): + return NotImplemented + chain = (e for s in (self, other) for e in s) + return self._from_iterable(chain) + + __ror__ = __or__ + + def __sub__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return self._from_iterable(value for value in self + if value not in other) + + def __rsub__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return self._from_iterable(value for value in other + if value not in self) + + def __xor__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return (self - other) | (other - self) + + __rxor__ = __xor__ + + def _hash(self): + """Compute the hash value of a set. + + Note that we don't define __hash__: not all sets are hashable. + But if you define a hashable set type, its __hash__ should + call this function. + + This must be compatible __eq__. + + All sets ought to compare equal if they contain the same + elements, regardless of how they are implemented, and + regardless of the order of the elements; so there's not much + freedom for __eq__ or __hash__. We match the algorithm used + by the built-in frozenset type. + """ + MAX = sys.maxsize + MASK = 2 * MAX + 1 + n = len(self) + h = 1927868237 * (n + 1) + h &= MASK + for x in self: + hx = hash(x) + h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167 + h &= MASK + h = h * 69069 + 907133923 + h &= MASK + if h > MAX: + h -= MASK + 1 + if h == -1: + h = 590923713 + return h + +Set.register(frozenset) + + +class MutableSet(Set): + """A mutable set is a finite, iterable container. + + This class provides concrete generic implementations of all + methods except for __contains__, __iter__, __len__, + add(), and discard(). + + To override the comparisons (presumably for speed, as the + semantics are fixed), all you have to do is redefine __le__ and + then the other operations will automatically follow suit. + """ + + __slots__ = () + + @abstractmethod + def add(self, value): + """Add an element.""" + raise NotImplementedError + + @abstractmethod + def discard(self, value): + """Remove an element. Do not raise an exception if absent.""" + raise NotImplementedError + + def remove(self, value): + """Remove an element. If not a member, raise a KeyError.""" + if value not in self: + raise KeyError(value) + self.discard(value) + + def pop(self): + """Return the popped value. Raise KeyError if empty.""" + it = iter(self) + try: + value = next(it) + except StopIteration: + raise KeyError + self.discard(value) + return value + + def clear(self): + """This is slow (creates N new iterators!) 
but effective.""" + try: + while True: + self.pop() + except KeyError: + pass + + def __ior__(self, it): + for value in it: + self.add(value) + return self + + def __iand__(self, it): + for value in (self - it): + self.discard(value) + return self + + def __ixor__(self, it): + if it is self: + self.clear() + else: + if not isinstance(it, Set): + it = self._from_iterable(it) + for value in it: + if value in self: + self.discard(value) + else: + self.add(value) + return self + + def __isub__(self, it): + if it is self: + self.clear() + else: + for value in it: + self.discard(value) + return self + +MutableSet.register(set) + + +### MAPPINGS ### + + +class Mapping(Sized, Iterable, Container): + + __slots__ = () + + """A Mapping is a generic container for associating key/value + pairs. + + This class provides concrete generic implementations of all + methods except for __getitem__, __iter__, and __len__. + + """ + + @abstractmethod + def __getitem__(self, key): + raise KeyError + + def get(self, key, default=None): + 'D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.' + try: + return self[key] + except KeyError: + return default + + def __contains__(self, key): + try: + self[key] + except KeyError: + return False + else: + return True + + def keys(self): + "D.keys() -> a set-like object providing a view on D's keys" + return KeysView(self) + + def items(self): + "D.items() -> a set-like object providing a view on D's items" + return ItemsView(self) + + def values(self): + "D.values() -> an object providing a view on D's values" + return ValuesView(self) + + def __eq__(self, other): + if not isinstance(other, Mapping): + return NotImplemented + return dict(self.items()) == dict(other.items()) + +Mapping.register(mappingproxy) + + +class MappingView(Sized): + + def __init__(self, mapping): + self._mapping = mapping + + def __len__(self): + return len(self._mapping) + + def __repr__(self): + return '{0.__class__.__name__}({0._mapping!r})'.format(self) + + +class KeysView(MappingView, Set): + + @classmethod + def _from_iterable(self, it): + return set(it) + + def __contains__(self, key): + return key in self._mapping + + def __iter__(self): + yield from self._mapping + +KeysView.register(dict_keys) + + +class ItemsView(MappingView, Set): + + @classmethod + def _from_iterable(self, it): + return set(it) + + def __contains__(self, item): + key, value = item + try: + v = self._mapping[key] + except KeyError: + return False + else: + return v == value + + def __iter__(self): + for key in self._mapping: + yield (key, self._mapping[key]) + +ItemsView.register(dict_items) + + +class ValuesView(MappingView): + + def __contains__(self, value): + for key in self._mapping: + if value == self._mapping[key]: + return True + return False + + def __iter__(self): + for key in self._mapping: + yield self._mapping[key] + +ValuesView.register(dict_values) + + +class MutableMapping(Mapping): + + __slots__ = () + + """A MutableMapping is a generic container for associating + key/value pairs. + + This class provides concrete generic implementations of all + methods except for __getitem__, __setitem__, __delitem__, + __iter__, and __len__. + + """ + + @abstractmethod + def __setitem__(self, key, value): + raise KeyError + + @abstractmethod + def __delitem__(self, key): + raise KeyError + + __marker = object() + + def pop(self, key, default=__marker): + '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value. + If key is not found, d is returned if given, otherwise KeyError is raised. 
+ ''' + try: + value = self[key] + except KeyError: + if default is self.__marker: + raise + return default + else: + del self[key] + return value + + def popitem(self): + '''D.popitem() -> (k, v), remove and return some (key, value) pair + as a 2-tuple; but raise KeyError if D is empty. + ''' + try: + key = next(iter(self)) + except StopIteration: + raise KeyError + value = self[key] + del self[key] + return key, value + + def clear(self): + 'D.clear() -> None. Remove all items from D.' + try: + while True: + self.popitem() + except KeyError: + pass + + def update(*args, **kwds): + ''' D.update([E, ]**F) -> None. Update D from mapping/iterable E and F. + If E present and has a .keys() method, does: for k in E: D[k] = E[k] + If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v + In either case, this is followed by: for k, v in F.items(): D[k] = v + ''' + if not args: + raise TypeError("descriptor 'update' of 'MutableMapping' object " + "needs an argument") + self, *args = args + if len(args) > 1: + raise TypeError('update expected at most 1 arguments, got %d' % + len(args)) + if args: + other = args[0] + if isinstance(other, Mapping): + for key in other: + self[key] = other[key] + elif hasattr(other, "keys"): + for key in other.keys(): + self[key] = other[key] + else: + for key, value in other: + self[key] = value + for key, value in kwds.items(): + self[key] = value + + def setdefault(self, key, default=None): + 'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D' + try: + return self[key] + except KeyError: + self[key] = default + return default + +MutableMapping.register(dict) + + +### SEQUENCES ### + + +class Sequence(Sized, Iterable, Container): + + """All the operations on a read-only sequence. + + Concrete subclasses must override __new__ or __init__, + __getitem__, and __len__. + """ + + __slots__ = () + + @abstractmethod + def __getitem__(self, index): + raise IndexError + + def __iter__(self): + i = 0 + try: + while True: + v = self[i] + yield v + i += 1 + except IndexError: + return + + def __contains__(self, value): + for v in self: + if v == value: + return True + return False + + def __reversed__(self): + for i in reversed(range(len(self))): + yield self[i] + + def index(self, value): + '''S.index(value) -> integer -- return first index of value. + Raises ValueError if the value is not present. + ''' + for i, v in enumerate(self): + if v == value: + return i + raise ValueError + + def count(self, value): + 'S.count(value) -> integer -- return number of occurrences of value' + return sum(1 for v in self if v == value) + +Sequence.register(tuple) +Sequence.register(str) +Sequence.register(range) +Sequence.register(memoryview) + + +class ByteString(Sequence): + + """This unifies bytes and bytearray. + + XXX Should add all their methods. + """ + + __slots__ = () + +ByteString.register(bytes) +ByteString.register(bytearray) + + +class MutableSequence(Sequence): + + __slots__ = () + + """All the operations on a read-write sequence. + + Concrete subclasses must provide __new__ or __init__, + __getitem__, __setitem__, __delitem__, __len__, and insert(). 
+ + """ + + @abstractmethod + def __setitem__(self, index, value): + raise IndexError + + @abstractmethod + def __delitem__(self, index): + raise IndexError + + @abstractmethod + def insert(self, index, value): + 'S.insert(index, value) -- insert value before index' + raise IndexError + + def append(self, value): + 'S.append(value) -- append value to the end of the sequence' + self.insert(len(self), value) + + def clear(self): + 'S.clear() -> None -- remove all items from S' + try: + while True: + self.pop() + except IndexError: + pass + + def reverse(self): + 'S.reverse() -- reverse *IN PLACE*' + n = len(self) + for i in range(n//2): + self[i], self[n-i-1] = self[n-i-1], self[i] + + def extend(self, values): + 'S.extend(iterable) -- extend sequence by appending elements from the iterable' + for v in values: + self.append(v) + + def pop(self, index=-1): + '''S.pop([index]) -> item -- remove and return item at index (default last). + Raise IndexError if list is empty or index is out of range. + ''' + v = self[index] + del self[index] + return v + + def remove(self, value): + '''S.remove(value) -- remove first occurrence of value. + Raise ValueError if the value is not present. + ''' + del self[self.index(value)] + + def __iadd__(self, values): + self.extend(values) + return self + +MutableSequence.register(list) +MutableSequence.register(bytearray) # Multiply inheriting, see ByteString diff --git a/v1/flask/lib/python3.4/_dummy_thread.py b/v1/flask/lib/python3.4/_dummy_thread.py deleted file mode 120000 index 30c58e7..0000000 --- a/v1/flask/lib/python3.4/_dummy_thread.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/_dummy_thread.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/_dummy_thread.py b/v1/flask/lib/python3.4/_dummy_thread.py new file mode 100644 index 0000000..b67cfb9 --- /dev/null +++ b/v1/flask/lib/python3.4/_dummy_thread.py @@ -0,0 +1,155 @@ +"""Drop-in replacement for the thread module. + +Meant to be used as a brain-dead substitute so that threaded code does +not need to be rewritten for when the thread module is not present. + +Suggested usage is:: + + try: + import _thread + except ImportError: + import _dummy_thread as _thread + +""" +# Exports only things specified by thread documentation; +# skipping obsolete synonyms allocate(), start_new(), exit_thread(). +__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock', + 'interrupt_main', 'LockType'] + +# A dummy value +TIMEOUT_MAX = 2**31 + +# NOTE: this module can be imported early in the extension building process, +# and so top level imports of other modules should be avoided. Instead, all +# imports are done when needed on a function-by-function basis. Since threads +# are disabled, the import lock should not be an issue anyway (??). + +error = RuntimeError + +def start_new_thread(function, args, kwargs={}): + """Dummy implementation of _thread.start_new_thread(). + + Compatibility is maintained by making sure that ``args`` is a + tuple and ``kwargs`` is a dictionary. If an exception is raised + and it is SystemExit (which can be done by _thread.exit()) it is + caught and nothing is done; all other exceptions are printed out + by using traceback.print_exc(). + + If the executed function calls interrupt_main the KeyboardInterrupt will be + raised when the function returns. 
+ + """ + if type(args) != type(tuple()): + raise TypeError("2nd arg must be a tuple") + if type(kwargs) != type(dict()): + raise TypeError("3rd arg must be a dict") + global _main + _main = False + try: + function(*args, **kwargs) + except SystemExit: + pass + except: + import traceback + traceback.print_exc() + _main = True + global _interrupt + if _interrupt: + _interrupt = False + raise KeyboardInterrupt + +def exit(): + """Dummy implementation of _thread.exit().""" + raise SystemExit + +def get_ident(): + """Dummy implementation of _thread.get_ident(). + + Since this module should only be used when _threadmodule is not + available, it is safe to assume that the current process is the + only thread. Thus a constant can be safely returned. + """ + return -1 + +def allocate_lock(): + """Dummy implementation of _thread.allocate_lock().""" + return LockType() + +def stack_size(size=None): + """Dummy implementation of _thread.stack_size().""" + if size is not None: + raise error("setting thread stack size not supported") + return 0 + +def _set_sentinel(): + """Dummy implementation of _thread._set_sentinel().""" + return LockType() + +class LockType(object): + """Class implementing dummy implementation of _thread.LockType. + + Compatibility is maintained by maintaining self.locked_status + which is a boolean that stores the state of the lock. Pickling of + the lock, though, should not be done since if the _thread module is + then used with an unpickled ``lock()`` from here problems could + occur from this class not having atomic methods. + + """ + + def __init__(self): + self.locked_status = False + + def acquire(self, waitflag=None, timeout=-1): + """Dummy implementation of acquire(). + + For blocking calls, self.locked_status is automatically set to + True and returned appropriately based on value of + ``waitflag``. If it is non-blocking, then the value is + actually checked and not set if it is already acquired. This + is all done so that threading.Condition's assert statements + aren't triggered and throw a little fit. + + """ + if waitflag is None or waitflag: + self.locked_status = True + return True + else: + if not self.locked_status: + self.locked_status = True + return True + else: + if timeout > 0: + import time + time.sleep(timeout) + return False + + __enter__ = acquire + + def __exit__(self, typ, val, tb): + self.release() + + def release(self): + """Release the dummy lock.""" + # XXX Perhaps shouldn't actually bother to test? Could lead + # to problems for complex, threaded code. 
+ if not self.locked_status: + raise error + self.locked_status = False + return True + + def locked(self): + return self.locked_status + +# Used to signal that interrupt_main was called in a "thread" +_interrupt = False +# True when not executing in a "thread" +_main = True + +def interrupt_main(): + """Set _interrupt flag to True to have start_new_thread raise + KeyboardInterrupt upon exiting.""" + if _main: + raise KeyboardInterrupt + else: + global _interrupt + _interrupt = True diff --git a/v1/flask/lib/python3.4/_weakrefset.py b/v1/flask/lib/python3.4/_weakrefset.py deleted file mode 120000 index 3d48df1..0000000 --- a/v1/flask/lib/python3.4/_weakrefset.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/_weakrefset.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/_weakrefset.py b/v1/flask/lib/python3.4/_weakrefset.py new file mode 100644 index 0000000..7f9923c --- /dev/null +++ b/v1/flask/lib/python3.4/_weakrefset.py @@ -0,0 +1,196 @@ +# Access WeakSet through the weakref module. +# This code is separated-out because it is needed +# by abc.py to load everything else at startup. + +from _weakref import ref + +__all__ = ['WeakSet'] + + +class _IterationGuard: + # This context manager registers itself in the current iterators of the + # weak container, such as to delay all removals until the context manager + # exits. + # This technique should be relatively thread-safe (since sets are). + + def __init__(self, weakcontainer): + # Don't create cycles + self.weakcontainer = ref(weakcontainer) + + def __enter__(self): + w = self.weakcontainer() + if w is not None: + w._iterating.add(self) + return self + + def __exit__(self, e, t, b): + w = self.weakcontainer() + if w is not None: + s = w._iterating + s.remove(self) + if not s: + w._commit_removals() + + +class WeakSet: + def __init__(self, data=None): + self.data = set() + def _remove(item, selfref=ref(self)): + self = selfref() + if self is not None: + if self._iterating: + self._pending_removals.append(item) + else: + self.data.discard(item) + self._remove = _remove + # A list of keys to be removed + self._pending_removals = [] + self._iterating = set() + if data is not None: + self.update(data) + + def _commit_removals(self): + l = self._pending_removals + discard = self.data.discard + while l: + discard(l.pop()) + + def __iter__(self): + with _IterationGuard(self): + for itemref in self.data: + item = itemref() + if item is not None: + # Caveat: the iterator will keep a strong reference to + # `item` until it is resumed or closed. 
+ yield item + + def __len__(self): + return len(self.data) - len(self._pending_removals) + + def __contains__(self, item): + try: + wr = ref(item) + except TypeError: + return False + return wr in self.data + + def __reduce__(self): + return (self.__class__, (list(self),), + getattr(self, '__dict__', None)) + + def add(self, item): + if self._pending_removals: + self._commit_removals() + self.data.add(ref(item, self._remove)) + + def clear(self): + if self._pending_removals: + self._commit_removals() + self.data.clear() + + def copy(self): + return self.__class__(self) + + def pop(self): + if self._pending_removals: + self._commit_removals() + while True: + try: + itemref = self.data.pop() + except KeyError: + raise KeyError('pop from empty WeakSet') + item = itemref() + if item is not None: + return item + + def remove(self, item): + if self._pending_removals: + self._commit_removals() + self.data.remove(ref(item)) + + def discard(self, item): + if self._pending_removals: + self._commit_removals() + self.data.discard(ref(item)) + + def update(self, other): + if self._pending_removals: + self._commit_removals() + for element in other: + self.add(element) + + def __ior__(self, other): + self.update(other) + return self + + def difference(self, other): + newset = self.copy() + newset.difference_update(other) + return newset + __sub__ = difference + + def difference_update(self, other): + self.__isub__(other) + def __isub__(self, other): + if self._pending_removals: + self._commit_removals() + if self is other: + self.data.clear() + else: + self.data.difference_update(ref(item) for item in other) + return self + + def intersection(self, other): + return self.__class__(item for item in other if item in self) + __and__ = intersection + + def intersection_update(self, other): + self.__iand__(other) + def __iand__(self, other): + if self._pending_removals: + self._commit_removals() + self.data.intersection_update(ref(item) for item in other) + return self + + def issubset(self, other): + return self.data.issubset(ref(item) for item in other) + __le__ = issubset + + def __lt__(self, other): + return self.data < set(ref(item) for item in other) + + def issuperset(self, other): + return self.data.issuperset(ref(item) for item in other) + __ge__ = issuperset + + def __gt__(self, other): + return self.data > set(ref(item) for item in other) + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return self.data == set(ref(item) for item in other) + + def symmetric_difference(self, other): + newset = self.copy() + newset.symmetric_difference_update(other) + return newset + __xor__ = symmetric_difference + + def symmetric_difference_update(self, other): + self.__ixor__(other) + def __ixor__(self, other): + if self._pending_removals: + self._commit_removals() + if self is other: + self.data.clear() + else: + self.data.symmetric_difference_update(ref(item, self._remove) for item in other) + return self + + def union(self, other): + return self.__class__(e for s in (self, other) for e in s) + __or__ = union + + def isdisjoint(self, other): + return len(self.intersection(other)) == 0 diff --git a/v1/flask/lib/python3.4/abc.py b/v1/flask/lib/python3.4/abc.py deleted file mode 120000 index e620fe2..0000000 --- a/v1/flask/lib/python3.4/abc.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/abc.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/abc.py b/v1/flask/lib/python3.4/abc.py new file mode 100644 
index 0000000..0358a46 --- /dev/null +++ b/v1/flask/lib/python3.4/abc.py @@ -0,0 +1,248 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Abstract Base Classes (ABCs) according to PEP 3119.""" + +from _weakrefset import WeakSet + + +def abstractmethod(funcobj): + """A decorator indicating abstract methods. + + Requires that the metaclass is ABCMeta or derived from it. A + class that has a metaclass derived from ABCMeta cannot be + instantiated unless all of its abstract methods are overridden. + The abstract methods can be called using any of the normal + 'super' call mechanisms. + + Usage: + + class C(metaclass=ABCMeta): + @abstractmethod + def my_abstract_method(self, ...): + ... + """ + funcobj.__isabstractmethod__ = True + return funcobj + + +class abstractclassmethod(classmethod): + """ + A decorator indicating abstract classmethods. + + Similar to abstractmethod. + + Usage: + + class C(metaclass=ABCMeta): + @abstractclassmethod + def my_abstract_classmethod(cls, ...): + ... + + 'abstractclassmethod' is deprecated. Use 'classmethod' with + 'abstractmethod' instead. + """ + + __isabstractmethod__ = True + + def __init__(self, callable): + callable.__isabstractmethod__ = True + super().__init__(callable) + + +class abstractstaticmethod(staticmethod): + """ + A decorator indicating abstract staticmethods. + + Similar to abstractmethod. + + Usage: + + class C(metaclass=ABCMeta): + @abstractstaticmethod + def my_abstract_staticmethod(...): + ... + + 'abstractstaticmethod' is deprecated. Use 'staticmethod' with + 'abstractmethod' instead. + """ + + __isabstractmethod__ = True + + def __init__(self, callable): + callable.__isabstractmethod__ = True + super().__init__(callable) + + +class abstractproperty(property): + """ + A decorator indicating abstract properties. + + Requires that the metaclass is ABCMeta or derived from it. A + class that has a metaclass derived from ABCMeta cannot be + instantiated unless all of its abstract properties are overridden. + The abstract properties can be called using any of the normal + 'super' call mechanisms. + + Usage: + + class C(metaclass=ABCMeta): + @abstractproperty + def my_abstract_property(self): + ... + + This defines a read-only property; you can also define a read-write + abstract property using the 'long' form of property declaration: + + class C(metaclass=ABCMeta): + def getx(self): ... + def setx(self, value): ... + x = abstractproperty(getx, setx) + + 'abstractproperty' is deprecated. Use 'property' with 'abstractmethod' + instead. + """ + + __isabstractmethod__ = True + + +class ABCMeta(type): + + """Metaclass for defining Abstract Base Classes (ABCs). + + Use this metaclass to create an ABC. An ABC can be subclassed + directly, and then acts as a mix-in class. You can also register + unrelated concrete classes (even built-in classes) and unrelated + ABCs as 'virtual subclasses' -- these and their descendants will + be considered subclasses of the registering ABC by the built-in + issubclass() function, but the registering ABC won't show up in + their MRO (Method Resolution Order) nor will method + implementations defined by the registering ABC be callable (not + even via super()). + + """ + + # A global counter that is incremented each time a class is + # registered as a virtual subclass of anything. It forces the + # negative cache to be cleared before its next use. + # Note: this counter is private. Use `abc.get_cache_token()` for + # external code. 
+ _abc_invalidation_counter = 0 + + def __new__(mcls, name, bases, namespace): + cls = super().__new__(mcls, name, bases, namespace) + # Compute set of abstract method names + abstracts = {name + for name, value in namespace.items() + if getattr(value, "__isabstractmethod__", False)} + for base in bases: + for name in getattr(base, "__abstractmethods__", set()): + value = getattr(cls, name, None) + if getattr(value, "__isabstractmethod__", False): + abstracts.add(name) + cls.__abstractmethods__ = frozenset(abstracts) + # Set up inheritance registry + cls._abc_registry = WeakSet() + cls._abc_cache = WeakSet() + cls._abc_negative_cache = WeakSet() + cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter + return cls + + def register(cls, subclass): + """Register a virtual subclass of an ABC. + + Returns the subclass, to allow usage as a class decorator. + """ + if not isinstance(subclass, type): + raise TypeError("Can only register classes") + if issubclass(subclass, cls): + return subclass # Already a subclass + # Subtle: test for cycles *after* testing for "already a subclass"; + # this means we allow X.register(X) and interpret it as a no-op. + if issubclass(cls, subclass): + # This would create a cycle, which is bad for the algorithm below + raise RuntimeError("Refusing to create an inheritance cycle") + cls._abc_registry.add(subclass) + ABCMeta._abc_invalidation_counter += 1 # Invalidate negative cache + return subclass + + def _dump_registry(cls, file=None): + """Debug helper to print the ABC registry.""" + print("Class: %s.%s" % (cls.__module__, cls.__name__), file=file) + print("Inv.counter: %s" % ABCMeta._abc_invalidation_counter, file=file) + for name in sorted(cls.__dict__.keys()): + if name.startswith("_abc_"): + value = getattr(cls, name) + print("%s: %r" % (name, value), file=file) + + def __instancecheck__(cls, instance): + """Override for isinstance(instance, cls).""" + # Inline the cache checking + subclass = instance.__class__ + if subclass in cls._abc_cache: + return True + subtype = type(instance) + if subtype is subclass: + if (cls._abc_negative_cache_version == + ABCMeta._abc_invalidation_counter and + subclass in cls._abc_negative_cache): + return False + # Fall back to the subclass check. 
+ return cls.__subclasscheck__(subclass) + return any(cls.__subclasscheck__(c) for c in {subclass, subtype}) + + def __subclasscheck__(cls, subclass): + """Override for issubclass(subclass, cls).""" + # Check cache + if subclass in cls._abc_cache: + return True + # Check negative cache; may have to invalidate + if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter: + # Invalidate the negative cache + cls._abc_negative_cache = WeakSet() + cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter + elif subclass in cls._abc_negative_cache: + return False + # Check the subclass hook + ok = cls.__subclasshook__(subclass) + if ok is not NotImplemented: + assert isinstance(ok, bool) + if ok: + cls._abc_cache.add(subclass) + else: + cls._abc_negative_cache.add(subclass) + return ok + # Check if it's a direct subclass + if cls in getattr(subclass, '__mro__', ()): + cls._abc_cache.add(subclass) + return True + # Check if it's a subclass of a registered class (recursive) + for rcls in cls._abc_registry: + if issubclass(subclass, rcls): + cls._abc_cache.add(subclass) + return True + # Check if it's a subclass of a subclass (recursive) + for scls in cls.__subclasses__(): + if issubclass(subclass, scls): + cls._abc_cache.add(subclass) + return True + # No dice; update negative cache + cls._abc_negative_cache.add(subclass) + return False + + +class ABC(metaclass=ABCMeta): + """Helper class that provides a standard way to create an ABC using + inheritance. + """ + pass + + +def get_cache_token(): + """Returns the current ABC cache token. + + The token is an opaque object (supporting equality testing) identifying the + current version of the ABC cache for virtual subclasses. The token changes + with every call to ``register()`` on any ABC. + """ + return ABCMeta._abc_invalidation_counter diff --git a/v1/flask/lib/python3.4/base64.py b/v1/flask/lib/python3.4/base64.py deleted file mode 120000 index fb6d914..0000000 --- a/v1/flask/lib/python3.4/base64.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/base64.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/base64.py b/v1/flask/lib/python3.4/base64.py new file mode 100755 index 0000000..640f787 --- /dev/null +++ b/v1/flask/lib/python3.4/base64.py @@ -0,0 +1,602 @@ +#! /usr/bin/env python3 + +"""Base16, Base32, Base64 (RFC 3548), Base85 and Ascii85 data encodings""" + +# Modified 04-Oct-1995 by Jack Jansen to use binascii module +# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support +# Modified 22-May-2007 by Guido van Rossum to use bytes everywhere + +import re +import struct +import binascii + + +__all__ = [ + # Legacy interface exports traditional RFC 1521 Base64 encodings + 'encode', 'decode', 'encodebytes', 'decodebytes', + # Generalized interface for other encodings + 'b64encode', 'b64decode', 'b32encode', 'b32decode', + 'b16encode', 'b16decode', + # Base85 and Ascii85 encodings + 'b85encode', 'b85decode', 'a85encode', 'a85decode', + # Standard Base64 encoding + 'standard_b64encode', 'standard_b64decode', + # Some common Base64 alternatives. 
As referenced by RFC 3458, see thread + # starting at: + # + # http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html + 'urlsafe_b64encode', 'urlsafe_b64decode', + ] + + +bytes_types = (bytes, bytearray) # Types acceptable as binary data + +def _bytes_from_decode_data(s): + if isinstance(s, str): + try: + return s.encode('ascii') + except UnicodeEncodeError: + raise ValueError('string argument should contain only ASCII characters') + if isinstance(s, bytes_types): + return s + try: + return memoryview(s).tobytes() + except TypeError: + raise TypeError("argument should be a bytes-like object or ASCII " + "string, not %r" % s.__class__.__name__) from None + + +# Base64 encoding/decoding uses binascii + +def b64encode(s, altchars=None): + """Encode a byte string using Base64. + + s is the byte string to encode. Optional altchars must be a byte + string of length 2 which specifies an alternative alphabet for the + '+' and '/' characters. This allows an application to + e.g. generate url or filesystem safe Base64 strings. + + The encoded byte string is returned. + """ + # Strip off the trailing newline + encoded = binascii.b2a_base64(s)[:-1] + if altchars is not None: + assert len(altchars) == 2, repr(altchars) + return encoded.translate(bytes.maketrans(b'+/', altchars)) + return encoded + + +def b64decode(s, altchars=None, validate=False): + """Decode a Base64 encoded byte string. + + s is the byte string to decode. Optional altchars must be a + string of length 2 which specifies the alternative alphabet used + instead of the '+' and '/' characters. + + The decoded string is returned. A binascii.Error is raised if s is + incorrectly padded. + + If validate is False (the default), non-base64-alphabet characters are + discarded prior to the padding check. If validate is True, + non-base64-alphabet characters in the input result in a binascii.Error. + """ + s = _bytes_from_decode_data(s) + if altchars is not None: + altchars = _bytes_from_decode_data(altchars) + assert len(altchars) == 2, repr(altchars) + s = s.translate(bytes.maketrans(altchars, b'+/')) + if validate and not re.match(b'^[A-Za-z0-9+/]*={0,2}$', s): + raise binascii.Error('Non-base64 digit found') + return binascii.a2b_base64(s) + + +def standard_b64encode(s): + """Encode a byte string using the standard Base64 alphabet. + + s is the byte string to encode. The encoded byte string is returned. + """ + return b64encode(s) + +def standard_b64decode(s): + """Decode a byte string encoded with the standard Base64 alphabet. + + s is the byte string to decode. The decoded byte string is + returned. binascii.Error is raised if the input is incorrectly + padded or if there are non-alphabet characters present in the + input. + """ + return b64decode(s) + + +_urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_') +_urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/') + +def urlsafe_b64encode(s): + """Encode a byte string using a url-safe Base64 alphabet. + + s is the byte string to encode. The encoded byte string is + returned. The alphabet uses '-' instead of '+' and '_' instead of + '/'. + """ + return b64encode(s).translate(_urlsafe_encode_translation) + +def urlsafe_b64decode(s): + """Decode a byte string encoded with the standard Base64 alphabet. + + s is the byte string to decode. The decoded byte string is + returned. binascii.Error is raised if the input is incorrectly + padded or if there are non-alphabet characters present in the + input. + + The alphabet uses '-' instead of '+' and '_' instead of '/'. 
+ """ + s = _bytes_from_decode_data(s) + s = s.translate(_urlsafe_decode_translation) + return b64decode(s) + + + +# Base32 encoding/decoding must be done in Python +_b32alphabet = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567' +_b32tab2 = None +_b32rev = None + +def b32encode(s): + """Encode a byte string using Base32. + + s is the byte string to encode. The encoded byte string is returned. + """ + global _b32tab2 + # Delay the initialization of the table to not waste memory + # if the function is never called + if _b32tab2 is None: + b32tab = [bytes((i,)) for i in _b32alphabet] + _b32tab2 = [a + b for a in b32tab for b in b32tab] + b32tab = None + + if not isinstance(s, bytes_types): + s = memoryview(s).tobytes() + leftover = len(s) % 5 + # Pad the last quantum with zero bits if necessary + if leftover: + s = s + bytes(5 - leftover) # Don't use += ! + encoded = bytearray() + from_bytes = int.from_bytes + b32tab2 = _b32tab2 + for i in range(0, len(s), 5): + c = from_bytes(s[i: i + 5], 'big') + encoded += (b32tab2[c >> 30] + # bits 1 - 10 + b32tab2[(c >> 20) & 0x3ff] + # bits 11 - 20 + b32tab2[(c >> 10) & 0x3ff] + # bits 21 - 30 + b32tab2[c & 0x3ff] # bits 31 - 40 + ) + # Adjust for any leftover partial quanta + if leftover == 1: + encoded[-6:] = b'======' + elif leftover == 2: + encoded[-4:] = b'====' + elif leftover == 3: + encoded[-3:] = b'===' + elif leftover == 4: + encoded[-1:] = b'=' + return bytes(encoded) + +def b32decode(s, casefold=False, map01=None): + """Decode a Base32 encoded byte string. + + s is the byte string to decode. Optional casefold is a flag + specifying whether a lowercase alphabet is acceptable as input. + For security purposes, the default is False. + + RFC 3548 allows for optional mapping of the digit 0 (zero) to the + letter O (oh), and for optional mapping of the digit 1 (one) to + either the letter I (eye) or letter L (el). The optional argument + map01 when not None, specifies which letter the digit 1 should be + mapped to (when map01 is not None, the digit 0 is always mapped to + the letter O). For security purposes the default is None, so that + 0 and 1 are not allowed in the input. + + The decoded byte string is returned. binascii.Error is raised if + the input is incorrectly padded or if there are non-alphabet + characters present in the input. + """ + global _b32rev + # Delay the initialization of the table to not waste memory + # if the function is never called + if _b32rev is None: + _b32rev = {v: k for k, v in enumerate(_b32alphabet)} + s = _bytes_from_decode_data(s) + if len(s) % 8: + raise binascii.Error('Incorrect padding') + # Handle section 2.4 zero and one mapping. The flag map01 will be either + # False, or the character to map the digit 1 (one) to. It should be + # either L (el) or I (eye). + if map01 is not None: + map01 = _bytes_from_decode_data(map01) + assert len(map01) == 1, repr(map01) + s = s.translate(bytes.maketrans(b'01', b'O' + map01)) + if casefold: + s = s.upper() + # Strip off pad characters from the right. We need to count the pad + # characters because this will tell us how many null bytes to remove from + # the end of the decoded string. 
+ l = len(s) + s = s.rstrip(b'=') + padchars = l - len(s) + # Now decode the full quanta + decoded = bytearray() + b32rev = _b32rev + for i in range(0, len(s), 8): + quanta = s[i: i + 8] + acc = 0 + try: + for c in quanta: + acc = (acc << 5) + b32rev[c] + except KeyError: + raise binascii.Error('Non-base32 digit found') from None + decoded += acc.to_bytes(5, 'big') + # Process the last, partial quanta + if padchars: + acc <<= 5 * padchars + last = acc.to_bytes(5, 'big') + if padchars == 1: + decoded[-5:] = last[:-1] + elif padchars == 3: + decoded[-5:] = last[:-2] + elif padchars == 4: + decoded[-5:] = last[:-3] + elif padchars == 6: + decoded[-5:] = last[:-4] + else: + raise binascii.Error('Incorrect padding') + return bytes(decoded) + + + +# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns +# lowercase. The RFC also recommends against accepting input case +# insensitively. +def b16encode(s): + """Encode a byte string using Base16. + + s is the byte string to encode. The encoded byte string is returned. + """ + return binascii.hexlify(s).upper() + + +def b16decode(s, casefold=False): + """Decode a Base16 encoded byte string. + + s is the byte string to decode. Optional casefold is a flag + specifying whether a lowercase alphabet is acceptable as input. + For security purposes, the default is False. + + The decoded byte string is returned. binascii.Error is raised if + s were incorrectly padded or if there are non-alphabet characters + present in the string. + """ + s = _bytes_from_decode_data(s) + if casefold: + s = s.upper() + if re.search(b'[^0-9A-F]', s): + raise binascii.Error('Non-base16 digit found') + return binascii.unhexlify(s) + +# +# Ascii85 encoding/decoding +# + +_a85chars = None +_a85chars2 = None +_A85START = b"<~" +_A85END = b"~>" + +def _85encode(b, chars, chars2, pad=False, foldnuls=False, foldspaces=False): + # Helper function for a85encode and b85encode + if not isinstance(b, bytes_types): + b = memoryview(b).tobytes() + + padding = (-len(b)) % 4 + if padding: + b = b + b'\0' * padding + words = struct.Struct('!%dI' % (len(b) // 4)).unpack(b) + + chunks = [b'z' if foldnuls and not word else + b'y' if foldspaces and word == 0x20202020 else + (chars2[word // 614125] + + chars2[word // 85 % 7225] + + chars[word % 85]) + for word in words] + + if padding and not pad: + if chunks[-1] == b'z': + chunks[-1] = chars[0] * 5 + chunks[-1] = chunks[-1][:-padding] + + return b''.join(chunks) + +def a85encode(b, *, foldspaces=False, wrapcol=0, pad=False, adobe=False): + """Encode a byte string using Ascii85. + + b is the byte string to encode. The encoded byte string is returned. + + foldspaces is an optional flag that uses the special short sequence 'y' + instead of 4 consecutive spaces (ASCII 0x20) as supported by 'btoa'. This + feature is not supported by the "standard" Adobe encoding. + + wrapcol controls whether the output should have newline ('\\n') characters + added to it. If this is non-zero, each output line will be at most this + many characters long. + + pad controls whether the input string is padded to a multiple of 4 before + encoding. Note that the btoa implementation always pads. + + adobe controls whether the encoded byte sequence is framed with <~ and ~>, + which is used by the Adobe implementation. 
+ """ + global _a85chars, _a85chars2 + # Delay the initialization of tables to not waste memory + # if the function is never called + if _a85chars is None: + _a85chars = [bytes((i,)) for i in range(33, 118)] + _a85chars2 = [(a + b) for a in _a85chars for b in _a85chars] + + result = _85encode(b, _a85chars, _a85chars2, pad, True, foldspaces) + + if adobe: + result = _A85START + result + if wrapcol: + wrapcol = max(2 if adobe else 1, wrapcol) + chunks = [result[i: i + wrapcol] + for i in range(0, len(result), wrapcol)] + if adobe: + if len(chunks[-1]) + 2 > wrapcol: + chunks.append(b'') + result = b'\n'.join(chunks) + if adobe: + result += _A85END + + return result + +def a85decode(b, *, foldspaces=False, adobe=False, ignorechars=b' \t\n\r\v'): + """Decode an Ascii85 encoded byte string. + + s is the byte string to decode. + + foldspaces is a flag that specifies whether the 'y' short sequence should be + accepted as shorthand for 4 consecutive spaces (ASCII 0x20). This feature is + not supported by the "standard" Adobe encoding. + + adobe controls whether the input sequence is in Adobe Ascii85 format (i.e. + is framed with <~ and ~>). + + ignorechars should be a byte string containing characters to ignore from the + input. This should only contain whitespace characters, and by default + contains all whitespace characters in ASCII. + """ + b = _bytes_from_decode_data(b) + if adobe: + if not (b.startswith(_A85START) and b.endswith(_A85END)): + raise ValueError("Ascii85 encoded byte sequences must be bracketed " + "by {!r} and {!r}".format(_A85START, _A85END)) + b = b[2:-2] # Strip off start/end markers + # + # We have to go through this stepwise, so as to ignore spaces and handle + # special short sequences + # + packI = struct.Struct('!I').pack + decoded = [] + decoded_append = decoded.append + curr = [] + curr_append = curr.append + curr_clear = curr.clear + for x in b + b'u' * 4: + if b'!'[0] <= x <= b'u'[0]: + curr_append(x) + if len(curr) == 5: + acc = 0 + for x in curr: + acc = 85 * acc + (x - 33) + try: + decoded_append(packI(acc)) + except struct.error: + raise ValueError('Ascii85 overflow') from None + curr_clear() + elif x == b'z'[0]: + if curr: + raise ValueError('z inside Ascii85 5-tuple') + decoded_append(b'\0\0\0\0') + elif foldspaces and x == b'y'[0]: + if curr: + raise ValueError('y inside Ascii85 5-tuple') + decoded_append(b'\x20\x20\x20\x20') + elif x in ignorechars: + # Skip whitespace + continue + else: + raise ValueError('Non-Ascii85 digit found: %c' % x) + + result = b''.join(decoded) + padding = 4 - len(curr) + if padding: + # Throw away the extra padding + result = result[:-padding] + return result + +# The following code is originally taken (with permission) from Mercurial + +_b85alphabet = (b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" + b"abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~") +_b85chars = None +_b85chars2 = None +_b85dec = None + +def b85encode(b, pad=False): + """Encode an ASCII-encoded byte array in base85 format. + + If pad is true, the input is padded with "\\0" so its length is a multiple of + 4 characters before encoding. 
+ """ + global _b85chars, _b85chars2 + # Delay the initialization of tables to not waste memory + # if the function is never called + if _b85chars is None: + _b85chars = [bytes((i,)) for i in _b85alphabet] + _b85chars2 = [(a + b) for a in _b85chars for b in _b85chars] + return _85encode(b, _b85chars, _b85chars2, pad) + +def b85decode(b): + """Decode base85-encoded byte array""" + global _b85dec + # Delay the initialization of tables to not waste memory + # if the function is never called + if _b85dec is None: + _b85dec = [None] * 256 + for i, c in enumerate(_b85alphabet): + _b85dec[c] = i + + b = _bytes_from_decode_data(b) + padding = (-len(b)) % 5 + b = b + b'~' * padding + out = [] + packI = struct.Struct('!I').pack + for i in range(0, len(b), 5): + chunk = b[i:i + 5] + acc = 0 + try: + for c in chunk: + acc = acc * 85 + _b85dec[c] + except TypeError: + for j, c in enumerate(chunk): + if _b85dec[c] is None: + raise ValueError('bad base85 character at position %d' + % (i + j)) from None + raise + try: + out.append(packI(acc)) + except struct.error: + raise ValueError('base85 overflow in hunk starting at byte %d' + % i) from None + + result = b''.join(out) + if padding: + result = result[:-padding] + return result + +# Legacy interface. This code could be cleaned up since I don't believe +# binascii has any line length limitations. It just doesn't seem worth it +# though. The files should be opened in binary mode. + +MAXLINESIZE = 76 # Excluding the CRLF +MAXBINSIZE = (MAXLINESIZE//4)*3 + +def encode(input, output): + """Encode a file; input and output are binary files.""" + while True: + s = input.read(MAXBINSIZE) + if not s: + break + while len(s) < MAXBINSIZE: + ns = input.read(MAXBINSIZE-len(s)) + if not ns: + break + s += ns + line = binascii.b2a_base64(s) + output.write(line) + + +def decode(input, output): + """Decode a file; input and output are binary files.""" + while True: + line = input.readline() + if not line: + break + s = binascii.a2b_base64(line) + output.write(s) + +def _input_type_check(s): + try: + m = memoryview(s) + except TypeError as err: + msg = "expected bytes-like object, not %s" % s.__class__.__name__ + raise TypeError(msg) from err + if m.format not in ('c', 'b', 'B'): + msg = ("expected single byte elements, not %r from %s" % + (m.format, s.__class__.__name__)) + raise TypeError(msg) + if m.ndim != 1: + msg = ("expected 1-D data, not %d-D data from %s" % + (m.ndim, s.__class__.__name__)) + raise TypeError(msg) + + +def encodebytes(s): + """Encode a bytestring into a bytestring containing multiple lines + of base-64 data.""" + _input_type_check(s) + pieces = [] + for i in range(0, len(s), MAXBINSIZE): + chunk = s[i : i + MAXBINSIZE] + pieces.append(binascii.b2a_base64(chunk)) + return b"".join(pieces) + +def encodestring(s): + """Legacy alias of encodebytes().""" + import warnings + warnings.warn("encodestring() is a deprecated alias, use encodebytes()", + DeprecationWarning, 2) + return encodebytes(s) + + +def decodebytes(s): + """Decode a bytestring of base-64 data into a bytestring.""" + _input_type_check(s) + return binascii.a2b_base64(s) + +def decodestring(s): + """Legacy alias of decodebytes().""" + import warnings + warnings.warn("decodestring() is a deprecated alias, use decodebytes()", + DeprecationWarning, 2) + return decodebytes(s) + + +# Usable as a script... 
+def main(): + """Small main program""" + import sys, getopt + try: + opts, args = getopt.getopt(sys.argv[1:], 'deut') + except getopt.error as msg: + sys.stdout = sys.stderr + print(msg) + print("""usage: %s [-d|-e|-u|-t] [file|-] + -d, -u: decode + -e: encode (default) + -t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0]) + sys.exit(2) + func = encode + for o, a in opts: + if o == '-e': func = encode + if o == '-d': func = decode + if o == '-u': func = decode + if o == '-t': test(); return + if args and args[0] != '-': + with open(args[0], 'rb') as f: + func(f, sys.stdout.buffer) + else: + func(sys.stdin.buffer, sys.stdout.buffer) + + +def test(): + s0 = b"Aladdin:open sesame" + print(repr(s0)) + s1 = encodebytes(s0) + print(repr(s1)) + s2 = decodebytes(s1) + print(repr(s2)) + assert s0 == s2 + + +if __name__ == '__main__': + main() diff --git a/v1/flask/lib/python3.4/bisect.py b/v1/flask/lib/python3.4/bisect.py deleted file mode 120000 index 656c489..0000000 --- a/v1/flask/lib/python3.4/bisect.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/bisect.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/bisect.py b/v1/flask/lib/python3.4/bisect.py new file mode 100644 index 0000000..4a4d052 --- /dev/null +++ b/v1/flask/lib/python3.4/bisect.py @@ -0,0 +1,92 @@ +"""Bisection algorithms.""" + +def insort_right(a, x, lo=0, hi=None): + """Insert item x in list a, and keep it sorted assuming a is sorted. + + If x is already in a, insert it to the right of the rightmost x. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + """ + + if lo < 0: + raise ValueError('lo must be non-negative') + if hi is None: + hi = len(a) + while lo < hi: + mid = (lo+hi)//2 + if x < a[mid]: hi = mid + else: lo = mid+1 + a.insert(lo, x) + +insort = insort_right # backward compatibility + +def bisect_right(a, x, lo=0, hi=None): + """Return the index where to insert item x in list a, assuming a is sorted. + + The return value i is such that all e in a[:i] have e <= x, and all e in + a[i:] have e > x. So if x already appears in the list, a.insert(x) will + insert just after the rightmost x already there. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + """ + + if lo < 0: + raise ValueError('lo must be non-negative') + if hi is None: + hi = len(a) + while lo < hi: + mid = (lo+hi)//2 + if x < a[mid]: hi = mid + else: lo = mid+1 + return lo + +bisect = bisect_right # backward compatibility + +def insort_left(a, x, lo=0, hi=None): + """Insert item x in list a, and keep it sorted assuming a is sorted. + + If x is already in a, insert it to the left of the leftmost x. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + """ + + if lo < 0: + raise ValueError('lo must be non-negative') + if hi is None: + hi = len(a) + while lo < hi: + mid = (lo+hi)//2 + if a[mid] < x: lo = mid+1 + else: hi = mid + a.insert(lo, x) + + +def bisect_left(a, x, lo=0, hi=None): + """Return the index where to insert item x in list a, assuming a is sorted. + + The return value i is such that all e in a[:i] have e < x, and all e in + a[i:] have e >= x. So if x already appears in the list, a.insert(x) will + insert just before the leftmost x already there. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. 
+ """ + + if lo < 0: + raise ValueError('lo must be non-negative') + if hi is None: + hi = len(a) + while lo < hi: + mid = (lo+hi)//2 + if a[mid] < x: lo = mid+1 + else: hi = mid + return lo + +# Overwrite above definitions with a fast C implementation +try: + from _bisect import * +except ImportError: + pass diff --git a/v1/flask/lib/python3.4/codecs.py b/v1/flask/lib/python3.4/codecs.py deleted file mode 120000 index 889f533..0000000 --- a/v1/flask/lib/python3.4/codecs.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/codecs.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/codecs.py b/v1/flask/lib/python3.4/codecs.py new file mode 100644 index 0000000..6cdf3f4 --- /dev/null +++ b/v1/flask/lib/python3.4/codecs.py @@ -0,0 +1,1105 @@ +""" codecs -- Python Codec Registry, API and helpers. + + +Written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. + +"""#" + +import builtins, sys + +### Registry and builtin stateless codec functions + +try: + from _codecs import * +except ImportError as why: + raise SystemError('Failed to load the builtin codecs: %s' % why) + +__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE", + "BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE", + "BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE", + "BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE", + "CodecInfo", "Codec", "IncrementalEncoder", "IncrementalDecoder", + "StreamReader", "StreamWriter", + "StreamReaderWriter", "StreamRecoder", + "getencoder", "getdecoder", "getincrementalencoder", + "getincrementaldecoder", "getreader", "getwriter", + "encode", "decode", "iterencode", "iterdecode", + "strict_errors", "ignore_errors", "replace_errors", + "xmlcharrefreplace_errors", "backslashreplace_errors", + "register_error", "lookup_error"] + +### Constants + +# +# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF) +# and its possible byte string values +# for UTF8/UTF16/UTF32 output and little/big endian machines +# + +# UTF-8 +BOM_UTF8 = b'\xef\xbb\xbf' + +# UTF-16, little endian +BOM_LE = BOM_UTF16_LE = b'\xff\xfe' + +# UTF-16, big endian +BOM_BE = BOM_UTF16_BE = b'\xfe\xff' + +# UTF-32, little endian +BOM_UTF32_LE = b'\xff\xfe\x00\x00' + +# UTF-32, big endian +BOM_UTF32_BE = b'\x00\x00\xfe\xff' + +if sys.byteorder == 'little': + + # UTF-16, native endianness + BOM = BOM_UTF16 = BOM_UTF16_LE + + # UTF-32, native endianness + BOM_UTF32 = BOM_UTF32_LE + +else: + + # UTF-16, native endianness + BOM = BOM_UTF16 = BOM_UTF16_BE + + # UTF-32, native endianness + BOM_UTF32 = BOM_UTF32_BE + +# Old broken names (don't use in new code) +BOM32_LE = BOM_UTF16_LE +BOM32_BE = BOM_UTF16_BE +BOM64_LE = BOM_UTF32_LE +BOM64_BE = BOM_UTF32_BE + + +### Codec base classes (defining the API) + +class CodecInfo(tuple): + """Codec details when looking up the codec registry""" + + # Private API to allow Python 3.4 to blacklist the known non-Unicode + # codecs in the standard library. 
A more general mechanism to + # reliably distinguish test encodings from other codecs will hopefully + # be defined for Python 3.5 + # + # See http://bugs.python.org/issue19619 + _is_text_encoding = True # Assume codecs are text encodings by default + + def __new__(cls, encode, decode, streamreader=None, streamwriter=None, + incrementalencoder=None, incrementaldecoder=None, name=None, + *, _is_text_encoding=None): + self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter)) + self.name = name + self.encode = encode + self.decode = decode + self.incrementalencoder = incrementalencoder + self.incrementaldecoder = incrementaldecoder + self.streamwriter = streamwriter + self.streamreader = streamreader + if _is_text_encoding is not None: + self._is_text_encoding = _is_text_encoding + return self + + def __repr__(self): + return "<%s.%s object for encoding %s at 0x%x>" % \ + (self.__class__.__module__, self.__class__.__name__, + self.name, id(self)) + +class Codec: + + """ Defines the interface for stateless encoders/decoders. + + The .encode()/.decode() methods may use different error + handling schemes by providing the errors argument. These + string values are predefined: + + 'strict' - raise a ValueError error (or a subclass) + 'ignore' - ignore the character and continue with the next + 'replace' - replace with a suitable replacement character; + Python will use the official U+FFFD REPLACEMENT + CHARACTER for the builtin Unicode codecs on + decoding and '?' on encoding. + 'surrogateescape' - replace with private code points U+DCnn. + 'xmlcharrefreplace' - Replace with the appropriate XML + character reference (only for encoding). + 'backslashreplace' - Replace with backslashed escape sequences + (only for encoding). + + The set of allowed values can be extended via register_error. + + """ + def encode(self, input, errors='strict'): + + """ Encodes the object input and returns a tuple (output + object, length consumed). + + errors defines the error handling to apply. It defaults to + 'strict' handling. + + The method may not store state in the Codec instance. Use + StreamWriter for codecs which have to keep state in order to + make encoding efficient. + + The encoder must be able to handle zero length input and + return an empty object of the output object type in this + situation. + + """ + raise NotImplementedError + + def decode(self, input, errors='strict'): + + """ Decodes the object input and returns a tuple (output + object, length consumed). + + input must be an object which provides the bf_getreadbuf + buffer slot. Python strings, buffer objects and memory + mapped files are examples of objects providing this slot. + + errors defines the error handling to apply. It defaults to + 'strict' handling. + + The method may not store state in the Codec instance. Use + StreamReader for codecs which have to keep state in order to + make decoding efficient. + + The decoder must be able to handle zero length input and + return an empty object of the output object type in this + situation. + + """ + raise NotImplementedError + +class IncrementalEncoder(object): + """ + An IncrementalEncoder encodes an input in multiple steps. The input can + be passed piece by piece to the encode() method. The IncrementalEncoder + remembers the state of the encoding process between calls to encode(). + """ + def __init__(self, errors='strict'): + """ + Creates an IncrementalEncoder instance. 
+ + The IncrementalEncoder may use different error handling schemes by + providing the errors keyword argument. See the module docstring + for a list of possible values. + """ + self.errors = errors + self.buffer = "" + + def encode(self, input, final=False): + """ + Encodes input and returns the resulting object. + """ + raise NotImplementedError + + def reset(self): + """ + Resets the encoder to the initial state. + """ + + def getstate(self): + """ + Return the current state of the encoder. + """ + return 0 + + def setstate(self, state): + """ + Set the current state of the encoder. state must have been + returned by getstate(). + """ + +class BufferedIncrementalEncoder(IncrementalEncoder): + """ + This subclass of IncrementalEncoder can be used as the baseclass for an + incremental encoder if the encoder must keep some of the output in a + buffer between calls to encode(). + """ + def __init__(self, errors='strict'): + IncrementalEncoder.__init__(self, errors) + # unencoded input that is kept between calls to encode() + self.buffer = "" + + def _buffer_encode(self, input, errors, final): + # Overwrite this method in subclasses: It must encode input + # and return an (output, length consumed) tuple + raise NotImplementedError + + def encode(self, input, final=False): + # encode input (taking the buffer into account) + data = self.buffer + input + (result, consumed) = self._buffer_encode(data, self.errors, final) + # keep unencoded input until the next call + self.buffer = data[consumed:] + return result + + def reset(self): + IncrementalEncoder.reset(self) + self.buffer = "" + + def getstate(self): + return self.buffer or 0 + + def setstate(self, state): + self.buffer = state or "" + +class IncrementalDecoder(object): + """ + An IncrementalDecoder decodes an input in multiple steps. The input can + be passed piece by piece to the decode() method. The IncrementalDecoder + remembers the state of the decoding process between calls to decode(). + """ + def __init__(self, errors='strict'): + """ + Create an IncrementalDecoder instance. + + The IncrementalDecoder may use different error handling schemes by + providing the errors keyword argument. See the module docstring + for a list of possible values. + """ + self.errors = errors + + def decode(self, input, final=False): + """ + Decode input and returns the resulting object. + """ + raise NotImplementedError + + def reset(self): + """ + Reset the decoder to the initial state. + """ + + def getstate(self): + """ + Return the current state of the decoder. + + This must be a (buffered_input, additional_state_info) tuple. + buffered_input must be a bytes object containing bytes that + were passed to decode() that have not yet been converted. + additional_state_info must be a non-negative integer + representing the state of the decoder WITHOUT yet having + processed the contents of buffered_input. In the initial state + and after reset(), getstate() must return (b"", 0). + """ + return (b"", 0) + + def setstate(self, state): + """ + Set the current state of the decoder. + + state must have been returned by getstate(). The effect of + setstate((b"", 0)) must be equivalent to reset(). + """ + +class BufferedIncrementalDecoder(IncrementalDecoder): + """ + This subclass of IncrementalDecoder can be used as the baseclass for an + incremental decoder if the decoder must be able to handle incomplete + byte sequences. 
+ """ + def __init__(self, errors='strict'): + IncrementalDecoder.__init__(self, errors) + # undecoded input that is kept between calls to decode() + self.buffer = b"" + + def _buffer_decode(self, input, errors, final): + # Overwrite this method in subclasses: It must decode input + # and return an (output, length consumed) tuple + raise NotImplementedError + + def decode(self, input, final=False): + # decode input (taking the buffer into account) + data = self.buffer + input + (result, consumed) = self._buffer_decode(data, self.errors, final) + # keep undecoded input until the next call + self.buffer = data[consumed:] + return result + + def reset(self): + IncrementalDecoder.reset(self) + self.buffer = b"" + + def getstate(self): + # additional state info is always 0 + return (self.buffer, 0) + + def setstate(self, state): + # ignore additional state info + self.buffer = state[0] + +# +# The StreamWriter and StreamReader class provide generic working +# interfaces which can be used to implement new encoding submodules +# very easily. See encodings/utf_8.py for an example on how this is +# done. +# + +class StreamWriter(Codec): + + def __init__(self, stream, errors='strict'): + + """ Creates a StreamWriter instance. + + stream must be a file-like object open for writing. + + The StreamWriter may use different error handling + schemes by providing the errors keyword argument. These + parameters are predefined: + + 'strict' - raise a ValueError (or a subclass) + 'ignore' - ignore the character and continue with the next + 'replace'- replace with a suitable replacement character + 'xmlcharrefreplace' - Replace with the appropriate XML + character reference. + 'backslashreplace' - Replace with backslashed escape + sequences (only for encoding). + + The set of allowed parameter values can be extended via + register_error. + """ + self.stream = stream + self.errors = errors + + def write(self, object): + + """ Writes the object's contents encoded to self.stream. + """ + data, consumed = self.encode(object, self.errors) + self.stream.write(data) + + def writelines(self, list): + + """ Writes the concatenated list of strings to the stream + using .write(). + """ + self.write(''.join(list)) + + def reset(self): + + """ Flushes and resets the codec buffers used for keeping state. + + Calling this method should ensure that the data on the + output is put into a clean state, that allows appending + of new fresh data without having to rescan the whole + stream to recover state. + + """ + pass + + def seek(self, offset, whence=0): + self.stream.seek(offset, whence) + if whence == 0 and offset == 0: + self.reset() + + def __getattr__(self, name, + getattr=getattr): + + """ Inherit all other methods from the underlying stream. + """ + return getattr(self.stream, name) + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.stream.close() + +### + +class StreamReader(Codec): + + charbuffertype = str + + def __init__(self, stream, errors='strict'): + + """ Creates a StreamReader instance. + + stream must be a file-like object open for reading. + + The StreamReader may use different error handling + schemes by providing the errors keyword argument. These + parameters are predefined: + + 'strict' - raise a ValueError (or a subclass) + 'ignore' - ignore the character and continue with the next + 'replace'- replace with a suitable replacement character; + + The set of allowed parameter values can be extended via + register_error. 
+ """ + self.stream = stream + self.errors = errors + self.bytebuffer = b"" + self._empty_charbuffer = self.charbuffertype() + self.charbuffer = self._empty_charbuffer + self.linebuffer = None + + def decode(self, input, errors='strict'): + raise NotImplementedError + + def read(self, size=-1, chars=-1, firstline=False): + + """ Decodes data from the stream self.stream and returns the + resulting object. + + chars indicates the number of decoded code points or bytes to + return. read() will never return more data than requested, + but it might return less, if there is not enough available. + + size indicates the approximate maximum number of decoded + bytes or code points to read for decoding. The decoder + can modify this setting as appropriate. The default value + -1 indicates to read and decode as much as possible. size + is intended to prevent having to decode huge files in one + step. + + If firstline is true, and a UnicodeDecodeError happens + after the first line terminator in the input only the first line + will be returned, the rest of the input will be kept until the + next call to read(). + + The method should use a greedy read strategy, meaning that + it should read as much data as is allowed within the + definition of the encoding and the given size, e.g. if + optional encoding endings or state markers are available + on the stream, these should be read too. + """ + # If we have lines cached, first merge them back into characters + if self.linebuffer: + self.charbuffer = self._empty_charbuffer.join(self.linebuffer) + self.linebuffer = None + + # read until we get the required number of characters (if available) + while True: + # can the request be satisfied from the character buffer? + if chars >= 0: + if len(self.charbuffer) >= chars: + break + elif size >= 0: + if len(self.charbuffer) >= size: + break + # we need more data + if size < 0: + newdata = self.stream.read() + else: + newdata = self.stream.read(size) + # decode bytes (those remaining from the last call included) + data = self.bytebuffer + newdata + if not data: + break + try: + newchars, decodedbytes = self.decode(data, self.errors) + except UnicodeDecodeError as exc: + if firstline: + newchars, decodedbytes = \ + self.decode(data[:exc.start], self.errors) + lines = newchars.splitlines(keepends=True) + if len(lines)<=1: + raise + else: + raise + # keep undecoded bytes until the next call + self.bytebuffer = data[decodedbytes:] + # put new characters in the character buffer + self.charbuffer += newchars + # there was no data available + if not newdata: + break + if chars < 0: + # Return everything we've got + result = self.charbuffer + self.charbuffer = self._empty_charbuffer + else: + # Return the first chars characters + result = self.charbuffer[:chars] + self.charbuffer = self.charbuffer[chars:] + return result + + def readline(self, size=None, keepends=True): + + """ Read one line from the input stream and return the + decoded data. + + size, if given, is passed as size argument to the + read() method. 
+ + """ + # If we have lines cached from an earlier read, return + # them unconditionally + if self.linebuffer: + line = self.linebuffer[0] + del self.linebuffer[0] + if len(self.linebuffer) == 1: + # revert to charbuffer mode; we might need more data + # next time + self.charbuffer = self.linebuffer[0] + self.linebuffer = None + if not keepends: + line = line.splitlines(keepends=False)[0] + return line + + readsize = size or 72 + line = self._empty_charbuffer + # If size is given, we call read() only once + while True: + data = self.read(readsize, firstline=True) + if data: + # If we're at a "\r" read one extra character (which might + # be a "\n") to get a proper line ending. If the stream is + # temporarily exhausted we return the wrong line ending. + if (isinstance(data, str) and data.endswith("\r")) or \ + (isinstance(data, bytes) and data.endswith(b"\r")): + data += self.read(size=1, chars=1) + + line += data + lines = line.splitlines(keepends=True) + if lines: + if len(lines) > 1: + # More than one line result; the first line is a full line + # to return + line = lines[0] + del lines[0] + if len(lines) > 1: + # cache the remaining lines + lines[-1] += self.charbuffer + self.linebuffer = lines + self.charbuffer = None + else: + # only one remaining line, put it back into charbuffer + self.charbuffer = lines[0] + self.charbuffer + if not keepends: + line = line.splitlines(keepends=False)[0] + break + line0withend = lines[0] + line0withoutend = lines[0].splitlines(keepends=False)[0] + if line0withend != line0withoutend: # We really have a line end + # Put the rest back together and keep it until the next call + self.charbuffer = self._empty_charbuffer.join(lines[1:]) + \ + self.charbuffer + if keepends: + line = line0withend + else: + line = line0withoutend + break + # we didn't get anything or this was our only try + if not data or size is not None: + if line and not keepends: + line = line.splitlines(keepends=False)[0] + break + if readsize < 8000: + readsize *= 2 + return line + + def readlines(self, sizehint=None, keepends=True): + + """ Read all lines available on the input stream + and return them as a list. + + Line breaks are implemented using the codec's decoder + method and are included in the list entries. + + sizehint, if given, is ignored since there is no efficient + way to finding the true end-of-line. + + """ + data = self.read() + return data.splitlines(keepends) + + def reset(self): + + """ Resets the codec buffers used for keeping state. + + Note that no stream repositioning should take place. + This method is primarily intended to be able to recover + from decoding errors. + + """ + self.bytebuffer = b"" + self.charbuffer = self._empty_charbuffer + self.linebuffer = None + + def seek(self, offset, whence=0): + """ Set the input stream's current position. + + Resets the codec buffers used for keeping state. + """ + self.stream.seek(offset, whence) + self.reset() + + def __next__(self): + + """ Return the next decoded line from the input stream.""" + line = self.readline() + if line: + return line + raise StopIteration + + def __iter__(self): + return self + + def __getattr__(self, name, + getattr=getattr): + + """ Inherit all other methods from the underlying stream. + """ + return getattr(self.stream, name) + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.stream.close() + +### + +class StreamReaderWriter: + + """ StreamReaderWriter instances allow wrapping streams which + work in both read and write modes. 
+ + The design is such that one can use the factory functions + returned by the codec.lookup() function to construct the + instance. + + """ + # Optional attributes set by the file wrappers below + encoding = 'unknown' + + def __init__(self, stream, Reader, Writer, errors='strict'): + + """ Creates a StreamReaderWriter instance. + + stream must be a Stream-like object. + + Reader, Writer must be factory functions or classes + providing the StreamReader, StreamWriter interface resp. + + Error handling is done in the same way as defined for the + StreamWriter/Readers. + + """ + self.stream = stream + self.reader = Reader(stream, errors) + self.writer = Writer(stream, errors) + self.errors = errors + + def read(self, size=-1): + + return self.reader.read(size) + + def readline(self, size=None): + + return self.reader.readline(size) + + def readlines(self, sizehint=None): + + return self.reader.readlines(sizehint) + + def __next__(self): + + """ Return the next decoded line from the input stream.""" + return next(self.reader) + + def __iter__(self): + return self + + def write(self, data): + + return self.writer.write(data) + + def writelines(self, list): + + return self.writer.writelines(list) + + def reset(self): + + self.reader.reset() + self.writer.reset() + + def seek(self, offset, whence=0): + self.stream.seek(offset, whence) + self.reader.reset() + if whence == 0 and offset == 0: + self.writer.reset() + + def __getattr__(self, name, + getattr=getattr): + + """ Inherit all other methods from the underlying stream. + """ + return getattr(self.stream, name) + + # these are needed to make "with codecs.open(...)" work properly + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.stream.close() + +### + +class StreamRecoder: + + """ StreamRecoder instances translate data from one encoding to another. + + They use the complete set of APIs returned by the + codecs.lookup() function to implement their task. + + Data written to the StreamRecoder is first decoded into an + intermediate format (depending on the "decode" codec) and then + written to the underlying stream using an instance of the provided + Writer class. + + In the other direction, data is read from the underlying stream using + a Reader instance and then encoded and returned to the caller. + + """ + # Optional attributes set by the file wrappers below + data_encoding = 'unknown' + file_encoding = 'unknown' + + def __init__(self, stream, encode, decode, Reader, Writer, + errors='strict'): + + """ Creates a StreamRecoder instance which implements a two-way + conversion: encode and decode work on the frontend (the + data visible to .read() and .write()) while Reader and Writer + work on the backend (the data in stream). + + You can use these objects to do transparent + transcodings from e.g. latin-1 to utf-8 and back. + + stream must be a file-like object. + + encode and decode must adhere to the Codec interface; Reader and + Writer must be factory functions or classes providing the + StreamReader and StreamWriter interfaces resp. + + Error handling is done in the same way as defined for the + StreamWriter/Readers. 
+ + """ + self.stream = stream + self.encode = encode + self.decode = decode + self.reader = Reader(stream, errors) + self.writer = Writer(stream, errors) + self.errors = errors + + def read(self, size=-1): + + data = self.reader.read(size) + data, bytesencoded = self.encode(data, self.errors) + return data + + def readline(self, size=None): + + if size is None: + data = self.reader.readline() + else: + data = self.reader.readline(size) + data, bytesencoded = self.encode(data, self.errors) + return data + + def readlines(self, sizehint=None): + + data = self.reader.read() + data, bytesencoded = self.encode(data, self.errors) + return data.splitlines(keepends=True) + + def __next__(self): + + """ Return the next decoded line from the input stream.""" + data = next(self.reader) + data, bytesencoded = self.encode(data, self.errors) + return data + + def __iter__(self): + return self + + def write(self, data): + + data, bytesdecoded = self.decode(data, self.errors) + return self.writer.write(data) + + def writelines(self, list): + + data = ''.join(list) + data, bytesdecoded = self.decode(data, self.errors) + return self.writer.write(data) + + def reset(self): + + self.reader.reset() + self.writer.reset() + + def __getattr__(self, name, + getattr=getattr): + + """ Inherit all other methods from the underlying stream. + """ + return getattr(self.stream, name) + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.stream.close() + +### Shortcuts + +def open(filename, mode='r', encoding=None, errors='strict', buffering=1): + + """ Open an encoded file using the given mode and return + a wrapped version providing transparent encoding/decoding. + + Note: The wrapped version will only accept the object format + defined by the codecs, i.e. Unicode objects for most builtin + codecs. Output is also codec dependent and will usually be + Unicode as well. + + Underlying encoded files are always opened in binary mode. + The default file mode is 'r', meaning to open the file in read mode. + + encoding specifies the encoding which is to be used for the + file. + + errors may be given to define the error handling. It defaults + to 'strict' which causes ValueErrors to be raised in case an + encoding error occurs. + + buffering has the same meaning as for the builtin open() API. + It defaults to line buffered. + + The returned wrapped file object provides an extra attribute + .encoding which allows querying the used encoding. This + attribute is only available if an encoding was specified as + parameter. + + """ + if encoding is not None and \ + 'b' not in mode: + # Force opening of the file in binary mode + mode = mode + 'b' + file = builtins.open(filename, mode, buffering) + if encoding is None: + return file + info = lookup(encoding) + srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors) + # Add attributes to simplify introspection + srw.encoding = encoding + return srw + +def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'): + + """ Return a wrapped version of file which provides transparent + encoding translation. + + Data written to the wrapped file is decoded according + to the given data_encoding and then encoded to the underlying + file using file_encoding. The intermediate data type + will usually be Unicode but depends on the specified codecs. + + Bytes read from the file are decoded using file_encoding and then + passed back to the caller encoded using data_encoding. 
+ + If file_encoding is not given, it defaults to data_encoding. + + errors may be given to define the error handling. It defaults + to 'strict' which causes ValueErrors to be raised in case an + encoding error occurs. + + The returned wrapped file object provides two extra attributes + .data_encoding and .file_encoding which reflect the given + parameters of the same name. The attributes can be used for + introspection by Python programs. + + """ + if file_encoding is None: + file_encoding = data_encoding + data_info = lookup(data_encoding) + file_info = lookup(file_encoding) + sr = StreamRecoder(file, data_info.encode, data_info.decode, + file_info.streamreader, file_info.streamwriter, errors) + # Add attributes to simplify introspection + sr.data_encoding = data_encoding + sr.file_encoding = file_encoding + return sr + +### Helpers for codec lookup + +def getencoder(encoding): + + """ Lookup up the codec for the given encoding and return + its encoder function. + + Raises a LookupError in case the encoding cannot be found. + + """ + return lookup(encoding).encode + +def getdecoder(encoding): + + """ Lookup up the codec for the given encoding and return + its decoder function. + + Raises a LookupError in case the encoding cannot be found. + + """ + return lookup(encoding).decode + +def getincrementalencoder(encoding): + + """ Lookup up the codec for the given encoding and return + its IncrementalEncoder class or factory function. + + Raises a LookupError in case the encoding cannot be found + or the codecs doesn't provide an incremental encoder. + + """ + encoder = lookup(encoding).incrementalencoder + if encoder is None: + raise LookupError(encoding) + return encoder + +def getincrementaldecoder(encoding): + + """ Lookup up the codec for the given encoding and return + its IncrementalDecoder class or factory function. + + Raises a LookupError in case the encoding cannot be found + or the codecs doesn't provide an incremental decoder. + + """ + decoder = lookup(encoding).incrementaldecoder + if decoder is None: + raise LookupError(encoding) + return decoder + +def getreader(encoding): + + """ Lookup up the codec for the given encoding and return + its StreamReader class or factory function. + + Raises a LookupError in case the encoding cannot be found. + + """ + return lookup(encoding).streamreader + +def getwriter(encoding): + + """ Lookup up the codec for the given encoding and return + its StreamWriter class or factory function. + + Raises a LookupError in case the encoding cannot be found. + + """ + return lookup(encoding).streamwriter + +def iterencode(iterator, encoding, errors='strict', **kwargs): + """ + Encoding iterator. + + Encodes the input strings from the iterator using an IncrementalEncoder. + + errors and kwargs are passed through to the IncrementalEncoder + constructor. + """ + encoder = getincrementalencoder(encoding)(errors, **kwargs) + for input in iterator: + output = encoder.encode(input) + if output: + yield output + output = encoder.encode("", True) + if output: + yield output + +def iterdecode(iterator, encoding, errors='strict', **kwargs): + """ + Decoding iterator. + + Decodes the input strings from the iterator using an IncrementalDecoder. + + errors and kwargs are passed through to the IncrementalDecoder + constructor. 
+ """ + decoder = getincrementaldecoder(encoding)(errors, **kwargs) + for input in iterator: + output = decoder.decode(input) + if output: + yield output + output = decoder.decode(b"", True) + if output: + yield output + +### Helpers for charmap-based codecs + +def make_identity_dict(rng): + + """ make_identity_dict(rng) -> dict + + Return a dictionary where elements of the rng sequence are + mapped to themselves. + + """ + return {i:i for i in rng} + +def make_encoding_map(decoding_map): + + """ Creates an encoding map from a decoding map. + + If a target mapping in the decoding map occurs multiple + times, then that target is mapped to None (undefined mapping), + causing an exception when encountered by the charmap codec + during translation. + + One example where this happens is cp875.py which decodes + multiple character to \\u001a. + + """ + m = {} + for k,v in decoding_map.items(): + if not v in m: + m[v] = k + else: + m[v] = None + return m + +### error handlers + +try: + strict_errors = lookup_error("strict") + ignore_errors = lookup_error("ignore") + replace_errors = lookup_error("replace") + xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace") + backslashreplace_errors = lookup_error("backslashreplace") +except LookupError: + # In --disable-unicode builds, these error handler are missing + strict_errors = None + ignore_errors = None + replace_errors = None + xmlcharrefreplace_errors = None + backslashreplace_errors = None + +# Tell modulefinder that using codecs probably needs the encodings +# package +_false = 0 +if _false: + import encodings + +### Tests + +if __name__ == '__main__': + + # Make stdout translate Latin-1 output into UTF-8 output + sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8') + + # Have stdin translate Latin-1 input into UTF-8 input + sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1') diff --git a/v1/flask/lib/python3.4/copy.py b/v1/flask/lib/python3.4/copy.py deleted file mode 120000 index 991451b..0000000 --- a/v1/flask/lib/python3.4/copy.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/copy.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/copy.py b/v1/flask/lib/python3.4/copy.py new file mode 100644 index 0000000..f0fb443 --- /dev/null +++ b/v1/flask/lib/python3.4/copy.py @@ -0,0 +1,333 @@ +"""Generic (shallow and deep) copying operations. + +Interface summary: + + import copy + + x = copy.copy(y) # make a shallow copy of y + x = copy.deepcopy(y) # make a deep copy of y + +For module specific errors, copy.Error is raised. + +The difference between shallow and deep copying is only relevant for +compound objects (objects that contain other objects, like lists or +class instances). + +- A shallow copy constructs a new compound object and then (to the + extent possible) inserts *the same objects* into it that the + original contains. + +- A deep copy constructs a new compound object and then, recursively, + inserts *copies* into it of the objects found in the original. + +Two problems often exist with deep copy operations that don't exist +with shallow copy operations: + + a) recursive objects (compound objects that, directly or indirectly, + contain a reference to themselves) may cause a recursive loop + + b) because deep copy copies *everything* it may copy too much, e.g. 
+ administrative data structures that should be shared even between + copies + +Python's deep copy operation avoids these problems by: + + a) keeping a table of objects already copied during the current + copying pass + + b) letting user-defined classes override the copying operation or the + set of components copied + +This version does not copy types like module, class, function, method, +nor stack trace, stack frame, nor file, socket, window, nor array, nor +any similar types. + +Classes can use the same interfaces to control copying that they use +to control pickling: they can define methods called __getinitargs__(), +__getstate__() and __setstate__(). See the documentation for module +"pickle" for information on these methods. +""" + +import types +import weakref +from copyreg import dispatch_table +import builtins + +class Error(Exception): + pass +error = Error # backward compatibility + +try: + from org.python.core import PyStringMap +except ImportError: + PyStringMap = None + +__all__ = ["Error", "copy", "deepcopy"] + +def copy(x): + """Shallow copy operation on arbitrary Python objects. + + See the module's __doc__ string for more info. + """ + + cls = type(x) + + copier = _copy_dispatch.get(cls) + if copier: + return copier(x) + + try: + issc = issubclass(cls, type) + except TypeError: # cls is not a class + issc = False + if issc: + # treat it as a regular class: + return _copy_immutable(x) + + copier = getattr(cls, "__copy__", None) + if copier: + return copier(x) + + reductor = dispatch_table.get(cls) + if reductor: + rv = reductor(x) + else: + reductor = getattr(x, "__reduce_ex__", None) + if reductor: + rv = reductor(2) + else: + reductor = getattr(x, "__reduce__", None) + if reductor: + rv = reductor() + else: + raise Error("un(shallow)copyable object of type %s" % cls) + + return _reconstruct(x, rv, 0) + + +_copy_dispatch = d = {} + +def _copy_immutable(x): + return x +for t in (type(None), int, float, bool, str, tuple, + bytes, frozenset, type, range, + types.BuiltinFunctionType, type(Ellipsis), + types.FunctionType, weakref.ref): + d[t] = _copy_immutable +t = getattr(types, "CodeType", None) +if t is not None: + d[t] = _copy_immutable +for name in ("complex", "unicode"): + t = getattr(builtins, name, None) + if t is not None: + d[t] = _copy_immutable + +def _copy_with_constructor(x): + return type(x)(x) +for t in (list, dict, set): + d[t] = _copy_with_constructor + +def _copy_with_copy_method(x): + return x.copy() +if PyStringMap is not None: + d[PyStringMap] = _copy_with_copy_method + +del d + +def deepcopy(x, memo=None, _nil=[]): + """Deep copy operation on arbitrary Python objects. + + See the module's __doc__ string for more info. 
+ """ + + if memo is None: + memo = {} + + d = id(x) + y = memo.get(d, _nil) + if y is not _nil: + return y + + cls = type(x) + + copier = _deepcopy_dispatch.get(cls) + if copier: + y = copier(x, memo) + else: + try: + issc = issubclass(cls, type) + except TypeError: # cls is not a class (old Boost; see SF #502085) + issc = 0 + if issc: + y = _deepcopy_atomic(x, memo) + else: + copier = getattr(x, "__deepcopy__", None) + if copier: + y = copier(memo) + else: + reductor = dispatch_table.get(cls) + if reductor: + rv = reductor(x) + else: + reductor = getattr(x, "__reduce_ex__", None) + if reductor: + rv = reductor(2) + else: + reductor = getattr(x, "__reduce__", None) + if reductor: + rv = reductor() + else: + raise Error( + "un(deep)copyable object of type %s" % cls) + y = _reconstruct(x, rv, 1, memo) + + # If is its own copy, don't memoize. + if y is not x: + memo[d] = y + _keep_alive(x, memo) # Make sure x lives at least as long as d + return y + +_deepcopy_dispatch = d = {} + +def _deepcopy_atomic(x, memo): + return x +d[type(None)] = _deepcopy_atomic +d[type(Ellipsis)] = _deepcopy_atomic +d[int] = _deepcopy_atomic +d[float] = _deepcopy_atomic +d[bool] = _deepcopy_atomic +try: + d[complex] = _deepcopy_atomic +except NameError: + pass +d[bytes] = _deepcopy_atomic +d[str] = _deepcopy_atomic +try: + d[types.CodeType] = _deepcopy_atomic +except AttributeError: + pass +d[type] = _deepcopy_atomic +d[range] = _deepcopy_atomic +d[types.BuiltinFunctionType] = _deepcopy_atomic +d[types.FunctionType] = _deepcopy_atomic +d[weakref.ref] = _deepcopy_atomic + +def _deepcopy_list(x, memo): + y = [] + memo[id(x)] = y + for a in x: + y.append(deepcopy(a, memo)) + return y +d[list] = _deepcopy_list + +def _deepcopy_tuple(x, memo): + y = [] + for a in x: + y.append(deepcopy(a, memo)) + # We're not going to put the tuple in the memo, but it's still important we + # check for it, in case the tuple contains recursive mutable structures. + try: + return memo[id(x)] + except KeyError: + pass + for i in range(len(x)): + if x[i] is not y[i]: + y = tuple(y) + break + else: + y = x + return y +d[tuple] = _deepcopy_tuple + +def _deepcopy_dict(x, memo): + y = {} + memo[id(x)] = y + for key, value in x.items(): + y[deepcopy(key, memo)] = deepcopy(value, memo) + return y +d[dict] = _deepcopy_dict +if PyStringMap is not None: + d[PyStringMap] = _deepcopy_dict + +def _deepcopy_method(x, memo): # Copy instance methods + return type(x)(x.__func__, deepcopy(x.__self__, memo)) +_deepcopy_dispatch[types.MethodType] = _deepcopy_method + +def _keep_alive(x, memo): + """Keeps a reference to the object x in the memo. + + Because we remember objects by their id, we have + to assure that possibly temporary objects are kept + alive by referencing them. + We store a reference at the id of the memo, which should + normally not be used unless someone tries to deepcopy + the memo itself... 
+ """ + try: + memo[id(memo)].append(x) + except KeyError: + # aha, this is the first one :-) + memo[id(memo)]=[x] + +def _reconstruct(x, info, deep, memo=None): + if isinstance(info, str): + return x + assert isinstance(info, tuple) + if memo is None: + memo = {} + n = len(info) + assert n in (2, 3, 4, 5) + callable, args = info[:2] + if n > 2: + state = info[2] + else: + state = None + if n > 3: + listiter = info[3] + else: + listiter = None + if n > 4: + dictiter = info[4] + else: + dictiter = None + if deep: + args = deepcopy(args, memo) + y = callable(*args) + memo[id(x)] = y + + if state is not None: + if deep: + state = deepcopy(state, memo) + if hasattr(y, '__setstate__'): + y.__setstate__(state) + else: + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + else: + slotstate = None + if state is not None: + y.__dict__.update(state) + if slotstate is not None: + for key, value in slotstate.items(): + setattr(y, key, value) + + if listiter is not None: + for item in listiter: + if deep: + item = deepcopy(item, memo) + y.append(item) + if dictiter is not None: + for key, value in dictiter: + if deep: + key = deepcopy(key, memo) + value = deepcopy(value, memo) + y[key] = value + return y + +del d + +del types + +# Helper for instance creation without calling __init__ +class _EmptyClass: + pass diff --git a/v1/flask/lib/python3.4/copyreg.py b/v1/flask/lib/python3.4/copyreg.py deleted file mode 120000 index dbeda4c..0000000 --- a/v1/flask/lib/python3.4/copyreg.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/copyreg.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/copyreg.py b/v1/flask/lib/python3.4/copyreg.py new file mode 100644 index 0000000..67f5bb0 --- /dev/null +++ b/v1/flask/lib/python3.4/copyreg.py @@ -0,0 +1,202 @@ +"""Helper to provide extensibility for pickle. + +This is only useful to add pickle support for extension types defined in +C, not for instances of user-defined classes. +""" + +__all__ = ["pickle", "constructor", + "add_extension", "remove_extension", "clear_extension_cache"] + +dispatch_table = {} + +def pickle(ob_type, pickle_function, constructor_ob=None): + if not callable(pickle_function): + raise TypeError("reduction functions must be callable") + dispatch_table[ob_type] = pickle_function + + # The constructor_ob function is a vestige of safe for unpickling. + # There is no reason for the caller to pass it anymore. + if constructor_ob is not None: + constructor(constructor_ob) + +def constructor(object): + if not callable(object): + raise TypeError("constructors must be callable") + +# Example: provide pickling support for complex numbers. 
+ +try: + complex +except NameError: + pass +else: + + def pickle_complex(c): + return complex, (c.real, c.imag) + + pickle(complex, pickle_complex, complex) + +# Support for pickling new-style objects + +def _reconstructor(cls, base, state): + if base is object: + obj = object.__new__(cls) + else: + obj = base.__new__(cls, state) + if base.__init__ != object.__init__: + base.__init__(obj, state) + return obj + +_HEAPTYPE = 1<<9 + +# Python code for object.__reduce_ex__ for protocols 0 and 1 + +def _reduce_ex(self, proto): + assert proto < 2 + for base in self.__class__.__mro__: + if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE: + break + else: + base = object # not really reachable + if base is object: + state = None + else: + if base is self.__class__: + raise TypeError("can't pickle %s objects" % base.__name__) + state = base(self) + args = (self.__class__, base, state) + try: + getstate = self.__getstate__ + except AttributeError: + if getattr(self, "__slots__", None): + raise TypeError("a class that defines __slots__ without " + "defining __getstate__ cannot be pickled") + try: + dict = self.__dict__ + except AttributeError: + dict = None + else: + dict = getstate() + if dict: + return _reconstructor, args, dict + else: + return _reconstructor, args + +# Helper for __reduce_ex__ protocol 2 + +def __newobj__(cls, *args): + return cls.__new__(cls, *args) + +def __newobj_ex__(cls, args, kwargs): + """Used by pickle protocol 4, instead of __newobj__ to allow classes with + keyword-only arguments to be pickled correctly. + """ + return cls.__new__(cls, *args, **kwargs) + +def _slotnames(cls): + """Return a list of slot names for a given class. + + This needs to find slots defined by the class and its bases, so we + can't simply return the __slots__ attribute. We must walk down + the Method Resolution Order and concatenate the __slots__ of each + class found there. (This assumes classes don't modify their + __slots__ attribute to misrepresent their slots after the class is + defined.) + """ + + # Get the value from a cache in the class if possible + names = cls.__dict__.get("__slotnames__") + if names is not None: + return names + + # Not cached -- calculate the value + names = [] + if not hasattr(cls, "__slots__"): + # This class has no slots + pass + else: + # Slots found -- gather slot names from all base classes + for c in cls.__mro__: + if "__slots__" in c.__dict__: + slots = c.__dict__['__slots__'] + # if class has a single slot, it can be given as a string + if isinstance(slots, str): + slots = (slots,) + for name in slots: + # special descriptors + if name in ("__dict__", "__weakref__"): + continue + # mangled names + elif name.startswith('__') and not name.endswith('__'): + names.append('_%s%s' % (c.__name__, name)) + else: + names.append(name) + + # Cache the outcome in the class if at all possible + try: + cls.__slotnames__ = names + except: + pass # But don't die if we can't + + return names + +# A registry of extension codes. This is an ad-hoc compression +# mechanism. Whenever a global reference to , is about +# to be pickled, the (, ) tuple is looked up here to see +# if it is a registered extension code for it. Extension codes are +# universal, so that the meaning of a pickle does not depend on +# context. (There are also some codes reserved for local use that +# don't have this restriction.) Codes are positive ints; 0 is +# reserved. 
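A short sketch of the extension-code registry the comment above describes, using add_extension()/remove_extension() as defined just below; the (module, name) pair is illustrative, and 240 is taken from the private-use range listed at the end of this file:

import copyreg

copyreg.add_extension('collections', 'OrderedDict', 240)     # codes 240-255 are reserved for private use
copyreg.add_extension('collections', 'OrderedDict', 240)     # redundant registration is benign
copyreg.remove_extension('collections', 'OrderedDict', 240)  # "for testing only", per its docstring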
+ +_extension_registry = {} # key -> code +_inverted_registry = {} # code -> key +_extension_cache = {} # code -> object +# Don't ever rebind those names: pickling grabs a reference to them when +# it's initialized, and won't see a rebinding. + +def add_extension(module, name, code): + """Register an extension code.""" + code = int(code) + if not 1 <= code <= 0x7fffffff: + raise ValueError("code out of range") + key = (module, name) + if (_extension_registry.get(key) == code and + _inverted_registry.get(code) == key): + return # Redundant registrations are benign + if key in _extension_registry: + raise ValueError("key %s is already registered with code %s" % + (key, _extension_registry[key])) + if code in _inverted_registry: + raise ValueError("code %s is already in use for key %s" % + (code, _inverted_registry[code])) + _extension_registry[key] = code + _inverted_registry[code] = key + +def remove_extension(module, name, code): + """Unregister an extension code. For testing only.""" + key = (module, name) + if (_extension_registry.get(key) != code or + _inverted_registry.get(code) != key): + raise ValueError("key %s is not registered with code %s" % + (key, code)) + del _extension_registry[key] + del _inverted_registry[code] + if code in _extension_cache: + del _extension_cache[code] + +def clear_extension_cache(): + _extension_cache.clear() + +# Standard extension code assignments + +# Reserved ranges + +# First Last Count Purpose +# 1 127 127 Reserved for Python standard library +# 128 191 64 Reserved for Zope +# 192 239 48 Reserved for 3rd parties +# 240 255 16 Reserved for private use (will never be assigned) +# 256 Inf Inf Reserved for future assignment + +# Extension codes are assigned by the Python Software Foundation. diff --git a/v1/flask/lib/python3.4/fnmatch.py b/v1/flask/lib/python3.4/fnmatch.py deleted file mode 120000 index 3e55c05..0000000 --- a/v1/flask/lib/python3.4/fnmatch.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/fnmatch.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/fnmatch.py b/v1/flask/lib/python3.4/fnmatch.py new file mode 100644 index 0000000..6330b0c --- /dev/null +++ b/v1/flask/lib/python3.4/fnmatch.py @@ -0,0 +1,109 @@ +"""Filename matching with shell patterns. + +fnmatch(FILENAME, PATTERN) matches according to the local convention. +fnmatchcase(FILENAME, PATTERN) always takes case in account. + +The functions operate by translating the pattern into a regular +expression. They cache the compiled regular expressions for speed. + +The function translate(PATTERN) returns a regular expression +corresponding to PATTERN. (It does not compile it.) +""" +import os +import posixpath +import re +import functools + +__all__ = ["filter", "fnmatch", "fnmatchcase", "translate"] + +def fnmatch(name, pat): + """Test whether FILENAME matches PATTERN. + + Patterns are Unix shell style: + + * matches everything + ? matches any single character + [seq] matches any character in seq + [!seq] matches any char not in seq + + An initial period in FILENAME is not special. + Both FILENAME and PATTERN are first case-normalized + if the operating system requires it. + If you don't want this, use fnmatchcase(FILENAME, PATTERN). 
+    """
+    name = os.path.normcase(name)
+    pat = os.path.normcase(pat)
+    return fnmatchcase(name, pat)
+
+@functools.lru_cache(maxsize=256, typed=True)
+def _compile_pattern(pat):
+    if isinstance(pat, bytes):
+        pat_str = str(pat, 'ISO-8859-1')
+        res_str = translate(pat_str)
+        res = bytes(res_str, 'ISO-8859-1')
+    else:
+        res = translate(pat)
+    return re.compile(res).match
+
+def filter(names, pat):
+    """Return the subset of the list NAMES that match PAT."""
+    result = []
+    pat = os.path.normcase(pat)
+    match = _compile_pattern(pat)
+    if os.path is posixpath:
+        # normcase on posix is NOP. Optimize it away from the loop.
+        for name in names:
+            if match(name):
+                result.append(name)
+    else:
+        for name in names:
+            if match(os.path.normcase(name)):
+                result.append(name)
+    return result
+
+def fnmatchcase(name, pat):
+    """Test whether FILENAME matches PATTERN, including case.
+
+    This is a version of fnmatch() which doesn't case-normalize
+    its arguments.
+    """
+    match = _compile_pattern(pat)
+    return match(name) is not None
+
+
+def translate(pat):
+    """Translate a shell PATTERN to a regular expression.
+
+    There is no way to quote meta-characters.
+    """
+
+    i, n = 0, len(pat)
+    res = ''
+    while i < n:
+        c = pat[i]
+        i = i+1
+        if c == '*':
+            res = res + '.*'
+        elif c == '?':
+            res = res + '.'
+        elif c == '[':
+            j = i
+            if j < n and pat[j] == '!':
+                j = j+1
+            if j < n and pat[j] == ']':
+                j = j+1
+            while j < n and pat[j] != ']':
+                j = j+1
+            if j >= n:
+                res = res + '\\['
+            else:
+                stuff = pat[i:j].replace('\\','\\\\')
+                i = j+1
+                if stuff[0] == '!':
+                    stuff = '^' + stuff[1:]
+                elif stuff[0] == '^':
+                    stuff = '\\' + stuff
+                res = '%s[%s]' % (res, stuff)
+        else:
+            res = res + re.escape(c)
+    return res + '\Z(?ms)'
diff --git a/v1/flask/lib/python3.4/functools.py b/v1/flask/lib/python3.4/functools.py
deleted file mode 120000
index fbbc0da..0000000
--- a/v1/flask/lib/python3.4/functools.py
+++ /dev/null
@@ -1 +0,0 @@
-/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/functools.py
\ No newline at end of file
diff --git a/v1/flask/lib/python3.4/functools.py b/v1/flask/lib/python3.4/functools.py
new file mode 100644
index 0000000..2c299d7
--- /dev/null
+++ b/v1/flask/lib/python3.4/functools.py
@@ -0,0 +1,735 @@
+"""functools.py - Tools for working with functions and callable objects
+"""
+# Python module wrapper for _functools C module
+# to allow utilities written in Python to be added
+# to the functools module.
+# Written by Nick Coghlan <ncoghlan at gmail.com>,
+# Raymond Hettinger <python at rcn.com>,
+# and Łukasz Langa <lukasz at langa.pl>.
+# Copyright (C) 2006-2013 Python Software Foundation.
+# See C source code for _functools credits/copyright + +__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES', + 'total_ordering', 'cmp_to_key', 'lru_cache', 'reduce', 'partial', + 'partialmethod', 'singledispatch'] + +try: + from _functools import reduce +except ImportError: + pass +from abc import get_cache_token +from collections import namedtuple +from types import MappingProxyType +from weakref import WeakKeyDictionary +try: + from _thread import RLock +except: + class RLock: + 'Dummy reentrant lock for builds without threads' + def __enter__(self): pass + def __exit__(self, exctype, excinst, exctb): pass + + +################################################################################ +### update_wrapper() and wraps() decorator +################################################################################ + +# update_wrapper() and wraps() are tools to help write +# wrapper functions that can handle naive introspection + +WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__qualname__', '__doc__', + '__annotations__') +WRAPPER_UPDATES = ('__dict__',) +def update_wrapper(wrapper, + wrapped, + assigned = WRAPPER_ASSIGNMENTS, + updated = WRAPPER_UPDATES): + """Update a wrapper function to look like the wrapped function + + wrapper is the function to be updated + wrapped is the original function + assigned is a tuple naming the attributes assigned directly + from the wrapped function to the wrapper function (defaults to + functools.WRAPPER_ASSIGNMENTS) + updated is a tuple naming the attributes of the wrapper that + are updated with the corresponding attribute from the wrapped + function (defaults to functools.WRAPPER_UPDATES) + """ + for attr in assigned: + try: + value = getattr(wrapped, attr) + except AttributeError: + pass + else: + setattr(wrapper, attr, value) + for attr in updated: + getattr(wrapper, attr).update(getattr(wrapped, attr, {})) + # Issue #17482: set __wrapped__ last so we don't inadvertently copy it + # from the wrapped function when updating __dict__ + wrapper.__wrapped__ = wrapped + # Return the wrapper so this can be used as a decorator via partial() + return wrapper + +def wraps(wrapped, + assigned = WRAPPER_ASSIGNMENTS, + updated = WRAPPER_UPDATES): + """Decorator factory to apply update_wrapper() to a wrapper function + + Returns a decorator that invokes update_wrapper() with the decorated + function as the wrapper argument and the arguments to wraps() as the + remaining arguments. Default arguments are as for update_wrapper(). + This is a convenience function to simplify applying partial() to + update_wrapper(). + """ + return partial(update_wrapper, wrapped=wrapped, + assigned=assigned, updated=updated) + + +################################################################################ +### total_ordering class decorator +################################################################################ + +# The total ordering functions all invoke the root magic method directly +# rather than using the corresponding operator. This avoids possible +# infinite recursion that could occur when the operator dispatch logic +# detects a NotImplemented result and then calls a reflected method. + +def _gt_from_lt(self, other): + 'Return a > b. Computed by @total_ordering from (not a < b) and (a != b).' + op_result = self.__lt__(other) + if op_result is NotImplemented: + return NotImplemented + return not op_result and self != other + +def _le_from_lt(self, other): + 'Return a <= b. Computed by @total_ordering from (a < b) or (a == b).' 
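+    # (Editorial note: unlike the sibling helpers, this one omits the explicit
+    # NotImplemented guard.  NotImplemented is truthy, so `op_result or
+    # self == other` still propagates it unchanged when __lt__ does not
+    # support the operand; later CPython versions add the check anyway.)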
+ op_result = self.__lt__(other) + return op_result or self == other + +def _ge_from_lt(self, other): + 'Return a >= b. Computed by @total_ordering from (not a < b).' + op_result = self.__lt__(other) + if op_result is NotImplemented: + return NotImplemented + return not op_result + +def _ge_from_le(self, other): + 'Return a >= b. Computed by @total_ordering from (not a <= b) or (a == b).' + op_result = self.__le__(other) + if op_result is NotImplemented: + return NotImplemented + return not op_result or self == other + +def _lt_from_le(self, other): + 'Return a < b. Computed by @total_ordering from (a <= b) and (a != b).' + op_result = self.__le__(other) + if op_result is NotImplemented: + return NotImplemented + return op_result and self != other + +def _gt_from_le(self, other): + 'Return a > b. Computed by @total_ordering from (not a <= b).' + op_result = self.__le__(other) + if op_result is NotImplemented: + return NotImplemented + return not op_result + +def _lt_from_gt(self, other): + 'Return a < b. Computed by @total_ordering from (not a > b) and (a != b).' + op_result = self.__gt__(other) + if op_result is NotImplemented: + return NotImplemented + return not op_result and self != other + +def _ge_from_gt(self, other): + 'Return a >= b. Computed by @total_ordering from (a > b) or (a == b).' + op_result = self.__gt__(other) + return op_result or self == other + +def _le_from_gt(self, other): + 'Return a <= b. Computed by @total_ordering from (not a > b).' + op_result = self.__gt__(other) + if op_result is NotImplemented: + return NotImplemented + return not op_result + +def _le_from_ge(self, other): + 'Return a <= b. Computed by @total_ordering from (not a >= b) or (a == b).' + op_result = self.__ge__(other) + if op_result is NotImplemented: + return NotImplemented + return not op_result or self == other + +def _gt_from_ge(self, other): + 'Return a > b. Computed by @total_ordering from (a >= b) and (a != b).' + op_result = self.__ge__(other) + if op_result is NotImplemented: + return NotImplemented + return op_result and self != other + +def _lt_from_ge(self, other): + 'Return a < b. Computed by @total_ordering from (not a >= b).' + op_result = self.__ge__(other) + if op_result is NotImplemented: + return NotImplemented + return not op_result + +def total_ordering(cls): + """Class decorator that fills in missing ordering methods""" + convert = { + '__lt__': [('__gt__', _gt_from_lt), + ('__le__', _le_from_lt), + ('__ge__', _ge_from_lt)], + '__le__': [('__ge__', _ge_from_le), + ('__lt__', _lt_from_le), + ('__gt__', _gt_from_le)], + '__gt__': [('__lt__', _lt_from_gt), + ('__ge__', _ge_from_gt), + ('__le__', _le_from_gt)], + '__ge__': [('__le__', _le_from_ge), + ('__gt__', _gt_from_ge), + ('__lt__', _lt_from_ge)] + } + # Find user-defined comparisons (not those inherited from object). 
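+    # (Editorial illustration, not in the upstream source -- typical usage;
+    # the class below is invented:
+    #
+    #     @total_ordering
+    #     class Version:
+    #         def __init__(self, n): self.n = n
+    #         def __eq__(self, other): return self.n == other.n
+    #         def __lt__(self, other): return self.n < other.n
+    #
+    # Version(1) <= Version(2) then works because __le__, __gt__ and __ge__
+    # are synthesized from the user-defined __lt__ via the table above.)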
+ roots = [op for op in convert if getattr(cls, op, None) is not getattr(object, op, None)] + if not roots: + raise ValueError('must define at least one ordering operation: < > <= >=') + root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__ + for opname, opfunc in convert[root]: + if opname not in roots: + opfunc.__name__ = opname + setattr(cls, opname, opfunc) + return cls + + +################################################################################ +### cmp_to_key() function converter +################################################################################ + +def cmp_to_key(mycmp): + """Convert a cmp= function into a key= function""" + class K(object): + __slots__ = ['obj'] + def __init__(self, obj): + self.obj = obj + def __lt__(self, other): + return mycmp(self.obj, other.obj) < 0 + def __gt__(self, other): + return mycmp(self.obj, other.obj) > 0 + def __eq__(self, other): + return mycmp(self.obj, other.obj) == 0 + def __le__(self, other): + return mycmp(self.obj, other.obj) <= 0 + def __ge__(self, other): + return mycmp(self.obj, other.obj) >= 0 + def __ne__(self, other): + return mycmp(self.obj, other.obj) != 0 + __hash__ = None + return K + +try: + from _functools import cmp_to_key +except ImportError: + pass + + +################################################################################ +### partial() argument application +################################################################################ + +# Purely functional, no descriptor behaviour +def partial(func, *args, **keywords): + """New function with partial application of the given arguments + and keywords. + """ + def newfunc(*fargs, **fkeywords): + newkeywords = keywords.copy() + newkeywords.update(fkeywords) + return func(*(args + fargs), **newkeywords) + newfunc.func = func + newfunc.args = args + newfunc.keywords = keywords + return newfunc + +try: + from _functools import partial +except ImportError: + pass + +# Descriptor version +class partialmethod(object): + """Method descriptor with partial application of the given arguments + and keywords. + + Supports wrapping existing descriptors and handles non-descriptor + callables as instance methods. 
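+
+    Illustrative example (an editorial addition; the class is invented):
+
+    >>> class Cell:
+    ...     def set_state(self, state):
+    ...         self.state = state
+    ...     set_alive = partialmethod(set_state, True)
+    >>> c = Cell()
+    >>> c.set_alive()
+    >>> c.state
+    True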
+ """ + + def __init__(self, func, *args, **keywords): + if not callable(func) and not hasattr(func, "__get__"): + raise TypeError("{!r} is not callable or a descriptor" + .format(func)) + + # func could be a descriptor like classmethod which isn't callable, + # so we can't inherit from partial (it verifies func is callable) + if isinstance(func, partialmethod): + # flattening is mandatory in order to place cls/self before all + # other arguments + # it's also more efficient since only one function will be called + self.func = func.func + self.args = func.args + args + self.keywords = func.keywords.copy() + self.keywords.update(keywords) + else: + self.func = func + self.args = args + self.keywords = keywords + + def __repr__(self): + args = ", ".join(map(repr, self.args)) + keywords = ", ".join("{}={!r}".format(k, v) + for k, v in self.keywords.items()) + format_string = "{module}.{cls}({func}, {args}, {keywords})" + return format_string.format(module=self.__class__.__module__, + cls=self.__class__.__name__, + func=self.func, + args=args, + keywords=keywords) + + def _make_unbound_method(self): + def _method(*args, **keywords): + call_keywords = self.keywords.copy() + call_keywords.update(keywords) + cls_or_self, *rest = args + call_args = (cls_or_self,) + self.args + tuple(rest) + return self.func(*call_args, **call_keywords) + _method.__isabstractmethod__ = self.__isabstractmethod__ + _method._partialmethod = self + return _method + + def __get__(self, obj, cls): + get = getattr(self.func, "__get__", None) + result = None + if get is not None: + new_func = get(obj, cls) + if new_func is not self.func: + # Assume __get__ returning something new indicates the + # creation of an appropriate callable + result = partial(new_func, *self.args, **self.keywords) + try: + result.__self__ = new_func.__self__ + except AttributeError: + pass + if result is None: + # If the underlying descriptor didn't do anything, treat this + # like an instance method + result = self._make_unbound_method().__get__(obj, cls) + return result + + @property + def __isabstractmethod__(self): + return getattr(self.func, "__isabstractmethod__", False) + + +################################################################################ +### LRU Cache function decorator +################################################################################ + +_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"]) + +class _HashedSeq(list): + """ This class guarantees that hash() will be called no more than once + per element. This is important because the lru_cache() will hash + the key multiple times on a cache miss. + + """ + + __slots__ = 'hashvalue' + + def __init__(self, tup, hash=hash): + self[:] = tup + self.hashvalue = hash(tup) + + def __hash__(self): + return self.hashvalue + +def _make_key(args, kwds, typed, + kwd_mark = (object(),), + fasttypes = {int, str, frozenset, type(None)}, + sorted=sorted, tuple=tuple, type=type, len=len): + """Make a cache key from optionally typed positional and keyword arguments + + The key is constructed in a way that is flat as possible rather than + as a nested structure that would take more memory. + + If there is only a single argument and its data type is known to cache + its hash value, then that argument is returned without a wrapper. This + saves space and improves lookup speed. 
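+
+    For example (editorial illustration): _make_key((1, 2), {'a': 3}, False)
+    produces the flat key (1, 2, <kwd_mark>, 'a', 3), wrapped in a _HashedSeq
+    so the hash is computed only once.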
+ + """ + key = args + if kwds: + sorted_items = sorted(kwds.items()) + key += kwd_mark + for item in sorted_items: + key += item + if typed: + key += tuple(type(v) for v in args) + if kwds: + key += tuple(type(v) for k, v in sorted_items) + elif len(key) == 1 and type(key[0]) in fasttypes: + return key[0] + return _HashedSeq(key) + +def lru_cache(maxsize=128, typed=False): + """Least-recently-used cache decorator. + + If *maxsize* is set to None, the LRU features are disabled and the cache + can grow without bound. + + If *typed* is True, arguments of different types will be cached separately. + For example, f(3.0) and f(3) will be treated as distinct calls with + distinct results. + + Arguments to the cached function must be hashable. + + View the cache statistics named tuple (hits, misses, maxsize, currsize) + with f.cache_info(). Clear the cache and statistics with f.cache_clear(). + Access the underlying function with f.__wrapped__. + + See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used + + """ + + # Users should only access the lru_cache through its public API: + # cache_info, cache_clear, and f.__wrapped__ + # The internals of the lru_cache are encapsulated for thread safety and + # to allow the implementation to change (including a possible C version). + + # Early detection of an erroneous call to @lru_cache without any arguments + # resulting in the inner function being passed to maxsize instead of an + # integer or None. + if maxsize is not None and not isinstance(maxsize, int): + raise TypeError('Expected maxsize to be an integer or None') + + # Constants shared by all lru cache instances: + sentinel = object() # unique object used to signal cache misses + make_key = _make_key # build a key from the function arguments + PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields + + def decorating_function(user_function): + cache = {} + hits = misses = 0 + full = False + cache_get = cache.get # bound method to lookup a key or return None + lock = RLock() # because linkedlist updates aren't threadsafe + root = [] # root of the circular doubly linked list + root[:] = [root, root, None, None] # initialize by pointing to self + + if maxsize == 0: + + def wrapper(*args, **kwds): + # No caching -- just a statistics update after a successful call + nonlocal misses + result = user_function(*args, **kwds) + misses += 1 + return result + + elif maxsize is None: + + def wrapper(*args, **kwds): + # Simple caching without ordering or size limit + nonlocal hits, misses + key = make_key(args, kwds, typed) + result = cache_get(key, sentinel) + if result is not sentinel: + hits += 1 + return result + result = user_function(*args, **kwds) + cache[key] = result + misses += 1 + return result + + else: + + def wrapper(*args, **kwds): + # Size limited caching that tracks accesses by recency + nonlocal root, hits, misses, full + key = make_key(args, kwds, typed) + with lock: + link = cache_get(key) + if link is not None: + # Move the link to the front of the circular queue + link_prev, link_next, _key, result = link + link_prev[NEXT] = link_next + link_next[PREV] = link_prev + last = root[PREV] + last[NEXT] = root[PREV] = link + link[PREV] = last + link[NEXT] = root + hits += 1 + return result + result = user_function(*args, **kwds) + with lock: + if key in cache: + # Getting here means that this same key was added to the + # cache while the lock was released. Since the link + # update is already done, we need only return the + # computed result and update the count of misses. 
+ pass + elif full: + # Use the old root to store the new key and result. + oldroot = root + oldroot[KEY] = key + oldroot[RESULT] = result + # Empty the oldest link and make it the new root. + # Keep a reference to the old key and old result to + # prevent their ref counts from going to zero during the + # update. That will prevent potentially arbitrary object + # clean-up code (i.e. __del__) from running while we're + # still adjusting the links. + root = oldroot[NEXT] + oldkey = root[KEY] + oldresult = root[RESULT] + root[KEY] = root[RESULT] = None + # Now update the cache dictionary. + del cache[oldkey] + # Save the potentially reentrant cache[key] assignment + # for last, after the root and links have been put in + # a consistent state. + cache[key] = oldroot + else: + # Put result in a new link at the front of the queue. + last = root[PREV] + link = [last, root, key, result] + last[NEXT] = root[PREV] = cache[key] = link + full = (len(cache) >= maxsize) + misses += 1 + return result + + def cache_info(): + """Report cache statistics""" + with lock: + return _CacheInfo(hits, misses, maxsize, len(cache)) + + def cache_clear(): + """Clear the cache and cache statistics""" + nonlocal hits, misses, full + with lock: + cache.clear() + root[:] = [root, root, None, None] + hits = misses = 0 + full = False + + wrapper.cache_info = cache_info + wrapper.cache_clear = cache_clear + return update_wrapper(wrapper, user_function) + + return decorating_function + + +################################################################################ +### singledispatch() - single-dispatch generic function decorator +################################################################################ + +def _c3_merge(sequences): + """Merges MROs in *sequences* to a single MRO using the C3 algorithm. + + Adapted from http://www.python.org/download/releases/2.3/mro/. + + """ + result = [] + while True: + sequences = [s for s in sequences if s] # purge empty sequences + if not sequences: + return result + for s1 in sequences: # find merge candidates among seq heads + candidate = s1[0] + for s2 in sequences: + if candidate in s2[1:]: + candidate = None + break # reject the current head, it appears later + else: + break + if candidate is None: + raise RuntimeError("Inconsistent hierarchy") + result.append(candidate) + # remove the chosen candidate + for seq in sequences: + if seq[0] == candidate: + del seq[0] + +def _c3_mro(cls, abcs=None): + """Computes the method resolution order using extended C3 linearization. + + If no *abcs* are given, the algorithm works exactly like the built-in C3 + linearization used for method resolution. + + If given, *abcs* is a list of abstract base classes that should be inserted + into the resulting MRO. Unrelated ABCs are ignored and don't end up in the + result. The algorithm inserts ABCs where their functionality is introduced, + i.e. issubclass(cls, abc) returns True for the class itself but returns + False for all its direct base classes. Implicit ABCs for a given class + (either registered or inferred from the presence of a special method like + __len__) are inserted directly after the last ABC explicitly listed in the + MRO of said class. If two implicit ABCs end up next to each other in the + resulting MRO, their ordering depends on the order of types in *abcs*. + + """ + for i, base in enumerate(reversed(cls.__bases__)): + if hasattr(base, '__abstractmethods__'): + boundary = len(cls.__bases__) - i + break # Bases up to the last explicit ABC are considered first. 
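+    # (Editorial note: the `else` below belongs to the `for` loop -- it runs
+    # only when no base defines __abstractmethods__, i.e. no break occurred,
+    # and the boundary then collapses to 0.)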
+ else: + boundary = 0 + abcs = list(abcs) if abcs else [] + explicit_bases = list(cls.__bases__[:boundary]) + abstract_bases = [] + other_bases = list(cls.__bases__[boundary:]) + for base in abcs: + if issubclass(cls, base) and not any( + issubclass(b, base) for b in cls.__bases__ + ): + # If *cls* is the class that introduces behaviour described by + # an ABC *base*, insert said ABC to its MRO. + abstract_bases.append(base) + for base in abstract_bases: + abcs.remove(base) + explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases] + abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases] + other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases] + return _c3_merge( + [[cls]] + + explicit_c3_mros + abstract_c3_mros + other_c3_mros + + [explicit_bases] + [abstract_bases] + [other_bases] + ) + +def _compose_mro(cls, types): + """Calculates the method resolution order for a given class *cls*. + + Includes relevant abstract base classes (with their respective bases) from + the *types* iterable. Uses a modified C3 linearization algorithm. + + """ + bases = set(cls.__mro__) + # Remove entries which are already present in the __mro__ or unrelated. + def is_related(typ): + return (typ not in bases and hasattr(typ, '__mro__') + and issubclass(cls, typ)) + types = [n for n in types if is_related(n)] + # Remove entries which are strict bases of other entries (they will end up + # in the MRO anyway. + def is_strict_base(typ): + for other in types: + if typ != other and typ in other.__mro__: + return True + return False + types = [n for n in types if not is_strict_base(n)] + # Subclasses of the ABCs in *types* which are also implemented by + # *cls* can be used to stabilize ABC ordering. + type_set = set(types) + mro = [] + for typ in types: + found = [] + for sub in typ.__subclasses__(): + if sub not in bases and issubclass(cls, sub): + found.append([s for s in sub.__mro__ if s in type_set]) + if not found: + mro.append(typ) + continue + # Favor subclasses with the biggest number of useful bases + found.sort(key=len, reverse=True) + for sub in found: + for subcls in sub: + if subcls not in mro: + mro.append(subcls) + return _c3_mro(cls, abcs=mro) + +def _find_impl(cls, registry): + """Returns the best matching implementation from *registry* for type *cls*. + + Where there is no registered implementation for a specific type, its method + resolution order is used to find a more generic implementation. + + Note: if *registry* does not contain an implementation for the base + *object* type, this function may return None. + + """ + mro = _compose_mro(cls, registry.keys()) + match = None + for t in mro: + if match is not None: + # If *match* is an implicit ABC but there is another unrelated, + # equally matching implicit ABC, refuse the temptation to guess. + if (t in registry and t not in cls.__mro__ + and match not in cls.__mro__ + and not issubclass(match, t)): + raise RuntimeError("Ambiguous dispatch: {} or {}".format( + match, t)) + break + if t in registry: + match = t + return registry.get(match) + +def singledispatch(func): + """Single-dispatch generic function decorator. + + Transforms a function into a generic function, which can have different + behaviours depending upon the type of its first argument. The decorated + function acts as the default implementation, and additional + implementations can be registered using the register() attribute of the + generic function. 
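+
+    Illustrative example (an editorial addition; the names are invented):
+
+    >>> @singledispatch
+    ... def fun(arg):
+    ...     return 'default'
+    >>> @fun.register(int)
+    ... def _(arg):
+    ...     return 'integer'
+    >>> fun(1)
+    'integer'
+    >>> fun('hello')
+    'default'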
+
+    """
+    registry = {}
+    dispatch_cache = WeakKeyDictionary()
+    cache_token = None
+
+    def dispatch(cls):
+        """generic_func.dispatch(cls) -> <function implementation>
+
+        Runs the dispatch algorithm to return the best available implementation
+        for the given *cls* registered on *generic_func*.
+
+        """
+        nonlocal cache_token
+        if cache_token is not None:
+            current_token = get_cache_token()
+            if cache_token != current_token:
+                dispatch_cache.clear()
+                cache_token = current_token
+        try:
+            impl = dispatch_cache[cls]
+        except KeyError:
+            try:
+                impl = registry[cls]
+            except KeyError:
+                impl = _find_impl(cls, registry)
+            dispatch_cache[cls] = impl
+        return impl
+
+    def register(cls, func=None):
+        """generic_func.register(cls, func) -> func
+
+        Registers a new implementation for the given *cls* on a *generic_func*.
+
+        """
+        nonlocal cache_token
+        if func is None:
+            return lambda f: register(cls, f)
+        registry[cls] = func
+        if cache_token is None and hasattr(cls, '__abstractmethods__'):
+            cache_token = get_cache_token()
+        dispatch_cache.clear()
+        return func
+
+    def wrapper(*args, **kw):
+        return dispatch(args[0].__class__)(*args, **kw)
+
+    registry[object] = func
+    wrapper.register = register
+    wrapper.dispatch = dispatch
+    wrapper.registry = MappingProxyType(registry)
+    wrapper._clear_cache = dispatch_cache.clear
+    update_wrapper(wrapper, func)
+    return wrapper
diff --git a/v1/flask/lib/python3.4/genericpath.py b/v1/flask/lib/python3.4/genericpath.py
deleted file mode 120000
index e6399ed..0000000
--- a/v1/flask/lib/python3.4/genericpath.py
+++ /dev/null
@@ -1 +0,0 @@
-/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/genericpath.py
\ No newline at end of file
diff --git a/v1/flask/lib/python3.4/genericpath.py b/v1/flask/lib/python3.4/genericpath.py
new file mode 100644
index 0000000..ca4a510
--- /dev/null
+++ b/v1/flask/lib/python3.4/genericpath.py
@@ -0,0 +1,132 @@
+"""
+Path operations common to more than one OS
+Do not use directly.  The OS specific modules import the appropriate
+functions from this module themselves.
+"""
+import os
+import stat
+
+__all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime',
+           'getsize', 'isdir', 'isfile', 'samefile', 'sameopenfile',
+           'samestat']
+
+
+# Does a path exist?
+# This is false for dangling symbolic links on systems that support them.
+def exists(path):
+    """Test whether a path exists.  Returns False for broken symbolic links"""
+    try:
+        os.stat(path)
+    except OSError:
+        return False
+    return True
+
+
+# This follows symbolic links, so both islink() and isdir() can be true
+# for the same path on systems that support symlinks
+def isfile(path):
+    """Test whether a path is a regular file"""
+    try:
+        st = os.stat(path)
+    except OSError:
+        return False
+    return stat.S_ISREG(st.st_mode)
+
+
+# Is a path a directory?
+# This follows symbolic links, so both islink() and isdir() +# can be true for the same path on systems that support symlinks +def isdir(s): + """Return true if the pathname refers to an existing directory.""" + try: + st = os.stat(s) + except OSError: + return False + return stat.S_ISDIR(st.st_mode) + + +def getsize(filename): + """Return the size of a file, reported by os.stat().""" + return os.stat(filename).st_size + + +def getmtime(filename): + """Return the last modification time of a file, reported by os.stat().""" + return os.stat(filename).st_mtime + + +def getatime(filename): + """Return the last access time of a file, reported by os.stat().""" + return os.stat(filename).st_atime + + +def getctime(filename): + """Return the metadata change time of a file, reported by os.stat().""" + return os.stat(filename).st_ctime + + +# Return the longest prefix of all list elements. +def commonprefix(m): + "Given a list of pathnames, returns the longest common leading component" + if not m: return '' + s1 = min(m) + s2 = max(m) + for i, c in enumerate(s1): + if c != s2[i]: + return s1[:i] + return s1 + +# Are two stat buffers (obtained from stat, fstat or lstat) +# describing the same file? +def samestat(s1, s2): + """Test whether two stat buffers reference the same file""" + return (s1.st_ino == s2.st_ino and + s1.st_dev == s2.st_dev) + + +# Are two filenames really pointing to the same file? +def samefile(f1, f2): + """Test whether two pathnames reference the same actual file""" + s1 = os.stat(f1) + s2 = os.stat(f2) + return samestat(s1, s2) + + +# Are two open files really referencing the same file? +# (Not necessarily the same file descriptor!) +def sameopenfile(fp1, fp2): + """Test whether two open file objects reference the same file""" + s1 = os.fstat(fp1) + s2 = os.fstat(fp2) + return samestat(s1, s2) + + +# Split a path in root and extension. +# The extension is everything starting at the last dot in the last +# pathname component; the root is everything before that. +# It is always true that root + ext == p. + +# Generic implementation of splitext, to be parametrized with +# the separators +def _splitext(p, sep, altsep, extsep): + """Split the extension from a pathname. + + Extension is everything from the last dot to the end, ignoring + leading dots. Returns "(root, ext)"; ext may be empty.""" + # NOTE: This code must work for text and bytes strings. + + sepIndex = p.rfind(sep) + if altsep: + altsepIndex = p.rfind(altsep) + sepIndex = max(sepIndex, altsepIndex) + + dotIndex = p.rfind(extsep) + if dotIndex > sepIndex: + # skip all leading dots + filenameIndex = sepIndex + 1 + while filenameIndex < dotIndex: + if p[filenameIndex:filenameIndex+1] != extsep: + return p[:dotIndex], p[dotIndex:] + filenameIndex += 1 + + return p, p[:0] diff --git a/v1/flask/lib/python3.4/hashlib.py b/v1/flask/lib/python3.4/hashlib.py deleted file mode 120000 index 45c7e77..0000000 --- a/v1/flask/lib/python3.4/hashlib.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/hashlib.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/hashlib.py b/v1/flask/lib/python3.4/hashlib.py new file mode 100644 index 0000000..316cece --- /dev/null +++ b/v1/flask/lib/python3.4/hashlib.py @@ -0,0 +1,217 @@ +#. Copyright (C) 2005-2010 Gregory P. Smith (greg@krypto.org) +# Licensed to PSF under a Contributor Agreement. +# + +__doc__ = """hashlib module - A common interface to many hash functions. 
+ +new(name, data=b'') - returns a new hash object implementing the + given hash function; initializing the hash + using the given binary data. + +Named constructor functions are also available, these are faster +than using new(name): + +md5(), sha1(), sha224(), sha256(), sha384(), and sha512() + +More algorithms may be available on your platform but the above are guaranteed +to exist. See the algorithms_guaranteed and algorithms_available attributes +to find out what algorithm names can be passed to new(). + +NOTE: If you want the adler32 or crc32 hash functions they are available in +the zlib module. + +Choose your hash function wisely. Some have known collision weaknesses. +sha384 and sha512 will be slow on 32 bit platforms. + +Hash objects have these methods: + - update(arg): Update the hash object with the bytes in arg. Repeated calls + are equivalent to a single call with the concatenation of all + the arguments. + - digest(): Return the digest of the bytes passed to the update() method + so far. + - hexdigest(): Like digest() except the digest is returned as a unicode + object of double length, containing only hexadecimal digits. + - copy(): Return a copy (clone) of the hash object. This can be used to + efficiently compute the digests of strings that share a common + initial substring. + +For example, to obtain the digest of the string 'Nobody inspects the +spammish repetition': + + >>> import hashlib + >>> m = hashlib.md5() + >>> m.update(b"Nobody inspects") + >>> m.update(b" the spammish repetition") + >>> m.digest() + b'\\xbbd\\x9c\\x83\\xdd\\x1e\\xa5\\xc9\\xd9\\xde\\xc9\\xa1\\x8d\\xf0\\xff\\xe9' + +More condensed: + + >>> hashlib.sha224(b"Nobody inspects the spammish repetition").hexdigest() + 'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2' + +""" + +# This tuple and __get_builtin_constructor() must be modified if a new +# always available algorithm is added. +__always_supported = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512') + +algorithms_guaranteed = set(__always_supported) +algorithms_available = set(__always_supported) + +__all__ = __always_supported + ('new', 'algorithms_guaranteed', + 'algorithms_available', 'pbkdf2_hmac') + + +__builtin_constructor_cache = {} + +def __get_builtin_constructor(name): + cache = __builtin_constructor_cache + constructor = cache.get(name) + if constructor is not None: + return constructor + try: + if name in ('SHA1', 'sha1'): + import _sha1 + cache['SHA1'] = cache['sha1'] = _sha1.sha1 + elif name in ('MD5', 'md5'): + import _md5 + cache['MD5'] = cache['md5'] = _md5.md5 + elif name in ('SHA256', 'sha256', 'SHA224', 'sha224'): + import _sha256 + cache['SHA224'] = cache['sha224'] = _sha256.sha224 + cache['SHA256'] = cache['sha256'] = _sha256.sha256 + elif name in ('SHA512', 'sha512', 'SHA384', 'sha384'): + import _sha512 + cache['SHA384'] = cache['sha384'] = _sha512.sha384 + cache['SHA512'] = cache['sha512'] = _sha512.sha512 + except ImportError: + pass # no extension module, this hash is unsupported. + + constructor = cache.get(name) + if constructor is not None: + return constructor + + raise ValueError('unsupported hash type ' + name) + + +def __get_openssl_constructor(name): + try: + f = getattr(_hashlib, 'openssl_' + name) + # Allow the C module to raise ValueError. The function will be + # defined but the hash not actually available thanks to OpenSSL. 
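+        # (Editorial note: the bare call below probes the algorithm once, at
+        # constructor-binding time; a ValueError falls through to the builtin
+        # fallback in the except clause.)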
+ f() + # Use the C function directly (very fast) + return f + except (AttributeError, ValueError): + return __get_builtin_constructor(name) + + +def __py_new(name, data=b''): + """new(name, data=b'') - Return a new hashing object using the named algorithm; + optionally initialized with data (which must be bytes). + """ + return __get_builtin_constructor(name)(data) + + +def __hash_new(name, data=b''): + """new(name, data=b'') - Return a new hashing object using the named algorithm; + optionally initialized with data (which must be bytes). + """ + try: + return _hashlib.new(name, data) + except ValueError: + # If the _hashlib module (OpenSSL) doesn't support the named + # hash, try using our builtin implementations. + # This allows for SHA224/256 and SHA384/512 support even though + # the OpenSSL library prior to 0.9.8 doesn't provide them. + return __get_builtin_constructor(name)(data) + + +try: + import _hashlib + new = __hash_new + __get_hash = __get_openssl_constructor + algorithms_available = algorithms_available.union( + _hashlib.openssl_md_meth_names) +except ImportError: + new = __py_new + __get_hash = __get_builtin_constructor + +try: + # OpenSSL's PKCS5_PBKDF2_HMAC requires OpenSSL 1.0+ with HMAC and SHA + from _hashlib import pbkdf2_hmac +except ImportError: + _trans_5C = bytes((x ^ 0x5C) for x in range(256)) + _trans_36 = bytes((x ^ 0x36) for x in range(256)) + + def pbkdf2_hmac(hash_name, password, salt, iterations, dklen=None): + """Password based key derivation function 2 (PKCS #5 v2.0) + + This Python implementations based on the hmac module about as fast + as OpenSSL's PKCS5_PBKDF2_HMAC for short passwords and much faster + for long passwords. + """ + if not isinstance(hash_name, str): + raise TypeError(hash_name) + + if not isinstance(password, (bytes, bytearray)): + password = bytes(memoryview(password)) + if not isinstance(salt, (bytes, bytearray)): + salt = bytes(memoryview(salt)) + + # Fast inline HMAC implementation + inner = new(hash_name) + outer = new(hash_name) + blocksize = getattr(inner, 'block_size', 64) + if len(password) > blocksize: + password = new(hash_name, password).digest() + password = password + b'\x00' * (blocksize - len(password)) + inner.update(password.translate(_trans_36)) + outer.update(password.translate(_trans_5C)) + + def prf(msg, inner=inner, outer=outer): + # PBKDF2_HMAC uses the password as key. We can re-use the same + # digest objects and just update copies to skip initialization. + icpy = inner.copy() + ocpy = outer.copy() + icpy.update(msg) + ocpy.update(icpy.digest()) + return ocpy.digest() + + if iterations < 1: + raise ValueError(iterations) + if dklen is None: + dklen = outer.digest_size + if dklen < 1: + raise ValueError(dklen) + + dkey = b'' + loop = 1 + from_bytes = int.from_bytes + while len(dkey) < dklen: + prev = prf(salt + loop.to_bytes(4, 'big')) + # endianess doesn't matter here as long to / from use the same + rkey = int.from_bytes(prev, 'big') + for i in range(iterations - 1): + prev = prf(prev) + # rkey = rkey ^ prev + rkey ^= from_bytes(prev, 'big') + loop += 1 + dkey += rkey.to_bytes(inner.digest_size, 'big') + + return dkey[:dklen] + + +for __func_name in __always_supported: + # try them all, some may not work due to the OpenSSL + # version not supporting that algorithm. 
+ try: + globals()[__func_name] = __get_hash(__func_name) + except ValueError: + import logging + logging.exception('code for hash %s was not found.', __func_name) + +# Cleanup locals() +del __always_supported, __func_name, __get_hash +del __py_new, __hash_new, __get_openssl_constructor diff --git a/v1/flask/lib/python3.4/heapq.py b/v1/flask/lib/python3.4/heapq.py deleted file mode 120000 index 4ff7352..0000000 --- a/v1/flask/lib/python3.4/heapq.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/heapq.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/heapq.py b/v1/flask/lib/python3.4/heapq.py new file mode 100644 index 0000000..d615239 --- /dev/null +++ b/v1/flask/lib/python3.4/heapq.py @@ -0,0 +1,476 @@ +"""Heap queue algorithm (a.k.a. priority queue). + +Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for +all k, counting elements from 0. For the sake of comparison, +non-existing elements are considered to be infinite. The interesting +property of a heap is that a[0] is always its smallest element. + +Usage: + +heap = [] # creates an empty heap +heappush(heap, item) # pushes a new item on the heap +item = heappop(heap) # pops the smallest item from the heap +item = heap[0] # smallest item on the heap without popping it +heapify(x) # transforms list into a heap, in-place, in linear time +item = heapreplace(heap, item) # pops and returns smallest item, and adds + # new item; the heap size is unchanged + +Our API differs from textbook heap algorithms as follows: + +- We use 0-based indexing. This makes the relationship between the + index for a node and the indexes for its children slightly less + obvious, but is more suitable since Python uses 0-based indexing. + +- Our heappop() method returns the smallest item, not the largest. + +These two make it possible to view the heap as a regular Python list +without surprises: heap[0] is the smallest item, and heap.sort() +maintains the heap invariant! +""" + +# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger + +__about__ = """Heap queues + +[explanation by François Pinard] + +Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for +all k, counting elements from 0. For the sake of comparison, +non-existing elements are considered to be infinite. The interesting +property of a heap is that a[0] is always its smallest element. + +The strange invariant above is meant to be an efficient memory +representation for a tournament. The numbers below are `k', not a[k]: + + 0 + + 1 2 + + 3 4 5 6 + + 7 8 9 10 11 12 13 14 + + 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 + + +In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In +an usual binary tournament we see in sports, each cell is the winner +over the two cells it tops, and we can trace the winner down the tree +to see all opponents s/he had. However, in many computer applications +of such tournaments, we do not need to trace the history of a winner. +To be more memory efficient, when a winner is promoted, we try to +replace it by something else at a lower level, and the rule becomes +that a cell and the two cells it tops contain three different items, +but the top cell "wins" over the two topped cells. + +If this heap invariant is protected at all time, index 0 is clearly +the overall winner. 
The simplest algorithmic way to remove it and +find the "next" winner is to move some loser (let's say cell 30 in the +diagram above) into the 0 position, and then percolate this new 0 down +the tree, exchanging values, until the invariant is re-established. +This is clearly logarithmic on the total number of items in the tree. +By iterating over all items, you get an O(n ln n) sort. + +A nice feature of this sort is that you can efficiently insert new +items while the sort is going on, provided that the inserted items are +not "better" than the last 0'th element you extracted. This is +especially useful in simulation contexts, where the tree holds all +incoming events, and the "win" condition means the smallest scheduled +time. When an event schedule other events for execution, they are +scheduled into the future, so they can easily go into the heap. So, a +heap is a good structure for implementing schedulers (this is what I +used for my MIDI sequencer :-). + +Various structures for implementing schedulers have been extensively +studied, and heaps are good for this, as they are reasonably speedy, +the speed is almost constant, and the worst case is not much different +than the average case. However, there are other representations which +are more efficient overall, yet the worst cases might be terrible. + +Heaps are also very useful in big disk sorts. You most probably all +know that a big sort implies producing "runs" (which are pre-sorted +sequences, which size is usually related to the amount of CPU memory), +followed by a merging passes for these runs, which merging is often +very cleverly organised[1]. It is very important that the initial +sort produces the longest runs possible. Tournaments are a good way +to that. If, using all the memory available to hold a tournament, you +replace and percolate items that happen to fit the current run, you'll +produce runs which are twice the size of the memory for random input, +and much better for input fuzzily ordered. + +Moreover, if you output the 0'th item on disk and get an input which +may not fit in the current tournament (because the value "wins" over +the last output value), it cannot fit in the heap, so the size of the +heap decreases. The freed memory could be cleverly reused immediately +for progressively building a second heap, which grows at exactly the +same rate the first heap is melting. When the first heap completely +vanishes, you switch heaps and start a new run. Clever and quite +effective! + +In a word, heaps are useful memory structures to know. I use them in +a few applications, and I think it is good to keep a `heap' module +around. :-) + +-------------------- +[1] The disk balancing algorithms which are current, nowadays, are +more annoying than clever, and this is a consequence of the seeking +capabilities of the disks. On devices which cannot seek, like big +tape drives, the story was quite different, and one had to be very +clever to ensure (far in advance) that each tape movement will be the +most effective possible (that is, will best participate at +"progressing" the merge). Some tapes were even able to read +backwards, and this was also used to avoid the rewinding time. +Believe me, real good tape sorts were quite spectacular to watch! +From all times, sorting has always been a Great Art! 
:-) +""" + +__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge', + 'nlargest', 'nsmallest', 'heappushpop'] + +from itertools import islice, count, tee, chain + +def heappush(heap, item): + """Push item onto heap, maintaining the heap invariant.""" + heap.append(item) + _siftdown(heap, 0, len(heap)-1) + +def heappop(heap): + """Pop the smallest item off the heap, maintaining the heap invariant.""" + lastelt = heap.pop() # raises appropriate IndexError if heap is empty + if heap: + returnitem = heap[0] + heap[0] = lastelt + _siftup(heap, 0) + else: + returnitem = lastelt + return returnitem + +def heapreplace(heap, item): + """Pop and return the current smallest value, and add the new item. + + This is more efficient than heappop() followed by heappush(), and can be + more appropriate when using a fixed-size heap. Note that the value + returned may be larger than item! That constrains reasonable uses of + this routine unless written as part of a conditional replacement: + + if item > heap[0]: + item = heapreplace(heap, item) + """ + returnitem = heap[0] # raises appropriate IndexError if heap is empty + heap[0] = item + _siftup(heap, 0) + return returnitem + +def heappushpop(heap, item): + """Fast version of a heappush followed by a heappop.""" + if heap and heap[0] < item: + item, heap[0] = heap[0], item + _siftup(heap, 0) + return item + +def heapify(x): + """Transform list into a heap, in-place, in O(len(x)) time.""" + n = len(x) + # Transform bottom-up. The largest index there's any point to looking at + # is the largest with a child index in-range, so must have 2*i + 1 < n, + # or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so + # j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is + # (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1. + for i in reversed(range(n//2)): + _siftup(x, i) + +def _heappushpop_max(heap, item): + """Maxheap version of a heappush followed by a heappop.""" + if heap and item < heap[0]: + item, heap[0] = heap[0], item + _siftup_max(heap, 0) + return item + +def _heapify_max(x): + """Transform list into a maxheap, in-place, in O(len(x)) time.""" + n = len(x) + for i in reversed(range(n//2)): + _siftup_max(x, i) + +def nlargest(n, iterable): + """Find the n largest elements in a dataset. + + Equivalent to: sorted(iterable, reverse=True)[:n] + """ + if n < 0: + return [] + it = iter(iterable) + result = list(islice(it, n)) + if not result: + return result + heapify(result) + _heappushpop = heappushpop + for elem in it: + _heappushpop(result, elem) + result.sort(reverse=True) + return result + +def nsmallest(n, iterable): + """Find the n smallest elements in a dataset. + + Equivalent to: sorted(iterable)[:n] + """ + if n < 0: + return [] + it = iter(iterable) + result = list(islice(it, n)) + if not result: + return result + _heapify_max(result) + _heappushpop = _heappushpop_max + for elem in it: + _heappushpop(result, elem) + result.sort() + return result + +# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos +# is the index of a leaf with a possibly out-of-order value. Restore the +# heap invariant. +def _siftdown(heap, startpos, pos): + newitem = heap[pos] + # Follow the path to the root, moving parents down until finding a place + # newitem fits. 
+ while pos > startpos: + parentpos = (pos - 1) >> 1 + parent = heap[parentpos] + if newitem < parent: + heap[pos] = parent + pos = parentpos + continue + break + heap[pos] = newitem + +# The child indices of heap index pos are already heaps, and we want to make +# a heap at index pos too. We do this by bubbling the smaller child of +# pos up (and so on with that child's children, etc) until hitting a leaf, +# then using _siftdown to move the oddball originally at index pos into place. +# +# We *could* break out of the loop as soon as we find a pos where newitem <= +# both its children, but turns out that's not a good idea, and despite that +# many books write the algorithm that way. During a heap pop, the last array +# element is sifted in, and that tends to be large, so that comparing it +# against values starting from the root usually doesn't pay (= usually doesn't +# get us out of the loop early). See Knuth, Volume 3, where this is +# explained and quantified in an exercise. +# +# Cutting the # of comparisons is important, since these routines have no +# way to extract "the priority" from an array element, so that intelligence +# is likely to be hiding in custom comparison methods, or in array elements +# storing (priority, record) tuples. Comparisons are thus potentially +# expensive. +# +# On random arrays of length 1000, making this change cut the number of +# comparisons made by heapify() a little, and those made by exhaustive +# heappop() a lot, in accord with theory. Here are typical results from 3 +# runs (3 just to demonstrate how small the variance is): +# +# Compares needed by heapify Compares needed by 1000 heappops +# -------------------------- -------------------------------- +# 1837 cut to 1663 14996 cut to 8680 +# 1855 cut to 1659 14966 cut to 8678 +# 1847 cut to 1660 15024 cut to 8703 +# +# Building the heap by using heappush() 1000 times instead required +# 2198, 2148, and 2219 compares: heapify() is more efficient, when +# you can use it. +# +# The total compares needed by list.sort() on the same lists were 8627, +# 8627, and 8632 (this should be compared to the sum of heapify() and +# heappop() compares): list.sort() is (unsurprisingly!) more efficient +# for sorting. + +def _siftup(heap, pos): + endpos = len(heap) + startpos = pos + newitem = heap[pos] + # Bubble up the smaller child until hitting a leaf. + childpos = 2*pos + 1 # leftmost child position + while childpos < endpos: + # Set childpos to index of smaller child. + rightpos = childpos + 1 + if rightpos < endpos and not heap[childpos] < heap[rightpos]: + childpos = rightpos + # Move the smaller child up. + heap[pos] = heap[childpos] + pos = childpos + childpos = 2*pos + 1 + # The leaf at pos is empty now. Put newitem there, and bubble it up + # to its final resting place (by sifting its parents down). + heap[pos] = newitem + _siftdown(heap, startpos, pos) + +def _siftdown_max(heap, startpos, pos): + 'Maxheap variant of _siftdown' + newitem = heap[pos] + # Follow the path to the root, moving parents down until finding a place + # newitem fits. + while pos > startpos: + parentpos = (pos - 1) >> 1 + parent = heap[parentpos] + if parent < newitem: + heap[pos] = parent + pos = parentpos + continue + break + heap[pos] = newitem + +def _siftup_max(heap, pos): + 'Maxheap variant of _siftup' + endpos = len(heap) + startpos = pos + newitem = heap[pos] + # Bubble up the larger child until hitting a leaf. 
+ childpos = 2*pos + 1 # leftmost child position + while childpos < endpos: + # Set childpos to index of larger child. + rightpos = childpos + 1 + if rightpos < endpos and not heap[rightpos] < heap[childpos]: + childpos = rightpos + # Move the larger child up. + heap[pos] = heap[childpos] + pos = childpos + childpos = 2*pos + 1 + # The leaf at pos is empty now. Put newitem there, and bubble it up + # to its final resting place (by sifting its parents down). + heap[pos] = newitem + _siftdown_max(heap, startpos, pos) + +# If available, use C implementation +try: + from _heapq import * +except ImportError: + pass + +def merge(*iterables): + '''Merge multiple sorted inputs into a single sorted output. + + Similar to sorted(itertools.chain(*iterables)) but returns a generator, + does not pull the data into memory all at once, and assumes that each of + the input streams is already sorted (smallest to largest). + + >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25])) + [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25] + + ''' + _heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration + _len = len + + h = [] + h_append = h.append + for itnum, it in enumerate(map(iter, iterables)): + try: + next = it.__next__ + h_append([next(), itnum, next]) + except _StopIteration: + pass + heapify(h) + + while _len(h) > 1: + try: + while True: + v, itnum, next = s = h[0] + yield v + s[0] = next() # raises StopIteration when exhausted + _heapreplace(h, s) # restore heap condition + except _StopIteration: + _heappop(h) # remove empty iterator + if h: + # fast case when only a single iterator remains + v, itnum, next = h[0] + yield v + yield from next.__self__ + +# Extend the implementations of nsmallest and nlargest to use a key= argument +_nsmallest = nsmallest +def nsmallest(n, iterable, key=None): + """Find the n smallest elements in a dataset. + + Equivalent to: sorted(iterable, key=key)[:n] + """ + # Short-cut for n==1 is to use min() when len(iterable)>0 + if n == 1: + it = iter(iterable) + head = list(islice(it, 1)) + if not head: + return [] + if key is None: + return [min(chain(head, it))] + return [min(chain(head, it), key=key)] + + # When n>=size, it's faster to use sorted() + try: + size = len(iterable) + except (TypeError, AttributeError): + pass + else: + if n >= size: + return sorted(iterable, key=key)[:n] + + # When key is none, use simpler decoration + if key is None: + it = zip(iterable, count()) # decorate + result = _nsmallest(n, it) + return [r[0] for r in result] # undecorate + + # General case, slowest method + in1, in2 = tee(iterable) + it = zip(map(key, in1), count(), in2) # decorate + result = _nsmallest(n, it) + return [r[2] for r in result] # undecorate + +_nlargest = nlargest +def nlargest(n, iterable, key=None): + """Find the n largest elements in a dataset. 
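+
+    Illustrative example (an editorial addition):
+
+    >>> nlargest(3, [1, 8, 2, 23, 7, -4, 18, 23])
+    [23, 23, 18]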
+ + Equivalent to: sorted(iterable, key=key, reverse=True)[:n] + """ + + # Short-cut for n==1 is to use max() when len(iterable)>0 + if n == 1: + it = iter(iterable) + head = list(islice(it, 1)) + if not head: + return [] + if key is None: + return [max(chain(head, it))] + return [max(chain(head, it), key=key)] + + # When n>=size, it's faster to use sorted() + try: + size = len(iterable) + except (TypeError, AttributeError): + pass + else: + if n >= size: + return sorted(iterable, key=key, reverse=True)[:n] + + # When key is none, use simpler decoration + if key is None: + it = zip(iterable, count(0,-1)) # decorate + result = _nlargest(n, it) + return [r[0] for r in result] # undecorate + + # General case, slowest method + in1, in2 = tee(iterable) + it = zip(map(key, in1), count(0,-1), in2) # decorate + result = _nlargest(n, it) + return [r[2] for r in result] # undecorate + +if __name__ == "__main__": + # Simple sanity test + heap = [] + data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0] + for item in data: + heappush(heap, item) + sort = [] + while heap: + sort.append(heappop(heap)) + print(sort) + + import doctest + doctest.testmod() diff --git a/v1/flask/lib/python3.4/hmac.py b/v1/flask/lib/python3.4/hmac.py deleted file mode 120000 index bf2d90c..0000000 --- a/v1/flask/lib/python3.4/hmac.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/hmac.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/hmac.py b/v1/flask/lib/python3.4/hmac.py new file mode 100644 index 0000000..77785a2 --- /dev/null +++ b/v1/flask/lib/python3.4/hmac.py @@ -0,0 +1,144 @@ +"""HMAC (Keyed-Hashing for Message Authentication) Python module. + +Implements the HMAC algorithm as described by RFC 2104. +""" + +import warnings as _warnings +from _operator import _compare_digest as compare_digest +import hashlib as _hashlib + +trans_5C = bytes((x ^ 0x5C) for x in range(256)) +trans_36 = bytes((x ^ 0x36) for x in range(256)) + +# The size of the digests returned by HMAC depends on the underlying +# hashing module used. Use digest_size from the instance of HMAC instead. +digest_size = None + + + +class HMAC: + """RFC 2104 HMAC class. Also complies with RFC 4231. + + This supports the API for Cryptographic Hash Functions (PEP 247). + """ + blocksize = 64 # 512-bit HMAC; can be changed in subclasses. + + def __init__(self, key, msg = None, digestmod = None): + """Create a new HMAC object. + + key: key for the keyed hash object. + msg: Initial input for the hash, if provided. + digestmod: A module supporting PEP 247. *OR* + A hashlib constructor returning a new hash object. *OR* + A hash name suitable for hashlib.new(). + Defaults to hashlib.md5. + Implicit default to hashlib.md5 is deprecated and will be + removed in Python 3.6. + + Note: key and msg must be a bytes or bytearray objects. 
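+
+        Illustrative example (an editorial addition; the key is invented):
+
+        >>> import hashlib
+        >>> h = HMAC(b'key', b'message', hashlib.sha256)
+        >>> h.name
+        'hmac-sha256'
+        >>> len(h.hexdigest())
+        64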
+ """ + + if not isinstance(key, (bytes, bytearray)): + raise TypeError("key: expected bytes or bytearray, but got %r" % type(key).__name__) + + if digestmod is None: + _warnings.warn("HMAC() without an explicit digestmod argument " + "is deprecated.", PendingDeprecationWarning, 2) + digestmod = _hashlib.md5 + + if callable(digestmod): + self.digest_cons = digestmod + elif isinstance(digestmod, str): + self.digest_cons = lambda d=b'': _hashlib.new(digestmod, d) + else: + self.digest_cons = lambda d=b'': digestmod.new(d) + + self.outer = self.digest_cons() + self.inner = self.digest_cons() + self.digest_size = self.inner.digest_size + + if hasattr(self.inner, 'block_size'): + blocksize = self.inner.block_size + if blocksize < 16: + _warnings.warn('block_size of %d seems too small; using our ' + 'default of %d.' % (blocksize, self.blocksize), + RuntimeWarning, 2) + blocksize = self.blocksize + else: + _warnings.warn('No block_size attribute on given digest object; ' + 'Assuming %d.' % (self.blocksize), + RuntimeWarning, 2) + blocksize = self.blocksize + + # self.blocksize is the default blocksize. self.block_size is + # effective block size as well as the public API attribute. + self.block_size = blocksize + + if len(key) > blocksize: + key = self.digest_cons(key).digest() + + key = key + bytes(blocksize - len(key)) + self.outer.update(key.translate(trans_5C)) + self.inner.update(key.translate(trans_36)) + if msg is not None: + self.update(msg) + + @property + def name(self): + return "hmac-" + self.inner.name + + def update(self, msg): + """Update this hashing object with the string msg. + """ + self.inner.update(msg) + + def copy(self): + """Return a separate copy of this hashing object. + + An update to this copy won't affect the original object. + """ + # Call __new__ directly to avoid the expensive __init__. + other = self.__class__.__new__(self.__class__) + other.digest_cons = self.digest_cons + other.digest_size = self.digest_size + other.inner = self.inner.copy() + other.outer = self.outer.copy() + return other + + def _current(self): + """Return a hash object for the current state. + + To be used only internally with digest() and hexdigest(). + """ + h = self.outer.copy() + h.update(self.inner.digest()) + return h + + def digest(self): + """Return the hash value of this hashing object. + + This returns a string containing 8-bit data. The object is + not altered in any way by this function; you can continue + updating the object after calling this function. + """ + h = self._current() + return h.digest() + + def hexdigest(self): + """Like digest(), but returns a string of hexadecimal digits instead. + """ + h = self._current() + return h.hexdigest() + +def new(key, msg = None, digestmod = None): + """Create a new hashing object and return it. + + key: The starting key for the hash. + msg: if available, will immediately be hashed into the object's starting + state. + + You can now feed arbitrary strings into the object using its update() + method, and can ask for the hash value at any time by calling its digest() + method. 
+ """ + return HMAC(key, msg, digestmod) diff --git a/v1/flask/lib/python3.4/imp.py b/v1/flask/lib/python3.4/imp.py deleted file mode 120000 index d89cb1b..0000000 --- a/v1/flask/lib/python3.4/imp.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/imp.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/imp.py b/v1/flask/lib/python3.4/imp.py new file mode 100644 index 0000000..c922e92 --- /dev/null +++ b/v1/flask/lib/python3.4/imp.py @@ -0,0 +1,315 @@ +"""This module provides the components needed to build your own __import__ +function. Undocumented functions are obsolete. + +In most cases it is preferred you consider using the importlib module's +functionality over this module. + +""" +# (Probably) need to stay in _imp +from _imp import (lock_held, acquire_lock, release_lock, + get_frozen_object, is_frozen_package, + init_builtin, init_frozen, is_builtin, is_frozen, + _fix_co_filename) +try: + from _imp import load_dynamic +except ImportError: + # Platform doesn't support dynamic loading. + load_dynamic = None + +from importlib._bootstrap import SourcelessFileLoader, _ERR_MSG, _SpecMethods + +from importlib import machinery +from importlib import util +import importlib +import os +import sys +import tokenize +import types +import warnings + +warnings.warn("the imp module is deprecated in favour of importlib; " + "see the module's documentation for alternative uses", + PendingDeprecationWarning) + +# DEPRECATED +SEARCH_ERROR = 0 +PY_SOURCE = 1 +PY_COMPILED = 2 +C_EXTENSION = 3 +PY_RESOURCE = 4 +PKG_DIRECTORY = 5 +C_BUILTIN = 6 +PY_FROZEN = 7 +PY_CODERESOURCE = 8 +IMP_HOOK = 9 + + +def new_module(name): + """**DEPRECATED** + + Create a new module. + + The module is not entered into sys.modules. + + """ + return types.ModuleType(name) + + +def get_magic(): + """**DEPRECATED** + + Return the magic number for .pyc or .pyo files. + """ + return util.MAGIC_NUMBER + + +def get_tag(): + """Return the magic tag for .pyc or .pyo files.""" + return sys.implementation.cache_tag + + +def cache_from_source(path, debug_override=None): + """**DEPRECATED** + + Given the path to a .py file, return the path to its .pyc/.pyo file. + + The .py file does not need to exist; this simply returns the path to the + .pyc/.pyo file calculated as if the .py file were imported. The extension + will be .pyc unless sys.flags.optimize is non-zero, then it will be .pyo. + + If debug_override is not None, then it must be a boolean and is used in + place of sys.flags.optimize. + + If sys.implementation.cache_tag is None then NotImplementedError is raised. + + """ + return util.cache_from_source(path, debug_override) + + +def source_from_cache(path): + """**DEPRECATED** + + Given the path to a .pyc./.pyo file, return the path to its .py file. + + The .pyc/.pyo file does not need to exist; this simply returns the path to + the .py file calculated to correspond to the .pyc/.pyo file. If path does + not conform to PEP 3147 format, ValueError will be raised. If + sys.implementation.cache_tag is None then NotImplementedError is raised. + + """ + return util.source_from_cache(path) + + +def get_suffixes(): + """**DEPRECATED**""" + extensions = [(s, 'rb', C_EXTENSION) for s in machinery.EXTENSION_SUFFIXES] + source = [(s, 'r', PY_SOURCE) for s in machinery.SOURCE_SUFFIXES] + bytecode = [(s, 'rb', PY_COMPILED) for s in machinery.BYTECODE_SUFFIXES] + + return extensions + source + bytecode + + +class NullImporter: + + """**DEPRECATED** + + Null import object. 
+ + """ + + def __init__(self, path): + if path == '': + raise ImportError('empty pathname', path='') + elif os.path.isdir(path): + raise ImportError('existing directory', path=path) + + def find_module(self, fullname): + """Always returns None.""" + return None + + +class _HackedGetData: + + """Compatibility support for 'file' arguments of various load_*() + functions.""" + + def __init__(self, fullname, path, file=None): + super().__init__(fullname, path) + self.file = file + + def get_data(self, path): + """Gross hack to contort loader to deal w/ load_*()'s bad API.""" + if self.file and path == self.path: + if not self.file.closed: + file = self.file + else: + self.file = file = open(self.path, 'r') + + with file: + # Technically should be returning bytes, but + # SourceLoader.get_code() just passed what is returned to + # compile() which can handle str. And converting to bytes would + # require figuring out the encoding to decode to and + # tokenize.detect_encoding() only accepts bytes. + return file.read() + else: + return super().get_data(path) + + +class _LoadSourceCompatibility(_HackedGetData, machinery.SourceFileLoader): + + """Compatibility support for implementing load_source().""" + + +def load_source(name, pathname, file=None): + loader = _LoadSourceCompatibility(name, pathname, file) + spec = util.spec_from_file_location(name, pathname, loader=loader) + methods = _SpecMethods(spec) + if name in sys.modules: + module = methods.exec(sys.modules[name]) + else: + module = methods.load() + # To allow reloading to potentially work, use a non-hacked loader which + # won't rely on a now-closed file object. + module.__loader__ = machinery.SourceFileLoader(name, pathname) + module.__spec__.loader = module.__loader__ + return module + + +class _LoadCompiledCompatibility(_HackedGetData, SourcelessFileLoader): + + """Compatibility support for implementing load_compiled().""" + + +def load_compiled(name, pathname, file=None): + """**DEPRECATED**""" + loader = _LoadCompiledCompatibility(name, pathname, file) + spec = util.spec_from_file_location(name, pathname, loader=loader) + methods = _SpecMethods(spec) + if name in sys.modules: + module = methods.exec(sys.modules[name]) + else: + module = methods.load() + # To allow reloading to potentially work, use a non-hacked loader which + # won't rely on a now-closed file object. + module.__loader__ = SourcelessFileLoader(name, pathname) + module.__spec__.loader = module.__loader__ + return module + + +def load_package(name, path): + """**DEPRECATED**""" + if os.path.isdir(path): + extensions = (machinery.SOURCE_SUFFIXES[:] + + machinery.BYTECODE_SUFFIXES[:]) + for extension in extensions: + path = os.path.join(path, '__init__'+extension) + if os.path.exists(path): + break + else: + raise ValueError('{!r} is not a package'.format(path)) + spec = util.spec_from_file_location(name, path, + submodule_search_locations=[]) + methods = _SpecMethods(spec) + if name in sys.modules: + return methods.exec(sys.modules[name]) + else: + return methods.load() + + +def load_module(name, file, filename, details): + """**DEPRECATED** + + Load a module, given information returned by find_module(). + + The module name must include the full package name, if any. 
+ + """ + suffix, mode, type_ = details + if mode and (not mode.startswith(('r', 'U')) or '+' in mode): + raise ValueError('invalid file open mode {!r}'.format(mode)) + elif file is None and type_ in {PY_SOURCE, PY_COMPILED}: + msg = 'file object required for import (type code {})'.format(type_) + raise ValueError(msg) + elif type_ == PY_SOURCE: + return load_source(name, filename, file) + elif type_ == PY_COMPILED: + return load_compiled(name, filename, file) + elif type_ == C_EXTENSION and load_dynamic is not None: + if file is None: + with open(filename, 'rb') as opened_file: + return load_dynamic(name, filename, opened_file) + else: + return load_dynamic(name, filename, file) + elif type_ == PKG_DIRECTORY: + return load_package(name, filename) + elif type_ == C_BUILTIN: + return init_builtin(name) + elif type_ == PY_FROZEN: + return init_frozen(name) + else: + msg = "Don't know how to import {} (type code {})".format(name, type_) + raise ImportError(msg, name=name) + + +def find_module(name, path=None): + """**DEPRECATED** + + Search for a module. + + If path is omitted or None, search for a built-in, frozen or special + module and continue search in sys.path. The module name cannot + contain '.'; to search for a submodule of a package, pass the + submodule name and the package's __path__. + + """ + if not isinstance(name, str): + raise TypeError("'name' must be a str, not {}".format(type(name))) + elif not isinstance(path, (type(None), list)): + # Backwards-compatibility + raise RuntimeError("'list' must be None or a list, " + "not {}".format(type(name))) + + if path is None: + if is_builtin(name): + return None, None, ('', '', C_BUILTIN) + elif is_frozen(name): + return None, None, ('', '', PY_FROZEN) + else: + path = sys.path + + for entry in path: + package_directory = os.path.join(entry, name) + for suffix in ['.py', machinery.BYTECODE_SUFFIXES[0]]: + package_file_name = '__init__' + suffix + file_path = os.path.join(package_directory, package_file_name) + if os.path.isfile(file_path): + return None, package_directory, ('', '', PKG_DIRECTORY) + for suffix, mode, type_ in get_suffixes(): + file_name = name + suffix + file_path = os.path.join(entry, file_name) + if os.path.isfile(file_path): + break + else: + continue + break # Break out of outer loop when breaking out of inner loop. + else: + raise ImportError(_ERR_MSG.format(name), name=name) + + encoding = None + if 'b' not in mode: + with open(file_path, 'rb') as file: + encoding = tokenize.detect_encoding(file.readline)[0] + file = open(file_path, mode, encoding=encoding) + return file, file_path, (suffix, mode, type_) + + +def reload(module): + """**DEPRECATED** + + Reload the module and return it. + + The module must have been successfully imported before. + + """ + return importlib.reload(module) diff --git a/v1/flask/lib/python3.4/io.py b/v1/flask/lib/python3.4/io.py deleted file mode 120000 index 4211375..0000000 --- a/v1/flask/lib/python3.4/io.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/io.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/io.py b/v1/flask/lib/python3.4/io.py new file mode 100644 index 0000000..e03db97 --- /dev/null +++ b/v1/flask/lib/python3.4/io.py @@ -0,0 +1,92 @@ +"""The io module provides the Python interfaces to stream handling. The +builtin open function is defined in this module. + +At the top of the I/O hierarchy is the abstract base class IOBase. It +defines the basic interface to a stream. 
Note, however, that there is no +separation between reading and writing to streams; implementations are +allowed to raise an OSError if they do not support a given operation. + +Extending IOBase is RawIOBase which deals simply with the reading and +writing of raw bytes to a stream. FileIO subclasses RawIOBase to provide +an interface to OS files. + +BufferedIOBase deals with buffering on a raw byte stream (RawIOBase). Its +subclasses, BufferedWriter, BufferedReader, and BufferedRWPair buffer +streams that are readable, writable, and both respectively. +BufferedRandom provides a buffered interface to random access +streams. BytesIO is a simple stream of in-memory bytes. + +Another IOBase subclass, TextIOBase, deals with the encoding and decoding +of streams into text. TextIOWrapper, which extends it, is a buffered text +interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO +is an in-memory stream for text. + +Argument names are not part of the specification, and only the arguments +of open() are intended to be used as keyword arguments. + +data: + +DEFAULT_BUFFER_SIZE + + An int containing the default buffer size used by the module's buffered + I/O classes. open() uses the file's blksize (as obtained by os.stat) if + possible. +""" +# New I/O library conforming to PEP 3116. + +__author__ = ("Guido van Rossum , " + "Mike Verdone , " + "Mark Russell , " + "Antoine Pitrou , " + "Amaury Forgeot d'Arc , " + "Benjamin Peterson ") + +__all__ = ["BlockingIOError", "open", "IOBase", "RawIOBase", "FileIO", + "BytesIO", "StringIO", "BufferedIOBase", + "BufferedReader", "BufferedWriter", "BufferedRWPair", + "BufferedRandom", "TextIOBase", "TextIOWrapper", + "UnsupportedOperation", "SEEK_SET", "SEEK_CUR", "SEEK_END"] + + +import _io +import abc + +from _io import (DEFAULT_BUFFER_SIZE, BlockingIOError, UnsupportedOperation, + open, FileIO, BytesIO, StringIO, BufferedReader, + BufferedWriter, BufferedRWPair, BufferedRandom, + IncrementalNewlineDecoder, TextIOWrapper) + +OpenWrapper = _io.open # for compatibility with _pyio + +# Pretend this exception was created here. +UnsupportedOperation.__module__ = "io" + +# for seek() +SEEK_SET = 0 +SEEK_CUR = 1 +SEEK_END = 2 + +# Declaring ABCs in C is tricky so we do it here. +# Method descriptions and default implementations are inherited from the C +# version however. +class IOBase(_io._IOBase, metaclass=abc.ABCMeta): + __doc__ = _io._IOBase.__doc__ + +class RawIOBase(_io._RawIOBase, IOBase): + __doc__ = _io._RawIOBase.__doc__ + +class BufferedIOBase(_io._BufferedIOBase, IOBase): + __doc__ = _io._BufferedIOBase.__doc__ + +class TextIOBase(_io._TextIOBase, IOBase): + __doc__ = _io._TextIOBase.__doc__ + +RawIOBase.register(FileIO) + +for klass in (BytesIO, BufferedReader, BufferedWriter, BufferedRandom, + BufferedRWPair): + BufferedIOBase.register(klass) + +for klass in (StringIO, TextIOWrapper): + TextIOBase.register(klass) +del klass diff --git a/v1/flask/lib/python3.4/keyword.py b/v1/flask/lib/python3.4/keyword.py deleted file mode 120000 index 73adc67..0000000 --- a/v1/flask/lib/python3.4/keyword.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/keyword.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/keyword.py b/v1/flask/lib/python3.4/keyword.py new file mode 100755 index 0000000..6e1e882 --- /dev/null +++ b/v1/flask/lib/python3.4/keyword.py @@ -0,0 +1,94 @@ +#! 
/usr/bin/env python3 + +"""Keywords (from "graminit.c") + +This file is automatically generated; please don't muck it up! + +To update the symbols in this file, 'cd' to the top directory of +the python source tree after building the interpreter and run: + + ./python Lib/keyword.py +""" + +__all__ = ["iskeyword", "kwlist"] + +kwlist = [ +#--start keywords-- + 'False', + 'None', + 'True', + 'and', + 'as', + 'assert', + 'break', + 'class', + 'continue', + 'def', + 'del', + 'elif', + 'else', + 'except', + 'finally', + 'for', + 'from', + 'global', + 'if', + 'import', + 'in', + 'is', + 'lambda', + 'nonlocal', + 'not', + 'or', + 'pass', + 'raise', + 'return', + 'try', + 'while', + 'with', + 'yield', +#--end keywords-- + ] + +iskeyword = frozenset(kwlist).__contains__ + +def main(): + import sys, re + + args = sys.argv[1:] + iptfile = args and args[0] or "Python/graminit.c" + if len(args) > 1: optfile = args[1] + else: optfile = "Lib/keyword.py" + + # load the output skeleton from the target, taking care to preserve its + # newline convention. + with open(optfile, newline='') as fp: + format = fp.readlines() + nl = format[0][len(format[0].strip()):] if format else '\n' + + # scan the source file for keywords + with open(iptfile) as fp: + strprog = re.compile('"([^"]+)"') + lines = [] + for line in fp: + if '{1, "' in line: + match = strprog.search(line) + if match: + lines.append(" '" + match.group(1) + "'," + nl) + lines.sort() + + # insert the lines of keywords into the skeleton + try: + start = format.index("#--start keywords--" + nl) + 1 + end = format.index("#--end keywords--" + nl) + format[start:end] = lines + except ValueError: + sys.stderr.write("target does not contain format markers\n") + sys.exit(1) + + # write the output file + with open(optfile, 'w', newline='') as fp: + fp.writelines(format) + +if __name__ == "__main__": + main() diff --git a/v1/flask/lib/python3.4/linecache.py b/v1/flask/lib/python3.4/linecache.py deleted file mode 120000 index ddb7bf4..0000000 --- a/v1/flask/lib/python3.4/linecache.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/linecache.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/linecache.py b/v1/flask/lib/python3.4/linecache.py new file mode 100644 index 0000000..884cbf4 --- /dev/null +++ b/v1/flask/lib/python3.4/linecache.py @@ -0,0 +1,138 @@ +"""Cache lines from Python source files. + +This is intended to read lines from modules imported -- hence if a filename +is not found, it will look down the module search path for a file by +that name. +""" + +import sys +import os +import tokenize + +__all__ = ["getline", "clearcache", "checkcache"] + +def getline(filename, lineno, module_globals=None): + lines = getlines(filename, module_globals) + if 1 <= lineno <= len(lines): + return lines[lineno-1] + else: + return '' + + +# The cache + +cache = {} # The cache + + +def clearcache(): + """Clear the cache entirely.""" + + global cache + cache = {} + + +def getlines(filename, module_globals=None): + """Get the lines for a Python source file from the cache. + Update the cache if it doesn't contain an entry for this file already.""" + + if filename in cache: + return cache[filename][2] + + try: + return updatecache(filename, module_globals) + except MemoryError: + clearcache() + return [] + + +def checkcache(filename=None): + """Discard cache entries that are out of date. 
+ (This is not checked upon each call!)""" + + if filename is None: + filenames = list(cache.keys()) + else: + if filename in cache: + filenames = [filename] + else: + return + + for filename in filenames: + size, mtime, lines, fullname = cache[filename] + if mtime is None: + continue # no-op for files loaded via a __loader__ + try: + stat = os.stat(fullname) + except OSError: + del cache[filename] + continue + if size != stat.st_size or mtime != stat.st_mtime: + del cache[filename] + + +def updatecache(filename, module_globals=None): + """Update a cache entry and return its list of lines. + If something's wrong, print a message, discard the cache entry, + and return an empty list.""" + + if filename in cache: + del cache[filename] + if not filename or (filename.startswith('<') and filename.endswith('>')): + return [] + + fullname = filename + try: + stat = os.stat(fullname) + except OSError: + basename = filename + + # Try for a __loader__, if available + if module_globals and '__loader__' in module_globals: + name = module_globals.get('__name__') + loader = module_globals['__loader__'] + get_source = getattr(loader, 'get_source', None) + + if name and get_source: + try: + data = get_source(name) + except (ImportError, OSError): + pass + else: + if data is None: + # No luck, the PEP302 loader cannot find the source + # for this module. + return [] + cache[filename] = ( + len(data), None, + [line+'\n' for line in data.splitlines()], fullname + ) + return cache[filename][2] + + # Try looking through the module search path, which is only useful + # when handling a relative filename. + if os.path.isabs(filename): + return [] + + for dirname in sys.path: + try: + fullname = os.path.join(dirname, basename) + except (TypeError, AttributeError): + # Not sufficiently string-like to do anything useful with. + continue + try: + stat = os.stat(fullname) + break + except OSError: + pass + else: + return [] + try: + with tokenize.open(fullname) as fp: + lines = fp.readlines() + except OSError: + return [] + if lines and not lines[-1].endswith('\n'): + lines[-1] += '\n' + size, mtime = stat.st_size, stat.st_mtime + cache[filename] = size, mtime, lines, fullname + return lines diff --git a/v1/flask/lib/python3.4/locale.py b/v1/flask/lib/python3.4/locale.py deleted file mode 120000 index 2b02bd3..0000000 --- a/v1/flask/lib/python3.4/locale.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/locale.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/locale.py b/v1/flask/lib/python3.4/locale.py new file mode 100644 index 0000000..7ff4356 --- /dev/null +++ b/v1/flask/lib/python3.4/locale.py @@ -0,0 +1,1676 @@ +""" Locale support. + + The module provides low-level access to the C lib's locale APIs + and adds high level number formatting APIs as well as a locale + aliasing engine to complement these. + + The aliasing engine includes support for many commonly used locale + names and maps them to values suitable for passing to the C lib's + setlocale() function. It also includes default encodings for all + supported locale names. + +""" + +import sys +import encodings +import encodings.aliases +import re +import collections +from builtins import str as _builtin_str +import functools + +# Try importing the _locale module. +# +# If this fails, fall back on a basic 'C' locale emulation. + +# Yuck: LC_MESSAGES is non-standard: can't tell whether it exists before +# trying the import. So __all__ is also fiddled at the end of the file. 
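+# Illustrative note on the emulation below (values taken from its code):
+# without _locale, setlocale() accepts only the 'C' locale and localeconv()
+# returns the hard-coded 'C' defaults, e.g.
+#   setlocale(LC_ALL, 'C')         -> 'C'
+#   localeconv()['decimal_point']  -> '.'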
+__all__ = ["getlocale", "getdefaultlocale", "getpreferredencoding", "Error", + "setlocale", "resetlocale", "localeconv", "strcoll", "strxfrm", + "str", "atof", "atoi", "format", "format_string", "currency", + "normalize", "LC_CTYPE", "LC_COLLATE", "LC_TIME", "LC_MONETARY", + "LC_NUMERIC", "LC_ALL", "CHAR_MAX"] + +def _strcoll(a,b): + """ strcoll(string,string) -> int. + Compares two strings according to the locale. + """ + return (a > b) - (a < b) + +def _strxfrm(s): + """ strxfrm(string) -> string. + Returns a string that behaves for cmp locale-aware. + """ + return s + +try: + + from _locale import * + +except ImportError: + + # Locale emulation + + CHAR_MAX = 127 + LC_ALL = 6 + LC_COLLATE = 3 + LC_CTYPE = 0 + LC_MESSAGES = 5 + LC_MONETARY = 4 + LC_NUMERIC = 1 + LC_TIME = 2 + Error = ValueError + + def localeconv(): + """ localeconv() -> dict. + Returns numeric and monetary locale-specific parameters. + """ + # 'C' locale default values + return {'grouping': [127], + 'currency_symbol': '', + 'n_sign_posn': 127, + 'p_cs_precedes': 127, + 'n_cs_precedes': 127, + 'mon_grouping': [], + 'n_sep_by_space': 127, + 'decimal_point': '.', + 'negative_sign': '', + 'positive_sign': '', + 'p_sep_by_space': 127, + 'int_curr_symbol': '', + 'p_sign_posn': 127, + 'thousands_sep': '', + 'mon_thousands_sep': '', + 'frac_digits': 127, + 'mon_decimal_point': '', + 'int_frac_digits': 127} + + def setlocale(category, value=None): + """ setlocale(integer,string=None) -> string. + Activates/queries locale processing. + """ + if value not in (None, '', 'C'): + raise Error('_locale emulation only supports "C" locale') + return 'C' + +# These may or may not exist in _locale, so be sure to set them. +if 'strxfrm' not in globals(): + strxfrm = _strxfrm +if 'strcoll' not in globals(): + strcoll = _strcoll + + +_localeconv = localeconv + +# With this dict, you can override some items of localeconv's return value. +# This is useful for testing purposes. 
+_override_localeconv = {} + +@functools.wraps(_localeconv) +def localeconv(): + d = _localeconv() + if _override_localeconv: + d.update(_override_localeconv) + return d + + +### Number formatting APIs + +# Author: Martin von Loewis +# improved by Georg Brandl + +# Iterate over grouping intervals +def _grouping_intervals(grouping): + last_interval = None + for interval in grouping: + # if grouping is -1, we are done + if interval == CHAR_MAX: + return + # 0: re-use last group ad infinitum + if interval == 0: + if last_interval is None: + raise ValueError("invalid grouping") + while True: + yield last_interval + yield interval + last_interval = interval + +#perform the grouping from right to left +def _group(s, monetary=False): + conv = localeconv() + thousands_sep = conv[monetary and 'mon_thousands_sep' or 'thousands_sep'] + grouping = conv[monetary and 'mon_grouping' or 'grouping'] + if not grouping: + return (s, 0) + if s[-1] == ' ': + stripped = s.rstrip() + right_spaces = s[len(stripped):] + s = stripped + else: + right_spaces = '' + left_spaces = '' + groups = [] + for interval in _grouping_intervals(grouping): + if not s or s[-1] not in "0123456789": + # only non-digit characters remain (sign, spaces) + left_spaces = s + s = '' + break + groups.append(s[-interval:]) + s = s[:-interval] + if s: + groups.append(s) + groups.reverse() + return ( + left_spaces + thousands_sep.join(groups) + right_spaces, + len(thousands_sep) * (len(groups) - 1) + ) + +# Strip a given amount of excess padding from the given string +def _strip_padding(s, amount): + lpos = 0 + while amount and s[lpos] == ' ': + lpos += 1 + amount -= 1 + rpos = len(s) - 1 + while amount and s[rpos] == ' ': + rpos -= 1 + amount -= 1 + return s[lpos:rpos+1] + +_percent_re = re.compile(r'%(?:\((?P.*?)\))?' + r'(?P[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]') + +def format(percent, value, grouping=False, monetary=False, *additional): + """Returns the locale-aware substitution of a %? specifier + (percent). + + additional is for format strings which contain one or more + '*' modifiers.""" + # this is only for one-percent-specifier strings and this should be checked + match = _percent_re.match(percent) + if not match or len(match.group())!= len(percent): + raise ValueError(("format() must be given exactly one %%char " + "format specifier, %s not valid") % repr(percent)) + return _format(percent, value, grouping, monetary, *additional) + +def _format(percent, value, grouping=False, monetary=False, *additional): + if additional: + formatted = percent % ((value,) + additional) + else: + formatted = percent % value + # floats and decimal ints need special action! + if percent[-1] in 'eEfFgG': + seps = 0 + parts = formatted.split('.') + if grouping: + parts[0], seps = _group(parts[0], monetary=monetary) + decimal_point = localeconv()[monetary and 'mon_decimal_point' + or 'decimal_point'] + formatted = decimal_point.join(parts) + if seps: + formatted = _strip_padding(formatted, seps) + elif percent[-1] in 'diu': + seps = 0 + if grouping: + formatted, seps = _group(formatted, monetary=monetary) + if seps: + formatted = _strip_padding(formatted, seps) + return formatted + +def format_string(f, val, grouping=False): + """Formats a string in the same way that the % formatting would use, + but takes the current locale into account. 
+ Grouping is applied if the third parameter is true.""" + percents = list(_percent_re.finditer(f)) + new_f = _percent_re.sub('%s', f) + + if isinstance(val, collections.Mapping): + new_val = [] + for perc in percents: + if perc.group()[-1]=='%': + new_val.append('%') + else: + new_val.append(format(perc.group(), val, grouping)) + else: + if not isinstance(val, tuple): + val = (val,) + new_val = [] + i = 0 + for perc in percents: + if perc.group()[-1]=='%': + new_val.append('%') + else: + starcount = perc.group('modifiers').count('*') + new_val.append(_format(perc.group(), + val[i], + grouping, + False, + *val[i+1:i+1+starcount])) + i += (1 + starcount) + val = tuple(new_val) + + return new_f % val + +def currency(val, symbol=True, grouping=False, international=False): + """Formats val according to the currency settings + in the current locale.""" + conv = localeconv() + + # check for illegal values + digits = conv[international and 'int_frac_digits' or 'frac_digits'] + if digits == 127: + raise ValueError("Currency formatting is not possible using " + "the 'C' locale.") + + s = format('%%.%if' % digits, abs(val), grouping, monetary=True) + # '<' and '>' are markers if the sign must be inserted between symbol and value + s = '<' + s + '>' + + if symbol: + smb = conv[international and 'int_curr_symbol' or 'currency_symbol'] + precedes = conv[val<0 and 'n_cs_precedes' or 'p_cs_precedes'] + separated = conv[val<0 and 'n_sep_by_space' or 'p_sep_by_space'] + + if precedes: + s = smb + (separated and ' ' or '') + s + else: + s = s + (separated and ' ' or '') + smb + + sign_pos = conv[val<0 and 'n_sign_posn' or 'p_sign_posn'] + sign = conv[val<0 and 'negative_sign' or 'positive_sign'] + + if sign_pos == 0: + s = '(' + s + ')' + elif sign_pos == 1: + s = sign + s + elif sign_pos == 2: + s = s + sign + elif sign_pos == 3: + s = s.replace('<', sign) + elif sign_pos == 4: + s = s.replace('>', sign) + else: + # the default if nothing specified; + # this should be the most fitting sign position + s = sign + s + + return s.replace('<', '').replace('>', '') + +def str(val): + """Convert float to integer, taking the locale into account.""" + return format("%.12g", val) + +def atof(string, func=float): + "Parses a string as a float according to the locale settings." + #First, get rid of the grouping + ts = localeconv()['thousands_sep'] + if ts: + string = string.replace(ts, '') + #next, replace the decimal point with a dot + dd = localeconv()['decimal_point'] + if dd: + string = string.replace(dd, '.') + #finally, parse the string + return func(string) + +def atoi(str): + "Converts a string to an integer according to the locale settings." + return atof(str, int) + +def _test(): + setlocale(LC_ALL, "") + #do grouping + s1 = format("%d", 123456789,1) + print(s1, "is", atoi(s1)) + #standard formatting + s1 = str(3.14) + print(s1, "is", atof(s1)) + +### Locale name aliasing engine + +# Author: Marc-Andre Lemburg, mal@lemburg.com +# Various tweaks by Fredrik Lundh + +# store away the low-level version of setlocale (it's +# overridden below) +_setlocale = setlocale + +def _replace_encoding(code, encoding): + if '.' 
in code: + langname = code[:code.index('.')] + else: + langname = code + # Convert the encoding to a C lib compatible encoding string + norm_encoding = encodings.normalize_encoding(encoding) + #print('norm encoding: %r' % norm_encoding) + norm_encoding = encodings.aliases.aliases.get(norm_encoding.lower(), + norm_encoding) + #print('aliased encoding: %r' % norm_encoding) + encoding = norm_encoding + norm_encoding = norm_encoding.lower() + if norm_encoding in locale_encoding_alias: + encoding = locale_encoding_alias[norm_encoding] + else: + norm_encoding = norm_encoding.replace('_', '') + norm_encoding = norm_encoding.replace('-', '') + if norm_encoding in locale_encoding_alias: + encoding = locale_encoding_alias[norm_encoding] + #print('found encoding %r' % encoding) + return langname + '.' + encoding + +def _append_modifier(code, modifier): + if modifier == 'euro': + if '.' not in code: + return code + '.ISO8859-15' + _, _, encoding = code.partition('.') + if encoding in ('ISO8859-15', 'UTF-8'): + return code + if encoding == 'ISO8859-1': + return _replace_encoding(code, 'ISO8859-15') + return code + '@' + modifier + +def normalize(localename): + + """ Returns a normalized locale code for the given locale + name. + + The returned locale code is formatted for use with + setlocale(). + + If normalization fails, the original name is returned + unchanged. + + If the given encoding is not known, the function defaults to + the default encoding for the locale code just like setlocale() + does. + + """ + # Normalize the locale name and extract the encoding and modifier + code = localename.lower() + if ':' in code: + # ':' is sometimes used as encoding delimiter. + code = code.replace(':', '.') + if '@' in code: + code, modifier = code.split('@', 1) + else: + modifier = '' + if '.' in code: + langname, encoding = code.split('.')[:2] + else: + langname = code + encoding = '' + + # First lookup: fullname (possibly with encoding and modifier) + lang_enc = langname + if encoding: + norm_encoding = encoding.replace('-', '') + norm_encoding = norm_encoding.replace('_', '') + lang_enc += '.' 
+ norm_encoding + lookup_name = lang_enc + if modifier: + lookup_name += '@' + modifier + code = locale_alias.get(lookup_name, None) + if code is not None: + return code + #print('first lookup failed') + + if modifier: + # Second try: fullname without modifier (possibly with encoding) + code = locale_alias.get(lang_enc, None) + if code is not None: + #print('lookup without modifier succeeded') + if '@' not in code: + return _append_modifier(code, modifier) + if code.split('@', 1)[1].lower() == modifier: + return code + #print('second lookup failed') + + if encoding: + # Third try: langname (without encoding, possibly with modifier) + lookup_name = langname + if modifier: + lookup_name += '@' + modifier + code = locale_alias.get(lookup_name, None) + if code is not None: + #print('lookup without encoding succeeded') + if '@' not in code: + return _replace_encoding(code, encoding) + code, modifier = code.split('@', 1) + return _replace_encoding(code, encoding) + '@' + modifier + + if modifier: + # Fourth try: langname (without encoding and modifier) + code = locale_alias.get(langname, None) + if code is not None: + #print('lookup without modifier and encoding succeeded') + if '@' not in code: + code = _replace_encoding(code, encoding) + return _append_modifier(code, modifier) + code, defmod = code.split('@', 1) + if defmod.lower() == modifier: + return _replace_encoding(code, encoding) + '@' + defmod + + return localename + +def _parse_localename(localename): + + """ Parses the locale code for localename and returns the + result as tuple (language code, encoding). + + The localename is normalized and passed through the locale + alias engine. A ValueError is raised in case the locale name + cannot be parsed. + + The language code corresponds to RFC 1766. code and encoding + can be None in case the values cannot be determined or are + unknown to this implementation. + + """ + code = normalize(localename) + if '@' in code: + # Deal with locale modifiers + code, modifier = code.split('@', 1) + if modifier == 'euro' and '.' not in code: + # Assume Latin-9 for @euro locales. This is bogus, + # since some systems may use other encodings for these + # locales. Also, we ignore other modifiers. + return code, 'iso-8859-15' + + if '.' in code: + return tuple(code.split('.')[:2]) + elif code == 'C': + return None, None + raise ValueError('unknown locale: %s' % localename) + +def _build_localename(localetuple): + + """ Builds a locale code from the given tuple (language code, + encoding). + + No aliasing or normalizing takes place. + + """ + try: + language, encoding = localetuple + + if language is None: + language = 'C' + if encoding is None: + return language + else: + return language + '.' + encoding + except (TypeError, ValueError): + raise TypeError('Locale must be None, a string, or an iterable of two strings -- language code, encoding.') + +def getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')): + + """ Tries to determine the default locale settings and returns + them as tuple (language code, encoding). + + According to POSIX, a program which has not called + setlocale(LC_ALL, "") runs using the portable 'C' locale. + Calling setlocale(LC_ALL, "") lets it use the default locale as + defined by the LANG variable. Since we don't want to interfere + with the current locale setting we thus emulate the behavior + in the way described above. + + To maintain compatibility with other platforms, not only the + LANG variable is tested, but a list of variables given as + envvars parameter. 
The first found to be defined will be + used. envvars defaults to the search path used in GNU gettext; + it must always contain the variable name 'LANG'. + + Except for the code 'C', the language code corresponds to RFC + 1766. code and encoding can be None in case the values cannot + be determined. + + """ + + try: + # check if it's supported by the _locale module + import _locale + code, encoding = _locale._getdefaultlocale() + except (ImportError, AttributeError): + pass + else: + # make sure the code/encoding values are valid + if sys.platform == "win32" and code and code[:2] == "0x": + # map windows language identifier to language name + code = windows_locale.get(int(code, 0)) + # ...add other platform-specific processing here, if + # necessary... + return code, encoding + + # fall back on POSIX behaviour + import os + lookup = os.environ.get + for variable in envvars: + localename = lookup(variable,None) + if localename: + if variable == 'LANGUAGE': + localename = localename.split(':')[0] + break + else: + localename = 'C' + return _parse_localename(localename) + + +def getlocale(category=LC_CTYPE): + + """ Returns the current setting for the given locale category as + tuple (language code, encoding). + + category may be one of the LC_* value except LC_ALL. It + defaults to LC_CTYPE. + + Except for the code 'C', the language code corresponds to RFC + 1766. code and encoding can be None in case the values cannot + be determined. + + """ + localename = _setlocale(category) + if category == LC_ALL and ';' in localename: + raise TypeError('category LC_ALL is not supported') + return _parse_localename(localename) + +def setlocale(category, locale=None): + + """ Set the locale for the given category. The locale can be + a string, an iterable of two strings (language code and encoding), + or None. + + Iterables are converted to strings using the locale aliasing + engine. Locale strings are passed directly to the C lib. + + category may be given as one of the LC_* values. + + """ + if locale and not isinstance(locale, _builtin_str): + # convert to string + locale = normalize(_build_localename(locale)) + return _setlocale(category, locale) + +def resetlocale(category=LC_ALL): + + """ Sets the locale for category to the default setting. + + The default setting is determined by calling + getdefaultlocale(). category defaults to LC_ALL. + + """ + _setlocale(category, _build_localename(getdefaultlocale())) + +if sys.platform.startswith("win"): + # On Win32, this will return the ANSI code page + def getpreferredencoding(do_setlocale = True): + """Return the charset that the user is likely using.""" + import _bootlocale + return _bootlocale.getpreferredencoding(False) +else: + # On Unix, if CODESET is available, use that. 
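+# Illustrative expectation (environment assumed): with LANG=en_US.UTF-8 and
+# CODESET available, getpreferredencoding() reports 'UTF-8'; the fallback
+# branch below recovers the same answer from getdefaultlocale()[1].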
+ try: + CODESET + except NameError: + # Fall back to parsing environment variables :-( + def getpreferredencoding(do_setlocale = True): + """Return the charset that the user is likely using, + by looking at environment variables.""" + res = getdefaultlocale()[1] + if res is None: + # LANG not set, default conservatively to ASCII + res = 'ascii' + return res + else: + def getpreferredencoding(do_setlocale = True): + """Return the charset that the user is likely using, + according to the system configuration.""" + import _bootlocale + if do_setlocale: + oldloc = setlocale(LC_CTYPE) + try: + setlocale(LC_CTYPE, "") + except Error: + pass + result = _bootlocale.getpreferredencoding(False) + if do_setlocale: + setlocale(LC_CTYPE, oldloc) + return result + + +### Database +# +# The following data was extracted from the locale.alias file which +# comes with X11 and then hand edited removing the explicit encoding +# definitions and adding some more aliases. The file is usually +# available as /usr/lib/X11/locale/locale.alias. +# + +# +# The local_encoding_alias table maps lowercase encoding alias names +# to C locale encoding names (case-sensitive). Note that normalize() +# first looks up the encoding in the encodings.aliases dictionary and +# then applies this mapping to find the correct C lib name for the +# encoding. +# +locale_encoding_alias = { + + # Mappings for non-standard encoding names used in locale names + '437': 'C', + 'c': 'C', + 'en': 'ISO8859-1', + 'jis': 'JIS7', + 'jis7': 'JIS7', + 'ajec': 'eucJP', + 'koi8c': 'KOI8-C', + 'microsoftcp1251': 'CP1251', + 'microsoftcp1255': 'CP1255', + 'microsoftcp1256': 'CP1256', + '88591': 'ISO8859-1', + '88592': 'ISO8859-2', + '88595': 'ISO8859-5', + '885915': 'ISO8859-15', + + # Mappings from Python codec names to C lib encoding names + 'ascii': 'ISO8859-1', + 'latin_1': 'ISO8859-1', + 'iso8859_1': 'ISO8859-1', + 'iso8859_10': 'ISO8859-10', + 'iso8859_11': 'ISO8859-11', + 'iso8859_13': 'ISO8859-13', + 'iso8859_14': 'ISO8859-14', + 'iso8859_15': 'ISO8859-15', + 'iso8859_16': 'ISO8859-16', + 'iso8859_2': 'ISO8859-2', + 'iso8859_3': 'ISO8859-3', + 'iso8859_4': 'ISO8859-4', + 'iso8859_5': 'ISO8859-5', + 'iso8859_6': 'ISO8859-6', + 'iso8859_7': 'ISO8859-7', + 'iso8859_8': 'ISO8859-8', + 'iso8859_9': 'ISO8859-9', + 'iso2022_jp': 'JIS7', + 'shift_jis': 'SJIS', + 'tactis': 'TACTIS', + 'euc_jp': 'eucJP', + 'euc_kr': 'eucKR', + 'utf_8': 'UTF-8', + 'koi8_r': 'KOI8-R', + 'koi8_u': 'KOI8-U', + 'cp1251': 'CP1251', + 'cp1255': 'CP1255', + 'cp1256': 'CP1256', + + # XXX This list is still incomplete. If you know more + # mappings, please file a bug report. Thanks. +} + +for k, v in sorted(locale_encoding_alias.items()): + k = k.replace('_', '') + locale_encoding_alias.setdefault(k, v) + +# +# The locale_alias table maps lowercase alias names to C locale names +# (case-sensitive). Encodings are always separated from the locale +# name using a dot ('.'); they should only be given in case the +# language name is needed to interpret the given encoding alias +# correctly (CJK codes often have this need). +# +# Note that the normalize() function which uses this tables +# removes '_' and '-' characters from the encoding part of the +# locale name before doing the lookup. This saves a lot of +# space in the table. +# +# MAL 2004-12-10: +# Updated alias mapping to most recent locale.alias file +# from X.org distribution using makelocalealias.py. 
+# +# These are the differences compared to the old mapping (Python 2.4 +# and older): +# +# updated 'bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' +# updated 'bg_bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' +# updated 'bulgarian' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' +# updated 'cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2' +# updated 'cz_cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2' +# updated 'czech' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2' +# updated 'dutch' -> 'nl_BE.ISO8859-1' to 'nl_NL.ISO8859-1' +# updated 'et' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15' +# updated 'et_ee' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15' +# updated 'fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15' +# updated 'fi_fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15' +# updated 'iw' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' +# updated 'iw_il' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' +# updated 'japanese' -> 'ja_JP.SJIS' to 'ja_JP.eucJP' +# updated 'lt' -> 'lt_LT.ISO8859-4' to 'lt_LT.ISO8859-13' +# updated 'lv' -> 'lv_LV.ISO8859-4' to 'lv_LV.ISO8859-13' +# updated 'sl' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2' +# updated 'slovene' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2' +# updated 'th_th' -> 'th_TH.TACTIS' to 'th_TH.ISO8859-11' +# updated 'zh_cn' -> 'zh_CN.eucCN' to 'zh_CN.gb2312' +# updated 'zh_cn.big5' -> 'zh_TW.eucTW' to 'zh_TW.big5' +# updated 'zh_tw' -> 'zh_TW.eucTW' to 'zh_TW.big5' +# +# MAL 2008-05-30: +# Updated alias mapping to most recent locale.alias file +# from X.org distribution using makelocalealias.py. +# +# These are the differences compared to the old mapping (Python 2.5 +# and older): +# +# updated 'cs_cs.iso88592' -> 'cs_CZ.ISO8859-2' to 'cs_CS.ISO8859-2' +# updated 'serbocroatian' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sh' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sh_hr.iso88592' -> 'sh_HR.ISO8859-2' to 'hr_HR.ISO8859-2' +# updated 'sh_sp' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sh_yu' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sp' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sp_yu' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr_sp' -> 'sr_SP.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sr_yu' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr_yu.cp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251' +# updated 'sr_yu.iso88592' -> 'sr_YU.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sr_yu.iso88595' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr_yu.iso88595@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr_yu.microsoftcp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251' +# updated 'sr_yu.utf8@cyrillic' -> 'sr_YU.UTF-8' to 'sr_CS.UTF-8' +# updated 'sr_yu@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# +# AP 2010-04-12: +# Updated alias mapping to most recent locale.alias file +# from X.org distribution using makelocalealias.py. 
+# +# These are the differences compared to the old mapping (Python 2.6.5 +# and older): +# +# updated 'ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8' +# updated 'ru_ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8' +# updated 'serbocroatian' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' +# updated 'sh' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' +# updated 'sh_yu' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' +# updated 'sr' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' +# updated 'sr@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' +# updated 'sr@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' +# updated 'sr_cs.utf8@latn' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8@latin' +# updated 'sr_cs@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' +# updated 'sr_yu' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8@latin' +# updated 'sr_yu.utf8@cyrillic' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8' +# updated 'sr_yu@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' +# +# SS 2013-12-20: +# Updated alias mapping to most recent locale.alias file +# from X.org distribution using makelocalealias.py. +# +# These are the differences compared to the old mapping (Python 3.3.3 +# and older): +# +# updated 'a3' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C' +# updated 'a3_az' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C' +# updated 'a3_az.koi8c' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C' +# updated 'cs_cs.iso88592' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2' +# updated 'hebrew' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' +# updated 'hebrew.iso88598' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' +# updated 'sd' -> 'sd_IN@devanagari.UTF-8' to 'sd_IN.UTF-8' +# updated 'sr@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin' +# updated 'sr_cs' -> 'sr_RS.UTF-8' to 'sr_CS.UTF-8' +# updated 'sr_cs.utf8@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin' +# updated 'sr_cs@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin' +# +# SS 2014-10-01: +# Updated alias mapping with glibc 2.19 supported locales. 
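+# Illustrative lookups (inputs assumed), traceable through normalize() above:
+#   normalize('de_de@euro')  -> 'de_DE.ISO8859-15'  (euro modifier upgrade)
+#   normalize('no-such')     -> 'no-such'           (unknown names pass through)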
+ +locale_alias = { + 'a3': 'az_AZ.KOI8-C', + 'a3_az': 'az_AZ.KOI8-C', + 'a3_az.koic': 'az_AZ.KOI8-C', + 'aa_dj': 'aa_DJ.ISO8859-1', + 'aa_er': 'aa_ER.UTF-8', + 'aa_et': 'aa_ET.UTF-8', + 'af': 'af_ZA.ISO8859-1', + 'af_za': 'af_ZA.ISO8859-1', + 'am': 'am_ET.UTF-8', + 'am_et': 'am_ET.UTF-8', + 'american': 'en_US.ISO8859-1', + 'an_es': 'an_ES.ISO8859-15', + 'ar': 'ar_AA.ISO8859-6', + 'ar_aa': 'ar_AA.ISO8859-6', + 'ar_ae': 'ar_AE.ISO8859-6', + 'ar_bh': 'ar_BH.ISO8859-6', + 'ar_dz': 'ar_DZ.ISO8859-6', + 'ar_eg': 'ar_EG.ISO8859-6', + 'ar_in': 'ar_IN.UTF-8', + 'ar_iq': 'ar_IQ.ISO8859-6', + 'ar_jo': 'ar_JO.ISO8859-6', + 'ar_kw': 'ar_KW.ISO8859-6', + 'ar_lb': 'ar_LB.ISO8859-6', + 'ar_ly': 'ar_LY.ISO8859-6', + 'ar_ma': 'ar_MA.ISO8859-6', + 'ar_om': 'ar_OM.ISO8859-6', + 'ar_qa': 'ar_QA.ISO8859-6', + 'ar_sa': 'ar_SA.ISO8859-6', + 'ar_sd': 'ar_SD.ISO8859-6', + 'ar_sy': 'ar_SY.ISO8859-6', + 'ar_tn': 'ar_TN.ISO8859-6', + 'ar_ye': 'ar_YE.ISO8859-6', + 'arabic': 'ar_AA.ISO8859-6', + 'as': 'as_IN.UTF-8', + 'as_in': 'as_IN.UTF-8', + 'ast_es': 'ast_ES.ISO8859-15', + 'ayc_pe': 'ayc_PE.UTF-8', + 'az': 'az_AZ.ISO8859-9E', + 'az_az': 'az_AZ.ISO8859-9E', + 'az_az.iso88599e': 'az_AZ.ISO8859-9E', + 'be': 'be_BY.CP1251', + 'be@latin': 'be_BY.UTF-8@latin', + 'be_bg.utf8': 'bg_BG.UTF-8', + 'be_by': 'be_BY.CP1251', + 'be_by@latin': 'be_BY.UTF-8@latin', + 'bem_zm': 'bem_ZM.UTF-8', + 'ber_dz': 'ber_DZ.UTF-8', + 'ber_ma': 'ber_MA.UTF-8', + 'bg': 'bg_BG.CP1251', + 'bg_bg': 'bg_BG.CP1251', + 'bho_in': 'bho_IN.UTF-8', + 'bn_bd': 'bn_BD.UTF-8', + 'bn_in': 'bn_IN.UTF-8', + 'bo_cn': 'bo_CN.UTF-8', + 'bo_in': 'bo_IN.UTF-8', + 'bokmal': 'nb_NO.ISO8859-1', + 'bokm\xe5l': 'nb_NO.ISO8859-1', + 'br': 'br_FR.ISO8859-1', + 'br_fr': 'br_FR.ISO8859-1', + 'brx_in': 'brx_IN.UTF-8', + 'bs': 'bs_BA.ISO8859-2', + 'bs_ba': 'bs_BA.ISO8859-2', + 'bulgarian': 'bg_BG.CP1251', + 'byn_er': 'byn_ER.UTF-8', + 'c': 'C', + 'c-french': 'fr_CA.ISO8859-1', + 'c.ascii': 'C', + 'c.en': 'C', + 'c.iso88591': 'en_US.ISO8859-1', + 'c.utf8': 'en_US.UTF-8', + 'c_c': 'C', + 'c_c.c': 'C', + 'ca': 'ca_ES.ISO8859-1', + 'ca_ad': 'ca_AD.ISO8859-1', + 'ca_es': 'ca_ES.ISO8859-1', + 'ca_es@valencia': 'ca_ES.ISO8859-15@valencia', + 'ca_fr': 'ca_FR.ISO8859-1', + 'ca_it': 'ca_IT.ISO8859-1', + 'catalan': 'ca_ES.ISO8859-1', + 'cextend': 'en_US.ISO8859-1', + 'chinese-s': 'zh_CN.eucCN', + 'chinese-t': 'zh_TW.eucTW', + 'crh_ua': 'crh_UA.UTF-8', + 'croatian': 'hr_HR.ISO8859-2', + 'cs': 'cs_CZ.ISO8859-2', + 'cs_cs': 'cs_CZ.ISO8859-2', + 'cs_cz': 'cs_CZ.ISO8859-2', + 'csb_pl': 'csb_PL.UTF-8', + 'cv_ru': 'cv_RU.UTF-8', + 'cy': 'cy_GB.ISO8859-1', + 'cy_gb': 'cy_GB.ISO8859-1', + 'cz': 'cs_CZ.ISO8859-2', + 'cz_cz': 'cs_CZ.ISO8859-2', + 'czech': 'cs_CZ.ISO8859-2', + 'da': 'da_DK.ISO8859-1', + 'da_dk': 'da_DK.ISO8859-1', + 'danish': 'da_DK.ISO8859-1', + 'dansk': 'da_DK.ISO8859-1', + 'de': 'de_DE.ISO8859-1', + 'de_at': 'de_AT.ISO8859-1', + 'de_be': 'de_BE.ISO8859-1', + 'de_ch': 'de_CH.ISO8859-1', + 'de_de': 'de_DE.ISO8859-1', + 'de_li.utf8': 'de_LI.UTF-8', + 'de_lu': 'de_LU.ISO8859-1', + 'deutsch': 'de_DE.ISO8859-1', + 'doi_in': 'doi_IN.UTF-8', + 'dutch': 'nl_NL.ISO8859-1', + 'dutch.iso88591': 'nl_BE.ISO8859-1', + 'dv_mv': 'dv_MV.UTF-8', + 'dz_bt': 'dz_BT.UTF-8', + 'ee': 'ee_EE.ISO8859-4', + 'ee_ee': 'ee_EE.ISO8859-4', + 'eesti': 'et_EE.ISO8859-1', + 'el': 'el_GR.ISO8859-7', + 'el_cy': 'el_CY.ISO8859-7', + 'el_gr': 'el_GR.ISO8859-7', + 'el_gr@euro': 'el_GR.ISO8859-15', + 'en': 'en_US.ISO8859-1', + 'en_ag': 'en_AG.UTF-8', + 'en_au': 'en_AU.ISO8859-1', + 'en_be': 'en_BE.ISO8859-1', + 'en_bw': 
'en_BW.ISO8859-1', + 'en_ca': 'en_CA.ISO8859-1', + 'en_dk': 'en_DK.ISO8859-1', + 'en_dl.utf8': 'en_DL.UTF-8', + 'en_gb': 'en_GB.ISO8859-1', + 'en_hk': 'en_HK.ISO8859-1', + 'en_ie': 'en_IE.ISO8859-1', + 'en_in': 'en_IN.ISO8859-1', + 'en_ng': 'en_NG.UTF-8', + 'en_nz': 'en_NZ.ISO8859-1', + 'en_ph': 'en_PH.ISO8859-1', + 'en_sg': 'en_SG.ISO8859-1', + 'en_uk': 'en_GB.ISO8859-1', + 'en_us': 'en_US.ISO8859-1', + 'en_us@euro@euro': 'en_US.ISO8859-15', + 'en_za': 'en_ZA.ISO8859-1', + 'en_zm': 'en_ZM.UTF-8', + 'en_zw': 'en_ZW.ISO8859-1', + 'en_zw.utf8': 'en_ZS.UTF-8', + 'eng_gb': 'en_GB.ISO8859-1', + 'english': 'en_EN.ISO8859-1', + 'english_uk': 'en_GB.ISO8859-1', + 'english_united-states': 'en_US.ISO8859-1', + 'english_united-states.437': 'C', + 'english_us': 'en_US.ISO8859-1', + 'eo': 'eo_XX.ISO8859-3', + 'eo.utf8': 'eo.UTF-8', + 'eo_eo': 'eo_EO.ISO8859-3', + 'eo_us.utf8': 'eo_US.UTF-8', + 'eo_xx': 'eo_XX.ISO8859-3', + 'es': 'es_ES.ISO8859-1', + 'es_ar': 'es_AR.ISO8859-1', + 'es_bo': 'es_BO.ISO8859-1', + 'es_cl': 'es_CL.ISO8859-1', + 'es_co': 'es_CO.ISO8859-1', + 'es_cr': 'es_CR.ISO8859-1', + 'es_cu': 'es_CU.UTF-8', + 'es_do': 'es_DO.ISO8859-1', + 'es_ec': 'es_EC.ISO8859-1', + 'es_es': 'es_ES.ISO8859-1', + 'es_gt': 'es_GT.ISO8859-1', + 'es_hn': 'es_HN.ISO8859-1', + 'es_mx': 'es_MX.ISO8859-1', + 'es_ni': 'es_NI.ISO8859-1', + 'es_pa': 'es_PA.ISO8859-1', + 'es_pe': 'es_PE.ISO8859-1', + 'es_pr': 'es_PR.ISO8859-1', + 'es_py': 'es_PY.ISO8859-1', + 'es_sv': 'es_SV.ISO8859-1', + 'es_us': 'es_US.ISO8859-1', + 'es_uy': 'es_UY.ISO8859-1', + 'es_ve': 'es_VE.ISO8859-1', + 'estonian': 'et_EE.ISO8859-1', + 'et': 'et_EE.ISO8859-15', + 'et_ee': 'et_EE.ISO8859-15', + 'eu': 'eu_ES.ISO8859-1', + 'eu_es': 'eu_ES.ISO8859-1', + 'eu_fr': 'eu_FR.ISO8859-1', + 'fa': 'fa_IR.UTF-8', + 'fa_ir': 'fa_IR.UTF-8', + 'fa_ir.isiri3342': 'fa_IR.ISIRI-3342', + 'ff_sn': 'ff_SN.UTF-8', + 'fi': 'fi_FI.ISO8859-15', + 'fi_fi': 'fi_FI.ISO8859-15', + 'fil_ph': 'fil_PH.UTF-8', + 'finnish': 'fi_FI.ISO8859-1', + 'fo': 'fo_FO.ISO8859-1', + 'fo_fo': 'fo_FO.ISO8859-1', + 'fr': 'fr_FR.ISO8859-1', + 'fr_be': 'fr_BE.ISO8859-1', + 'fr_ca': 'fr_CA.ISO8859-1', + 'fr_ch': 'fr_CH.ISO8859-1', + 'fr_fr': 'fr_FR.ISO8859-1', + 'fr_lu': 'fr_LU.ISO8859-1', + 'fran\xe7ais': 'fr_FR.ISO8859-1', + 'fre_fr': 'fr_FR.ISO8859-1', + 'french': 'fr_FR.ISO8859-1', + 'french.iso88591': 'fr_CH.ISO8859-1', + 'french_france': 'fr_FR.ISO8859-1', + 'fur_it': 'fur_IT.UTF-8', + 'fy_de': 'fy_DE.UTF-8', + 'fy_nl': 'fy_NL.UTF-8', + 'ga': 'ga_IE.ISO8859-1', + 'ga_ie': 'ga_IE.ISO8859-1', + 'galego': 'gl_ES.ISO8859-1', + 'galician': 'gl_ES.ISO8859-1', + 'gd': 'gd_GB.ISO8859-1', + 'gd_gb': 'gd_GB.ISO8859-1', + 'ger_de': 'de_DE.ISO8859-1', + 'german': 'de_DE.ISO8859-1', + 'german.iso88591': 'de_CH.ISO8859-1', + 'german_germany': 'de_DE.ISO8859-1', + 'gez_er': 'gez_ER.UTF-8', + 'gez_et': 'gez_ET.UTF-8', + 'gl': 'gl_ES.ISO8859-1', + 'gl_es': 'gl_ES.ISO8859-1', + 'greek': 'el_GR.ISO8859-7', + 'gu_in': 'gu_IN.UTF-8', + 'gv': 'gv_GB.ISO8859-1', + 'gv_gb': 'gv_GB.ISO8859-1', + 'ha_ng': 'ha_NG.UTF-8', + 'he': 'he_IL.ISO8859-8', + 'he_il': 'he_IL.ISO8859-8', + 'hebrew': 'he_IL.ISO8859-8', + 'hi': 'hi_IN.ISCII-DEV', + 'hi_in': 'hi_IN.ISCII-DEV', + 'hi_in.isciidev': 'hi_IN.ISCII-DEV', + 'hne': 'hne_IN.UTF-8', + 'hne_in': 'hne_IN.UTF-8', + 'hr': 'hr_HR.ISO8859-2', + 'hr_hr': 'hr_HR.ISO8859-2', + 'hrvatski': 'hr_HR.ISO8859-2', + 'hsb_de': 'hsb_DE.ISO8859-2', + 'ht_ht': 'ht_HT.UTF-8', + 'hu': 'hu_HU.ISO8859-2', + 'hu_hu': 'hu_HU.ISO8859-2', + 'hungarian': 'hu_HU.ISO8859-2', + 'hy_am': 'hy_AM.UTF-8', + 
'hy_am.armscii8': 'hy_AM.ARMSCII_8', + 'ia': 'ia.UTF-8', + 'ia_fr': 'ia_FR.UTF-8', + 'icelandic': 'is_IS.ISO8859-1', + 'id': 'id_ID.ISO8859-1', + 'id_id': 'id_ID.ISO8859-1', + 'ig_ng': 'ig_NG.UTF-8', + 'ik_ca': 'ik_CA.UTF-8', + 'in': 'id_ID.ISO8859-1', + 'in_id': 'id_ID.ISO8859-1', + 'is': 'is_IS.ISO8859-1', + 'is_is': 'is_IS.ISO8859-1', + 'iso-8859-1': 'en_US.ISO8859-1', + 'iso-8859-15': 'en_US.ISO8859-15', + 'iso8859-1': 'en_US.ISO8859-1', + 'iso8859-15': 'en_US.ISO8859-15', + 'iso_8859_1': 'en_US.ISO8859-1', + 'iso_8859_15': 'en_US.ISO8859-15', + 'it': 'it_IT.ISO8859-1', + 'it_ch': 'it_CH.ISO8859-1', + 'it_it': 'it_IT.ISO8859-1', + 'italian': 'it_IT.ISO8859-1', + 'iu': 'iu_CA.NUNACOM-8', + 'iu_ca': 'iu_CA.NUNACOM-8', + 'iu_ca.nunacom8': 'iu_CA.NUNACOM-8', + 'iw': 'he_IL.ISO8859-8', + 'iw_il': 'he_IL.ISO8859-8', + 'iw_il.utf8': 'iw_IL.UTF-8', + 'ja': 'ja_JP.eucJP', + 'ja_jp': 'ja_JP.eucJP', + 'ja_jp.euc': 'ja_JP.eucJP', + 'ja_jp.mscode': 'ja_JP.SJIS', + 'ja_jp.pck': 'ja_JP.SJIS', + 'japan': 'ja_JP.eucJP', + 'japanese': 'ja_JP.eucJP', + 'japanese-euc': 'ja_JP.eucJP', + 'japanese.euc': 'ja_JP.eucJP', + 'jp_jp': 'ja_JP.eucJP', + 'ka': 'ka_GE.GEORGIAN-ACADEMY', + 'ka_ge': 'ka_GE.GEORGIAN-ACADEMY', + 'ka_ge.georgianacademy': 'ka_GE.GEORGIAN-ACADEMY', + 'ka_ge.georgianps': 'ka_GE.GEORGIAN-PS', + 'ka_ge.georgianrs': 'ka_GE.GEORGIAN-ACADEMY', + 'kk_kz': 'kk_KZ.RK1048', + 'kl': 'kl_GL.ISO8859-1', + 'kl_gl': 'kl_GL.ISO8859-1', + 'km_kh': 'km_KH.UTF-8', + 'kn': 'kn_IN.UTF-8', + 'kn_in': 'kn_IN.UTF-8', + 'ko': 'ko_KR.eucKR', + 'ko_kr': 'ko_KR.eucKR', + 'ko_kr.euc': 'ko_KR.eucKR', + 'kok_in': 'kok_IN.UTF-8', + 'korean': 'ko_KR.eucKR', + 'korean.euc': 'ko_KR.eucKR', + 'ks': 'ks_IN.UTF-8', + 'ks_in': 'ks_IN.UTF-8', + 'ks_in@devanagari.utf8': 'ks_IN.UTF-8@devanagari', + 'ku_tr': 'ku_TR.ISO8859-9', + 'kw': 'kw_GB.ISO8859-1', + 'kw_gb': 'kw_GB.ISO8859-1', + 'ky': 'ky_KG.UTF-8', + 'ky_kg': 'ky_KG.UTF-8', + 'lb_lu': 'lb_LU.UTF-8', + 'lg_ug': 'lg_UG.ISO8859-10', + 'li_be': 'li_BE.UTF-8', + 'li_nl': 'li_NL.UTF-8', + 'lij_it': 'lij_IT.UTF-8', + 'lithuanian': 'lt_LT.ISO8859-13', + 'lo': 'lo_LA.MULELAO-1', + 'lo_la': 'lo_LA.MULELAO-1', + 'lo_la.cp1133': 'lo_LA.IBM-CP1133', + 'lo_la.ibmcp1133': 'lo_LA.IBM-CP1133', + 'lo_la.mulelao1': 'lo_LA.MULELAO-1', + 'lt': 'lt_LT.ISO8859-13', + 'lt_lt': 'lt_LT.ISO8859-13', + 'lv': 'lv_LV.ISO8859-13', + 'lv_lv': 'lv_LV.ISO8859-13', + 'mag_in': 'mag_IN.UTF-8', + 'mai': 'mai_IN.UTF-8', + 'mai_in': 'mai_IN.UTF-8', + 'mg_mg': 'mg_MG.ISO8859-15', + 'mhr_ru': 'mhr_RU.UTF-8', + 'mi': 'mi_NZ.ISO8859-1', + 'mi_nz': 'mi_NZ.ISO8859-1', + 'mk': 'mk_MK.ISO8859-5', + 'mk_mk': 'mk_MK.ISO8859-5', + 'ml': 'ml_IN.UTF-8', + 'ml_in': 'ml_IN.UTF-8', + 'mn_mn': 'mn_MN.UTF-8', + 'mni_in': 'mni_IN.UTF-8', + 'mr': 'mr_IN.UTF-8', + 'mr_in': 'mr_IN.UTF-8', + 'ms': 'ms_MY.ISO8859-1', + 'ms_my': 'ms_MY.ISO8859-1', + 'mt': 'mt_MT.ISO8859-3', + 'mt_mt': 'mt_MT.ISO8859-3', + 'my_mm': 'my_MM.UTF-8', + 'nan_tw@latin': 'nan_TW.UTF-8@latin', + 'nb': 'nb_NO.ISO8859-1', + 'nb_no': 'nb_NO.ISO8859-1', + 'nds_de': 'nds_DE.UTF-8', + 'nds_nl': 'nds_NL.UTF-8', + 'ne_np': 'ne_NP.UTF-8', + 'nhn_mx': 'nhn_MX.UTF-8', + 'niu_nu': 'niu_NU.UTF-8', + 'niu_nz': 'niu_NZ.UTF-8', + 'nl': 'nl_NL.ISO8859-1', + 'nl_aw': 'nl_AW.UTF-8', + 'nl_be': 'nl_BE.ISO8859-1', + 'nl_nl': 'nl_NL.ISO8859-1', + 'nn': 'nn_NO.ISO8859-1', + 'nn_no': 'nn_NO.ISO8859-1', + 'no': 'no_NO.ISO8859-1', + 'no@nynorsk': 'ny_NO.ISO8859-1', + 'no_no': 'no_NO.ISO8859-1', + 'no_no.iso88591@bokmal': 'no_NO.ISO8859-1', + 'no_no.iso88591@nynorsk': 'no_NO.ISO8859-1', + 
'norwegian': 'no_NO.ISO8859-1', + 'nr': 'nr_ZA.ISO8859-1', + 'nr_za': 'nr_ZA.ISO8859-1', + 'nso': 'nso_ZA.ISO8859-15', + 'nso_za': 'nso_ZA.ISO8859-15', + 'ny': 'ny_NO.ISO8859-1', + 'ny_no': 'ny_NO.ISO8859-1', + 'nynorsk': 'nn_NO.ISO8859-1', + 'oc': 'oc_FR.ISO8859-1', + 'oc_fr': 'oc_FR.ISO8859-1', + 'om_et': 'om_ET.UTF-8', + 'om_ke': 'om_KE.ISO8859-1', + 'or': 'or_IN.UTF-8', + 'or_in': 'or_IN.UTF-8', + 'os_ru': 'os_RU.UTF-8', + 'pa': 'pa_IN.UTF-8', + 'pa_in': 'pa_IN.UTF-8', + 'pa_pk': 'pa_PK.UTF-8', + 'pap_an': 'pap_AN.UTF-8', + 'pd': 'pd_US.ISO8859-1', + 'pd_de': 'pd_DE.ISO8859-1', + 'pd_us': 'pd_US.ISO8859-1', + 'ph': 'ph_PH.ISO8859-1', + 'ph_ph': 'ph_PH.ISO8859-1', + 'pl': 'pl_PL.ISO8859-2', + 'pl_pl': 'pl_PL.ISO8859-2', + 'polish': 'pl_PL.ISO8859-2', + 'portuguese': 'pt_PT.ISO8859-1', + 'portuguese_brazil': 'pt_BR.ISO8859-1', + 'posix': 'C', + 'posix-utf2': 'C', + 'pp': 'pp_AN.ISO8859-1', + 'pp_an': 'pp_AN.ISO8859-1', + 'ps_af': 'ps_AF.UTF-8', + 'pt': 'pt_PT.ISO8859-1', + 'pt_br': 'pt_BR.ISO8859-1', + 'pt_pt': 'pt_PT.ISO8859-1', + 'ro': 'ro_RO.ISO8859-2', + 'ro_ro': 'ro_RO.ISO8859-2', + 'romanian': 'ro_RO.ISO8859-2', + 'ru': 'ru_RU.UTF-8', + 'ru_ru': 'ru_RU.UTF-8', + 'ru_ua': 'ru_UA.KOI8-U', + 'rumanian': 'ro_RO.ISO8859-2', + 'russian': 'ru_RU.ISO8859-5', + 'rw': 'rw_RW.ISO8859-1', + 'rw_rw': 'rw_RW.ISO8859-1', + 'sa_in': 'sa_IN.UTF-8', + 'sat_in': 'sat_IN.UTF-8', + 'sc_it': 'sc_IT.UTF-8', + 'sd': 'sd_IN.UTF-8', + 'sd_in': 'sd_IN.UTF-8', + 'sd_in@devanagari.utf8': 'sd_IN.UTF-8@devanagari', + 'sd_pk': 'sd_PK.UTF-8', + 'se_no': 'se_NO.UTF-8', + 'serbocroatian': 'sr_RS.UTF-8@latin', + 'sh': 'sr_RS.UTF-8@latin', + 'sh_ba.iso88592@bosnia': 'sr_CS.ISO8859-2', + 'sh_hr': 'sh_HR.ISO8859-2', + 'sh_hr.iso88592': 'hr_HR.ISO8859-2', + 'sh_sp': 'sr_CS.ISO8859-2', + 'sh_yu': 'sr_RS.UTF-8@latin', + 'shs_ca': 'shs_CA.UTF-8', + 'si': 'si_LK.UTF-8', + 'si_lk': 'si_LK.UTF-8', + 'sid_et': 'sid_ET.UTF-8', + 'sinhala': 'si_LK.UTF-8', + 'sk': 'sk_SK.ISO8859-2', + 'sk_sk': 'sk_SK.ISO8859-2', + 'sl': 'sl_SI.ISO8859-2', + 'sl_cs': 'sl_CS.ISO8859-2', + 'sl_si': 'sl_SI.ISO8859-2', + 'slovak': 'sk_SK.ISO8859-2', + 'slovene': 'sl_SI.ISO8859-2', + 'slovenian': 'sl_SI.ISO8859-2', + 'so_dj': 'so_DJ.ISO8859-1', + 'so_et': 'so_ET.UTF-8', + 'so_ke': 'so_KE.ISO8859-1', + 'so_so': 'so_SO.ISO8859-1', + 'sp': 'sr_CS.ISO8859-5', + 'sp_yu': 'sr_CS.ISO8859-5', + 'spanish': 'es_ES.ISO8859-1', + 'spanish_spain': 'es_ES.ISO8859-1', + 'sq': 'sq_AL.ISO8859-2', + 'sq_al': 'sq_AL.ISO8859-2', + 'sq_mk': 'sq_MK.UTF-8', + 'sr': 'sr_RS.UTF-8', + 'sr@cyrillic': 'sr_RS.UTF-8', + 'sr@latn': 'sr_CS.UTF-8@latin', + 'sr_cs': 'sr_CS.UTF-8', + 'sr_cs.iso88592@latn': 'sr_CS.ISO8859-2', + 'sr_cs@latn': 'sr_CS.UTF-8@latin', + 'sr_me': 'sr_ME.UTF-8', + 'sr_rs': 'sr_RS.UTF-8', + 'sr_rs@latn': 'sr_RS.UTF-8@latin', + 'sr_sp': 'sr_CS.ISO8859-2', + 'sr_yu': 'sr_RS.UTF-8@latin', + 'sr_yu.cp1251@cyrillic': 'sr_CS.CP1251', + 'sr_yu.iso88592': 'sr_CS.ISO8859-2', + 'sr_yu.iso88595': 'sr_CS.ISO8859-5', + 'sr_yu.iso88595@cyrillic': 'sr_CS.ISO8859-5', + 'sr_yu.microsoftcp1251@cyrillic': 'sr_CS.CP1251', + 'sr_yu.utf8': 'sr_RS.UTF-8', + 'sr_yu.utf8@cyrillic': 'sr_RS.UTF-8', + 'sr_yu@cyrillic': 'sr_RS.UTF-8', + 'ss': 'ss_ZA.ISO8859-1', + 'ss_za': 'ss_ZA.ISO8859-1', + 'st': 'st_ZA.ISO8859-1', + 'st_za': 'st_ZA.ISO8859-1', + 'sv': 'sv_SE.ISO8859-1', + 'sv_fi': 'sv_FI.ISO8859-1', + 'sv_se': 'sv_SE.ISO8859-1', + 'sw_ke': 'sw_KE.UTF-8', + 'sw_tz': 'sw_TZ.UTF-8', + 'swedish': 'sv_SE.ISO8859-1', + 'szl_pl': 'szl_PL.UTF-8', + 'ta': 'ta_IN.TSCII-0', + 'ta_in': 
'ta_IN.TSCII-0', + 'ta_in.tscii': 'ta_IN.TSCII-0', + 'ta_in.tscii0': 'ta_IN.TSCII-0', + 'ta_lk': 'ta_LK.UTF-8', + 'te': 'te_IN.UTF-8', + 'te_in': 'te_IN.UTF-8', + 'tg': 'tg_TJ.KOI8-C', + 'tg_tj': 'tg_TJ.KOI8-C', + 'th': 'th_TH.ISO8859-11', + 'th_th': 'th_TH.ISO8859-11', + 'th_th.tactis': 'th_TH.TIS620', + 'th_th.tis620': 'th_TH.TIS620', + 'thai': 'th_TH.ISO8859-11', + 'ti_er': 'ti_ER.UTF-8', + 'ti_et': 'ti_ET.UTF-8', + 'tig_er': 'tig_ER.UTF-8', + 'tk_tm': 'tk_TM.UTF-8', + 'tl': 'tl_PH.ISO8859-1', + 'tl_ph': 'tl_PH.ISO8859-1', + 'tn': 'tn_ZA.ISO8859-15', + 'tn_za': 'tn_ZA.ISO8859-15', + 'tr': 'tr_TR.ISO8859-9', + 'tr_cy': 'tr_CY.ISO8859-9', + 'tr_tr': 'tr_TR.ISO8859-9', + 'ts': 'ts_ZA.ISO8859-1', + 'ts_za': 'ts_ZA.ISO8859-1', + 'tt': 'tt_RU.TATAR-CYR', + 'tt_ru': 'tt_RU.TATAR-CYR', + 'tt_ru.tatarcyr': 'tt_RU.TATAR-CYR', + 'tt_ru@iqtelif': 'tt_RU.UTF-8@iqtelif', + 'turkish': 'tr_TR.ISO8859-9', + 'ug_cn': 'ug_CN.UTF-8', + 'uk': 'uk_UA.KOI8-U', + 'uk_ua': 'uk_UA.KOI8-U', + 'univ': 'en_US.utf', + 'universal': 'en_US.utf', + 'universal.utf8@ucs4': 'en_US.UTF-8', + 'unm_us': 'unm_US.UTF-8', + 'ur': 'ur_PK.CP1256', + 'ur_in': 'ur_IN.UTF-8', + 'ur_pk': 'ur_PK.CP1256', + 'uz': 'uz_UZ.UTF-8', + 'uz_uz': 'uz_UZ.UTF-8', + 'uz_uz@cyrillic': 'uz_UZ.UTF-8', + 've': 've_ZA.UTF-8', + 've_za': 've_ZA.UTF-8', + 'vi': 'vi_VN.TCVN', + 'vi_vn': 'vi_VN.TCVN', + 'vi_vn.tcvn': 'vi_VN.TCVN', + 'vi_vn.tcvn5712': 'vi_VN.TCVN', + 'vi_vn.viscii': 'vi_VN.VISCII', + 'vi_vn.viscii111': 'vi_VN.VISCII', + 'wa': 'wa_BE.ISO8859-1', + 'wa_be': 'wa_BE.ISO8859-1', + 'wae_ch': 'wae_CH.UTF-8', + 'wal_et': 'wal_ET.UTF-8', + 'wo_sn': 'wo_SN.UTF-8', + 'xh': 'xh_ZA.ISO8859-1', + 'xh_za': 'xh_ZA.ISO8859-1', + 'yi': 'yi_US.CP1255', + 'yi_us': 'yi_US.CP1255', + 'yo_ng': 'yo_NG.UTF-8', + 'yue_hk': 'yue_HK.UTF-8', + 'zh': 'zh_CN.eucCN', + 'zh_cn': 'zh_CN.gb2312', + 'zh_cn.big5': 'zh_TW.big5', + 'zh_cn.euc': 'zh_CN.eucCN', + 'zh_hk': 'zh_HK.big5hkscs', + 'zh_hk.big5hk': 'zh_HK.big5hkscs', + 'zh_sg': 'zh_SG.GB2312', + 'zh_sg.gbk': 'zh_SG.GBK', + 'zh_tw': 'zh_TW.big5', + 'zh_tw.euc': 'zh_TW.eucTW', + 'zh_tw.euctw': 'zh_TW.eucTW', + 'zu': 'zu_ZA.ISO8859-1', + 'zu_za': 'zu_ZA.ISO8859-1', +} + +# +# This maps Windows language identifiers to locale strings. +# +# This list has been updated from +# http://msdn.microsoft.com/library/default.asp?url=/library/en-us/intl/nls_238z.asp +# to include every locale up to Windows Vista. +# +# NOTE: this mapping is incomplete. If your language is missing, please +# submit a bug report to the Python bug tracker at http://bugs.python.org/ +# Make sure you include the missing language identifier and the suggested +# locale code. 
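The large table above is locale.py's `locale_alias` mapping, which `locale.normalize()` consults to expand shorthand locale names into full `language_COUNTRY.ENCODING` codes; for a plain alias string this is a direct lookup in the table. A minimal sketch, with outputs taken from the entries shown above (and assuming this Python 3.4 copy of the table):

    import locale

    # normalize() lower-cases its argument and resolves it via locale_alias.
    locale.normalize('en_us')      # -> 'en_US.ISO8859-1'
    locale.normalize('es')         # -> 'es_ES.ISO8859-1'
    locale.normalize('ja_jp.euc')  # -> 'ja_JP.eucJP'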
+# + +windows_locale = { + 0x0436: "af_ZA", # Afrikaans + 0x041c: "sq_AL", # Albanian + 0x0484: "gsw_FR",# Alsatian - France + 0x045e: "am_ET", # Amharic - Ethiopia + 0x0401: "ar_SA", # Arabic - Saudi Arabia + 0x0801: "ar_IQ", # Arabic - Iraq + 0x0c01: "ar_EG", # Arabic - Egypt + 0x1001: "ar_LY", # Arabic - Libya + 0x1401: "ar_DZ", # Arabic - Algeria + 0x1801: "ar_MA", # Arabic - Morocco + 0x1c01: "ar_TN", # Arabic - Tunisia + 0x2001: "ar_OM", # Arabic - Oman + 0x2401: "ar_YE", # Arabic - Yemen + 0x2801: "ar_SY", # Arabic - Syria + 0x2c01: "ar_JO", # Arabic - Jordan + 0x3001: "ar_LB", # Arabic - Lebanon + 0x3401: "ar_KW", # Arabic - Kuwait + 0x3801: "ar_AE", # Arabic - United Arab Emirates + 0x3c01: "ar_BH", # Arabic - Bahrain + 0x4001: "ar_QA", # Arabic - Qatar + 0x042b: "hy_AM", # Armenian + 0x044d: "as_IN", # Assamese - India + 0x042c: "az_AZ", # Azeri - Latin + 0x082c: "az_AZ", # Azeri - Cyrillic + 0x046d: "ba_RU", # Bashkir + 0x042d: "eu_ES", # Basque - Russia + 0x0423: "be_BY", # Belarusian + 0x0445: "bn_IN", # Begali + 0x201a: "bs_BA", # Bosnian - Cyrillic + 0x141a: "bs_BA", # Bosnian - Latin + 0x047e: "br_FR", # Breton - France + 0x0402: "bg_BG", # Bulgarian +# 0x0455: "my_MM", # Burmese - Not supported + 0x0403: "ca_ES", # Catalan + 0x0004: "zh_CHS",# Chinese - Simplified + 0x0404: "zh_TW", # Chinese - Taiwan + 0x0804: "zh_CN", # Chinese - PRC + 0x0c04: "zh_HK", # Chinese - Hong Kong S.A.R. + 0x1004: "zh_SG", # Chinese - Singapore + 0x1404: "zh_MO", # Chinese - Macao S.A.R. + 0x7c04: "zh_CHT",# Chinese - Traditional + 0x0483: "co_FR", # Corsican - France + 0x041a: "hr_HR", # Croatian + 0x101a: "hr_BA", # Croatian - Bosnia + 0x0405: "cs_CZ", # Czech + 0x0406: "da_DK", # Danish + 0x048c: "gbz_AF",# Dari - Afghanistan + 0x0465: "div_MV",# Divehi - Maldives + 0x0413: "nl_NL", # Dutch - The Netherlands + 0x0813: "nl_BE", # Dutch - Belgium + 0x0409: "en_US", # English - United States + 0x0809: "en_GB", # English - United Kingdom + 0x0c09: "en_AU", # English - Australia + 0x1009: "en_CA", # English - Canada + 0x1409: "en_NZ", # English - New Zealand + 0x1809: "en_IE", # English - Ireland + 0x1c09: "en_ZA", # English - South Africa + 0x2009: "en_JA", # English - Jamaica + 0x2409: "en_CB", # English - Carribbean + 0x2809: "en_BZ", # English - Belize + 0x2c09: "en_TT", # English - Trinidad + 0x3009: "en_ZW", # English - Zimbabwe + 0x3409: "en_PH", # English - Philippines + 0x4009: "en_IN", # English - India + 0x4409: "en_MY", # English - Malaysia + 0x4809: "en_IN", # English - Singapore + 0x0425: "et_EE", # Estonian + 0x0438: "fo_FO", # Faroese + 0x0464: "fil_PH",# Filipino + 0x040b: "fi_FI", # Finnish + 0x040c: "fr_FR", # French - France + 0x080c: "fr_BE", # French - Belgium + 0x0c0c: "fr_CA", # French - Canada + 0x100c: "fr_CH", # French - Switzerland + 0x140c: "fr_LU", # French - Luxembourg + 0x180c: "fr_MC", # French - Monaco + 0x0462: "fy_NL", # Frisian - Netherlands + 0x0456: "gl_ES", # Galician + 0x0437: "ka_GE", # Georgian + 0x0407: "de_DE", # German - Germany + 0x0807: "de_CH", # German - Switzerland + 0x0c07: "de_AT", # German - Austria + 0x1007: "de_LU", # German - Luxembourg + 0x1407: "de_LI", # German - Liechtenstein + 0x0408: "el_GR", # Greek + 0x046f: "kl_GL", # Greenlandic - Greenland + 0x0447: "gu_IN", # Gujarati + 0x0468: "ha_NG", # Hausa - Latin + 0x040d: "he_IL", # Hebrew + 0x0439: "hi_IN", # Hindi + 0x040e: "hu_HU", # Hungarian + 0x040f: "is_IS", # Icelandic + 0x0421: "id_ID", # Indonesian + 0x045d: "iu_CA", # Inuktitut - Syllabics + 0x085d: "iu_CA", # Inuktitut - Latin 
+ 0x083c: "ga_IE", # Irish - Ireland + 0x0410: "it_IT", # Italian - Italy + 0x0810: "it_CH", # Italian - Switzerland + 0x0411: "ja_JP", # Japanese + 0x044b: "kn_IN", # Kannada - India + 0x043f: "kk_KZ", # Kazakh + 0x0453: "kh_KH", # Khmer - Cambodia + 0x0486: "qut_GT",# K'iche - Guatemala + 0x0487: "rw_RW", # Kinyarwanda - Rwanda + 0x0457: "kok_IN",# Konkani + 0x0412: "ko_KR", # Korean + 0x0440: "ky_KG", # Kyrgyz + 0x0454: "lo_LA", # Lao - Lao PDR + 0x0426: "lv_LV", # Latvian + 0x0427: "lt_LT", # Lithuanian + 0x082e: "dsb_DE",# Lower Sorbian - Germany + 0x046e: "lb_LU", # Luxembourgish + 0x042f: "mk_MK", # FYROM Macedonian + 0x043e: "ms_MY", # Malay - Malaysia + 0x083e: "ms_BN", # Malay - Brunei Darussalam + 0x044c: "ml_IN", # Malayalam - India + 0x043a: "mt_MT", # Maltese + 0x0481: "mi_NZ", # Maori + 0x047a: "arn_CL",# Mapudungun + 0x044e: "mr_IN", # Marathi + 0x047c: "moh_CA",# Mohawk - Canada + 0x0450: "mn_MN", # Mongolian - Cyrillic + 0x0850: "mn_CN", # Mongolian - PRC + 0x0461: "ne_NP", # Nepali + 0x0414: "nb_NO", # Norwegian - Bokmal + 0x0814: "nn_NO", # Norwegian - Nynorsk + 0x0482: "oc_FR", # Occitan - France + 0x0448: "or_IN", # Oriya - India + 0x0463: "ps_AF", # Pashto - Afghanistan + 0x0429: "fa_IR", # Persian + 0x0415: "pl_PL", # Polish + 0x0416: "pt_BR", # Portuguese - Brazil + 0x0816: "pt_PT", # Portuguese - Portugal + 0x0446: "pa_IN", # Punjabi + 0x046b: "quz_BO",# Quechua (Bolivia) + 0x086b: "quz_EC",# Quechua (Ecuador) + 0x0c6b: "quz_PE",# Quechua (Peru) + 0x0418: "ro_RO", # Romanian - Romania + 0x0417: "rm_CH", # Romansh + 0x0419: "ru_RU", # Russian + 0x243b: "smn_FI",# Sami Finland + 0x103b: "smj_NO",# Sami Norway + 0x143b: "smj_SE",# Sami Sweden + 0x043b: "se_NO", # Sami Northern Norway + 0x083b: "se_SE", # Sami Northern Sweden + 0x0c3b: "se_FI", # Sami Northern Finland + 0x203b: "sms_FI",# Sami Skolt + 0x183b: "sma_NO",# Sami Southern Norway + 0x1c3b: "sma_SE",# Sami Southern Sweden + 0x044f: "sa_IN", # Sanskrit + 0x0c1a: "sr_SP", # Serbian - Cyrillic + 0x1c1a: "sr_BA", # Serbian - Bosnia Cyrillic + 0x081a: "sr_SP", # Serbian - Latin + 0x181a: "sr_BA", # Serbian - Bosnia Latin + 0x045b: "si_LK", # Sinhala - Sri Lanka + 0x046c: "ns_ZA", # Northern Sotho + 0x0432: "tn_ZA", # Setswana - Southern Africa + 0x041b: "sk_SK", # Slovak + 0x0424: "sl_SI", # Slovenian + 0x040a: "es_ES", # Spanish - Spain + 0x080a: "es_MX", # Spanish - Mexico + 0x0c0a: "es_ES", # Spanish - Spain (Modern) + 0x100a: "es_GT", # Spanish - Guatemala + 0x140a: "es_CR", # Spanish - Costa Rica + 0x180a: "es_PA", # Spanish - Panama + 0x1c0a: "es_DO", # Spanish - Dominican Republic + 0x200a: "es_VE", # Spanish - Venezuela + 0x240a: "es_CO", # Spanish - Colombia + 0x280a: "es_PE", # Spanish - Peru + 0x2c0a: "es_AR", # Spanish - Argentina + 0x300a: "es_EC", # Spanish - Ecuador + 0x340a: "es_CL", # Spanish - Chile + 0x380a: "es_UR", # Spanish - Uruguay + 0x3c0a: "es_PY", # Spanish - Paraguay + 0x400a: "es_BO", # Spanish - Bolivia + 0x440a: "es_SV", # Spanish - El Salvador + 0x480a: "es_HN", # Spanish - Honduras + 0x4c0a: "es_NI", # Spanish - Nicaragua + 0x500a: "es_PR", # Spanish - Puerto Rico + 0x540a: "es_US", # Spanish - United States +# 0x0430: "", # Sutu - Not supported + 0x0441: "sw_KE", # Swahili + 0x041d: "sv_SE", # Swedish - Sweden + 0x081d: "sv_FI", # Swedish - Finland + 0x045a: "syr_SY",# Syriac + 0x0428: "tg_TJ", # Tajik - Cyrillic + 0x085f: "tmz_DZ",# Tamazight - Latin + 0x0449: "ta_IN", # Tamil + 0x0444: "tt_RU", # Tatar + 0x044a: "te_IN", # Telugu + 0x041e: "th_TH", # Thai + 0x0851: "bo_BT", # 
Tibetan - Bhutan + 0x0451: "bo_CN", # Tibetan - PRC + 0x041f: "tr_TR", # Turkish + 0x0442: "tk_TM", # Turkmen - Cyrillic + 0x0480: "ug_CN", # Uighur - Arabic + 0x0422: "uk_UA", # Ukrainian + 0x042e: "wen_DE",# Upper Sorbian - Germany + 0x0420: "ur_PK", # Urdu + 0x0820: "ur_IN", # Urdu - India + 0x0443: "uz_UZ", # Uzbek - Latin + 0x0843: "uz_UZ", # Uzbek - Cyrillic + 0x042a: "vi_VN", # Vietnamese + 0x0452: "cy_GB", # Welsh + 0x0488: "wo_SN", # Wolof - Senegal + 0x0434: "xh_ZA", # Xhosa - South Africa + 0x0485: "sah_RU",# Yakut - Cyrillic + 0x0478: "ii_CN", # Yi - PRC + 0x046a: "yo_NG", # Yoruba - Nigeria + 0x0435: "zu_ZA", # Zulu +} + +def _print_locale(): + + """ Test function. + """ + categories = {} + def _init_categories(categories=categories): + for k,v in globals().items(): + if k[:3] == 'LC_': + categories[k] = v + _init_categories() + del categories['LC_ALL'] + + print('Locale defaults as determined by getdefaultlocale():') + print('-'*72) + lang, enc = getdefaultlocale() + print('Language: ', lang or '(undefined)') + print('Encoding: ', enc or '(undefined)') + print() + + print('Locale settings on startup:') + print('-'*72) + for name,category in categories.items(): + print(name, '...') + lang, enc = getlocale(category) + print(' Language: ', lang or '(undefined)') + print(' Encoding: ', enc or '(undefined)') + print() + + print() + print('Locale settings after calling resetlocale():') + print('-'*72) + resetlocale() + for name,category in categories.items(): + print(name, '...') + lang, enc = getlocale(category) + print(' Language: ', lang or '(undefined)') + print(' Encoding: ', enc or '(undefined)') + print() + + try: + setlocale(LC_ALL, "") + except: + print('NOTE:') + print('setlocale(LC_ALL, "") does not support the default locale') + print('given in the OS environment variables.') + else: + print() + print('Locale settings after calling setlocale(LC_ALL, ""):') + print('-'*72) + for name,category in categories.items(): + print(name, '...') + lang, enc = getlocale(category) + print(' Language: ', lang or '(undefined)') + print(' Encoding: ', enc or '(undefined)') + print() + +### + +try: + LC_MESSAGES +except NameError: + pass +else: + __all__.append("LC_MESSAGES") + +if __name__=='__main__': + print('Locale aliasing:') + print() + _print_locale() + print() + print('Number formatting:') + print() + _test() diff --git a/v1/flask/lib/python3.4/ntpath.py b/v1/flask/lib/python3.4/ntpath.py deleted file mode 120000 index 56bd9ab..0000000 --- a/v1/flask/lib/python3.4/ntpath.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/ntpath.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/ntpath.py b/v1/flask/lib/python3.4/ntpath.py new file mode 100644 index 0000000..992970a --- /dev/null +++ b/v1/flask/lib/python3.4/ntpath.py @@ -0,0 +1,625 @@ +# Module 'ntpath' -- common operations on WinNT/Win95 pathnames +"""Common pathname manipulations, WindowsNT/95 version. + +Instead of importing this module directly, import os and refer to this +module as os.path. 
+""" + +import os +import sys +import stat +import genericpath +from genericpath import * + +__all__ = ["normcase","isabs","join","splitdrive","split","splitext", + "basename","dirname","commonprefix","getsize","getmtime", + "getatime","getctime", "islink","exists","lexists","isdir","isfile", + "ismount", "expanduser","expandvars","normpath","abspath", + "splitunc","curdir","pardir","sep","pathsep","defpath","altsep", + "extsep","devnull","realpath","supports_unicode_filenames","relpath", + "samefile", "sameopenfile", "samestat",] + +# strings representing various path-related bits and pieces +# These are primarily for export; internally, they are hardcoded. +curdir = '.' +pardir = '..' +extsep = '.' +sep = '\\' +pathsep = ';' +altsep = '/' +defpath = '.;C:\\bin' +if 'ce' in sys.builtin_module_names: + defpath = '\\Windows' +devnull = 'nul' + +def _get_empty(path): + if isinstance(path, bytes): + return b'' + else: + return '' + +def _get_sep(path): + if isinstance(path, bytes): + return b'\\' + else: + return '\\' + +def _get_altsep(path): + if isinstance(path, bytes): + return b'/' + else: + return '/' + +def _get_bothseps(path): + if isinstance(path, bytes): + return b'\\/' + else: + return '\\/' + +def _get_dot(path): + if isinstance(path, bytes): + return b'.' + else: + return '.' + +def _get_colon(path): + if isinstance(path, bytes): + return b':' + else: + return ':' + +def _get_special(path): + if isinstance(path, bytes): + return (b'\\\\.\\', b'\\\\?\\') + else: + return ('\\\\.\\', '\\\\?\\') + +# Normalize the case of a pathname and map slashes to backslashes. +# Other normalizations (such as optimizing '../' away) are not done +# (this is done by normpath). + +def normcase(s): + """Normalize case of pathname. + + Makes all characters lowercase and all slashes into backslashes.""" + if not isinstance(s, (bytes, str)): + raise TypeError("normcase() argument must be str or bytes, " + "not '{}'".format(s.__class__.__name__)) + return s.replace(_get_altsep(s), _get_sep(s)).lower() + + +# Return whether a path is absolute. +# Trivial in Posix, harder on Windows. +# For Windows it is absolute if it starts with a slash or backslash (current +# volume), or if a pathname after the volume-letter-and-colon or UNC-resource +# starts with a slash or backslash. + +def isabs(s): + """Test whether a path is absolute""" + s = splitdrive(s)[1] + return len(s) > 0 and s[:1] in _get_bothseps(s) + + +# Join two (or more) paths. 
+def join(path, *paths): + sep = _get_sep(path) + seps = _get_bothseps(path) + colon = _get_colon(path) + result_drive, result_path = splitdrive(path) + for p in paths: + p_drive, p_path = splitdrive(p) + if p_path and p_path[0] in seps: + # Second path is absolute + if p_drive or not result_drive: + result_drive = p_drive + result_path = p_path + continue + elif p_drive and p_drive != result_drive: + if p_drive.lower() != result_drive.lower(): + # Different drives => ignore the first path entirely + result_drive = p_drive + result_path = p_path + continue + # Same drive in different case + result_drive = p_drive + # Second path is relative to the first + if result_path and result_path[-1] not in seps: + result_path = result_path + sep + result_path = result_path + p_path + ## add separator between UNC and non-absolute path + if (result_path and result_path[0] not in seps and + result_drive and result_drive[-1:] != colon): + return result_drive + sep + result_path + return result_drive + result_path + + +# Split a path in a drive specification (a drive letter followed by a +# colon) and the path specification. +# It is always true that drivespec + pathspec == p +def splitdrive(p): + """Split a pathname into drive/UNC sharepoint and relative path specifiers. + Returns a 2-tuple (drive_or_unc, path); either part may be empty. + + If you assign + result = splitdrive(p) + It is always true that: + result[0] + result[1] == p + + If the path contained a drive letter, drive_or_unc will contain everything + up to and including the colon. e.g. splitdrive("c:/dir") returns ("c:", "/dir") + + If the path contained a UNC path, the drive_or_unc will contain the host name + and share up to but not including the fourth directory separator character. + e.g. splitdrive("//host/computer/dir") returns ("//host/computer", "/dir") + + Paths cannot contain both a drive letter and a UNC path. + + """ + empty = _get_empty(p) + if len(p) > 1: + sep = _get_sep(p) + normp = p.replace(_get_altsep(p), sep) + if (normp[0:2] == sep*2) and (normp[2:3] != sep): + # is a UNC path: + # vvvvvvvvvvvvvvvvvvvv drive letter or UNC path + # \\machine\mountpoint\directory\etc\... + # directory ^^^^^^^^^^^^^^^ + index = normp.find(sep, 2) + if index == -1: + return empty, p + index2 = normp.find(sep, index + 1) + # a UNC path can't have two slashes in a row + # (after the initial two) + if index2 == index + 1: + return empty, p + if index2 == -1: + index2 = len(p) + return p[:index2], p[index2:] + if normp[1:2] == _get_colon(p): + return p[:2], p[2:] + return empty, p + + +# Parse UNC paths +def splitunc(p): + """Deprecated since Python 3.1. Please use splitdrive() instead; + it now handles UNC paths. + + Split a pathname into UNC mount point and relative path specifiers. + + Return a 2-tuple (unc, rest); either part may be empty. + If unc is not empty, it has the form '//host/mount' (or similar + using backslashes). unc+rest is always the input path. + Paths containing drive letters never have an UNC part. + """ + import warnings + warnings.warn("ntpath.splitunc is deprecated, use ntpath.splitdrive instead", + DeprecationWarning, 2) + drive, path = splitdrive(p) + if len(drive) == 2: + # Drive letter present + return p[:0], p + return drive, path + + +# Split a path in head (everything up to the last '/') and tail (the +# rest). After the trailing '/' is stripped, the invariant +# join(head, tail) == p holds. +# The resulting head won't end in '/' unless it is the root. + +def split(p): + """Split a pathname. 
+ + Return tuple (head, tail) where tail is everything after the final slash. + Either part may be empty.""" + + seps = _get_bothseps(p) + d, p = splitdrive(p) + # set i to index beyond p's last slash + i = len(p) + while i and p[i-1] not in seps: + i -= 1 + head, tail = p[:i], p[i:] # now tail has no slashes + # remove trailing slashes from head, unless it's all slashes + head2 = head + while head2 and head2[-1:] in seps: + head2 = head2[:-1] + head = head2 or head + return d + head, tail + + +# Split a path in root and extension. +# The extension is everything starting at the last dot in the last +# pathname component; the root is everything before that. +# It is always true that root + ext == p. + +def splitext(p): + return genericpath._splitext(p, _get_sep(p), _get_altsep(p), + _get_dot(p)) +splitext.__doc__ = genericpath._splitext.__doc__ + + +# Return the tail (basename) part of a path. + +def basename(p): + """Returns the final component of a pathname""" + return split(p)[1] + + +# Return the head (dirname) part of a path. + +def dirname(p): + """Returns the directory component of a pathname""" + return split(p)[0] + +# Is a path a symbolic link? +# This will always return false on systems where os.lstat doesn't exist. + +def islink(path): + """Test whether a path is a symbolic link. + This will always return false for Windows prior to 6.0. + """ + try: + st = os.lstat(path) + except (OSError, AttributeError): + return False + return stat.S_ISLNK(st.st_mode) + +# Being true for dangling symbolic links is also useful. + +def lexists(path): + """Test whether a path exists. Returns True for broken symbolic links""" + try: + st = os.lstat(path) + except OSError: + return False + return True + +# Is a path a mount point? +# Any drive letter root (eg c:\) +# Any share UNC (eg \\server\share) +# Any volume mounted on a filesystem folder +# +# No one method detects all three situations. Historically we've lexically +# detected drive letter roots and share UNCs. The canonical approach to +# detecting mounted volumes (querying the reparse tag) fails for the most +# common case: drive letter roots. The alternative which uses GetVolumePathName +# fails if the drive letter is the result of a SUBST. +try: + from nt import _getvolumepathname +except ImportError: + _getvolumepathname = None +def ismount(path): + """Test whether a path is a mount point (a drive root, the root of a + share, or a mounted volume)""" + seps = _get_bothseps(path) + path = abspath(path) + root, rest = splitdrive(path) + if root and root[0] in seps: + return (not rest) or (rest in seps) + if rest in seps: + return True + + if _getvolumepathname: + return path.rstrip(seps) == _getvolumepathname(path).rstrip(seps) + else: + return False + + +# Expand paths beginning with '~' or '~user'. +# '~' means $HOME; '~user' means that user's home directory. +# If the path doesn't begin with '~', or if the user or $HOME is unknown, +# the path is returned unchanged (leaving error reporting to whatever +# function is called with the expanded path as argument). +# See also module 'glob' for expansion of *, ? and [...] in pathnames. +# (A function should also be defined to do full *sh-style environment +# variable expansion.) + +def expanduser(path): + """Expand ~ and ~user constructs. 
+ + If user or $HOME is unknown, do nothing.""" + if isinstance(path, bytes): + tilde = b'~' + else: + tilde = '~' + if not path.startswith(tilde): + return path + i, n = 1, len(path) + while i < n and path[i] not in _get_bothseps(path): + i += 1 + + if 'HOME' in os.environ: + userhome = os.environ['HOME'] + elif 'USERPROFILE' in os.environ: + userhome = os.environ['USERPROFILE'] + elif not 'HOMEPATH' in os.environ: + return path + else: + try: + drive = os.environ['HOMEDRIVE'] + except KeyError: + drive = '' + userhome = join(drive, os.environ['HOMEPATH']) + + if isinstance(path, bytes): + userhome = userhome.encode(sys.getfilesystemencoding()) + + if i != 1: #~user + userhome = join(dirname(userhome), path[1:i]) + + return userhome + path[i:] + + +# Expand paths containing shell variable substitutions. +# The following rules apply: +# - no expansion within single quotes +# - '$$' is translated into '$' +# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2% +# - ${varname} is accepted. +# - $varname is accepted. +# - %varname% is accepted. +# - varnames can be made out of letters, digits and the characters '_-' +# (though is not verified in the ${varname} and %varname% cases) +# XXX With COMMAND.COM you can use any characters in a variable name, +# XXX except '^|<>='. + +def expandvars(path): + """Expand shell variables of the forms $var, ${var} and %var%. + + Unknown variables are left unchanged.""" + if isinstance(path, bytes): + if ord('$') not in path and ord('%') not in path: + return path + import string + varchars = bytes(string.ascii_letters + string.digits + '_-', 'ascii') + quote = b'\'' + percent = b'%' + brace = b'{' + dollar = b'$' + environ = getattr(os, 'environb', None) + else: + if '$' not in path and '%' not in path: + return path + import string + varchars = string.ascii_letters + string.digits + '_-' + quote = '\'' + percent = '%' + brace = '{' + dollar = '$' + environ = os.environ + res = path[:0] + index = 0 + pathlen = len(path) + while index < pathlen: + c = path[index:index+1] + if c == quote: # no expansion within single quotes + path = path[index + 1:] + pathlen = len(path) + try: + index = path.index(c) + res += c + path[:index + 1] + except ValueError: + res += c + path + index = pathlen - 1 + elif c == percent: # variable or '%' + if path[index + 1:index + 2] == percent: + res += c + index += 1 + else: + path = path[index+1:] + pathlen = len(path) + try: + index = path.index(percent) + except ValueError: + res += percent + path + index = pathlen - 1 + else: + var = path[:index] + try: + if environ is None: + value = os.fsencode(os.environ[os.fsdecode(var)]) + else: + value = environ[var] + except KeyError: + value = percent + var + percent + res += value + elif c == dollar: # variable or '$$' + if path[index + 1:index + 2] == dollar: + res += c + index += 1 + elif path[index + 1:index + 2] == brace: + path = path[index+2:] + pathlen = len(path) + try: + if isinstance(path, bytes): + index = path.index(b'}') + else: + index = path.index('}') + except ValueError: + if isinstance(path, bytes): + res += b'${' + path + else: + res += '${' + path + index = pathlen - 1 + else: + var = path[:index] + try: + if environ is None: + value = os.fsencode(os.environ[os.fsdecode(var)]) + else: + value = environ[var] + except KeyError: + if isinstance(path, bytes): + value = b'${' + var + b'}' + else: + value = '${' + var + '}' + res += value + else: + var = path[:0] + index += 1 + c = path[index:index + 1] + while c and c in varchars: + var += c + index += 1 + 
c = path[index:index + 1] + try: + if environ is None: + value = os.fsencode(os.environ[os.fsdecode(var)]) + else: + value = environ[var] + except KeyError: + value = dollar + var + res += value + if c: + index -= 1 + else: + res += c + index += 1 + return res + + +# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B. +# Previously, this function also truncated pathnames to 8+3 format, +# but as this module is called "ntpath", that's obviously wrong! + +def normpath(path): + """Normalize path, eliminating double slashes, etc.""" + sep = _get_sep(path) + dotdot = _get_dot(path) * 2 + special_prefixes = _get_special(path) + if path.startswith(special_prefixes): + # in the case of paths with these prefixes: + # \\.\ -> device names + # \\?\ -> literal paths + # do not do any normalization, but return the path unchanged + return path + path = path.replace(_get_altsep(path), sep) + prefix, path = splitdrive(path) + + # collapse initial backslashes + if path.startswith(sep): + prefix += sep + path = path.lstrip(sep) + + comps = path.split(sep) + i = 0 + while i < len(comps): + if not comps[i] or comps[i] == _get_dot(path): + del comps[i] + elif comps[i] == dotdot: + if i > 0 and comps[i-1] != dotdot: + del comps[i-1:i+1] + i -= 1 + elif i == 0 and prefix.endswith(_get_sep(path)): + del comps[i] + else: + i += 1 + else: + i += 1 + # If the path is now empty, substitute '.' + if not prefix and not comps: + comps.append(_get_dot(path)) + return prefix + sep.join(comps) + + +# Return an absolute path. +try: + from nt import _getfullpathname + +except ImportError: # not running on Windows - mock up something sensible + def abspath(path): + """Return the absolute version of a path.""" + if not isabs(path): + if isinstance(path, bytes): + cwd = os.getcwdb() + else: + cwd = os.getcwd() + path = join(cwd, path) + return normpath(path) + +else: # use native Windows method on Windows + def abspath(path): + """Return the absolute version of a path.""" + + if path: # Empty path must return current working directory. + try: + path = _getfullpathname(path) + except OSError: + pass # Bad path - return unchanged. + elif isinstance(path, bytes): + path = os.getcwdb() + else: + path = os.getcwd() + return normpath(path) + +# realpath is a no-op on systems without islink support +realpath = abspath +# Win9x family and earlier have no Unicode filename support. +supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and + sys.getwindowsversion()[3] >= 2) + +def relpath(path, start=curdir): + """Return a relative version of a path""" + sep = _get_sep(path) + + if start is curdir: + start = _get_dot(path) + + if not path: + raise ValueError("no path specified") + + start_abs = abspath(normpath(start)) + path_abs = abspath(normpath(path)) + start_drive, start_rest = splitdrive(start_abs) + path_drive, path_rest = splitdrive(path_abs) + if normcase(start_drive) != normcase(path_drive): + error = "path is on mount '{0}', start on mount '{1}'".format( + path_drive, start_drive) + raise ValueError(error) + + start_list = [x for x in start_rest.split(sep) if x] + path_list = [x for x in path_rest.split(sep) if x] + # Work out how much of the filepath is shared by start and path. + i = 0 + for e1, e2 in zip(start_list, path_list): + if normcase(e1) != normcase(e2): + break + i += 1 + + if isinstance(path, bytes): + pardir = b'..' + else: + pardir = '..' 
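    # At this point i is the length of the common, case-insensitive prefix of
    # start_list and path_list. The relative path climbs out of the unshared
    # part of start with '..' components, then descends into the unshared tail
    # of path; e.g. (illustrative values)
    # relpath('C:\\a\\b\\c', 'C:\\a\\x') -> '..\\b\\c'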
+ rel_list = [pardir] * (len(start_list)-i) + path_list[i:] + if not rel_list: + return _get_dot(path) + return join(*rel_list) + + +# determine if two files are in fact the same file +try: + # GetFinalPathNameByHandle is available starting with Windows 6.0. + # Windows XP and non-Windows OS'es will mock _getfinalpathname. + if sys.getwindowsversion()[:2] >= (6, 0): + from nt import _getfinalpathname + else: + raise ImportError +except (AttributeError, ImportError): + # On Windows XP and earlier, two files are the same if their absolute + # pathnames are the same. + # Non-Windows operating systems fake this method with an XP + # approximation. + def _getfinalpathname(f): + return normcase(abspath(f)) + + +try: + # The genericpath.isdir implementation uses os.stat and checks the mode + # attribute to tell whether or not the path is a directory. + # This is overkill on Windows - just pass the path to GetFileAttributes + # and check the attribute from there. + from nt import _isdir as isdir +except ImportError: + # Use genericpath.isdir as imported above. + pass diff --git a/v1/flask/lib/python3.4/operator.py b/v1/flask/lib/python3.4/operator.py deleted file mode 120000 index 2de0a1c..0000000 --- a/v1/flask/lib/python3.4/operator.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/operator.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/operator.py b/v1/flask/lib/python3.4/operator.py new file mode 100644 index 0000000..b60349f --- /dev/null +++ b/v1/flask/lib/python3.4/operator.py @@ -0,0 +1,411 @@ +""" +Operator Interface + +This module exports a set of functions corresponding to the intrinsic +operators of Python. For example, operator.add(x, y) is equivalent +to the expression x+y. The function names are those used for special +methods; variants without leading and trailing '__' are also provided +for convenience. + +This is the pure Python implementation of the module. +""" + +__all__ = ['abs', 'add', 'and_', 'attrgetter', 'concat', 'contains', 'countOf', + 'delitem', 'eq', 'floordiv', 'ge', 'getitem', 'gt', 'iadd', 'iand', + 'iconcat', 'ifloordiv', 'ilshift', 'imod', 'imul', 'index', + 'indexOf', 'inv', 'invert', 'ior', 'ipow', 'irshift', 'is_', + 'is_not', 'isub', 'itemgetter', 'itruediv', 'ixor', 'le', + 'length_hint', 'lshift', 'lt', 'methodcaller', 'mod', 'mul', 'ne', + 'neg', 'not_', 'or_', 'pos', 'pow', 'rshift', 'setitem', 'sub', + 'truediv', 'truth', 'xor'] + +from builtins import abs as _abs + + +# Comparison Operations *******************************************************# + +def lt(a, b): + "Same as a < b." + return a < b + +def le(a, b): + "Same as a <= b." + return a <= b + +def eq(a, b): + "Same as a == b." + return a == b + +def ne(a, b): + "Same as a != b." + return a != b + +def ge(a, b): + "Same as a >= b." + return a >= b + +def gt(a, b): + "Same as a > b." + return a > b + +# Logical Operations **********************************************************# + +def not_(a): + "Same as not a." + return not a + +def truth(a): + "Return True if a is true, False otherwise." + return True if a else False + +def is_(a, b): + "Same as a is b." + return a is b + +def is_not(a, b): + "Same as a is not b." + return a is not b + +# Mathematical/Bitwise Operations *********************************************# + +def abs(a): + "Same as abs(a)." + return _abs(a) + +def add(a, b): + "Same as a + b." + return a + b + +def and_(a, b): + "Same as a & b." + return a & b + +def floordiv(a, b): + "Same as a // b." 
+ return a // b + +def index(a): + "Same as a.__index__()." + return a.__index__() + +def inv(a): + "Same as ~a." + return ~a +invert = inv + +def lshift(a, b): + "Same as a << b." + return a << b + +def mod(a, b): + "Same as a % b." + return a % b + +def mul(a, b): + "Same as a * b." + return a * b + +def neg(a): + "Same as -a." + return -a + +def or_(a, b): + "Same as a | b." + return a | b + +def pos(a): + "Same as +a." + return +a + +def pow(a, b): + "Same as a ** b." + return a ** b + +def rshift(a, b): + "Same as a >> b." + return a >> b + +def sub(a, b): + "Same as a - b." + return a - b + +def truediv(a, b): + "Same as a / b." + return a / b + +def xor(a, b): + "Same as a ^ b." + return a ^ b + +# Sequence Operations *********************************************************# + +def concat(a, b): + "Same as a + b, for a and b sequences." + if not hasattr(a, '__getitem__'): + msg = "'%s' object can't be concatenated" % type(a).__name__ + raise TypeError(msg) + return a + b + +def contains(a, b): + "Same as b in a (note reversed operands)." + return b in a + +def countOf(a, b): + "Return the number of times b occurs in a." + count = 0 + for i in a: + if i == b: + count += 1 + return count + +def delitem(a, b): + "Same as del a[b]." + del a[b] + +def getitem(a, b): + "Same as a[b]." + return a[b] + +def indexOf(a, b): + "Return the first index of b in a." + for i, j in enumerate(a): + if j == b: + return i + else: + raise ValueError('sequence.index(x): x not in sequence') + +def setitem(a, b, c): + "Same as a[b] = c." + a[b] = c + +def length_hint(obj, default=0): + """ + Return an estimate of the number of items in obj. + This is useful for presizing containers when building from an iterable. + + If the object supports len(), the result will be exact. Otherwise, it may + over- or under-estimate by an arbitrary amount. The result will be an + integer >= 0. + """ + if not isinstance(default, int): + msg = ("'%s' object cannot be interpreted as an integer" % + type(default).__name__) + raise TypeError(msg) + + try: + return len(obj) + except TypeError: + pass + + try: + hint = type(obj).__length_hint__ + except AttributeError: + return default + + try: + val = hint(obj) + except TypeError: + return default + if val is NotImplemented: + return default + if not isinstance(val, int): + msg = ('__length_hint__ must be integer, not %s' % + type(val).__name__) + raise TypeError(msg) + if val < 0: + msg = '__length_hint__() should return >= 0' + raise ValueError(msg) + return val + +# Generalized Lookup Objects **************************************************# + +class attrgetter: + """ + Return a callable object that fetches the given attribute(s) from its operand. + After f = attrgetter('name'), the call f(r) returns r.name. + After g = attrgetter('name', 'date'), the call g(r) returns (r.name, r.date). + After h = attrgetter('name.first', 'name.last'), the call h(r) returns + (r.name.first, r.name.last). + """ + def __init__(self, attr, *attrs): + if not attrs: + if not isinstance(attr, str): + raise TypeError('attribute name must be a string') + names = attr.split('.') + def func(obj): + for name in names: + obj = getattr(obj, name) + return obj + self._call = func + else: + getters = tuple(map(attrgetter, (attr,) + attrs)) + def func(obj): + return tuple(getter(obj) for getter in getters) + self._call = func + + def __call__(self, obj): + return self._call(obj) + +class itemgetter: + """ + Return a callable object that fetches the given item(s) from its operand. 
+ After f = itemgetter(2), the call f(r) returns r[2]. + After g = itemgetter(2, 5, 3), the call g(r) returns (r[2], r[5], r[3]) + """ + def __init__(self, item, *items): + if not items: + def func(obj): + return obj[item] + self._call = func + else: + items = (item,) + items + def func(obj): + return tuple(obj[i] for i in items) + self._call = func + + def __call__(self, obj): + return self._call(obj) + +class methodcaller: + """ + Return a callable object that calls the given method on its operand. + After f = methodcaller('name'), the call f(r) returns r.name(). + After g = methodcaller('name', 'date', foo=1), the call g(r) returns + r.name('date', foo=1). + """ + + def __init__(*args, **kwargs): + if len(args) < 2: + msg = "methodcaller needs at least one argument, the method name" + raise TypeError(msg) + self = args[0] + self._name = args[1] + self._args = args[2:] + self._kwargs = kwargs + + def __call__(self, obj): + return getattr(obj, self._name)(*self._args, **self._kwargs) + +# In-place Operations *********************************************************# + +def iadd(a, b): + "Same as a += b." + a += b + return a + +def iand(a, b): + "Same as a &= b." + a &= b + return a + +def iconcat(a, b): + "Same as a += b, for a and b sequences." + if not hasattr(a, '__getitem__'): + msg = "'%s' object can't be concatenated" % type(a).__name__ + raise TypeError(msg) + a += b + return a + +def ifloordiv(a, b): + "Same as a //= b." + a //= b + return a + +def ilshift(a, b): + "Same as a <<= b." + a <<= b + return a + +def imod(a, b): + "Same as a %= b." + a %= b + return a + +def imul(a, b): + "Same as a *= b." + a *= b + return a + +def ior(a, b): + "Same as a |= b." + a |= b + return a + +def ipow(a, b): + "Same as a **= b." + a **=b + return a + +def irshift(a, b): + "Same as a >>= b." + a >>= b + return a + +def isub(a, b): + "Same as a -= b." + a -= b + return a + +def itruediv(a, b): + "Same as a /= b." + a /= b + return a + +def ixor(a, b): + "Same as a ^= b." 
+ a ^= b + return a + + +try: + from _operator import * +except ImportError: + pass +else: + from _operator import __doc__ + +# All of these "__func__ = func" assignments have to happen after importing +# from _operator to make sure they're set to the right function +__lt__ = lt +__le__ = le +__eq__ = eq +__ne__ = ne +__ge__ = ge +__gt__ = gt +__not__ = not_ +__abs__ = abs +__add__ = add +__and__ = and_ +__floordiv__ = floordiv +__index__ = index +__inv__ = inv +__invert__ = invert +__lshift__ = lshift +__mod__ = mod +__mul__ = mul +__neg__ = neg +__or__ = or_ +__pos__ = pos +__pow__ = pow +__rshift__ = rshift +__sub__ = sub +__truediv__ = truediv +__xor__ = xor +__concat__ = concat +__contains__ = contains +__delitem__ = delitem +__getitem__ = getitem +__setitem__ = setitem +__iadd__ = iadd +__iand__ = iand +__iconcat__ = iconcat +__ifloordiv__ = ifloordiv +__ilshift__ = ilshift +__imod__ = imod +__imul__ = imul +__ior__ = ior +__ipow__ = ipow +__irshift__ = irshift +__isub__ = isub +__itruediv__ = itruediv +__ixor__ = ixor diff --git a/v1/flask/lib/python3.4/os.py b/v1/flask/lib/python3.4/os.py deleted file mode 120000 index ebf5df2..0000000 --- a/v1/flask/lib/python3.4/os.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/os.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/os.py b/v1/flask/lib/python3.4/os.py new file mode 100644 index 0000000..27b241a --- /dev/null +++ b/v1/flask/lib/python3.4/os.py @@ -0,0 +1,982 @@ +r"""OS routines for NT or Posix depending on what system we're on. + +This exports: + - all functions from posix, nt or ce, e.g. unlink, stat, etc. + - os.path is either posixpath or ntpath + - os.name is either 'posix', 'nt' or 'ce'. + - os.curdir is a string representing the current directory ('.' or ':') + - os.pardir is a string representing the parent directory ('..' or '::') + - os.sep is the (or a most common) pathname separator ('/' or ':' or '\\') + - os.extsep is the extension separator (always '.') + - os.altsep is the alternate pathname separator (None or '/') + - os.pathsep is the component separator used in $PATH etc + - os.linesep is the line separator in text files ('\r' or '\n' or '\r\n') + - os.defpath is the default search path for executables + - os.devnull is the file path of the null device ('/dev/null', etc.) + +Programs that import and use 'os' stand a better chance of being +portable between different platforms. Of course, they must then +only use functions that are defined by all platforms (e.g., unlink +and opendir), and leave all pathname manipulation to os.path +(e.g., split and join). +""" + +#' + +import sys, errno +import stat as st + +_names = sys.builtin_module_names + +# Note: more names are added to __all__ later. +__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep", + "defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR", + "SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen", + "popen", "extsep"] + +def _exists(name): + return name in globals() + +def _get_exports_list(module): + try: + return list(module.__all__) + except AttributeError: + return [n for n in dir(module) if n[0] != '_'] + +# Any new dependencies of the os module and/or changes in path separator +# requires updating importlib as well. 
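The if/elif chain below binds the os module to posix, nt, or ce at import time; `os.name` records which branch ran, and `os.path` is bound to the matching path module. A small sketch of what that selection means for callers (results depend on the host platform):

    import os

    os.name                 # 'posix' on Linux/macOS, 'nt' on Windows
    os.path.join('a', 'b')  # 'a/b' under posixpath, 'a\\b' under ntpath
    os.linesep              # '\n' on posix, '\r\n' on nt and ce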
+if 'posix' in _names: + name = 'posix' + linesep = '\n' + from posix import * + try: + from posix import _exit + __all__.append('_exit') + except ImportError: + pass + import posixpath as path + + try: + from posix import _have_functions + except ImportError: + pass + +elif 'nt' in _names: + name = 'nt' + linesep = '\r\n' + from nt import * + try: + from nt import _exit + __all__.append('_exit') + except ImportError: + pass + import ntpath as path + + import nt + __all__.extend(_get_exports_list(nt)) + del nt + + try: + from nt import _have_functions + except ImportError: + pass + +elif 'ce' in _names: + name = 'ce' + linesep = '\r\n' + from ce import * + try: + from ce import _exit + __all__.append('_exit') + except ImportError: + pass + # We can use the standard Windows path. + import ntpath as path + + import ce + __all__.extend(_get_exports_list(ce)) + del ce + + try: + from ce import _have_functions + except ImportError: + pass + +else: + raise ImportError('no os specific module found') + +sys.modules['os.path'] = path +from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep, + devnull) + +del _names + + +if _exists("_have_functions"): + _globals = globals() + def _add(str, fn): + if (fn in _globals) and (str in _have_functions): + _set.add(_globals[fn]) + + _set = set() + _add("HAVE_FACCESSAT", "access") + _add("HAVE_FCHMODAT", "chmod") + _add("HAVE_FCHOWNAT", "chown") + _add("HAVE_FSTATAT", "stat") + _add("HAVE_FUTIMESAT", "utime") + _add("HAVE_LINKAT", "link") + _add("HAVE_MKDIRAT", "mkdir") + _add("HAVE_MKFIFOAT", "mkfifo") + _add("HAVE_MKNODAT", "mknod") + _add("HAVE_OPENAT", "open") + _add("HAVE_READLINKAT", "readlink") + _add("HAVE_RENAMEAT", "rename") + _add("HAVE_SYMLINKAT", "symlink") + _add("HAVE_UNLINKAT", "unlink") + _add("HAVE_UNLINKAT", "rmdir") + _add("HAVE_UTIMENSAT", "utime") + supports_dir_fd = _set + + _set = set() + _add("HAVE_FACCESSAT", "access") + supports_effective_ids = _set + + _set = set() + _add("HAVE_FCHDIR", "chdir") + _add("HAVE_FCHMOD", "chmod") + _add("HAVE_FCHOWN", "chown") + _add("HAVE_FDOPENDIR", "listdir") + _add("HAVE_FEXECVE", "execve") + _set.add(stat) # fstat always works + _add("HAVE_FTRUNCATE", "truncate") + _add("HAVE_FUTIMENS", "utime") + _add("HAVE_FUTIMES", "utime") + _add("HAVE_FPATHCONF", "pathconf") + if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3 + _add("HAVE_FSTATVFS", "statvfs") + supports_fd = _set + + _set = set() + _add("HAVE_FACCESSAT", "access") + # Some platforms don't support lchmod(). Often the function exists + # anyway, as a stub that always returns ENOSUP or perhaps EOPNOTSUPP. + # (No, I don't know why that's a good design.) ./configure will detect + # this and reject it--so HAVE_LCHMOD still won't be defined on such + # platforms. This is Very Helpful. + # + # However, sometimes platforms without a working lchmod() *do* have + # fchmodat(). (Examples: Linux kernel 3.2 with glibc 2.15, + # OpenIndiana 3.x.) And fchmodat() has a flag that theoretically makes + # it behave like lchmod(). So in theory it would be a suitable + # replacement for lchmod(). But when lchmod() doesn't work, fchmodat()'s + # flag doesn't work *either*. Sadly ./configure isn't sophisticated + # enough to detect this condition--it only determines whether or not + # fchmodat() minimally works. + # + # Therefore we simply ignore fchmodat() when deciding whether or not + # os.chmod supports follow_symlinks. Just checking lchmod() is + # sufficient. 
After all--if you have a working fchmodat(), your + # lchmod() almost certainly works too. + # + # _add("HAVE_FCHMODAT", "chmod") + _add("HAVE_FCHOWNAT", "chown") + _add("HAVE_FSTATAT", "stat") + _add("HAVE_LCHFLAGS", "chflags") + _add("HAVE_LCHMOD", "chmod") + if _exists("lchown"): # mac os x10.3 + _add("HAVE_LCHOWN", "chown") + _add("HAVE_LINKAT", "link") + _add("HAVE_LUTIMES", "utime") + _add("HAVE_LSTAT", "stat") + _add("HAVE_FSTATAT", "stat") + _add("HAVE_UTIMENSAT", "utime") + _add("MS_WINDOWS", "stat") + supports_follow_symlinks = _set + + del _set + del _have_functions + del _globals + del _add + + +# Python uses fixed values for the SEEK_ constants; they are mapped +# to native constants if necessary in posixmodule.c +# Other possible SEEK values are directly imported from posixmodule.c +SEEK_SET = 0 +SEEK_CUR = 1 +SEEK_END = 2 + +# Super directory utilities. +# (Inspired by Eric Raymond; the doc strings are mostly his) + +def makedirs(name, mode=0o777, exist_ok=False): + """makedirs(name [, mode=0o777][, exist_ok=False]) + + Super-mkdir; create a leaf directory and all intermediate ones. Works like + mkdir, except that any intermediate path segment (not just the rightmost) + will be created if it does not exist. If the target directory already + exists, raise an OSError if exist_ok is False. Otherwise no exception is + raised. This is recursive. + + """ + head, tail = path.split(name) + if not tail: + head, tail = path.split(head) + if head and tail and not path.exists(head): + try: + makedirs(head, mode, exist_ok) + except FileExistsError: + # Defeats race condition when another thread created the path + pass + cdir = curdir + if isinstance(tail, bytes): + cdir = bytes(curdir, 'ASCII') + if tail == cdir: # xxx/newdir/. exists if xxx/newdir exists + return + try: + mkdir(name, mode) + except OSError: + # Cannot rely on checking for EEXIST, since the operating system + # could give priority to other errors like EACCES or EROFS + if not exist_ok or not path.isdir(name): + raise + +def removedirs(name): + """removedirs(name) + + Super-rmdir; remove a leaf directory and all empty intermediate + ones. Works like rmdir except that, if the leaf directory is + successfully removed, directories corresponding to rightmost path + segments will be pruned away until either the whole path is + consumed or an error occurs. Errors during this latter phase are + ignored -- they generally mean that a directory was not empty. + + """ + rmdir(name) + head, tail = path.split(name) + if not tail: + head, tail = path.split(head) + while head and tail: + try: + rmdir(head) + except OSError: + break + head, tail = path.split(head) + +def renames(old, new): + """renames(old, new) + + Super-rename; create directories as necessary and delete any left + empty. Works like rename, except creation of any intermediate + directories needed to make the new pathname good is attempted + first. After the rename, directories corresponding to rightmost + path segments of the old name will be pruned until either the + whole path is consumed or a nonempty directory is found. + + Note: this function can fail with the new directory structure made + if you lack permissions needed to unlink the leaf directory or + file. 
+ + """ + head, tail = path.split(new) + if head and tail and not path.exists(head): + makedirs(head) + rename(old, new) + head, tail = path.split(old) + if head and tail: + try: + removedirs(head) + except OSError: + pass + +__all__.extend(["makedirs", "removedirs", "renames"]) + +def walk(top, topdown=True, onerror=None, followlinks=False): + """Directory tree generator. + + For each directory in the directory tree rooted at top (including top + itself, but excluding '.' and '..'), yields a 3-tuple + + dirpath, dirnames, filenames + + dirpath is a string, the path to the directory. dirnames is a list of + the names of the subdirectories in dirpath (excluding '.' and '..'). + filenames is a list of the names of the non-directory files in dirpath. + Note that the names in the lists are just names, with no path components. + To get a full path (which begins with top) to a file or directory in + dirpath, do os.path.join(dirpath, name). + + If optional arg 'topdown' is true or not specified, the triple for a + directory is generated before the triples for any of its subdirectories + (directories are generated top down). If topdown is false, the triple + for a directory is generated after the triples for all of its + subdirectories (directories are generated bottom up). + + When topdown is true, the caller can modify the dirnames list in-place + (e.g., via del or slice assignment), and walk will only recurse into the + subdirectories whose names remain in dirnames; this can be used to prune the + search, or to impose a specific order of visiting. Modifying dirnames when + topdown is false is ineffective, since the directories in dirnames have + already been generated by the time dirnames itself is generated. No matter + the value of topdown, the list of subdirectories is retrieved before the + tuples for the directory and its subdirectories are generated. + + By default errors from the os.listdir() call are ignored. If + optional arg 'onerror' is specified, it should be a function; it + will be called with one argument, an OSError instance. It can + report the error to continue with the walk, or raise the exception + to abort the walk. Note that the filename is available as the + filename attribute of the exception object. + + By default, os.walk does not follow symbolic links to subdirectories on + systems that support them. In order to get this functionality, set the + optional argument 'followlinks' to true. + + Caution: if you pass a relative pathname for top, don't change the + current working directory between resumptions of walk. walk never + changes the current directory, and assumes that the client doesn't + either. + + Example: + + import os + from os.path import join, getsize + for root, dirs, files in os.walk('python/Lib/email'): + print(root, "consumes", end="") + print(sum([getsize(join(root, name)) for name in files]), end="") + print("bytes in", len(files), "non-directory files") + if 'CVS' in dirs: + dirs.remove('CVS') # don't visit CVS directories + + """ + + islink, join, isdir = path.islink, path.join, path.isdir + + # We may not have read permission for top, in which case we can't + # get a list of the files the directory contains. os.walk + # always suppressed the exception then, rather than blow up for a + # minor reason when (say) a thousand readable directories are still + # left to visit. That logic is copied here. + try: + # Note that listdir is global in this module due + # to earlier import-*. 
+ names = listdir(top) + except OSError as err: + if onerror is not None: + onerror(err) + return + + dirs, nondirs = [], [] + for name in names: + if isdir(join(top, name)): + dirs.append(name) + else: + nondirs.append(name) + + if topdown: + yield top, dirs, nondirs + for name in dirs: + new_path = join(top, name) + if followlinks or not islink(new_path): + yield from walk(new_path, topdown, onerror, followlinks) + if not topdown: + yield top, dirs, nondirs + +__all__.append("walk") + +if {open, stat} <= supports_dir_fd and {listdir, stat} <= supports_fd: + + def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None): + """Directory tree generator. + + This behaves exactly like walk(), except that it yields a 4-tuple + + dirpath, dirnames, filenames, dirfd + + `dirpath`, `dirnames` and `filenames` are identical to walk() output, + and `dirfd` is a file descriptor referring to the directory `dirpath`. + + The advantage of fwalk() over walk() is that it's safe against symlink + races (when follow_symlinks is False). + + If dir_fd is not None, it should be a file descriptor open to a directory, + and top should be relative; top will then be relative to that directory. + (dir_fd is always supported for fwalk.) + + Caution: + Since fwalk() yields file descriptors, those are only valid until the + next iteration step, so you should dup() them if you want to keep them + for a longer period. + + Example: + + import os + for root, dirs, files, rootfd in os.fwalk('python/Lib/email'): + print(root, "consumes", end="") + print(sum([os.stat(name, dir_fd=rootfd).st_size for name in files]), + end="") + print("bytes in", len(files), "non-directory files") + if 'CVS' in dirs: + dirs.remove('CVS') # don't visit CVS directories + """ + # Note: To guard against symlink races, we use the standard + # lstat()/open()/fstat() trick. + orig_st = stat(top, follow_symlinks=False, dir_fd=dir_fd) + topfd = open(top, O_RDONLY, dir_fd=dir_fd) + try: + if (follow_symlinks or (st.S_ISDIR(orig_st.st_mode) and + path.samestat(orig_st, stat(topfd)))): + yield from _fwalk(topfd, top, topdown, onerror, follow_symlinks) + finally: + close(topfd) + + def _fwalk(topfd, toppath, topdown, onerror, follow_symlinks): + # Note: This uses O(depth of the directory tree) file descriptors: if + # necessary, it can be adapted to only require O(1) FDs, see issue + # #13734. + + names = listdir(topfd) + dirs, nondirs = [], [] + for name in names: + try: + # Here, we don't use AT_SYMLINK_NOFOLLOW to be consistent with + # walk() which reports symlinks to directories as directories. + # We do however check for symlinks before recursing into + # a subdirectory. 
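    # stat() below resolves `name` relative to the already-open directory fd
    # (dir_fd=topfd) instead of re-resolving a full path from the root, which
    # is what makes fwalk() robust against concurrent renames of ancestors.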
+ if st.S_ISDIR(stat(name, dir_fd=topfd).st_mode): + dirs.append(name) + else: + nondirs.append(name) + except FileNotFoundError: + try: + # Add dangling symlinks, ignore disappeared files + if st.S_ISLNK(stat(name, dir_fd=topfd, follow_symlinks=False) + .st_mode): + nondirs.append(name) + except FileNotFoundError: + continue + + if topdown: + yield toppath, dirs, nondirs, topfd + + for name in dirs: + try: + orig_st = stat(name, dir_fd=topfd, follow_symlinks=follow_symlinks) + dirfd = open(name, O_RDONLY, dir_fd=topfd) + except OSError as err: + if onerror is not None: + onerror(err) + return + try: + if follow_symlinks or path.samestat(orig_st, stat(dirfd)): + dirpath = path.join(toppath, name) + yield from _fwalk(dirfd, dirpath, topdown, onerror, follow_symlinks) + finally: + close(dirfd) + + if not topdown: + yield toppath, dirs, nondirs, topfd + + __all__.append("fwalk") + +# Make sure os.environ exists, at least +try: + environ +except NameError: + environ = {} + +def execl(file, *args): + """execl(file, *args) + + Execute the executable file with argument list args, replacing the + current process. """ + execv(file, args) + +def execle(file, *args): + """execle(file, *args, env) + + Execute the executable file with argument list args and + environment env, replacing the current process. """ + env = args[-1] + execve(file, args[:-1], env) + +def execlp(file, *args): + """execlp(file, *args) + + Execute the executable file (which is searched for along $PATH) + with argument list args, replacing the current process. """ + execvp(file, args) + +def execlpe(file, *args): + """execlpe(file, *args, env) + + Execute the executable file (which is searched for along $PATH) + with argument list args and environment env, replacing the current + process. """ + env = args[-1] + execvpe(file, args[:-1], env) + +def execvp(file, args): + """execvp(file, args) + + Execute the executable file (which is searched for along $PATH) + with argument list args, replacing the current process. + args may be a list or tuple of strings. """ + _execvpe(file, args) + +def execvpe(file, args, env): + """execvpe(file, args, env) + + Execute the executable file (which is searched for along $PATH) + with argument list args and environment env , replacing the + current process. + args may be a list or tuple of strings. """ + _execvpe(file, args, env) + +__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"]) + +def _execvpe(file, args, env=None): + if env is not None: + exec_func = execve + argrest = (args, env) + else: + exec_func = execv + argrest = (args,) + env = environ + + head, tail = path.split(file) + if head: + exec_func(file, *argrest) + return + last_exc = saved_exc = None + saved_tb = None + path_list = get_exec_path(env) + if name != 'nt': + file = fsencode(file) + path_list = map(fsencode, path_list) + for dir in path_list: + fullname = path.join(dir, file) + try: + exec_func(fullname, *argrest) + except OSError as e: + last_exc = e + tb = sys.exc_info()[2] + if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR + and saved_exc is None): + saved_exc = e + saved_tb = tb + if saved_exc: + raise saved_exc.with_traceback(saved_tb) + raise last_exc.with_traceback(tb) + + +def get_exec_path(env=None): + """Returns the sequence of directories that will be searched for the + named executable (similar to a shell) when launching a process. + + *env* must be an environment variable dict or None. If *env* is None, + os.environ will be used. 
+ """ + # Use a local import instead of a global import to limit the number of + # modules loaded at startup: the os module is always loaded at startup by + # Python. It may also avoid a bootstrap issue. + import warnings + + if env is None: + env = environ + + # {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a + # BytesWarning when using python -b or python -bb: ignore the warning + with warnings.catch_warnings(): + warnings.simplefilter("ignore", BytesWarning) + + try: + path_list = env.get('PATH') + except TypeError: + path_list = None + + if supports_bytes_environ: + try: + path_listb = env[b'PATH'] + except (KeyError, TypeError): + pass + else: + if path_list is not None: + raise ValueError( + "env cannot contain 'PATH' and b'PATH' keys") + path_list = path_listb + + if path_list is not None and isinstance(path_list, bytes): + path_list = fsdecode(path_list) + + if path_list is None: + path_list = defpath + return path_list.split(pathsep) + + +# Change environ to automatically call putenv(), unsetenv if they exist. +from _collections_abc import MutableMapping + +class _Environ(MutableMapping): + def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue, putenv, unsetenv): + self.encodekey = encodekey + self.decodekey = decodekey + self.encodevalue = encodevalue + self.decodevalue = decodevalue + self.putenv = putenv + self.unsetenv = unsetenv + self._data = data + + def __getitem__(self, key): + try: + value = self._data[self.encodekey(key)] + except KeyError: + # raise KeyError with the original key value + raise KeyError(key) from None + return self.decodevalue(value) + + def __setitem__(self, key, value): + key = self.encodekey(key) + value = self.encodevalue(value) + self.putenv(key, value) + self._data[key] = value + + def __delitem__(self, key): + encodedkey = self.encodekey(key) + self.unsetenv(encodedkey) + try: + del self._data[encodedkey] + except KeyError: + # raise KeyError with the original key value + raise KeyError(key) from None + + def __iter__(self): + for key in self._data: + yield self.decodekey(key) + + def __len__(self): + return len(self._data) + + def __repr__(self): + return 'environ({{{}}})'.format(', '.join( + ('{!r}: {!r}'.format(self.decodekey(key), self.decodevalue(value)) + for key, value in self._data.items()))) + + def copy(self): + return dict(self) + + def setdefault(self, key, value): + if key not in self: + self[key] = value + return self[key] + +try: + _putenv = putenv +except NameError: + _putenv = lambda key, value: None +else: + if "putenv" not in __all__: + __all__.append("putenv") + +try: + _unsetenv = unsetenv +except NameError: + _unsetenv = lambda key: _putenv(key, "") +else: + if "unsetenv" not in __all__: + __all__.append("unsetenv") + +def _createenviron(): + if name == 'nt': + # Where Env Var Names Must Be UPPERCASE + def check_str(value): + if not isinstance(value, str): + raise TypeError("str expected, not %s" % type(value).__name__) + return value + encode = check_str + decode = str + def encodekey(key): + return encode(key).upper() + data = {} + for key, value in environ.items(): + data[encodekey(key)] = value + else: + # Where Env Var Names Can Be Mixed Case + encoding = sys.getfilesystemencoding() + def encode(value): + if not isinstance(value, str): + raise TypeError("str expected, not %s" % type(value).__name__) + return value.encode(encoding, 'surrogateescape') + def decode(value): + return value.decode(encoding, 'surrogateescape') + encodekey = encode + data = environ + return _Environ(data, + 
encodekey, decode, + encode, decode, + _putenv, _unsetenv) + +# unicode environ +environ = _createenviron() +del _createenviron + + +def getenv(key, default=None): + """Get an environment variable, return None if it doesn't exist. + The optional second argument can specify an alternate default. + key, default and the result are str.""" + return environ.get(key, default) + +supports_bytes_environ = (name != 'nt') +__all__.extend(("getenv", "supports_bytes_environ")) + +if supports_bytes_environ: + def _check_bytes(value): + if not isinstance(value, bytes): + raise TypeError("bytes expected, not %s" % type(value).__name__) + return value + + # bytes environ + environb = _Environ(environ._data, + _check_bytes, bytes, + _check_bytes, bytes, + _putenv, _unsetenv) + del _check_bytes + + def getenvb(key, default=None): + """Get an environment variable, return None if it doesn't exist. + The optional second argument can specify an alternate default. + key, default and the result are bytes.""" + return environb.get(key, default) + + __all__.extend(("environb", "getenvb")) + +def _fscodec(): + encoding = sys.getfilesystemencoding() + if encoding == 'mbcs': + errors = 'strict' + else: + errors = 'surrogateescape' + + def fsencode(filename): + """ + Encode filename to the filesystem encoding with 'surrogateescape' error + handler, return bytes unchanged. On Windows, use 'strict' error handler if + the file system encoding is 'mbcs' (which is the default encoding). + """ + if isinstance(filename, bytes): + return filename + elif isinstance(filename, str): + return filename.encode(encoding, errors) + else: + raise TypeError("expect bytes or str, not %s" % type(filename).__name__) + + def fsdecode(filename): + """ + Decode filename from the filesystem encoding with 'surrogateescape' error + handler, return str unchanged. On Windows, use 'strict' error handler if + the file system encoding is 'mbcs' (which is the default encoding). + """ + if isinstance(filename, str): + return filename + elif isinstance(filename, bytes): + return filename.decode(encoding, errors) + else: + raise TypeError("expect bytes or str, not %s" % type(filename).__name__) + + return fsencode, fsdecode + +fsencode, fsdecode = _fscodec() +del _fscodec + +# Supply spawn*() (probably only for Unix) +if _exists("fork") and not _exists("spawnv") and _exists("execv"): + + P_WAIT = 0 + P_NOWAIT = P_NOWAITO = 1 + + __all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"]) + + # XXX Should we support P_DETACH? I suppose it could fork()**2 + # and close the std I/O streams. Also, P_OVERLAY is the same + # as execv*()? + + def _spawnvef(mode, file, args, env, func): + # Internal helper; func is the exec*() function to use + pid = fork() + if not pid: + # Child + try: + if env is None: + func(file, args) + else: + func(file, args, env) + except: + _exit(127) + else: + # Parent + if mode == P_NOWAIT: + return pid # Caller is responsible for waiting! + while 1: + wpid, sts = waitpid(pid, 0) + if WIFSTOPPED(sts): + continue + elif WIFSIGNALED(sts): + return -WTERMSIG(sts) + elif WIFEXITED(sts): + return WEXITSTATUS(sts) + else: + raise OSError("Not stopped, signaled or exited???") + + def spawnv(mode, file, args): + """spawnv(mode, file, args) -> integer + +Execute file with arguments from args in a subprocess. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. 
""" + return _spawnvef(mode, file, args, None, execv) + + def spawnve(mode, file, args, env): + """spawnve(mode, file, args, env) -> integer + +Execute file with arguments from args in a subprocess with the +specified environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return _spawnvef(mode, file, args, env, execve) + + # Note: spawnvp[e] is't currently supported on Windows + + def spawnvp(mode, file, args): + """spawnvp(mode, file, args) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return _spawnvef(mode, file, args, None, execvp) + + def spawnvpe(mode, file, args, env): + """spawnvpe(mode, file, args, env) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return _spawnvef(mode, file, args, env, execvpe) + + + __all__.extend(["spawnv", "spawnve", "spawnvp", "spawnvpe"]) + + +if _exists("spawnv"): + # These aren't supplied by the basic Windows code + # but can be easily implemented in Python + + def spawnl(mode, file, *args): + """spawnl(mode, file, *args) -> integer + +Execute file with arguments from args in a subprocess. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return spawnv(mode, file, args) + + def spawnle(mode, file, *args): + """spawnle(mode, file, *args, env) -> integer + +Execute file with arguments from args in a subprocess with the +supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + env = args[-1] + return spawnve(mode, file, args[:-1], env) + + + __all__.extend(["spawnl", "spawnle"]) + + +if _exists("spawnvp"): + # At the moment, Windows doesn't implement spawnvp[e], + # so it won't have spawnlp[e] either. + def spawnlp(mode, file, *args): + """spawnlp(mode, file, *args) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return spawnvp(mode, file, args) + + def spawnlpe(mode, file, *args): + """spawnlpe(mode, file, *args, env) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. 
""" + env = args[-1] + return spawnvpe(mode, file, args[:-1], env) + + + __all__.extend(["spawnlp", "spawnlpe"]) + + +# Supply os.popen() +def popen(cmd, mode="r", buffering=-1): + if not isinstance(cmd, str): + raise TypeError("invalid cmd type (%s, expected string)" % type(cmd)) + if mode not in ("r", "w"): + raise ValueError("invalid mode %r" % mode) + if buffering == 0 or buffering is None: + raise ValueError("popen() does not support unbuffered streams") + import subprocess, io + if mode == "r": + proc = subprocess.Popen(cmd, + shell=True, + stdout=subprocess.PIPE, + bufsize=buffering) + return _wrap_close(io.TextIOWrapper(proc.stdout), proc) + else: + proc = subprocess.Popen(cmd, + shell=True, + stdin=subprocess.PIPE, + bufsize=buffering) + return _wrap_close(io.TextIOWrapper(proc.stdin), proc) + +# Helper for popen() -- a proxy for a file whose close waits for the process +class _wrap_close: + def __init__(self, stream, proc): + self._stream = stream + self._proc = proc + def close(self): + self._stream.close() + returncode = self._proc.wait() + if returncode == 0: + return None + if name == 'nt': + return returncode + else: + return returncode << 8 # Shift left to match old behavior + def __enter__(self): + return self + def __exit__(self, *args): + self.close() + def __getattr__(self, name): + return getattr(self._stream, name) + def __iter__(self): + return iter(self._stream) + +# Supply os.fdopen() +def fdopen(fd, *args, **kwargs): + if not isinstance(fd, int): + raise TypeError("invalid fd type (%s, expected integer)" % type(fd)) + import io + return io.open(fd, *args, **kwargs) diff --git a/v1/flask/lib/python3.4/posixpath.py b/v1/flask/lib/python3.4/posixpath.py deleted file mode 120000 index 1af8268..0000000 --- a/v1/flask/lib/python3.4/posixpath.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/posixpath.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/posixpath.py b/v1/flask/lib/python3.4/posixpath.py new file mode 100644 index 0000000..0aa53fe --- /dev/null +++ b/v1/flask/lib/python3.4/posixpath.py @@ -0,0 +1,457 @@ +"""Common operations on Posix pathnames. + +Instead of importing this module directly, import os and refer to +this module as os.path. The "os.path" name is an alias for this +module on Posix systems; on other systems (e.g. Mac, Windows), +os.path provides the same operations in a manner specific to that +platform, and is an alias to another module (e.g. macpath, ntpath). + +Some of this can actually be useful on non-Posix systems too, e.g. +for manipulation of the pathname component of URLs. +""" + +import os +import sys +import stat +import genericpath +from genericpath import * + +__all__ = ["normcase","isabs","join","splitdrive","split","splitext", + "basename","dirname","commonprefix","getsize","getmtime", + "getatime","getctime","islink","exists","lexists","isdir","isfile", + "ismount", "expanduser","expandvars","normpath","abspath", + "samefile","sameopenfile","samestat", + "curdir","pardir","sep","pathsep","defpath","altsep","extsep", + "devnull","realpath","supports_unicode_filenames","relpath"] + +# Strings representing various path-related bits and pieces. +# These are primarily for export; internally, they are hardcoded. +curdir = '.' +pardir = '..' +extsep = '.' +sep = '/' +pathsep = ':' +defpath = ':/bin:/usr/bin' +altsep = None +devnull = '/dev/null' + +def _get_sep(path): + if isinstance(path, bytes): + return b'/' + else: + return '/' + +# Normalize the case of a pathname. 
Trivial in Posix, string.lower on Mac. +# On MS-DOS this may also turn slashes into backslashes; however, other +# normalizations (such as optimizing '../' away) are not allowed +# (another function should be defined to do that). + +def normcase(s): + """Normalize case of pathname. Has no effect under Posix""" + if not isinstance(s, (bytes, str)): + raise TypeError("normcase() argument must be str or bytes, " + "not '{}'".format(s.__class__.__name__)) + return s + + +# Return whether a path is absolute. +# Trivial in Posix, harder on the Mac or MS-DOS. + +def isabs(s): + """Test whether a path is absolute""" + sep = _get_sep(s) + return s.startswith(sep) + + +# Join pathnames. +# Ignore the previous parts if a part is absolute. +# Insert a '/' unless the first part is empty or already ends in '/'. + +def join(a, *p): + """Join two or more pathname components, inserting '/' as needed. + If any component is an absolute path, all previous path components + will be discarded. An empty last part will result in a path that + ends with a separator.""" + sep = _get_sep(a) + path = a + try: + for b in p: + if b.startswith(sep): + path = b + elif not path or path.endswith(sep): + path += b + else: + path += sep + b + except TypeError: + if all(isinstance(s, (str, bytes)) for s in (a,) + p): + # Must have a mixture of text and binary data + raise TypeError("Can't mix strings and bytes in path " + "components") from None + raise + return path + + +# Split a path in head (everything up to the last '/') and tail (the +# rest). If the path ends in '/', tail will be empty. If there is no +# '/' in the path, head will be empty. +# Trailing '/'es are stripped from head unless it is the root. + +def split(p): + """Split a pathname. Returns tuple "(head, tail)" where "tail" is + everything after the final slash. Either part may be empty.""" + sep = _get_sep(p) + i = p.rfind(sep) + 1 + head, tail = p[:i], p[i:] + if head and head != sep*len(head): + head = head.rstrip(sep) + return head, tail + + +# Split a path in root and extension. +# The extension is everything starting at the last dot in the last +# pathname component; the root is everything before that. +# It is always true that root + ext == p. + +def splitext(p): + if isinstance(p, bytes): + sep = b'/' + extsep = b'.' + else: + sep = '/' + extsep = '.' + return genericpath._splitext(p, sep, None, extsep) +splitext.__doc__ = genericpath._splitext.__doc__ + +# Split a pathname into a drive specification and the rest of the +# path. Useful on DOS/Windows/NT; on Unix, the drive is always empty. + +def splitdrive(p): + """Split a pathname into drive and path. On Posix, drive is always + empty.""" + return p[:0], p + + +# Return the tail (basename) part of a path, same as split(path)[1]. + +def basename(p): + """Returns the final component of a pathname""" + sep = _get_sep(p) + i = p.rfind(sep) + 1 + return p[i:] + + +# Return the head (dirname) part of a path, same as split(path)[0]. + +def dirname(p): + """Returns the directory component of a pathname""" + sep = _get_sep(p) + i = p.rfind(sep) + 1 + head = p[:i] + if head and head != sep*len(head): + head = head.rstrip(sep) + return head + + +# Is a path a symbolic link? +# This will always return false on systems where os.lstat doesn't exist. + +def islink(path): + """Test whether a path is a symbolic link""" + try: + st = os.lstat(path) + except (OSError, AttributeError): + return False + return stat.S_ISLNK(st.st_mode) + +# Being true for dangling symbolic links is also useful. 
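+# Illustrative aside (not part of the upstream posixpath.py): for a dangling
+# symlink, islink() and lexists() (below) are true even though exists() is
+# false (a sketch; assumes a writable current directory):
+#
+#     os.symlink('/nonexistent/target', 'dangling')
+#     islink('dangling')     # -> True
+#     lexists('dangling')    # -> True
+#     exists('dangling')     # -> False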
+ +def lexists(path): + """Test whether a path exists. Returns True for broken symbolic links""" + try: + os.lstat(path) + except OSError: + return False + return True + + +# Is a path a mount point? +# (Does this work for all UNIXes? Is it even guaranteed to work by Posix?) + +def ismount(path): + """Test whether a path is a mount point""" + try: + s1 = os.lstat(path) + except OSError: + # It doesn't exist -- so not a mount point. :-) + return False + else: + # A symlink can never be a mount point + if stat.S_ISLNK(s1.st_mode): + return False + + if isinstance(path, bytes): + parent = join(path, b'..') + else: + parent = join(path, '..') + try: + s2 = os.lstat(parent) + except OSError: + return False + + dev1 = s1.st_dev + dev2 = s2.st_dev + if dev1 != dev2: + return True # path/.. on a different device as path + ino1 = s1.st_ino + ino2 = s2.st_ino + if ino1 == ino2: + return True # path/.. is the same i-node as path + return False + + +# Expand paths beginning with '~' or '~user'. +# '~' means $HOME; '~user' means that user's home directory. +# If the path doesn't begin with '~', or if the user or $HOME is unknown, +# the path is returned unchanged (leaving error reporting to whatever +# function is called with the expanded path as argument). +# See also module 'glob' for expansion of *, ? and [...] in pathnames. +# (A function should also be defined to do full *sh-style environment +# variable expansion.) + +def expanduser(path): + """Expand ~ and ~user constructions. If user or $HOME is unknown, + do nothing.""" + if isinstance(path, bytes): + tilde = b'~' + else: + tilde = '~' + if not path.startswith(tilde): + return path + sep = _get_sep(path) + i = path.find(sep, 1) + if i < 0: + i = len(path) + if i == 1: + if 'HOME' not in os.environ: + import pwd + userhome = pwd.getpwuid(os.getuid()).pw_dir + else: + userhome = os.environ['HOME'] + else: + import pwd + name = path[1:i] + if isinstance(name, bytes): + name = str(name, 'ASCII') + try: + pwent = pwd.getpwnam(name) + except KeyError: + return path + userhome = pwent.pw_dir + if isinstance(path, bytes): + userhome = os.fsencode(userhome) + root = b'/' + else: + root = '/' + userhome = userhome.rstrip(root) + return (userhome + path[i:]) or root + + +# Expand paths containing shell variable substitutions. +# This expands the forms $variable and ${variable} only. +# Non-existent variables are left unchanged. + +_varprog = None +_varprogb = None + +def expandvars(path): + """Expand shell variables of form $var and ${var}. Unknown variables + are left unchanged.""" + global _varprog, _varprogb + if isinstance(path, bytes): + if b'$' not in path: + return path + if not _varprogb: + import re + _varprogb = re.compile(br'\$(\w+|\{[^}]*\})', re.ASCII) + search = _varprogb.search + start = b'{' + end = b'}' + environ = getattr(os, 'environb', None) + else: + if '$' not in path: + return path + if not _varprog: + import re + _varprog = re.compile(r'\$(\w+|\{[^}]*\})', re.ASCII) + search = _varprog.search + start = '{' + end = '}' + environ = os.environ + i = 0 + while True: + m = search(path, i) + if not m: + break + i, j = m.span(0) + name = m.group(1) + if name.startswith(start) and name.endswith(end): + name = name[1:-1] + try: + if environ is None: + value = os.fsencode(os.environ[os.fsdecode(name)]) + else: + value = environ[name] + except KeyError: + i = j + else: + tail = path[j:] + path = path[:i] + value + i = len(path) + path += tail + return path + + +# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B. 
+# It should be understood that this may change the meaning of the path
+# if it contains symbolic links!
+
+def normpath(path):
+    """Normalize path, eliminating double slashes, etc."""
+    if isinstance(path, bytes):
+        sep = b'/'
+        empty = b''
+        dot = b'.'
+        dotdot = b'..'
+    else:
+        sep = '/'
+        empty = ''
+        dot = '.'
+        dotdot = '..'
+    if path == empty:
+        return dot
+    initial_slashes = path.startswith(sep)
+    # POSIX allows one or two initial slashes, but treats three or more
+    # as single slash.
+    if (initial_slashes and
+        path.startswith(sep*2) and not path.startswith(sep*3)):
+        initial_slashes = 2
+    comps = path.split(sep)
+    new_comps = []
+    for comp in comps:
+        if comp in (empty, dot):
+            continue
+        if (comp != dotdot or (not initial_slashes and not new_comps) or
+             (new_comps and new_comps[-1] == dotdot)):
+            new_comps.append(comp)
+        elif new_comps:
+            new_comps.pop()
+    comps = new_comps
+    path = sep.join(comps)
+    if initial_slashes:
+        path = sep*initial_slashes + path
+    return path or dot
+
+
+def abspath(path):
+    """Return an absolute path."""
+    if not isabs(path):
+        if isinstance(path, bytes):
+            cwd = os.getcwdb()
+        else:
+            cwd = os.getcwd()
+        path = join(cwd, path)
+    return normpath(path)
+
+
+# Return a canonical path (i.e. the absolute location of a file on the
+# filesystem).
+
+def realpath(filename):
+    """Return the canonical path of the specified filename, eliminating any
+symbolic links encountered in the path."""
+    path, ok = _joinrealpath(filename[:0], filename, {})
+    return abspath(path)
+
+# Join two paths, normalizing and eliminating any symbolic links
+# encountered in the second path.
+def _joinrealpath(path, rest, seen):
+    if isinstance(path, bytes):
+        sep = b'/'
+        curdir = b'.'
+        pardir = b'..'
+    else:
+        sep = '/'
+        curdir = '.'
+        pardir = '..'
+
+    if isabs(rest):
+        rest = rest[1:]
+        path = sep
+
+    while rest:
+        name, _, rest = rest.partition(sep)
+        if not name or name == curdir:
+            # current dir
+            continue
+        if name == pardir:
+            # parent dir
+            if path:
+                path, name = split(path)
+                if name == pardir:
+                    path = join(path, pardir, pardir)
+            else:
+                path = pardir
+            continue
+        newpath = join(path, name)
+        if not islink(newpath):
+            path = newpath
+            continue
+        # Resolve the symbolic link
+        if newpath in seen:
+            # Already seen this path
+            path = seen[newpath]
+            if path is not None:
+                # use cached value
+                continue
+            # The symlink is not resolved, so we must have a symlink loop.
+            # Return already resolved part + rest of the path unchanged.
+            return join(newpath, rest), False
+        seen[newpath] = None # not resolved symlink
+        path, ok = _joinrealpath(path, os.readlink(newpath), seen)
+        if not ok:
+            return join(path, rest), False
+        seen[newpath] = path # resolved symlink
+
+    return path, True
+
+
+supports_unicode_filenames = (sys.platform == 'darwin')
+
+def relpath(path, start=None):
+    """Return a relative version of a path"""
+
+    if not path:
+        raise ValueError("no path specified")
+
+    if isinstance(path, bytes):
+        curdir = b'.'
+        sep = b'/'
+        pardir = b'..'
+    else:
+        curdir = '.'
+        sep = '/'
+        pardir = '..'
+
+    if start is None:
+        start = curdir
+
+    start_list = [x for x in abspath(start).split(sep) if x]
+    path_list = [x for x in abspath(path).split(sep) if x]
+
+    # Work out how much of the filepath is shared by start and path.
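+    # Illustrative aside (not from the upstream source): commonprefix() is
+    # applied to the *component lists*, so for start='/usr/local/bin' and
+    # path='/usr/share/doc' the shared prefix is ['usr'], i == 1, and the
+    # result is two '..' hops plus the remaining components:
+    #
+    #     relpath('/usr/share/doc', '/usr/local/bin')  # -> '../../share/doc'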
+ i = len(commonprefix([start_list, path_list])) + + rel_list = [pardir] * (len(start_list)-i) + path_list[i:] + if not rel_list: + return curdir + return join(*rel_list) diff --git a/v1/flask/lib/python3.4/random.py b/v1/flask/lib/python3.4/random.py deleted file mode 120000 index e86b7b2..0000000 --- a/v1/flask/lib/python3.4/random.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/random.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/random.py b/v1/flask/lib/python3.4/random.py new file mode 100644 index 0000000..4642928 --- /dev/null +++ b/v1/flask/lib/python3.4/random.py @@ -0,0 +1,742 @@ +"""Random variable generators. + + integers + -------- + uniform within range + + sequences + --------- + pick random element + pick random sample + generate random permutation + + distributions on the real line: + ------------------------------ + uniform + triangular + normal (Gaussian) + lognormal + negative exponential + gamma + beta + pareto + Weibull + + distributions on the circle (angles 0 to 2pi) + --------------------------------------------- + circular uniform + von Mises + +General notes on the underlying Mersenne Twister core generator: + +* The period is 2**19937-1. +* It is one of the most extensively tested generators in existence. +* The random() method is implemented in C, executes in a single Python step, + and is, therefore, threadsafe. + +""" + +from warnings import warn as _warn +from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType +from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil +from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin +from os import urandom as _urandom +from _collections_abc import Set as _Set, Sequence as _Sequence +from hashlib import sha512 as _sha512 + +__all__ = ["Random","seed","random","uniform","randint","choice","sample", + "randrange","shuffle","normalvariate","lognormvariate", + "expovariate","vonmisesvariate","gammavariate","triangular", + "gauss","betavariate","paretovariate","weibullvariate", + "getstate","setstate", "getrandbits", + "SystemRandom"] + +NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0) +TWOPI = 2.0*_pi +LOG4 = _log(4.0) +SG_MAGICCONST = 1.0 + _log(4.5) +BPF = 53 # Number of bits in a float +RECIP_BPF = 2**-BPF + + +# Translated by Guido van Rossum from C source provided by +# Adrian Baddeley. Adapted by Raymond Hettinger for use with +# the Mersenne Twister and os.urandom() core generators. + +import _random + +class Random(_random.Random): + """Random number generator base class used by bound module functions. + + Used to instantiate instances of Random to get generators that don't + share state. + + Class Random can also be subclassed if you want to use a different basic + generator of your own devising: in that case, override the following + methods: random(), seed(), getstate(), and setstate(). + Optionally, implement a getrandbits() method so that randrange() + can cover arbitrarily large ranges. + + """ + + VERSION = 3 # used by getstate/setstate + + def __init__(self, x=None): + """Initialize an instance. + + Optional argument x controls seeding, as for Random.seed(). + """ + + self.seed(x) + self.gauss_next = None + + def seed(self, a=None, version=2): + """Initialize internal state from hashable object. + + None or no argument seeds from current time or from an operating + system specific randomness source if available. 
+ + For version 2 (the default), all of the bits are used if *a* is a str, + bytes, or bytearray. For version 1, the hash() of *a* is used instead. + + If *a* is an int, all bits are used. + + """ + + if a is None: + try: + # Seed with enough bytes to span the 19937 bit + # state space for the Mersenne Twister + a = int.from_bytes(_urandom(2500), 'big') + except NotImplementedError: + import time + a = int(time.time() * 256) # use fractional seconds + + if version == 2: + if isinstance(a, (str, bytes, bytearray)): + if isinstance(a, str): + a = a.encode() + a += _sha512(a).digest() + a = int.from_bytes(a, 'big') + + super().seed(a) + self.gauss_next = None + + def getstate(self): + """Return internal state; can be passed to setstate() later.""" + return self.VERSION, super().getstate(), self.gauss_next + + def setstate(self, state): + """Restore internal state from object returned by getstate().""" + version = state[0] + if version == 3: + version, internalstate, self.gauss_next = state + super().setstate(internalstate) + elif version == 2: + version, internalstate, self.gauss_next = state + # In version 2, the state was saved as signed ints, which causes + # inconsistencies between 32/64-bit systems. The state is + # really unsigned 32-bit ints, so we convert negative ints from + # version 2 to positive longs for version 3. + try: + internalstate = tuple(x % (2**32) for x in internalstate) + except ValueError as e: + raise TypeError from e + super().setstate(internalstate) + else: + raise ValueError("state with version %s passed to " + "Random.setstate() of version %s" % + (version, self.VERSION)) + +## ---- Methods below this point do not need to be overridden when +## ---- subclassing for the purpose of using a different core generator. + +## -------------------- pickle support ------------------- + + # Issue 17489: Since __reduce__ was defined to fix #759889 this is no + # longer called; we leave it here because it has been here since random was + # rewritten back in 2001 and why risk breaking something. + def __getstate__(self): # for pickle + return self.getstate() + + def __setstate__(self, state): # for pickle + self.setstate(state) + + def __reduce__(self): + return self.__class__, (), self.getstate() + +## -------------------- integer methods ------------------- + + def randrange(self, start, stop=None, step=1, _int=int): + """Choose a random item from range(start, stop[, step]). + + This fixes the problem with randint() which includes the + endpoint; in Python this is usually not what you want. + + """ + + # This code is a bit messy to make it fast for the + # common case while still doing adequate error checking. + istart = _int(start) + if istart != start: + raise ValueError("non-integer arg 1 for randrange()") + if stop is None: + if istart > 0: + return self._randbelow(istart) + raise ValueError("empty range for randrange()") + + # stop argument supplied. + istop = _int(stop) + if istop != stop: + raise ValueError("non-integer stop for randrange()") + width = istop - istart + if step == 1 and width > 0: + return istart + self._randbelow(width) + if step == 1: + raise ValueError("empty range for randrange() (%d,%d, %d)" % (istart, istop, width)) + + # Non-unit step argument supplied. 
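+        # Illustrative aside (not from the upstream source): the step handling
+        # below counts the candidates n by rounding width/step away from zero;
+        # e.g. randrange(0, 10, 3) gives n = (10 + 3 - 1) // 3 == 4, i.e. the
+        # four values 0, 3, 6 and 9.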
+        istep = _int(step)
+        if istep != step:
+            raise ValueError("non-integer step for randrange()")
+        if istep > 0:
+            n = (width + istep - 1) // istep
+        elif istep < 0:
+            n = (width + istep + 1) // istep
+        else:
+            raise ValueError("zero step for randrange()")
+
+        if n <= 0:
+            raise ValueError("empty range for randrange()")
+
+        return istart + istep*self._randbelow(n)
+
+    def randint(self, a, b):
+        """Return random integer in range [a, b], including both end points.
+        """
+
+        return self.randrange(a, b+1)
+
+    def _randbelow(self, n, int=int, maxsize=1<<BPF, type=type,
+                   Method=_MethodType, BuiltinMethod=_BuiltinMethodType):
+        "Return a random int in the range [0,n).  Raises ValueError if n==0."
+
+        random = self.random
+        getrandbits = self.getrandbits
+        # Only call self.getrandbits if the original random() builtin method
+        # has not been overridden or if a new getrandbits() was supplied.
+        if type(random) is BuiltinMethod or type(getrandbits) is Method:
+            k = n.bit_length()  # don't use (n-1) here because n can be 1
+            r = getrandbits(k)          # 0 <= r < 2**k
+            while r >= n:
+                r = getrandbits(k)
+            return r
+        # There's an overridden random() method but no new getrandbits()
+        # method, so we can only use random() from here.
+        if n >= maxsize:
+            _warn("Underlying random() generator does not supply \n"
+                "enough bits to choose from a population range this large.\n"
+                "To remove the range limitation, add a getrandbits() method.")
+            return int(random() * n)
+        rem = maxsize % n
+        limit = (maxsize - rem) / maxsize   # int(limit * maxsize) % n == 0
+        r = random()
+        while r >= limit:
+            r = random()
+        return int(r*maxsize) % n
+
+## -------------------- sequence methods  -------------------
+
+    def choice(self, seq):
+        """Choose a random element from a non-empty sequence."""
+        try:
+            i = self._randbelow(len(seq))
+        except ValueError:
+            raise IndexError('Cannot choose from an empty sequence')
+        return seq[i]
+
+    def shuffle(self, x, random=None):
+        """Shuffle list x in place, and return None.
+
+        Optional argument random is a 0-argument function returning a
+        random float in [0.0, 1.0); if it is the default None, the
+        standard random.random will be used.
+
+        """
+
+        if random is None:
+            randbelow = self._randbelow
+            for i in reversed(range(1, len(x))):
+                # pick an element in x[:i+1] with which to exchange x[i]
+                j = randbelow(i+1)
+                x[i], x[j] = x[j], x[i]
+        else:
+            _int = int
+            for i in reversed(range(1, len(x))):
+                # pick an element in x[:i+1] with which to exchange x[i]
+                j = _int(random() * (i+1))
+                x[i], x[j] = x[j], x[i]
+
+    def sample(self, population, k):
+        """Chooses k unique random elements from a population sequence or set.
+
+        Returns a new list containing elements from the population while
+        leaving the original population unchanged.  The resulting list is
+        in selection order so that all sub-slices will also be valid random
+        samples.  This allows raffle winners (the sample) to be partitioned
+        into grand prize and second place winners (the subslices).
+
+        Members of the population need not be hashable or unique.  If the
+        population contains repeats, then each occurrence is a possible
+        selection in the sample.
+
+        To choose a sample in a range of integers, use range as an argument.
+        This is especially fast and space efficient for sampling from a
+        large population:   sample(range(10000000), 60)
+        """
+
+        # Sampling without replacement entails tracking either potential
+        # selections (the pool) in a list or previous selections in a set.
+
+        # When the number of selections is small compared to the
+        # population, then tracking selections is efficient, requiring
+        # only a small set and an occasional reselection.  For
+        # a larger number of selections, the pool tracking method is
+        # preferred since the list takes less space than the
+        # set and it doesn't suffer from frequent reselections.
+
+        if isinstance(population, _Set):
+            population = tuple(population)
+        if not isinstance(population, _Sequence):
+            raise TypeError("Population must be a sequence or set.
For dicts, use list(d).") + randbelow = self._randbelow + n = len(population) + if not 0 <= k <= n: + raise ValueError("Sample larger than population") + result = [None] * k + setsize = 21 # size of a small set minus size of an empty list + if k > 5: + setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets + if n <= setsize: + # An n-length list is smaller than a k-length set + pool = list(population) + for i in range(k): # invariant: non-selected at [0,n-i) + j = randbelow(n-i) + result[i] = pool[j] + pool[j] = pool[n-i-1] # move non-selected item into vacancy + else: + selected = set() + selected_add = selected.add + for i in range(k): + j = randbelow(n) + while j in selected: + j = randbelow(n) + selected_add(j) + result[i] = population[j] + return result + +## -------------------- real-valued distributions ------------------- + +## -------------------- uniform distribution ------------------- + + def uniform(self, a, b): + "Get a random number in the range [a, b) or [a, b] depending on rounding." + return a + (b-a) * self.random() + +## -------------------- triangular -------------------- + + def triangular(self, low=0.0, high=1.0, mode=None): + """Triangular distribution. + + Continuous distribution bounded by given lower and upper limits, + and having a given mode value in-between. + + http://en.wikipedia.org/wiki/Triangular_distribution + + """ + u = self.random() + try: + c = 0.5 if mode is None else (mode - low) / (high - low) + except ZeroDivisionError: + return low + if u > c: + u = 1.0 - u + c = 1.0 - c + low, high = high, low + return low + (high - low) * (u * c) ** 0.5 + +## -------------------- normal distribution -------------------- + + def normalvariate(self, mu, sigma): + """Normal distribution. + + mu is the mean, and sigma is the standard deviation. + + """ + # mu = mean, sigma = standard deviation + + # Uses Kinderman and Monahan method. Reference: Kinderman, + # A.J. and Monahan, J.F., "Computer generation of random + # variables using the ratio of uniform deviates", ACM Trans + # Math Software, 3, (1977), pp257-260. + + random = self.random + while 1: + u1 = random() + u2 = 1.0 - random() + z = NV_MAGICCONST*(u1-0.5)/u2 + zz = z*z/4.0 + if zz <= -_log(u2): + break + return mu + z*sigma + +## -------------------- lognormal distribution -------------------- + + def lognormvariate(self, mu, sigma): + """Log normal distribution. + + If you take the natural logarithm of this distribution, you'll get a + normal distribution with mean mu and standard deviation sigma. + mu can have any value, and sigma must be greater than zero. + + """ + return _exp(self.normalvariate(mu, sigma)) + +## -------------------- exponential distribution -------------------- + + def expovariate(self, lambd): + """Exponential distribution. + + lambd is 1.0 divided by the desired mean. It should be + nonzero. (The parameter would be called "lambda", but that is + a reserved word in Python.) Returned values range from 0 to + positive infinity if lambd is positive, and from negative + infinity to 0 if lambd is negative. + + """ + # lambd: rate lambd = 1/mean + # ('lambda' is a Python reserved word) + + # we use 1-random() instead of random() to preclude the + # possibility of taking the log of zero. + return -_log(1.0 - self.random())/lambd + +## -------------------- von Mises distribution -------------------- + + def vonmisesvariate(self, mu, kappa): + """Circular data distribution. 
+ + mu is the mean angle, expressed in radians between 0 and 2*pi, and + kappa is the concentration parameter, which must be greater than or + equal to zero. If kappa is equal to zero, this distribution reduces + to a uniform random angle over the range 0 to 2*pi. + + """ + # mu: mean angle (in radians between 0 and 2*pi) + # kappa: concentration parameter kappa (>= 0) + # if kappa = 0 generate uniform random angle + + # Based upon an algorithm published in: Fisher, N.I., + # "Statistical Analysis of Circular Data", Cambridge + # University Press, 1993. + + # Thanks to Magnus Kessler for a correction to the + # implementation of step 4. + + random = self.random + if kappa <= 1e-6: + return TWOPI * random() + + s = 0.5 / kappa + r = s + _sqrt(1.0 + s * s) + + while 1: + u1 = random() + z = _cos(_pi * u1) + + d = z / (r + z) + u2 = random() + if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d): + break + + q = 1.0 / r + f = (q + z) / (1.0 + q * z) + u3 = random() + if u3 > 0.5: + theta = (mu + _acos(f)) % TWOPI + else: + theta = (mu - _acos(f)) % TWOPI + + return theta + +## -------------------- gamma distribution -------------------- + + def gammavariate(self, alpha, beta): + """Gamma distribution. Not the gamma function! + + Conditions on the parameters are alpha > 0 and beta > 0. + + The probability distribution function is: + + x ** (alpha - 1) * math.exp(-x / beta) + pdf(x) = -------------------------------------- + math.gamma(alpha) * beta ** alpha + + """ + + # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2 + + # Warning: a few older sources define the gamma distribution in terms + # of alpha > -1.0 + if alpha <= 0.0 or beta <= 0.0: + raise ValueError('gammavariate: alpha and beta must be > 0.0') + + random = self.random + if alpha > 1.0: + + # Uses R.C.H. Cheng, "The generation of Gamma + # variables with non-integral shape parameters", + # Applied Statistics, (1977), 26, No. 1, p71-74 + + ainv = _sqrt(2.0 * alpha - 1.0) + bbb = alpha - LOG4 + ccc = alpha + ainv + + while 1: + u1 = random() + if not 1e-7 < u1 < .9999999: + continue + u2 = 1.0 - random() + v = _log(u1/(1.0-u1))/ainv + x = alpha*_exp(v) + z = u1*u1*u2 + r = bbb+ccc*v-x + if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z): + return x * beta + + elif alpha == 1.0: + # expovariate(1) + u = random() + while u <= 1e-7: + u = random() + return -_log(u) * beta + + else: # alpha is between 0 and 1 (exclusive) + + # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle + + while 1: + u = random() + b = (_e + alpha)/_e + p = b*u + if p <= 1.0: + x = p ** (1.0/alpha) + else: + x = -_log((b-p)/alpha) + u1 = random() + if p > 1.0: + if u1 <= x ** (alpha - 1.0): + break + elif u1 <= _exp(-x): + break + return x * beta + +## -------------------- Gauss (faster alternative) -------------------- + + def gauss(self, mu, sigma): + """Gaussian distribution. + + mu is the mean, and sigma is the standard deviation. This is + slightly faster than the normalvariate() function. + + Not thread-safe without a lock around calls. + + """ + + # When x and y are two variables from [0, 1), uniformly + # distributed, then + # + # cos(2*pi*x)*sqrt(-2*log(1-y)) + # sin(2*pi*x)*sqrt(-2*log(1-y)) + # + # are two *independent* variables with normal distribution + # (mu = 0, sigma = 1). + # (Lambert Meertens) + # (corrected version; bug discovered by Mike Miller, fixed by LM) + + # Multithreading note: When two threads call this function + # simultaneously, it is possible that they will receive the + # same return value. 
The window is very small though. To + # avoid this, you have to use a lock around all calls. (I + # didn't want to slow this down in the serial case by using a + # lock here.) + + random = self.random + z = self.gauss_next + self.gauss_next = None + if z is None: + x2pi = random() * TWOPI + g2rad = _sqrt(-2.0 * _log(1.0 - random())) + z = _cos(x2pi) * g2rad + self.gauss_next = _sin(x2pi) * g2rad + + return mu + z*sigma + +## -------------------- beta -------------------- +## See +## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html +## for Ivan Frohne's insightful analysis of why the original implementation: +## +## def betavariate(self, alpha, beta): +## # Discrete Event Simulation in C, pp 87-88. +## +## y = self.expovariate(alpha) +## z = self.expovariate(1.0/beta) +## return z/(y+z) +## +## was dead wrong, and how it probably got that way. + + def betavariate(self, alpha, beta): + """Beta distribution. + + Conditions on the parameters are alpha > 0 and beta > 0. + Returned values range between 0 and 1. + + """ + + # This version due to Janne Sinkkonen, and matches all the std + # texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution"). + y = self.gammavariate(alpha, 1.) + if y == 0: + return 0.0 + else: + return y / (y + self.gammavariate(beta, 1.)) + +## -------------------- Pareto -------------------- + + def paretovariate(self, alpha): + """Pareto distribution. alpha is the shape parameter.""" + # Jain, pg. 495 + + u = 1.0 - self.random() + return 1.0 / u ** (1.0/alpha) + +## -------------------- Weibull -------------------- + + def weibullvariate(self, alpha, beta): + """Weibull distribution. + + alpha is the scale parameter and beta is the shape parameter. + + """ + # Jain, pg. 499; bug fix courtesy Bill Arms + + u = 1.0 - self.random() + return alpha * (-_log(u)) ** (1.0/beta) + +## --------------- Operating System Random Source ------------------ + +class SystemRandom(Random): + """Alternate random number generator using sources provided + by the operating system (such as /dev/urandom on Unix or + CryptGenRandom on Windows). + + Not available on all systems (see os.urandom() for details). + """ + + def random(self): + """Get the next random number in the range [0.0, 1.0).""" + return (int.from_bytes(_urandom(7), 'big') >> 3) * RECIP_BPF + + def getrandbits(self, k): + """getrandbits(k) -> x. Generates an int with k random bits.""" + if k <= 0: + raise ValueError('number of bits must be greater than zero') + if k != int(k): + raise TypeError('number of bits should be an integer') + numbytes = (k + 7) // 8 # bits / 8 and rounded up + x = int.from_bytes(_urandom(numbytes), 'big') + return x >> (numbytes * 8 - k) # trim excess bits + + def seed(self, *args, **kwds): + "Stub method. Not used for a system random number generator." + return None + + def _notimplemented(self, *args, **kwds): + "Method should not be called for a system random number generator." 
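+        # Illustrative aside (not from the upstream source): SystemRandom
+        # draws straight from the OS entropy pool, so seeding is a no-op and
+        # there is no generator state to save or restore:
+        #
+        #     sr = SystemRandom()
+        #     sr.seed(1234)      # silently ignored
+        #     sr.getstate()      # raises NotImplementedError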
+ raise NotImplementedError('System entropy source does not have state.') + getstate = setstate = _notimplemented + +## -------------------- test program -------------------- + +def _test_generator(n, func, args): + import time + print(n, 'times', func.__name__) + total = 0.0 + sqsum = 0.0 + smallest = 1e10 + largest = -1e10 + t0 = time.time() + for i in range(n): + x = func(*args) + total += x + sqsum = sqsum + x*x + smallest = min(x, smallest) + largest = max(x, largest) + t1 = time.time() + print(round(t1-t0, 3), 'sec,', end=' ') + avg = total/n + stddev = _sqrt(sqsum/n - avg*avg) + print('avg %g, stddev %g, min %g, max %g' % \ + (avg, stddev, smallest, largest)) + + +def _test(N=2000): + _test_generator(N, random, ()) + _test_generator(N, normalvariate, (0.0, 1.0)) + _test_generator(N, lognormvariate, (0.0, 1.0)) + _test_generator(N, vonmisesvariate, (0.0, 1.0)) + _test_generator(N, gammavariate, (0.01, 1.0)) + _test_generator(N, gammavariate, (0.1, 1.0)) + _test_generator(N, gammavariate, (0.1, 2.0)) + _test_generator(N, gammavariate, (0.5, 1.0)) + _test_generator(N, gammavariate, (0.9, 1.0)) + _test_generator(N, gammavariate, (1.0, 1.0)) + _test_generator(N, gammavariate, (2.0, 1.0)) + _test_generator(N, gammavariate, (20.0, 1.0)) + _test_generator(N, gammavariate, (200.0, 1.0)) + _test_generator(N, gauss, (0.0, 1.0)) + _test_generator(N, betavariate, (3.0, 3.0)) + _test_generator(N, triangular, (0.0, 1.0, 1.0/3.0)) + +# Create one instance, seeded from current time, and export its methods +# as module-level functions. The functions share state across all uses +#(both in the user's code and in the Python libraries), but that's fine +# for most programs and is easier for the casual user than making them +# instantiate their own Random() instance. + +_inst = Random() +seed = _inst.seed +random = _inst.random +uniform = _inst.uniform +triangular = _inst.triangular +randint = _inst.randint +choice = _inst.choice +randrange = _inst.randrange +sample = _inst.sample +shuffle = _inst.shuffle +normalvariate = _inst.normalvariate +lognormvariate = _inst.lognormvariate +expovariate = _inst.expovariate +vonmisesvariate = _inst.vonmisesvariate +gammavariate = _inst.gammavariate +gauss = _inst.gauss +betavariate = _inst.betavariate +paretovariate = _inst.paretovariate +weibullvariate = _inst.weibullvariate +getstate = _inst.getstate +setstate = _inst.setstate +getrandbits = _inst.getrandbits + +if __name__ == '__main__': + _test() diff --git a/v1/flask/lib/python3.4/re.py b/v1/flask/lib/python3.4/re.py deleted file mode 120000 index e83df6c..0000000 --- a/v1/flask/lib/python3.4/re.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/re.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/re.py b/v1/flask/lib/python3.4/re.py new file mode 100644 index 0000000..199afee --- /dev/null +++ b/v1/flask/lib/python3.4/re.py @@ -0,0 +1,380 @@ +# +# Secret Labs' Regular Expression Engine +# +# re-compatible interface for the sre matching engine +# +# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. +# +# This version of the SRE library can be redistributed under CNRI's +# Python 1.6 license. For any other use, please contact Secret Labs +# AB (info@pythonware.com). +# +# Portions of this engine have been developed in cooperation with +# CNRI. Hewlett-Packard provided funding for 1.6 integration and +# other compatibility work. +# + +r"""Support for regular expressions (RE). 
+
+This module provides regular expression matching operations similar to
+those found in Perl.  It supports both 8-bit and Unicode strings; both
+the pattern and the strings being processed can contain null bytes and
+characters outside the US ASCII range.
+
+Regular expressions can contain both special and ordinary characters.
+Most ordinary characters, like "A", "a", or "0", are the simplest
+regular expressions; they simply match themselves.  You can
+concatenate ordinary characters, so last matches the string 'last'.
+
+The special characters are:
+    "."      Matches any character except a newline.
+    "^"      Matches the start of the string.
+    "$"      Matches the end of the string or just before the newline at
+             the end of the string.
+    "*"      Matches 0 or more (greedy) repetitions of the preceding RE.
+             Greedy means that it will match as many repetitions as possible.
+    "+"      Matches 1 or more (greedy) repetitions of the preceding RE.
+    "?"      Matches 0 or 1 (greedy) of the preceding RE.
+    *?,+?,?? Non-greedy versions of the previous three special characters.
+    {m,n}    Matches from m to n repetitions of the preceding RE.
+    {m,n}?   Non-greedy version of the above.
+    "\\"     Either escapes special characters or signals a special sequence.
+    []       Indicates a set of characters.
+             A "^" as the first character indicates a complementing set.
+    "|"      A|B, creates an RE that will match either A or B.
+    (...)    Matches the RE inside the parentheses.
+             The contents can be retrieved or matched later in the string.
+    (?aiLmsux) Set the A, I, L, M, S, U, or X flag for the RE (see below).
+    (?:...)  Non-grouping version of regular parentheses.
+    (?P<name>...) The substring matched by the group is accessible by name.
+    (?P=name)     Matches the text matched earlier by the group named name.
+    (?#...)  A comment; ignored.
+    (?=...)  Matches if ... matches next, but doesn't consume the string.
+    (?!...)  Matches if ... doesn't match next.
+    (?<=...) Matches if preceded by ... (must be fixed length).
+    (?<!...) Matches if not preceded by ... (must be fixed length).
+    (?(id/name)yes|no) Matches yes pattern if the group with id/name matched,
+                       the (optional) no pattern otherwise.
+
+The special sequences consist of "\\" and a character from the list
+below.  If the ordinary character is not on the list, then the
+resulting RE will match the second character.
+    \number  Matches the contents of the group of the same number.
+    \A       Matches only at the start of the string.
+    \Z       Matches only at the end of the string.
+    \b       Matches the empty string, but only at the start or end of a word.
+    \B       Matches the empty string, but not at the start or end of a word.
+    \d       Matches any decimal digit; equivalent to the set [0-9] in
+             bytes patterns or string patterns with the ASCII flag.
+             In string patterns without the ASCII flag, it will match the whole
+             range of Unicode digits.
+    \D       Matches any non-digit character; equivalent to [^\d].
+    \s       Matches any whitespace character; equivalent to [ \t\n\r\f\v] in
+             bytes patterns or string patterns with the ASCII flag.
+             In string patterns without the ASCII flag, it will match the whole
+             range of Unicode whitespace characters.
+    \S       Matches any non-whitespace character; equivalent to [^\s].
+    \w       Matches any alphanumeric character; equivalent to [a-zA-Z0-9_]
+             in bytes patterns or string patterns with the ASCII flag.
+             In string patterns without the ASCII flag, it will match the
+             range of Unicode alphanumeric characters (letters plus digits
+             plus underscore).
+             With LOCALE, it will match the set [0-9_] plus characters defined
+             as letters for the current locale.
+    \W       Matches the complement of \w.
+    \\       Matches a literal backslash.
+
+This module exports the following functions:
+    match     Match a regular expression pattern to the beginning of a string.
+    fullmatch Match a regular expression pattern to all of a string.
+    search    Search a string for the presence of a pattern.
+    sub       Substitute occurrences of a pattern found in a string.
+    subn      Same as sub, but also return the number of substitutions made.
+    split     Split a string by the occurrences of a pattern.
+    findall   Find all occurrences of a pattern in a string.
+    finditer  Return an iterator yielding a match object for each match.
+    compile   Compile a pattern into a RegexObject.
+    purge     Clear the regular expression cache.
+    escape    Backslash all non-alphanumerics in a string.
+
+Some of the functions in this module take flags as optional parameters:
+    A  ASCII       For string patterns, make \w, \W, \b, \B, \d, \D
+                   match the corresponding ASCII character categories
+                   (rather than the whole Unicode categories, which is the
+                   default).
+                   For bytes patterns, this flag is the only available
+                   behaviour and needn't be specified.
+    I  IGNORECASE  Perform case-insensitive matching.
+    L  LOCALE      Make \w, \W, \b, \B, dependent on the current locale.
+    M  MULTILINE   "^" matches the beginning of lines (after a newline)
+                   as well as the string.
+                   "$" matches the end of lines (before a newline) as well
+                   as the end of the string.
+    S  DOTALL      "." matches any character at all, including the newline.
+    X  VERBOSE     Ignore whitespace and comments for nicer looking RE's.
+    U  UNICODE     For compatibility only. Ignored for string patterns (it
+                   is the default), and forbidden for bytes patterns.
+
+This module also defines an exception 'error'.
+
+"""
+
+import sre_compile
+import sre_parse
+try:
+    import _locale
+except ImportError:
+    _locale = None
+
+# public symbols
+__all__ = [
+    "match", "fullmatch", "search", "sub", "subn", "split",
+    "findall", "finditer", "compile", "purge", "template", "escape",
+    "error", "A", "I", "L", "M", "S", "X", "U",
+    "ASCII", "IGNORECASE", "LOCALE", "MULTILINE", "DOTALL", "VERBOSE",
+    "UNICODE",
+]
+
+__version__ = "2.2.1"
+
+# flags
+A = ASCII = sre_compile.SRE_FLAG_ASCII # assume ascii "locale"
+I = IGNORECASE = sre_compile.SRE_FLAG_IGNORECASE # ignore case
+L = LOCALE = sre_compile.SRE_FLAG_LOCALE # assume current 8-bit locale
+U = UNICODE = sre_compile.SRE_FLAG_UNICODE # assume unicode "locale"
+M = MULTILINE = sre_compile.SRE_FLAG_MULTILINE # make anchors look for newline
+S = DOTALL = sre_compile.SRE_FLAG_DOTALL # make dot match newline
+X = VERBOSE = sre_compile.SRE_FLAG_VERBOSE # ignore whitespace and comments
+
+# sre extensions (experimental, don't rely on these)
+T = TEMPLATE = sre_compile.SRE_FLAG_TEMPLATE # disable backtracking
+DEBUG = sre_compile.SRE_FLAG_DEBUG # dump pattern after compilation
+
+# sre exception
+error = sre_compile.error
+
+# --------------------------------------------------------------------
+# public interface
+
+def match(pattern, string, flags=0):
+    """Try to apply the pattern at the start of the string, returning
+    a match object, or None if no match was found."""
+    return _compile(pattern, flags).match(string)
+
+def fullmatch(pattern, string, flags=0):
+    """Try to apply the pattern to all of the string, returning
+    a match object, or None if no match was found."""
+    return _compile(pattern, flags).fullmatch(string)
+
+def search(pattern, string, flags=0):
+    """Scan through string looking for a match to the pattern, returning
+    a match object, or None if no match was found."""
+    return _compile(pattern, flags).search(string)
+
+def sub(pattern, repl, string, count=0, flags=0):
+    """Return the string obtained by replacing the leftmost
+    non-overlapping occurrences of the pattern in string by the
+    replacement repl.  repl can be either a string or a callable;
+    if a string, backslash escapes in it are processed.  If it is
+    a callable, it's passed the match object and must return
+    a replacement string to be used."""
+    return _compile(pattern, flags).sub(repl, string, count)
+
+def subn(pattern, repl, string, count=0, flags=0):
+    """Return a 2-tuple containing (new_string, number).
+    new_string is the string obtained by replacing the leftmost
+    non-overlapping occurrences of the pattern in the source
+    string by the replacement repl.  number is the number of
+    substitutions that were made. repl can be either a string or a
+    callable; if a string, backslash escapes in it are processed.
+    If it is a callable, it's passed the match object and must
+    return a replacement string to be used."""
+    return _compile(pattern, flags).subn(repl, string, count)
+
+def split(pattern, string, maxsplit=0, flags=0):
+    """Split the source string by the occurrences of the pattern,
+    returning a list containing the resulting substrings.  If
+    capturing parentheses are used in pattern, then the text of all
+    groups in the pattern are also returned as part of the resulting
+    list.  If maxsplit is nonzero, at most maxsplit splits occur,
+    and the remainder of the string is returned as the final element
+    of the list."""
+    return _compile(pattern, flags).split(string, maxsplit)
+
+def findall(pattern, string, flags=0):
+    """Return a list of all non-overlapping matches in the string.
+
+    If one or more capturing groups are present in the pattern, return
+    a list of groups; this will be a list of tuples if the pattern
+    has more than one group.
+
+    Empty matches are included in the result."""
+    return _compile(pattern, flags).findall(string)
+
+def finditer(pattern, string, flags=0):
+    """Return an iterator over all non-overlapping matches in the
+    string.  For each match, the iterator returns a match object.
+
+    Empty matches are included in the result."""
+    return _compile(pattern, flags).finditer(string)
+
+def compile(pattern, flags=0):
+    "Compile a regular expression pattern, returning a pattern object."
+    return _compile(pattern, flags)
+
+def purge():
+    "Clear the regular expression caches"
+    _cache.clear()
+    _cache_repl.clear()
+
+def template(pattern, flags=0):
+    "Compile a template pattern, returning a pattern object"
+    return _compile(pattern, flags|T)
+
+_alphanum_str = frozenset(
+    "_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890")
+_alphanum_bytes = frozenset(
+    b"_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890")
+
+def escape(pattern):
+    """
+    Escape all the characters in pattern except ASCII letters, numbers and '_'.
+ """ + if isinstance(pattern, str): + alphanum = _alphanum_str + s = list(pattern) + for i, c in enumerate(pattern): + if c not in alphanum: + if c == "\000": + s[i] = "\\000" + else: + s[i] = "\\" + c + return "".join(s) + else: + alphanum = _alphanum_bytes + s = [] + esc = ord(b"\\") + for c in pattern: + if c in alphanum: + s.append(c) + else: + if c == 0: + s.extend(b"\\000") + else: + s.append(esc) + s.append(c) + return bytes(s) + +# -------------------------------------------------------------------- +# internals + +_cache = {} +_cache_repl = {} + +_pattern_type = type(sre_compile.compile("", 0)) + +_MAXCACHE = 512 +def _compile(pattern, flags): + # internal: compile pattern + bypass_cache = flags & DEBUG + if not bypass_cache: + try: + p, loc = _cache[type(pattern), pattern, flags] + if loc is None or loc == _locale.setlocale(_locale.LC_CTYPE): + return p + except KeyError: + pass + if isinstance(pattern, _pattern_type): + if flags: + raise ValueError( + "Cannot process flags argument with a compiled pattern") + return pattern + if not sre_compile.isstring(pattern): + raise TypeError("first argument must be string or compiled pattern") + p = sre_compile.compile(pattern, flags) + if not bypass_cache: + if len(_cache) >= _MAXCACHE: + _cache.clear() + if p.flags & LOCALE: + if not _locale: + return p + loc = _locale.setlocale(_locale.LC_CTYPE) + else: + loc = None + _cache[type(pattern), pattern, flags] = p, loc + return p + +def _compile_repl(repl, pattern): + # internal: compile replacement pattern + try: + return _cache_repl[repl, pattern] + except KeyError: + pass + p = sre_parse.parse_template(repl, pattern) + if len(_cache_repl) >= _MAXCACHE: + _cache_repl.clear() + _cache_repl[repl, pattern] = p + return p + +def _expand(pattern, match, template): + # internal: match.expand implementation hook + template = sre_parse.parse_template(template, pattern) + return sre_parse.expand_template(template, match) + +def _subx(pattern, template): + # internal: pattern.sub/subn implementation helper + template = _compile_repl(template, pattern) + if not template[0] and len(template[1]) == 1: + # literal replacement + return template[1][0] + def filter(match, template=template): + return sre_parse.expand_template(template, match) + return filter + +# register myself for pickling + +import copyreg + +def _pickle(p): + return _compile, (p.pattern, p.flags) + +copyreg.pickle(_pattern_type, _pickle, _compile) + +# -------------------------------------------------------------------- +# experimental stuff (see python-dev discussions for details) + +class Scanner: + def __init__(self, lexicon, flags=0): + from sre_constants import BRANCH, SUBPATTERN + self.lexicon = lexicon + # combine phrases into a compound pattern + p = [] + s = sre_parse.Pattern() + s.flags = flags + for phrase, action in lexicon: + p.append(sre_parse.SubPattern(s, [ + (SUBPATTERN, (len(p)+1, sre_parse.parse(phrase, flags))), + ])) + s.groups = len(p)+1 + p = sre_parse.SubPattern(s, [(BRANCH, (None, p))]) + self.scanner = sre_compile.compile(p) + def scan(self, string): + result = [] + append = result.append + match = self.scanner.scanner(string).match + i = 0 + while 1: + m = match() + if not m: + break + j = m.end() + if i == j: + break + action = self.lexicon[m.lastindex-1][1] + if callable(action): + self.match = m + action = action(self, m.group()) + if action is not None: + append(action) + i = j + return result, string[i:] diff --git a/v1/flask/lib/python3.4/reprlib.py b/v1/flask/lib/python3.4/reprlib.py deleted file mode 
120000 index 065d2a2..0000000 --- a/v1/flask/lib/python3.4/reprlib.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/reprlib.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/reprlib.py b/v1/flask/lib/python3.4/reprlib.py new file mode 100644 index 0000000..f803360 --- /dev/null +++ b/v1/flask/lib/python3.4/reprlib.py @@ -0,0 +1,157 @@ +"""Redo the builtin repr() (representation) but with limits on most sizes.""" + +__all__ = ["Repr", "repr", "recursive_repr"] + +import builtins +from itertools import islice +try: + from _thread import get_ident +except ImportError: + from _dummy_thread import get_ident + +def recursive_repr(fillvalue='...'): + 'Decorator to make a repr function return fillvalue for a recursive call' + + def decorating_function(user_function): + repr_running = set() + + def wrapper(self): + key = id(self), get_ident() + if key in repr_running: + return fillvalue + repr_running.add(key) + try: + result = user_function(self) + finally: + repr_running.discard(key) + return result + + # Can't use functools.wraps() here because of bootstrap issues + wrapper.__module__ = getattr(user_function, '__module__') + wrapper.__doc__ = getattr(user_function, '__doc__') + wrapper.__name__ = getattr(user_function, '__name__') + wrapper.__annotations__ = getattr(user_function, '__annotations__', {}) + return wrapper + + return decorating_function + +class Repr: + + def __init__(self): + self.maxlevel = 6 + self.maxtuple = 6 + self.maxlist = 6 + self.maxarray = 5 + self.maxdict = 4 + self.maxset = 6 + self.maxfrozenset = 6 + self.maxdeque = 6 + self.maxstring = 30 + self.maxlong = 40 + self.maxother = 30 + + def repr(self, x): + return self.repr1(x, self.maxlevel) + + def repr1(self, x, level): + typename = type(x).__name__ + if ' ' in typename: + parts = typename.split() + typename = '_'.join(parts) + if hasattr(self, 'repr_' + typename): + return getattr(self, 'repr_' + typename)(x, level) + else: + return self.repr_instance(x, level) + + def _repr_iterable(self, x, level, left, right, maxiter, trail=''): + n = len(x) + if level <= 0 and n: + s = '...' 
+ else: + newlevel = level - 1 + repr1 = self.repr1 + pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)] + if n > maxiter: pieces.append('...') + s = ', '.join(pieces) + if n == 1 and trail: right = trail + right + return '%s%s%s' % (left, s, right) + + def repr_tuple(self, x, level): + return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',') + + def repr_list(self, x, level): + return self._repr_iterable(x, level, '[', ']', self.maxlist) + + def repr_array(self, x, level): + header = "array('%s', [" % x.typecode + return self._repr_iterable(x, level, header, '])', self.maxarray) + + def repr_set(self, x, level): + x = _possibly_sorted(x) + return self._repr_iterable(x, level, 'set([', '])', self.maxset) + + def repr_frozenset(self, x, level): + x = _possibly_sorted(x) + return self._repr_iterable(x, level, 'frozenset([', '])', + self.maxfrozenset) + + def repr_deque(self, x, level): + return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque) + + def repr_dict(self, x, level): + n = len(x) + if n == 0: return '{}' + if level <= 0: return '{...}' + newlevel = level - 1 + repr1 = self.repr1 + pieces = [] + for key in islice(_possibly_sorted(x), self.maxdict): + keyrepr = repr1(key, newlevel) + valrepr = repr1(x[key], newlevel) + pieces.append('%s: %s' % (keyrepr, valrepr)) + if n > self.maxdict: pieces.append('...') + s = ', '.join(pieces) + return '{%s}' % (s,) + + def repr_str(self, x, level): + s = builtins.repr(x[:self.maxstring]) + if len(s) > self.maxstring: + i = max(0, (self.maxstring-3)//2) + j = max(0, self.maxstring-3-i) + s = builtins.repr(x[:i] + x[len(x)-j:]) + s = s[:i] + '...' + s[len(s)-j:] + return s + + def repr_int(self, x, level): + s = builtins.repr(x) # XXX Hope this isn't too slow... + if len(s) > self.maxlong: + i = max(0, (self.maxlong-3)//2) + j = max(0, self.maxlong-3-i) + s = s[:i] + '...' + s[len(s)-j:] + return s + + def repr_instance(self, x, level): + try: + s = builtins.repr(x) + # Bugs in x.__repr__() can cause arbitrary + # exceptions -- then make up something + except Exception: + return '<%s instance at %x>' % (x.__class__.__name__, id(x)) + if len(s) > self.maxother: + i = max(0, (self.maxother-3)//2) + j = max(0, self.maxother-3-i) + s = s[:i] + '...' + s[len(s)-j:] + return s + + +def _possibly_sorted(x): + # Since not all sequences of items can be sorted and comparison + # functions may raise arbitrary exceptions, return an unsorted + # sequence in that case. + try: + return sorted(x) + except Exception: + return list(x) + +aRepr = Repr() +repr = aRepr.repr diff --git a/v1/flask/lib/python3.4/rlcompleter.py b/v1/flask/lib/python3.4/rlcompleter.py deleted file mode 120000 index 96903b0..0000000 --- a/v1/flask/lib/python3.4/rlcompleter.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/rlcompleter.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/rlcompleter.py b/v1/flask/lib/python3.4/rlcompleter.py new file mode 100644 index 0000000..378f5aa --- /dev/null +++ b/v1/flask/lib/python3.4/rlcompleter.py @@ -0,0 +1,177 @@ +"""Word completion for GNU readline. + +The completer completes keywords, built-ins and globals in a selectable +namespace (which defaults to __main__); when completing NAME.NAME..., it +evaluates (!) the expression up to the last dot and completes its attributes. + +It's very cool to do "import sys" type "sys.", hit the completion key (twice), +and see the list of names defined by the sys module! 
+ +Tip: to use the tab key as the completion key, call + + readline.parse_and_bind("tab: complete") + +Notes: + +- Exceptions raised by the completer function are *ignored* (and generally cause + the completion to fail). This is a feature -- since readline sets the tty + device in raw (or cbreak) mode, printing a traceback wouldn't work well + without some complicated hoopla to save, reset and restore the tty state. + +- The evaluation of the NAME.NAME... form may cause arbitrary application + defined code to be executed if an object with a __getattr__ hook is found. + Since it is the responsibility of the application (or the user) to enable this + feature, I consider this an acceptable risk. More complicated expressions + (e.g. function calls or indexing operations) are *not* evaluated. + +- When the original stdin is not a tty device, GNU readline is never + used, and this module (and the readline module) are silently inactive. + +""" + +import atexit +import builtins +import __main__ + +__all__ = ["Completer"] + +class Completer: + def __init__(self, namespace = None): + """Create a new completer for the command line. + + Completer([namespace]) -> completer instance. + + If unspecified, the default namespace where completions are performed + is __main__ (technically, __main__.__dict__). Namespaces should be + given as dictionaries. + + Completer instances should be used as the completion mechanism of + readline via the set_completer() call: + + readline.set_completer(Completer(my_namespace).complete) + """ + + if namespace and not isinstance(namespace, dict): + raise TypeError('namespace must be a dictionary') + + # Don't bind to namespace quite yet, but flag whether the user wants a + # specific namespace or to use __main__.__dict__. This will allow us + # to bind to __main__.__dict__ at completion time, not now. + if namespace is None: + self.use_main_ns = 1 + else: + self.use_main_ns = 0 + self.namespace = namespace + + def complete(self, text, state): + """Return the next possible completion for 'text'. + + This is called successively with state == 0, 1, 2, ... until it + returns None. The completion should begin with 'text'. + + """ + if self.use_main_ns: + self.namespace = __main__.__dict__ + + if not text.strip(): + if state == 0: + return '\t' + else: + return None + + if state == 0: + if "." in text: + self.matches = self.attr_matches(text) + else: + self.matches = self.global_matches(text) + try: + return self.matches[state] + except IndexError: + return None + + def _callable_postfix(self, val, word): + if callable(val): + word = word + "(" + return word + + def global_matches(self, text): + """Compute matches when text is a simple name. + + Return a list of all keywords, built-in functions and names currently + defined in self.namespace that match. + + """ + import keyword + matches = [] + seen = {"__builtins__"} + n = len(text) + for word in keyword.kwlist: + if word[:n] == text: + seen.add(word) + matches.append(word) + for nspace in [self.namespace, builtins.__dict__]: + for word, val in nspace.items(): + if word[:n] == text and word not in seen: + seen.add(word) + matches.append(self._callable_postfix(val, word)) + return matches + + def attr_matches(self, text): + """Compute matches when text contains a dot. + + Assuming the text is of the form NAME.NAME....[NAME], and is + evaluable in self.namespace, it will be evaluated and its attributes + (as revealed by dir()) are used as possible completions. (For class + instances, class members are also considered.) 
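+
+        Example (an illustrative sketch, not from the original docstring;
+        results depend on what the namespace contains):
+
+            import rlcompleter, sys
+            completer = rlcompleter.Completer({'sys': sys})
+            completer.complete('sys.pa', 0)   # -> 'sys.path'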
+ + WARNING: this can still invoke arbitrary C code, if an object + with a __getattr__ hook is evaluated. + + """ + import re + m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text) + if not m: + return [] + expr, attr = m.group(1, 3) + try: + thisobject = eval(expr, self.namespace) + except Exception: + return [] + + # get the content of the object, except __builtins__ + words = set(dir(thisobject)) + words.discard("__builtins__") + + if hasattr(thisobject, '__class__'): + words.add('__class__') + words.update(get_class_members(thisobject.__class__)) + matches = [] + n = len(attr) + for word in words: + if word[:n] == attr: + try: + val = getattr(thisobject, word) + except Exception: + continue # Exclude properties that are not set + word = self._callable_postfix(val, "%s.%s" % (expr, word)) + matches.append(word) + matches.sort() + return matches + +def get_class_members(klass): + ret = dir(klass) + if hasattr(klass,'__bases__'): + for base in klass.__bases__: + ret = ret + get_class_members(base) + return ret + +try: + import readline +except ImportError: + pass +else: + readline.set_completer(Completer().complete) + # Release references early at shutdown (the readline module's + # contents are quasi-immortal, and the completer function holds a + # reference to globals). + atexit.register(lambda: readline.set_completer(None)) diff --git a/v1/flask/lib/python3.4/shutil.py b/v1/flask/lib/python3.4/shutil.py deleted file mode 120000 index 38bc301..0000000 --- a/v1/flask/lib/python3.4/shutil.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/shutil.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/shutil.py b/v1/flask/lib/python3.4/shutil.py new file mode 100644 index 0000000..d767a0c --- /dev/null +++ b/v1/flask/lib/python3.4/shutil.py @@ -0,0 +1,1141 @@ +"""Utility functions for copying and archiving files and directory trees. + +XXX The functions here don't copy the resource fork or other metadata on Mac. + +""" + +import os +import sys +import stat +from os.path import abspath +import fnmatch +import collections +import errno +import tarfile + +try: + import bz2 + del bz2 + _BZ2_SUPPORTED = True +except ImportError: + _BZ2_SUPPORTED = False + +try: + from pwd import getpwnam +except ImportError: + getpwnam = None + +try: + from grp import getgrnam +except ImportError: + getgrnam = None + +__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2", + "copytree", "move", "rmtree", "Error", "SpecialFileError", + "ExecError", "make_archive", "get_archive_formats", + "register_archive_format", "unregister_archive_format", + "get_unpack_formats", "register_unpack_format", + "unregister_unpack_format", "unpack_archive", + "ignore_patterns", "chown", "which", "get_terminal_size", + "SameFileError"] + # disk_usage is added later, if available on the platform + +class Error(OSError): + pass + +class SameFileError(Error): + """Raised when source and destination are the same file.""" + +class SpecialFileError(OSError): + """Raised when trying to do a kind of operation (e.g. copying) which is + not supported on a special file (e.g. 
a named pipe)""" + +class ExecError(OSError): + """Raised when a command could not be executed""" + +class ReadError(OSError): + """Raised when an archive cannot be read""" + +class RegistryError(Exception): + """Raised when a registry operation with the archiving + and unpacking registeries fails""" + + +def copyfileobj(fsrc, fdst, length=16*1024): + """copy data from file-like object fsrc to file-like object fdst""" + while 1: + buf = fsrc.read(length) + if not buf: + break + fdst.write(buf) + +def _samefile(src, dst): + # Macintosh, Unix. + if hasattr(os.path, 'samefile'): + try: + return os.path.samefile(src, dst) + except OSError: + return False + + # All other platforms: check for same pathname. + return (os.path.normcase(os.path.abspath(src)) == + os.path.normcase(os.path.abspath(dst))) + +def copyfile(src, dst, *, follow_symlinks=True): + """Copy data from src to dst. + + If follow_symlinks is not set and src is a symbolic link, a new + symlink will be created instead of copying the file it points to. + + """ + if _samefile(src, dst): + raise SameFileError("{!r} and {!r} are the same file".format(src, dst)) + + for fn in [src, dst]: + try: + st = os.stat(fn) + except OSError: + # File most likely does not exist + pass + else: + # XXX What about other special files? (sockets, devices...) + if stat.S_ISFIFO(st.st_mode): + raise SpecialFileError("`%s` is a named pipe" % fn) + + if not follow_symlinks and os.path.islink(src): + os.symlink(os.readlink(src), dst) + else: + with open(src, 'rb') as fsrc: + with open(dst, 'wb') as fdst: + copyfileobj(fsrc, fdst) + return dst + +def copymode(src, dst, *, follow_symlinks=True): + """Copy mode bits from src to dst. + + If follow_symlinks is not set, symlinks aren't followed if and only + if both `src` and `dst` are symlinks. If `lchmod` isn't available + (e.g. Linux) this method does nothing. + + """ + if not follow_symlinks and os.path.islink(src) and os.path.islink(dst): + if hasattr(os, 'lchmod'): + stat_func, chmod_func = os.lstat, os.lchmod + else: + return + elif hasattr(os, 'chmod'): + stat_func, chmod_func = os.stat, os.chmod + else: + return + + st = stat_func(src) + chmod_func(dst, stat.S_IMODE(st.st_mode)) + +if hasattr(os, 'listxattr'): + def _copyxattr(src, dst, *, follow_symlinks=True): + """Copy extended filesystem attributes from `src` to `dst`. + + Overwrite existing attributes. + + If `follow_symlinks` is false, symlinks won't be followed. + + """ + + try: + names = os.listxattr(src, follow_symlinks=follow_symlinks) + except OSError as e: + if e.errno not in (errno.ENOTSUP, errno.ENODATA): + raise + return + for name in names: + try: + value = os.getxattr(src, name, follow_symlinks=follow_symlinks) + os.setxattr(dst, name, value, follow_symlinks=follow_symlinks) + except OSError as e: + if e.errno not in (errno.EPERM, errno.ENOTSUP, errno.ENODATA): + raise +else: + def _copyxattr(*args, **kwargs): + pass + +def copystat(src, dst, *, follow_symlinks=True): + """Copy all stat info (mode bits, atime, mtime, flags) from src to dst. + + If the optional flag `follow_symlinks` is not set, symlinks aren't followed if and + only if both `src` and `dst` are symlinks. 
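+
+    Example (an illustrative sketch; assumes both files already exist):
+
+        from shutil import copystat
+        copystat('template.conf', 'site.conf')   # copies mode bits and timestamps, not data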
+ + """ + def _nop(*args, ns=None, follow_symlinks=None): + pass + + # follow symlinks (aka don't not follow symlinks) + follow = follow_symlinks or not (os.path.islink(src) and os.path.islink(dst)) + if follow: + # use the real function if it exists + def lookup(name): + return getattr(os, name, _nop) + else: + # use the real function only if it exists + # *and* it supports follow_symlinks + def lookup(name): + fn = getattr(os, name, _nop) + if fn in os.supports_follow_symlinks: + return fn + return _nop + + st = lookup("stat")(src, follow_symlinks=follow) + mode = stat.S_IMODE(st.st_mode) + lookup("utime")(dst, ns=(st.st_atime_ns, st.st_mtime_ns), + follow_symlinks=follow) + try: + lookup("chmod")(dst, mode, follow_symlinks=follow) + except NotImplementedError: + # if we got a NotImplementedError, it's because + # * follow_symlinks=False, + # * lchown() is unavailable, and + # * either + # * fchownat() is unavailable or + # * fchownat() doesn't implement AT_SYMLINK_NOFOLLOW. + # (it returned ENOSUP.) + # therefore we're out of options--we simply cannot chown the + # symlink. give up, suppress the error. + # (which is what shutil always did in this circumstance.) + pass + if hasattr(st, 'st_flags'): + try: + lookup("chflags")(dst, st.st_flags, follow_symlinks=follow) + except OSError as why: + for err in 'EOPNOTSUPP', 'ENOTSUP': + if hasattr(errno, err) and why.errno == getattr(errno, err): + break + else: + raise + _copyxattr(src, dst, follow_symlinks=follow) + +def copy(src, dst, *, follow_symlinks=True): + """Copy data and mode bits ("cp src dst"). Return the file's destination. + + The destination may be a directory. + + If follow_symlinks is false, symlinks won't be followed. This + resembles GNU's "cp -P src dst". + + If source and destination are the same file, a SameFileError will be + raised. + + """ + if os.path.isdir(dst): + dst = os.path.join(dst, os.path.basename(src)) + copyfile(src, dst, follow_symlinks=follow_symlinks) + copymode(src, dst, follow_symlinks=follow_symlinks) + return dst + +def copy2(src, dst, *, follow_symlinks=True): + """Copy data and all stat info ("cp -p src dst"). Return the file's + destination." + + The destination may be a directory. + + If follow_symlinks is false, symlinks won't be followed. This + resembles GNU's "cp -P src dst". + + """ + if os.path.isdir(dst): + dst = os.path.join(dst, os.path.basename(src)) + copyfile(src, dst, follow_symlinks=follow_symlinks) + copystat(src, dst, follow_symlinks=follow_symlinks) + return dst + +def ignore_patterns(*patterns): + """Function that can be used as copytree() ignore parameter. + + Patterns is a sequence of glob-style patterns + that are used to exclude files""" + def _ignore_patterns(path, names): + ignored_names = [] + for pattern in patterns: + ignored_names.extend(fnmatch.filter(names, pattern)) + return set(ignored_names) + return _ignore_patterns + +def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2, + ignore_dangling_symlinks=False): + """Recursively copy a directory tree. + + The destination directory must not already exist. + If exception(s) occur, an Error is raised with a list of reasons. + + If the optional symlinks flag is true, symbolic links in the + source tree result in symbolic links in the destination tree; if + it is false, the contents of the files pointed to by symbolic + links are copied. If the file pointed by the symlink doesn't + exist, an exception will be added in the list of errors raised in + an Error exception at the end of the copy process. 
+ + You can set the optional ignore_dangling_symlinks flag to true if you + want to silence this exception. Notice that this has no effect on + platforms that don't support os.symlink. + + The optional ignore argument is a callable. If given, it + is called with the `src` parameter, which is the directory + being visited by copytree(), and `names` which is the list of + `src` contents, as returned by os.listdir(): + + callable(src, names) -> ignored_names + + Since copytree() is called recursively, the callable will be + called once for each directory that is copied. It returns a + list of names relative to the `src` directory that should + not be copied. + + The optional copy_function argument is a callable that will be used + to copy each file. It will be called with the source path and the + destination path as arguments. By default, copy2() is used, but any + function that supports the same signature (like copy()) can be used. + + """ + names = os.listdir(src) + if ignore is not None: + ignored_names = ignore(src, names) + else: + ignored_names = set() + + os.makedirs(dst) + errors = [] + for name in names: + if name in ignored_names: + continue + srcname = os.path.join(src, name) + dstname = os.path.join(dst, name) + try: + if os.path.islink(srcname): + linkto = os.readlink(srcname) + if symlinks: + # We can't just leave it to `copy_function` because legacy + # code with a custom `copy_function` may rely on copytree + # doing the right thing. + os.symlink(linkto, dstname) + copystat(srcname, dstname, follow_symlinks=not symlinks) + else: + # ignore dangling symlink if the flag is on + if not os.path.exists(linkto) and ignore_dangling_symlinks: + continue + # otherwise let the copy occurs. copy2 will raise an error + if os.path.isdir(srcname): + copytree(srcname, dstname, symlinks, ignore, + copy_function) + else: + copy_function(srcname, dstname) + elif os.path.isdir(srcname): + copytree(srcname, dstname, symlinks, ignore, copy_function) + else: + # Will raise a SpecialFileError for unsupported file types + copy_function(srcname, dstname) + # catch the Error from the recursive copytree so that we can + # continue with other files + except Error as err: + errors.extend(err.args[0]) + except OSError as why: + errors.append((srcname, dstname, str(why))) + try: + copystat(src, dst) + except OSError as why: + # Copying file access times may fail on Windows + if getattr(why, 'winerror', None) is None: + errors.append((src, dst, str(why))) + if errors: + raise Error(errors) + return dst + +# version vulnerable to race conditions +def _rmtree_unsafe(path, onerror): + try: + if os.path.islink(path): + # symlinks to directories are forbidden, see bug #1669 + raise OSError("Cannot call rmtree on a symbolic link") + except OSError: + onerror(os.path.islink, path, sys.exc_info()) + # can't continue even if onerror hook returns + return + names = [] + try: + names = os.listdir(path) + except OSError: + onerror(os.listdir, path, sys.exc_info()) + for name in names: + fullname = os.path.join(path, name) + try: + mode = os.lstat(fullname).st_mode + except OSError: + mode = 0 + if stat.S_ISDIR(mode): + _rmtree_unsafe(fullname, onerror) + else: + try: + os.unlink(fullname) + except OSError: + onerror(os.unlink, fullname, sys.exc_info()) + try: + os.rmdir(path) + except OSError: + onerror(os.rmdir, path, sys.exc_info()) + +# Version using fd-based APIs to protect against races +def _rmtree_safe_fd(topfd, path, onerror): + names = [] + try: + names = os.listdir(topfd) + except OSError as err: + 
err.filename = path + onerror(os.listdir, path, sys.exc_info()) + for name in names: + fullname = os.path.join(path, name) + try: + orig_st = os.stat(name, dir_fd=topfd, follow_symlinks=False) + mode = orig_st.st_mode + except OSError: + mode = 0 + if stat.S_ISDIR(mode): + try: + dirfd = os.open(name, os.O_RDONLY, dir_fd=topfd) + except OSError: + onerror(os.open, fullname, sys.exc_info()) + else: + try: + if os.path.samestat(orig_st, os.fstat(dirfd)): + _rmtree_safe_fd(dirfd, fullname, onerror) + try: + os.rmdir(name, dir_fd=topfd) + except OSError: + onerror(os.rmdir, fullname, sys.exc_info()) + else: + try: + # This can only happen if someone replaces + # a directory with a symlink after the call to + # stat.S_ISDIR above. + raise OSError("Cannot call rmtree on a symbolic " + "link") + except OSError: + onerror(os.path.islink, fullname, sys.exc_info()) + finally: + os.close(dirfd) + else: + try: + os.unlink(name, dir_fd=topfd) + except OSError: + onerror(os.unlink, fullname, sys.exc_info()) + +_use_fd_functions = ({os.open, os.stat, os.unlink, os.rmdir} <= + os.supports_dir_fd and + os.listdir in os.supports_fd and + os.stat in os.supports_follow_symlinks) + +def rmtree(path, ignore_errors=False, onerror=None): + """Recursively delete a directory tree. + + If ignore_errors is set, errors are ignored; otherwise, if onerror + is set, it is called to handle the error with arguments (func, + path, exc_info) where func is platform and implementation dependent; + path is the argument to that function that caused it to fail; and + exc_info is a tuple returned by sys.exc_info(). If ignore_errors + is false and onerror is None, an exception is raised. + + """ + if ignore_errors: + def onerror(*args): + pass + elif onerror is None: + def onerror(*args): + raise + if _use_fd_functions: + # While the unsafe rmtree works fine on bytes, the fd based does not. + if isinstance(path, bytes): + path = os.fsdecode(path) + # Note: To guard against symlink races, we use the standard + # lstat()/open()/fstat() trick. + try: + orig_st = os.lstat(path) + except Exception: + onerror(os.lstat, path, sys.exc_info()) + return + try: + fd = os.open(path, os.O_RDONLY) + except Exception: + onerror(os.lstat, path, sys.exc_info()) + return + try: + if os.path.samestat(orig_st, os.fstat(fd)): + _rmtree_safe_fd(fd, path, onerror) + try: + os.rmdir(path) + except OSError: + onerror(os.rmdir, path, sys.exc_info()) + else: + try: + # symlinks to directories are forbidden, see bug #1669 + raise OSError("Cannot call rmtree on a symbolic link") + except OSError: + onerror(os.path.islink, path, sys.exc_info()) + finally: + os.close(fd) + else: + return _rmtree_unsafe(path, onerror) + +# Allow introspection of whether or not the hardening against symlink +# attacks is supported on the current platform +rmtree.avoids_symlink_attacks = _use_fd_functions + +def _basename(path): + # A basename() variant which first strips the trailing slash, if present. + # Thus we always get the last component of the path, even for directories. + sep = os.path.sep + (os.path.altsep or '') + return os.path.basename(path.rstrip(sep)) + +def move(src, dst): + """Recursively move a file or directory to another location. This is + similar to the Unix "mv" command. Return the file or directory's + destination. + + If the destination is a directory or a symlink to a directory, the source + is moved inside the directory. The destination path must not already + exist. 
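+
+    Example (an illustrative sketch; paths are hypothetical):
+
+        from shutil import move
+        move('report.txt', 'archive/')   # -> 'archive/report.txt' when archive/ is a directory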
+ + If the destination already exists but is not a directory, it may be + overwritten depending on os.rename() semantics. + + If the destination is on our current filesystem, then rename() is used. + Otherwise, src is copied to the destination and then removed. Symlinks are + recreated under the new name if os.rename() fails because of cross + filesystem renames. + + A lot more could be done here... A look at a mv.c shows a lot of + the issues this implementation glosses over. + + """ + real_dst = dst + if os.path.isdir(dst): + if _samefile(src, dst): + # We might be on a case insensitive filesystem, + # perform the rename anyway. + os.rename(src, dst) + return + + real_dst = os.path.join(dst, _basename(src)) + if os.path.exists(real_dst): + raise Error("Destination path '%s' already exists" % real_dst) + try: + os.rename(src, real_dst) + except OSError: + if os.path.islink(src): + linkto = os.readlink(src) + os.symlink(linkto, real_dst) + os.unlink(src) + elif os.path.isdir(src): + if _destinsrc(src, dst): + raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst)) + copytree(src, real_dst, symlinks=True) + rmtree(src) + else: + copy2(src, real_dst) + os.unlink(src) + return real_dst + +def _destinsrc(src, dst): + src = abspath(src) + dst = abspath(dst) + if not src.endswith(os.path.sep): + src += os.path.sep + if not dst.endswith(os.path.sep): + dst += os.path.sep + return dst.startswith(src) + +def _get_gid(name): + """Returns a gid, given a group name.""" + if getgrnam is None or name is None: + return None + try: + result = getgrnam(name) + except KeyError: + result = None + if result is not None: + return result[2] + return None + +def _get_uid(name): + """Returns an uid, given a user name.""" + if getpwnam is None or name is None: + return None + try: + result = getpwnam(name) + except KeyError: + result = None + if result is not None: + return result[2] + return None + +def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0, + owner=None, group=None, logger=None): + """Create a (possibly compressed) tar file from all the files under + 'base_dir'. + + 'compress' must be "gzip" (the default), "bzip2", or None. + + 'owner' and 'group' can be used to define an owner and a group for the + archive that is being built. If not provided, the current owner and group + will be used. + + The output tar file will be named 'base_name' + ".tar", possibly plus + the appropriate compression extension (".gz", or ".bz2"). + + Returns the output filename. 
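+
+    Example (an illustrative sketch; assumes a local 'data' directory; this is
+    an internal helper normally reached through make_archive()):
+
+        _make_tarball('backup', 'data', compress='gzip')   # -> 'backup.tar.gz'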
+ """ + tar_compression = {'gzip': 'gz', None: ''} + compress_ext = {'gzip': '.gz'} + + if _BZ2_SUPPORTED: + tar_compression['bzip2'] = 'bz2' + compress_ext['bzip2'] = '.bz2' + + # flags for compression program, each element of list will be an argument + if compress is not None and compress not in compress_ext: + raise ValueError("bad value for 'compress', or compression format not " + "supported : {0}".format(compress)) + + archive_name = base_name + '.tar' + compress_ext.get(compress, '') + archive_dir = os.path.dirname(archive_name) + + if archive_dir and not os.path.exists(archive_dir): + if logger is not None: + logger.info("creating %s", archive_dir) + if not dry_run: + os.makedirs(archive_dir) + + # creating the tarball + if logger is not None: + logger.info('Creating tar archive') + + uid = _get_uid(owner) + gid = _get_gid(group) + + def _set_uid_gid(tarinfo): + if gid is not None: + tarinfo.gid = gid + tarinfo.gname = group + if uid is not None: + tarinfo.uid = uid + tarinfo.uname = owner + return tarinfo + + if not dry_run: + tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress]) + try: + tar.add(base_dir, filter=_set_uid_gid) + finally: + tar.close() + + return archive_name + +def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False): + # XXX see if we want to keep an external call here + if verbose: + zipoptions = "-r" + else: + zipoptions = "-rq" + from distutils.errors import DistutilsExecError + from distutils.spawn import spawn + try: + spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run) + except DistutilsExecError: + # XXX really should distinguish between "couldn't find + # external 'zip' command" and "zip failed". + raise ExecError("unable to create zip file '%s': " + "could neither import the 'zipfile' module nor " + "find a standalone zip utility") % zip_filename + +def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None): + """Create a zip file from all the files under 'base_dir'. + + The output zip file will be named 'base_name' + ".zip". Uses either the + "zipfile" Python module (if available) or the InfoZIP "zip" utility + (if installed and found on the default search path). If neither tool is + available, raises ExecError. Returns the name of the output zip + file. + """ + zip_filename = base_name + ".zip" + archive_dir = os.path.dirname(base_name) + + if archive_dir and not os.path.exists(archive_dir): + if logger is not None: + logger.info("creating %s", archive_dir) + if not dry_run: + os.makedirs(archive_dir) + + # If zipfile module is not available, try spawning an external 'zip' + # command. 
+ try: + import zipfile + except ImportError: + zipfile = None + + if zipfile is None: + _call_external_zip(base_dir, zip_filename, verbose, dry_run) + else: + if logger is not None: + logger.info("creating '%s' and adding '%s' to it", + zip_filename, base_dir) + + if not dry_run: + with zipfile.ZipFile(zip_filename, "w", + compression=zipfile.ZIP_DEFLATED) as zf: + path = os.path.normpath(base_dir) + zf.write(path, path) + if logger is not None: + logger.info("adding '%s'", path) + for dirpath, dirnames, filenames in os.walk(base_dir): + for name in sorted(dirnames): + path = os.path.normpath(os.path.join(dirpath, name)) + zf.write(path, path) + if logger is not None: + logger.info("adding '%s'", path) + for name in filenames: + path = os.path.normpath(os.path.join(dirpath, name)) + if os.path.isfile(path): + zf.write(path, path) + if logger is not None: + logger.info("adding '%s'", path) + + return zip_filename + +_ARCHIVE_FORMATS = { + 'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"), + 'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"), + 'zip': (_make_zipfile, [], "ZIP file") + } + +if _BZ2_SUPPORTED: + _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')], + "bzip2'ed tar-file") + +def get_archive_formats(): + """Returns a list of supported formats for archiving and unarchiving. + + Each element of the returned sequence is a tuple (name, description) + """ + formats = [(name, registry[2]) for name, registry in + _ARCHIVE_FORMATS.items()] + formats.sort() + return formats + +def register_archive_format(name, function, extra_args=None, description=''): + """Registers an archive format. + + name is the name of the format. function is the callable that will be + used to create archives. If provided, extra_args is a sequence of + (name, value) tuples that will be passed as arguments to the callable. + description can be provided to describe the format, and will be returned + by the get_archive_formats() function. + """ + if extra_args is None: + extra_args = [] + if not callable(function): + raise TypeError('The %s object is not callable' % function) + if not isinstance(extra_args, (tuple, list)): + raise TypeError('extra_args needs to be a sequence') + for element in extra_args: + if not isinstance(element, (tuple, list)) or len(element) !=2: + raise TypeError('extra_args elements are : (arg_name, value)') + + _ARCHIVE_FORMATS[name] = (function, extra_args, description) + +def unregister_archive_format(name): + del _ARCHIVE_FORMATS[name] + +def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0, + dry_run=0, owner=None, group=None, logger=None): + """Create an archive file (eg. zip or tar). + + 'base_name' is the name of the file to create, minus any format-specific + extension; 'format' is the archive format: one of "zip", "tar", "bztar" + or "gztar". + + 'root_dir' is a directory that will be the root directory of the + archive; ie. we typically chdir into 'root_dir' before creating the + archive. 'base_dir' is the directory where we start archiving from; + ie. 'base_dir' will be the common prefix of all files and + directories in the archive. 'root_dir' and 'base_dir' both default + to the current directory. Returns the name of the archive file. + + 'owner' and 'group' are used when creating a tar archive. By default, + uses the current owner and group. 
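+
+    Example (an illustrative sketch; paths are hypothetical):
+
+        from shutil import make_archive
+        make_archive('backup', 'gztar', root_dir='/home/user', base_dir='project')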
+ """ + save_cwd = os.getcwd() + if root_dir is not None: + if logger is not None: + logger.debug("changing into '%s'", root_dir) + base_name = os.path.abspath(base_name) + if not dry_run: + os.chdir(root_dir) + + if base_dir is None: + base_dir = os.curdir + + kwargs = {'dry_run': dry_run, 'logger': logger} + + try: + format_info = _ARCHIVE_FORMATS[format] + except KeyError: + raise ValueError("unknown archive format '%s'" % format) + + func = format_info[0] + for arg, val in format_info[1]: + kwargs[arg] = val + + if format != 'zip': + kwargs['owner'] = owner + kwargs['group'] = group + + try: + filename = func(base_name, base_dir, **kwargs) + finally: + if root_dir is not None: + if logger is not None: + logger.debug("changing back to '%s'", save_cwd) + os.chdir(save_cwd) + + return filename + + +def get_unpack_formats(): + """Returns a list of supported formats for unpacking. + + Each element of the returned sequence is a tuple + (name, extensions, description) + """ + formats = [(name, info[0], info[3]) for name, info in + _UNPACK_FORMATS.items()] + formats.sort() + return formats + +def _check_unpack_options(extensions, function, extra_args): + """Checks what gets registered as an unpacker.""" + # first make sure no other unpacker is registered for this extension + existing_extensions = {} + for name, info in _UNPACK_FORMATS.items(): + for ext in info[0]: + existing_extensions[ext] = name + + for extension in extensions: + if extension in existing_extensions: + msg = '%s is already registered for "%s"' + raise RegistryError(msg % (extension, + existing_extensions[extension])) + + if not callable(function): + raise TypeError('The registered function must be a callable') + + +def register_unpack_format(name, extensions, function, extra_args=None, + description=''): + """Registers an unpack format. + + `name` is the name of the format. `extensions` is a list of extensions + corresponding to the format. + + `function` is the callable that will be + used to unpack archives. The callable will receive archives to unpack. + If it's unable to handle an archive, it needs to raise a ReadError + exception. + + If provided, `extra_args` is a sequence of + (name, value) tuples that will be passed as arguments to the callable. + description can be provided to describe the format, and will be returned + by the get_unpack_formats() function. + """ + if extra_args is None: + extra_args = [] + _check_unpack_options(extensions, function, extra_args) + _UNPACK_FORMATS[name] = extensions, function, extra_args, description + +def unregister_unpack_format(name): + """Removes the pack format from the registery.""" + del _UNPACK_FORMATS[name] + +def _ensure_directory(path): + """Ensure that the parent directory of `path` exists""" + dirname = os.path.dirname(path) + if not os.path.isdir(dirname): + os.makedirs(dirname) + +def _unpack_zipfile(filename, extract_dir): + """Unpack zip `filename` to `extract_dir` + """ + try: + import zipfile + except ImportError: + raise ReadError('zlib not supported, cannot unpack this archive.') + + if not zipfile.is_zipfile(filename): + raise ReadError("%s is not a zip file" % filename) + + zip = zipfile.ZipFile(filename) + try: + for info in zip.infolist(): + name = info.filename + + # don't extract absolute paths or ones with .. in them + if name.startswith('/') or '..' 
in name: + continue + + target = os.path.join(extract_dir, *name.split('/')) + if not target: + continue + + _ensure_directory(target) + if not name.endswith('/'): + # file + data = zip.read(info.filename) + f = open(target, 'wb') + try: + f.write(data) + finally: + f.close() + del data + finally: + zip.close() + +def _unpack_tarfile(filename, extract_dir): + """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir` + """ + try: + tarobj = tarfile.open(filename) + except tarfile.TarError: + raise ReadError( + "%s is not a compressed or uncompressed tar file" % filename) + try: + tarobj.extractall(extract_dir) + finally: + tarobj.close() + +_UNPACK_FORMATS = { + 'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"), + 'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"), + 'zip': (['.zip'], _unpack_zipfile, [], "ZIP file") + } + +if _BZ2_SUPPORTED: + _UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [], + "bzip2'ed tar-file") + +def _find_unpack_format(filename): + for name, info in _UNPACK_FORMATS.items(): + for extension in info[0]: + if filename.endswith(extension): + return name + return None + +def unpack_archive(filename, extract_dir=None, format=None): + """Unpack an archive. + + `filename` is the name of the archive. + + `extract_dir` is the name of the target directory, where the archive + is unpacked. If not provided, the current working directory is used. + + `format` is the archive format: one of "zip", "tar", or "gztar". Or any + other registered format. If not provided, unpack_archive will use the + filename extension and see if an unpacker was registered for that + extension. + + In case none is found, a ValueError is raised. + """ + if extract_dir is None: + extract_dir = os.getcwd() + + if format is not None: + try: + format_info = _UNPACK_FORMATS[format] + except KeyError: + raise ValueError("Unknown unpack format '{0}'".format(format)) + + func = format_info[1] + func(filename, extract_dir, **dict(format_info[2])) + else: + # we need to look at the registered unpackers supported extensions + format = _find_unpack_format(filename) + if format is None: + raise ReadError("Unknown archive format '{0}'".format(filename)) + + func = _UNPACK_FORMATS[format][1] + kwargs = dict(_UNPACK_FORMATS[format][2]) + func(filename, extract_dir, **kwargs) + + +if hasattr(os, 'statvfs'): + + __all__.append('disk_usage') + _ntuple_diskusage = collections.namedtuple('usage', 'total used free') + + def disk_usage(path): + """Return disk usage statistics about the given path. + + Returned value is a named tuple with attributes 'total', 'used' and + 'free', which are the amount of total, used and free space, in bytes. + """ + st = os.statvfs(path) + free = st.f_bavail * st.f_frsize + total = st.f_blocks * st.f_frsize + used = (st.f_blocks - st.f_bfree) * st.f_frsize + return _ntuple_diskusage(total, used, free) + +elif os.name == 'nt': + + import nt + __all__.append('disk_usage') + _ntuple_diskusage = collections.namedtuple('usage', 'total used free') + + def disk_usage(path): + """Return disk usage statistics about the given path. + + Returned values is a named tuple with attributes 'total', 'used' and + 'free', which are the amount of total, used and free space, in bytes. + """ + total, free = nt._getdiskusage(path) + used = total - free + return _ntuple_diskusage(total, used, free) + + +def chown(path, user=None, group=None): + """Change owner user and group of the given path. 
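+
+    Example (an illustrative sketch; requires sufficient privileges, and the
+    file, user and group names are hypothetical):
+
+        from shutil import chown
+        chown('site.log', user='www-data', group='www-data')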
+ + user and group can be the uid/gid or the user/group names, and in that case, + they are converted to their respective uid/gid. + """ + + if user is None and group is None: + raise ValueError("user and/or group must be set") + + _user = user + _group = group + + # -1 means don't change it + if user is None: + _user = -1 + # user can either be an int (the uid) or a string (the system username) + elif isinstance(user, str): + _user = _get_uid(user) + if _user is None: + raise LookupError("no such user: {!r}".format(user)) + + if group is None: + _group = -1 + elif not isinstance(group, int): + _group = _get_gid(group) + if _group is None: + raise LookupError("no such group: {!r}".format(group)) + + os.chown(path, _user, _group) + +def get_terminal_size(fallback=(80, 24)): + """Get the size of the terminal window. + + For each of the two dimensions, the environment variable, COLUMNS + and LINES respectively, is checked. If the variable is defined and + the value is a positive integer, it is used. + + When COLUMNS or LINES is not defined, which is the common case, + the terminal connected to sys.__stdout__ is queried + by invoking os.get_terminal_size. + + If the terminal size cannot be successfully queried, either because + the system doesn't support querying, or because we are not + connected to a terminal, the value given in fallback parameter + is used. Fallback defaults to (80, 24) which is the default + size used by many terminal emulators. + + The value returned is a named tuple of type os.terminal_size. + """ + # columns, lines are the working values + try: + columns = int(os.environ['COLUMNS']) + except (KeyError, ValueError): + columns = 0 + + try: + lines = int(os.environ['LINES']) + except (KeyError, ValueError): + lines = 0 + + # only query if necessary + if columns <= 0 or lines <= 0: + try: + size = os.get_terminal_size(sys.__stdout__.fileno()) + except (NameError, OSError): + size = os.terminal_size(fallback) + if columns <= 0: + columns = size.columns + if lines <= 0: + lines = size.lines + + return os.terminal_size((columns, lines)) + +def which(cmd, mode=os.F_OK | os.X_OK, path=None): + """Given a command, mode, and a PATH string, return the path which + conforms to the given mode on the PATH, or None if there is no such + file. + + `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result + of os.environ.get("PATH"), or can be overridden with a custom search + path. + + """ + # Check that a given file can be accessed with the correct mode. + # Additionally check that `file` is not a directory, as on Windows + # directories pass the os.access check. + def _access_check(fn, mode): + return (os.path.exists(fn) and os.access(fn, mode) + and not os.path.isdir(fn)) + + # If we're given a path with a directory part, look it up directly rather + # than referring to PATH directories. This includes checking relative to the + # current directory, e.g. ./script + if os.path.dirname(cmd): + if _access_check(cmd, mode): + return cmd + return None + + if path is None: + path = os.environ.get("PATH", os.defpath) + if not path: + return None + path = path.split(os.pathsep) + + if sys.platform == "win32": + # The current directory takes precedence on Windows. + if not os.curdir in path: + path.insert(0, os.curdir) + + # PATHEXT is necessary to check on Windows. + pathext = os.environ.get("PATHEXT", "").split(os.pathsep) + # See if the given file matches any of the expected path extensions. + # This will allow us to short circuit when given "python.exe". 
+ # If it does match, only test that one, otherwise we have to try + # others. + if any(cmd.lower().endswith(ext.lower()) for ext in pathext): + files = [cmd] + else: + files = [cmd + ext for ext in pathext] + else: + # On other platforms you don't have things like PATHEXT to tell you + # what file suffixes are executable, so just pass on cmd as-is. + files = [cmd] + + seen = set() + for dir in path: + normdir = os.path.normcase(dir) + if not normdir in seen: + seen.add(normdir) + for thefile in files: + name = os.path.join(dir, thefile) + if _access_check(name, mode): + return name + return None diff --git a/v1/flask/lib/python3.4/sre_compile.py b/v1/flask/lib/python3.4/sre_compile.py deleted file mode 120000 index f1e9669..0000000 --- a/v1/flask/lib/python3.4/sre_compile.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/sre_compile.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/sre_compile.py b/v1/flask/lib/python3.4/sre_compile.py new file mode 100644 index 0000000..550ea15 --- /dev/null +++ b/v1/flask/lib/python3.4/sre_compile.py @@ -0,0 +1,592 @@ +# +# Secret Labs' Regular Expression Engine +# +# convert template to internal format +# +# Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved. +# +# See the sre.py file for information on usage and redistribution. +# + +"""Internal support module for sre""" + +import _sre +import sre_parse +from sre_constants import * +from _sre import MAXREPEAT + +assert _sre.MAGIC == MAGIC, "SRE module mismatch" + +if _sre.CODESIZE == 2: + MAXCODE = 65535 +else: + MAXCODE = 0xFFFFFFFF + +_LITERAL_CODES = set([LITERAL, NOT_LITERAL]) +_REPEATING_CODES = set([REPEAT, MIN_REPEAT, MAX_REPEAT]) +_SUCCESS_CODES = set([SUCCESS, FAILURE]) +_ASSERT_CODES = set([ASSERT, ASSERT_NOT]) + +# Sets of lowercase characters which have the same uppercase. +_equivalences = ( + # LATIN SMALL LETTER I, LATIN SMALL LETTER DOTLESS I + (0x69, 0x131), # iı + # LATIN SMALL LETTER S, LATIN SMALL LETTER LONG S + (0x73, 0x17f), # sſ + # MICRO SIGN, GREEK SMALL LETTER MU + (0xb5, 0x3bc), # µμ + # COMBINING GREEK YPOGEGRAMMENI, GREEK SMALL LETTER IOTA, GREEK PROSGEGRAMMENI + (0x345, 0x3b9, 0x1fbe), # \u0345ιι + # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS, GREEK SMALL LETTER IOTA WITH DIALYTIKA AND OXIA + (0x390, 0x1fd3), # ΐΐ + # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS, GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND OXIA + (0x3b0, 0x1fe3), # ΰΰ + # GREEK SMALL LETTER BETA, GREEK BETA SYMBOL + (0x3b2, 0x3d0), # βϐ + # GREEK SMALL LETTER EPSILON, GREEK LUNATE EPSILON SYMBOL + (0x3b5, 0x3f5), # εϵ + # GREEK SMALL LETTER THETA, GREEK THETA SYMBOL + (0x3b8, 0x3d1), # θϑ + # GREEK SMALL LETTER KAPPA, GREEK KAPPA SYMBOL + (0x3ba, 0x3f0), # κϰ + # GREEK SMALL LETTER PI, GREEK PI SYMBOL + (0x3c0, 0x3d6), # πϖ + # GREEK SMALL LETTER RHO, GREEK RHO SYMBOL + (0x3c1, 0x3f1), # ρϱ + # GREEK SMALL LETTER FINAL SIGMA, GREEK SMALL LETTER SIGMA + (0x3c2, 0x3c3), # ςσ + # GREEK SMALL LETTER PHI, GREEK PHI SYMBOL + (0x3c6, 0x3d5), # φϕ + # LATIN SMALL LETTER S WITH DOT ABOVE, LATIN SMALL LETTER LONG S WITH DOT ABOVE + (0x1e61, 0x1e9b), # ṡẛ + # LATIN SMALL LIGATURE LONG S T, LATIN SMALL LIGATURE ST + (0xfb05, 0xfb06), # ſtst +) + +# Maps the lowercase code to lowercase codes which have the same uppercase. 
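+# For example, the (0x73, 0x17f) pair above ('s' and long s) yields the
+# entries 0x73 -> (0x17f,) and 0x17f -> (0x73,).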
+_ignorecase_fixes = {i: tuple(j for j in t if i != j) + for t in _equivalences for i in t} + +def _compile(code, pattern, flags): + # internal: compile a (sub)pattern + emit = code.append + _len = len + LITERAL_CODES = _LITERAL_CODES + REPEATING_CODES = _REPEATING_CODES + SUCCESS_CODES = _SUCCESS_CODES + ASSERT_CODES = _ASSERT_CODES + if (flags & SRE_FLAG_IGNORECASE and + not (flags & SRE_FLAG_LOCALE) and + flags & SRE_FLAG_UNICODE): + fixes = _ignorecase_fixes + else: + fixes = None + for op, av in pattern: + if op in LITERAL_CODES: + if flags & SRE_FLAG_IGNORECASE: + lo = _sre.getlower(av, flags) + if fixes and lo in fixes: + emit(OPCODES[IN_IGNORE]) + skip = _len(code); emit(0) + if op is NOT_LITERAL: + emit(OPCODES[NEGATE]) + for k in (lo,) + fixes[lo]: + emit(OPCODES[LITERAL]) + emit(k) + emit(OPCODES[FAILURE]) + code[skip] = _len(code) - skip + else: + emit(OPCODES[OP_IGNORE[op]]) + emit(lo) + else: + emit(OPCODES[op]) + emit(av) + elif op is IN: + if flags & SRE_FLAG_IGNORECASE: + emit(OPCODES[OP_IGNORE[op]]) + def fixup(literal, flags=flags): + return _sre.getlower(literal, flags) + else: + emit(OPCODES[op]) + fixup = None + skip = _len(code); emit(0) + _compile_charset(av, flags, code, fixup, fixes) + code[skip] = _len(code) - skip + elif op is ANY: + if flags & SRE_FLAG_DOTALL: + emit(OPCODES[ANY_ALL]) + else: + emit(OPCODES[ANY]) + elif op in REPEATING_CODES: + if flags & SRE_FLAG_TEMPLATE: + raise error("internal: unsupported template operator") + elif _simple(av) and op is not REPEAT: + if op is MAX_REPEAT: + emit(OPCODES[REPEAT_ONE]) + else: + emit(OPCODES[MIN_REPEAT_ONE]) + skip = _len(code); emit(0) + emit(av[0]) + emit(av[1]) + _compile(code, av[2], flags) + emit(OPCODES[SUCCESS]) + code[skip] = _len(code) - skip + else: + emit(OPCODES[REPEAT]) + skip = _len(code); emit(0) + emit(av[0]) + emit(av[1]) + _compile(code, av[2], flags) + code[skip] = _len(code) - skip + if op is MAX_REPEAT: + emit(OPCODES[MAX_UNTIL]) + else: + emit(OPCODES[MIN_UNTIL]) + elif op is SUBPATTERN: + if av[0]: + emit(OPCODES[MARK]) + emit((av[0]-1)*2) + # _compile_info(code, av[1], flags) + _compile(code, av[1], flags) + if av[0]: + emit(OPCODES[MARK]) + emit((av[0]-1)*2+1) + elif op in SUCCESS_CODES: + emit(OPCODES[op]) + elif op in ASSERT_CODES: + emit(OPCODES[op]) + skip = _len(code); emit(0) + if av[0] >= 0: + emit(0) # look ahead + else: + lo, hi = av[1].getwidth() + if lo != hi: + raise error("look-behind requires fixed-width pattern") + emit(lo) # look behind + _compile(code, av[1], flags) + emit(OPCODES[SUCCESS]) + code[skip] = _len(code) - skip + elif op is CALL: + emit(OPCODES[op]) + skip = _len(code); emit(0) + _compile(code, av, flags) + emit(OPCODES[SUCCESS]) + code[skip] = _len(code) - skip + elif op is AT: + emit(OPCODES[op]) + if flags & SRE_FLAG_MULTILINE: + av = AT_MULTILINE.get(av, av) + if flags & SRE_FLAG_LOCALE: + av = AT_LOCALE.get(av, av) + elif flags & SRE_FLAG_UNICODE: + av = AT_UNICODE.get(av, av) + emit(ATCODES[av]) + elif op is BRANCH: + emit(OPCODES[op]) + tail = [] + tailappend = tail.append + for av in av[1]: + skip = _len(code); emit(0) + # _compile_info(code, av, flags) + _compile(code, av, flags) + emit(OPCODES[JUMP]) + tailappend(_len(code)); emit(0) + code[skip] = _len(code) - skip + emit(0) # end of branch + for tail in tail: + code[tail] = _len(code) - tail + elif op is CATEGORY: + emit(OPCODES[op]) + if flags & SRE_FLAG_LOCALE: + av = CH_LOCALE[av] + elif flags & SRE_FLAG_UNICODE: + av = CH_UNICODE[av] + emit(CHCODES[av]) + elif op is GROUPREF: + if flags & 
SRE_FLAG_IGNORECASE: + emit(OPCODES[OP_IGNORE[op]]) + else: + emit(OPCODES[op]) + emit(av-1) + elif op is GROUPREF_EXISTS: + emit(OPCODES[op]) + emit(av[0]-1) + skipyes = _len(code); emit(0) + _compile(code, av[1], flags) + if av[2]: + emit(OPCODES[JUMP]) + skipno = _len(code); emit(0) + code[skipyes] = _len(code) - skipyes + 1 + _compile(code, av[2], flags) + code[skipno] = _len(code) - skipno + else: + code[skipyes] = _len(code) - skipyes + 1 + else: + raise ValueError("unsupported operand type", op) + +def _compile_charset(charset, flags, code, fixup=None, fixes=None): + # compile charset subprogram + emit = code.append + for op, av in _optimize_charset(charset, fixup, fixes, + flags & SRE_FLAG_UNICODE): + emit(OPCODES[op]) + if op is NEGATE: + pass + elif op is LITERAL: + emit(av) + elif op is RANGE: + emit(av[0]) + emit(av[1]) + elif op is CHARSET: + code.extend(av) + elif op is BIGCHARSET: + code.extend(av) + elif op is CATEGORY: + if flags & SRE_FLAG_LOCALE: + emit(CHCODES[CH_LOCALE[av]]) + elif flags & SRE_FLAG_UNICODE: + emit(CHCODES[CH_UNICODE[av]]) + else: + emit(CHCODES[av]) + else: + raise error("internal: unsupported set operator") + emit(OPCODES[FAILURE]) + +def _optimize_charset(charset, fixup, fixes, isunicode): + # internal: optimize character set + out = [] + tail = [] + charmap = bytearray(256) + for op, av in charset: + while True: + try: + if op is LITERAL: + if fixup: + i = fixup(av) + charmap[i] = 1 + if fixes and i in fixes: + for k in fixes[i]: + charmap[k] = 1 + else: + charmap[av] = 1 + elif op is RANGE: + r = range(av[0], av[1]+1) + if fixup: + r = map(fixup, r) + if fixup and fixes: + for i in r: + charmap[i] = 1 + if i in fixes: + for k in fixes[i]: + charmap[k] = 1 + else: + for i in r: + charmap[i] = 1 + elif op is NEGATE: + out.append((op, av)) + else: + tail.append((op, av)) + except IndexError: + if len(charmap) == 256: + # character set contains non-UCS1 character codes + charmap += b'\0' * 0xff00 + continue + # character set contains non-BMP character codes + if fixup and isunicode and op is RANGE: + lo, hi = av + ranges = [av] + # There are only two ranges of cased astral characters: + # 10400-1044F (Deseret) and 118A0-118DF (Warang Citi). + _fixup_range(max(0x10000, lo), min(0x11fff, hi), + ranges, fixup) + for lo, hi in ranges: + if lo == hi: + tail.append((LITERAL, hi)) + else: + tail.append((RANGE, (lo, hi))) + else: + tail.append((op, av)) + break + + # compress character map + runs = [] + q = 0 + while True: + p = charmap.find(1, q) + if p < 0: + break + if len(runs) >= 2: + runs = None + break + q = charmap.find(0, p) + if q < 0: + runs.append((p, len(charmap))) + break + runs.append((p, q)) + if runs is not None: + # use literal/range + for p, q in runs: + if q - p == 1: + out.append((LITERAL, p)) + else: + out.append((RANGE, (p, q - 1))) + out += tail + # if the case was changed or new representation is more compact + if fixup or len(out) < len(charset): + return out + # else original character set is good enough + return charset + + # use bitmap + if len(charmap) == 256: + data = _mk_bitmap(charmap) + out.append((CHARSET, data)) + out += tail + return out + + # To represent a big charset, first a bitmap of all characters in the + # set is constructed. Then, this bitmap is sliced into chunks of 256 + # characters, duplicate chunks are eliminated, and each chunk is + # given a number. 
In the compiled expression, the charset is + # represented by a 32-bit word sequence, consisting of one word for + # the number of different chunks, a sequence of 256 bytes (64 words) + # of chunk numbers indexed by their original chunk position, and a + # sequence of 256-bit chunks (8 words each). + + # Compression is normally good: in a typical charset, large ranges of + # Unicode will be either completely excluded (e.g. if only cyrillic + # letters are to be matched), or completely included (e.g. if large + # subranges of Kanji match). These ranges will be represented by + # chunks of all one-bits or all zero-bits. + + # Matching can be also done efficiently: the more significant byte of + # the Unicode character is an index into the chunk number, and the + # less significant byte is a bit index in the chunk (just like the + # CHARSET matching). + + charmap = bytes(charmap) # should be hashable + comps = {} + mapping = bytearray(256) + block = 0 + data = bytearray() + for i in range(0, 65536, 256): + chunk = charmap[i: i + 256] + if chunk in comps: + mapping[i // 256] = comps[chunk] + else: + mapping[i // 256] = comps[chunk] = block + block += 1 + data += chunk + data = _mk_bitmap(data) + data[0:0] = [block] + _bytes_to_codes(mapping) + out.append((BIGCHARSET, data)) + out += tail + return out + +def _fixup_range(lo, hi, ranges, fixup): + for i in map(fixup, range(lo, hi+1)): + for k, (lo, hi) in enumerate(ranges): + if i < lo: + if l == lo - 1: + ranges[k] = (i, hi) + else: + ranges.insert(k, (i, i)) + break + elif i > hi: + if i == hi + 1: + ranges[k] = (lo, i) + break + else: + break + else: + ranges.append((i, i)) + +_CODEBITS = _sre.CODESIZE * 8 +_BITS_TRANS = b'0' + b'1' * 255 +def _mk_bitmap(bits, _CODEBITS=_CODEBITS, _int=int): + s = bits.translate(_BITS_TRANS)[::-1] + return [_int(s[i - _CODEBITS: i], 2) + for i in range(len(s), 0, -_CODEBITS)] + +def _bytes_to_codes(b): + # Convert block indices to word array + a = memoryview(b).cast('I') + assert a.itemsize == _sre.CODESIZE + assert len(a) * a.itemsize == len(b) + return a.tolist() + +def _simple(av): + # check if av is a "simple" operator + lo, hi = av[2].getwidth() + return lo == hi == 1 and av[2][0][0] != SUBPATTERN + +def _generate_overlap_table(prefix): + """ + Generate an overlap table for the following prefix. + An overlap table is a table of the same size as the prefix which + informs about the potential self-overlap for each index in the prefix: + - if overlap[i] == 0, prefix[i:] can't overlap prefix[0:...] + - if overlap[i] == k with 0 < k <= i, prefix[i-k+1:i+1] overlaps with + prefix[0:k] + """ + table = [0] * len(prefix) + for i in range(1, len(prefix)): + idx = table[i - 1] + while prefix[i] != prefix[idx]: + if idx == 0: + table[i] = 0 + break + idx = table[idx - 1] + else: + table[i] = idx + 1 + return table + +def _compile_info(code, pattern, flags): + # internal: compile an info block. 
in the current version, + # this contains min/max pattern width, and an optional literal + # prefix or a character map + lo, hi = pattern.getwidth() + if lo == 0: + return # not worth it + # look for a literal prefix + prefix = [] + prefixappend = prefix.append + prefix_skip = 0 + charset = [] # not used + charsetappend = charset.append + if not (flags & SRE_FLAG_IGNORECASE): + # look for literal prefix + for op, av in pattern.data: + if op is LITERAL: + if len(prefix) == prefix_skip: + prefix_skip = prefix_skip + 1 + prefixappend(av) + elif op is SUBPATTERN and len(av[1]) == 1: + op, av = av[1][0] + if op is LITERAL: + prefixappend(av) + else: + break + else: + break + # if no prefix, look for charset prefix + if not prefix and pattern.data: + op, av = pattern.data[0] + if op is SUBPATTERN and av[1]: + op, av = av[1][0] + if op is LITERAL: + charsetappend((op, av)) + elif op is BRANCH: + c = [] + cappend = c.append + for p in av[1]: + if not p: + break + op, av = p[0] + if op is LITERAL: + cappend((op, av)) + else: + break + else: + charset = c + elif op is BRANCH: + c = [] + cappend = c.append + for p in av[1]: + if not p: + break + op, av = p[0] + if op is LITERAL: + cappend((op, av)) + else: + break + else: + charset = c + elif op is IN: + charset = av +## if prefix: +## print "*** PREFIX", prefix, prefix_skip +## if charset: +## print "*** CHARSET", charset + # add an info block + emit = code.append + emit(OPCODES[INFO]) + skip = len(code); emit(0) + # literal flag + mask = 0 + if prefix: + mask = SRE_INFO_PREFIX + if len(prefix) == prefix_skip == len(pattern.data): + mask = mask + SRE_INFO_LITERAL + elif charset: + mask = mask + SRE_INFO_CHARSET + emit(mask) + # pattern length + if lo < MAXCODE: + emit(lo) + else: + emit(MAXCODE) + prefix = prefix[:MAXCODE] + if hi < MAXCODE: + emit(hi) + else: + emit(0) + # add literal prefix + if prefix: + emit(len(prefix)) # length + emit(prefix_skip) # skip + code.extend(prefix) + # generate overlap table + code.extend(_generate_overlap_table(prefix)) + elif charset: + _compile_charset(charset, flags, code) + code[skip] = len(code) - skip + +def isstring(obj): + return isinstance(obj, (str, bytes)) + +def _code(p, flags): + + flags = p.pattern.flags | flags + code = [] + + # compile info block + _compile_info(code, p, flags) + + # compile the pattern + _compile(code, p.data, flags) + + code.append(OPCODES[SUCCESS]) + + return code + +def compile(p, flags=0): + # internal: convert pattern list to internal format + + if isstring(p): + pattern = p + p = sre_parse.parse(p, flags) + else: + pattern = None + + code = _code(p, flags) + + # print code + + # XXX: get rid of this limitation! 
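A quick worked example for `_generate_overlap_table` above may help; the values below were traced by hand from the loop as written (this is the classic KMP-style failure function computed over the literal prefix):

    >>> _generate_overlap_table("ababa")
    [0, 0, 1, 2, 3]    # trailing "aba" overlaps the leading "aba"
    >>> _generate_overlap_table("ababc")
    [0, 0, 1, 2, 0]    # the final "c" kills any overlap

This matches the docstring's reading of overlap[i] == k: for "ababa", overlap[4] == 3 because prefix[2:5] equals prefix[0:3], which lets the matcher shift the prefix search without re-scanning characters it has already consumed.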
+ if p.pattern.groups > 100: + raise AssertionError( + "sorry, but this version only supports 100 named groups" + ) + + # map in either direction + groupindex = p.pattern.groupdict + indexgroup = [None] * p.pattern.groups + for k, i in groupindex.items(): + indexgroup[i] = k + + return _sre.compile( + pattern, flags | p.pattern.flags, code, + p.pattern.groups-1, + groupindex, indexgroup + ) diff --git a/v1/flask/lib/python3.4/sre_constants.py b/v1/flask/lib/python3.4/sre_constants.py deleted file mode 120000 index 2d113f0..0000000 --- a/v1/flask/lib/python3.4/sre_constants.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/sre_constants.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/sre_constants.py b/v1/flask/lib/python3.4/sre_constants.py new file mode 100644 index 0000000..23e3516 --- /dev/null +++ b/v1/flask/lib/python3.4/sre_constants.py @@ -0,0 +1,261 @@ +# +# Secret Labs' Regular Expression Engine +# +# various symbols used by the regular expression engine. +# run this script to update the _sre include files! +# +# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. +# +# See the sre.py file for information on usage and redistribution. +# + +"""Internal support module for sre""" + +# update when constants are added or removed + +MAGIC = 20031017 + +from _sre import MAXREPEAT + +# SRE standard exception (access as sre.error) +# should this really be here? + +class error(Exception): + pass + +# operators + +FAILURE = "failure" +SUCCESS = "success" + +ANY = "any" +ANY_ALL = "any_all" +ASSERT = "assert" +ASSERT_NOT = "assert_not" +AT = "at" +BIGCHARSET = "bigcharset" +BRANCH = "branch" +CALL = "call" +CATEGORY = "category" +CHARSET = "charset" +GROUPREF = "groupref" +GROUPREF_IGNORE = "groupref_ignore" +GROUPREF_EXISTS = "groupref_exists" +IN = "in" +IN_IGNORE = "in_ignore" +INFO = "info" +JUMP = "jump" +LITERAL = "literal" +LITERAL_IGNORE = "literal_ignore" +MARK = "mark" +MAX_REPEAT = "max_repeat" +MAX_UNTIL = "max_until" +MIN_REPEAT = "min_repeat" +MIN_UNTIL = "min_until" +NEGATE = "negate" +NOT_LITERAL = "not_literal" +NOT_LITERAL_IGNORE = "not_literal_ignore" +RANGE = "range" +REPEAT = "repeat" +REPEAT_ONE = "repeat_one" +SUBPATTERN = "subpattern" +MIN_REPEAT_ONE = "min_repeat_one" + +# positions +AT_BEGINNING = "at_beginning" +AT_BEGINNING_LINE = "at_beginning_line" +AT_BEGINNING_STRING = "at_beginning_string" +AT_BOUNDARY = "at_boundary" +AT_NON_BOUNDARY = "at_non_boundary" +AT_END = "at_end" +AT_END_LINE = "at_end_line" +AT_END_STRING = "at_end_string" +AT_LOC_BOUNDARY = "at_loc_boundary" +AT_LOC_NON_BOUNDARY = "at_loc_non_boundary" +AT_UNI_BOUNDARY = "at_uni_boundary" +AT_UNI_NON_BOUNDARY = "at_uni_non_boundary" + +# categories +CATEGORY_DIGIT = "category_digit" +CATEGORY_NOT_DIGIT = "category_not_digit" +CATEGORY_SPACE = "category_space" +CATEGORY_NOT_SPACE = "category_not_space" +CATEGORY_WORD = "category_word" +CATEGORY_NOT_WORD = "category_not_word" +CATEGORY_LINEBREAK = "category_linebreak" +CATEGORY_NOT_LINEBREAK = "category_not_linebreak" +CATEGORY_LOC_WORD = "category_loc_word" +CATEGORY_LOC_NOT_WORD = "category_loc_not_word" +CATEGORY_UNI_DIGIT = "category_uni_digit" +CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit" +CATEGORY_UNI_SPACE = "category_uni_space" +CATEGORY_UNI_NOT_SPACE = "category_uni_not_space" +CATEGORY_UNI_WORD = "category_uni_word" +CATEGORY_UNI_NOT_WORD = "category_uni_not_word" +CATEGORY_UNI_LINEBREAK = "category_uni_linebreak" +CATEGORY_UNI_NOT_LINEBREAK = 
"category_uni_not_linebreak" + +OPCODES = [ + + # failure=0 success=1 (just because it looks better that way :-) + FAILURE, SUCCESS, + + ANY, ANY_ALL, + ASSERT, ASSERT_NOT, + AT, + BRANCH, + CALL, + CATEGORY, + CHARSET, BIGCHARSET, + GROUPREF, GROUPREF_EXISTS, GROUPREF_IGNORE, + IN, IN_IGNORE, + INFO, + JUMP, + LITERAL, LITERAL_IGNORE, + MARK, + MAX_UNTIL, + MIN_UNTIL, + NOT_LITERAL, NOT_LITERAL_IGNORE, + NEGATE, + RANGE, + REPEAT, + REPEAT_ONE, + SUBPATTERN, + MIN_REPEAT_ONE + +] + +ATCODES = [ + AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY, + AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING, + AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY, + AT_UNI_NON_BOUNDARY +] + +CHCODES = [ + CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE, + CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD, + CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD, + CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT, + CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD, + CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK, + CATEGORY_UNI_NOT_LINEBREAK +] + +def makedict(list): + d = {} + i = 0 + for item in list: + d[item] = i + i = i + 1 + return d + +OPCODES = makedict(OPCODES) +ATCODES = makedict(ATCODES) +CHCODES = makedict(CHCODES) + +# replacement operations for "ignore case" mode +OP_IGNORE = { + GROUPREF: GROUPREF_IGNORE, + IN: IN_IGNORE, + LITERAL: LITERAL_IGNORE, + NOT_LITERAL: NOT_LITERAL_IGNORE +} + +AT_MULTILINE = { + AT_BEGINNING: AT_BEGINNING_LINE, + AT_END: AT_END_LINE +} + +AT_LOCALE = { + AT_BOUNDARY: AT_LOC_BOUNDARY, + AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY +} + +AT_UNICODE = { + AT_BOUNDARY: AT_UNI_BOUNDARY, + AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY +} + +CH_LOCALE = { + CATEGORY_DIGIT: CATEGORY_DIGIT, + CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT, + CATEGORY_SPACE: CATEGORY_SPACE, + CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE, + CATEGORY_WORD: CATEGORY_LOC_WORD, + CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD, + CATEGORY_LINEBREAK: CATEGORY_LINEBREAK, + CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK +} + +CH_UNICODE = { + CATEGORY_DIGIT: CATEGORY_UNI_DIGIT, + CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT, + CATEGORY_SPACE: CATEGORY_UNI_SPACE, + CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE, + CATEGORY_WORD: CATEGORY_UNI_WORD, + CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD, + CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK, + CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK +} + +# flags +SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking) +SRE_FLAG_IGNORECASE = 2 # case insensitive +SRE_FLAG_LOCALE = 4 # honour system locale +SRE_FLAG_MULTILINE = 8 # treat target as multiline string +SRE_FLAG_DOTALL = 16 # treat target as a single string +SRE_FLAG_UNICODE = 32 # use unicode "locale" +SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments +SRE_FLAG_DEBUG = 128 # debugging +SRE_FLAG_ASCII = 256 # use ascii "locale" + +# flags for INFO primitive +SRE_INFO_PREFIX = 1 # has prefix +SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix) +SRE_INFO_CHARSET = 4 # pattern starts with character from given set + +if __name__ == "__main__": + def dump(f, d, prefix): + items = sorted(d.items(), key=lambda a: a[1]) + for k, v in items: + f.write("#define %s_%s %s\n" % (prefix, k.upper(), v)) + f = open("sre_constants.h", "w") + f.write("""\ +/* + * Secret Labs' Regular Expression Engine + * + * regular expression matching engine + * + * NOTE: This file is generated by sre_constants.py. 
If you need + * to change anything in here, edit sre_constants.py and run it. + * + * Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved. + * + * See the _sre.c file for information on usage and redistribution. + */ + +""") + + f.write("#define SRE_MAGIC %d\n" % MAGIC) + + dump(f, OPCODES, "SRE_OP") + dump(f, ATCODES, "SRE") + dump(f, CHCODES, "SRE") + + f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE) + f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE) + f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE) + f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE) + f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL) + f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE) + f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE) + f.write("#define SRE_FLAG_DEBUG %d\n" % SRE_FLAG_DEBUG) + f.write("#define SRE_FLAG_ASCII %d\n" % SRE_FLAG_ASCII) + + f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX) + f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL) + f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET) + + f.close() + print("done") diff --git a/v1/flask/lib/python3.4/sre_parse.py b/v1/flask/lib/python3.4/sre_parse.py deleted file mode 120000 index 532ac60..0000000 --- a/v1/flask/lib/python3.4/sre_parse.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/sre_parse.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/sre_parse.py b/v1/flask/lib/python3.4/sre_parse.py new file mode 100644 index 0000000..df1e643 --- /dev/null +++ b/v1/flask/lib/python3.4/sre_parse.py @@ -0,0 +1,891 @@ +# +# Secret Labs' Regular Expression Engine +# +# convert re-style regular expression to sre pattern +# +# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. +# +# See the sre.py file for information on usage and redistribution. +# + +"""Internal support module for sre""" + +# XXX: show string offset and offending character for all errors + +from sre_constants import * +from _sre import MAXREPEAT + +SPECIAL_CHARS = ".\\[{()*+?^$|" +REPEAT_CHARS = "*+?{" + +DIGITS = set("0123456789") + +OCTDIGITS = set("01234567") +HEXDIGITS = set("0123456789abcdefABCDEF") + +WHITESPACE = set(" \t\n\r\v\f") + +ESCAPES = { + r"\a": (LITERAL, ord("\a")), + r"\b": (LITERAL, ord("\b")), + r"\f": (LITERAL, ord("\f")), + r"\n": (LITERAL, ord("\n")), + r"\r": (LITERAL, ord("\r")), + r"\t": (LITERAL, ord("\t")), + r"\v": (LITERAL, ord("\v")), + r"\\": (LITERAL, ord("\\")) +} + +CATEGORIES = { + r"\A": (AT, AT_BEGINNING_STRING), # start of string + r"\b": (AT, AT_BOUNDARY), + r"\B": (AT, AT_NON_BOUNDARY), + r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]), + r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]), + r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]), + r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]), + r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]), + r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]), + r"\Z": (AT, AT_END_STRING), # end of string +} + +FLAGS = { + # standard flags + "i": SRE_FLAG_IGNORECASE, + "L": SRE_FLAG_LOCALE, + "m": SRE_FLAG_MULTILINE, + "s": SRE_FLAG_DOTALL, + "x": SRE_FLAG_VERBOSE, + # extensions + "a": SRE_FLAG_ASCII, + "t": SRE_FLAG_TEMPLATE, + "u": SRE_FLAG_UNICODE, +} + +class Pattern: + # master pattern object. 
keeps track of global attributes + def __init__(self): + self.flags = 0 + self.open = [] + self.groups = 1 + self.groupdict = {} + self.lookbehind = 0 + + def opengroup(self, name=None): + gid = self.groups + self.groups = gid + 1 + if name is not None: + ogid = self.groupdict.get(name, None) + if ogid is not None: + raise error("redefinition of group name %s as group %d; " + "was group %d" % (repr(name), gid, ogid)) + self.groupdict[name] = gid + self.open.append(gid) + return gid + def closegroup(self, gid): + self.open.remove(gid) + def checkgroup(self, gid): + return gid < self.groups and gid not in self.open + +class SubPattern: + # a subpattern, in intermediate form + def __init__(self, pattern, data=None): + self.pattern = pattern + if data is None: + data = [] + self.data = data + self.width = None + def dump(self, level=0): + nl = True + seqtypes = (tuple, list) + for op, av in self.data: + print(level*" " + op, end='') + if op == IN: + # member sublanguage + print() + for op, a in av: + print((level+1)*" " + op, a) + elif op == BRANCH: + print() + for i, a in enumerate(av[1]): + if i: + print(level*" " + "or") + a.dump(level+1) + elif op == GROUPREF_EXISTS: + condgroup, item_yes, item_no = av + print('', condgroup) + item_yes.dump(level+1) + if item_no: + print(level*" " + "else") + item_no.dump(level+1) + elif isinstance(av, seqtypes): + nl = False + for a in av: + if isinstance(a, SubPattern): + if not nl: + print() + a.dump(level+1) + nl = True + else: + if not nl: + print(' ', end='') + print(a, end='') + nl = False + if not nl: + print() + else: + print('', av) + def __repr__(self): + return repr(self.data) + def __len__(self): + return len(self.data) + def __delitem__(self, index): + del self.data[index] + def __getitem__(self, index): + if isinstance(index, slice): + return SubPattern(self.pattern, self.data[index]) + return self.data[index] + def __setitem__(self, index, code): + self.data[index] = code + def insert(self, index, code): + self.data.insert(index, code) + def append(self, code): + self.data.append(code) + def getwidth(self): + # determine the width (min, max) for this subpattern + if self.width: + return self.width + lo = hi = 0 + UNITCODES = (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY) + REPEATCODES = (MIN_REPEAT, MAX_REPEAT) + for op, av in self.data: + if op is BRANCH: + i = MAXREPEAT - 1 + j = 0 + for av in av[1]: + l, h = av.getwidth() + i = min(i, l) + j = max(j, h) + lo = lo + i + hi = hi + j + elif op is CALL: + i, j = av.getwidth() + lo = lo + i + hi = hi + j + elif op is SUBPATTERN: + i, j = av[1].getwidth() + lo = lo + i + hi = hi + j + elif op in REPEATCODES: + i, j = av[2].getwidth() + lo = lo + i * av[0] + hi = hi + j * av[1] + elif op in UNITCODES: + lo = lo + 1 + hi = hi + 1 + elif op == SUCCESS: + break + self.width = min(lo, MAXREPEAT - 1), min(hi, MAXREPEAT) + return self.width + +class Tokenizer: + def __init__(self, string): + self.istext = isinstance(string, str) + self.string = string + self.index = 0 + self.__next() + def __next(self): + if self.index >= len(self.string): + self.next = None + return + char = self.string[self.index:self.index+1] + # Special case for the str8, since indexing returns a integer + # XXX This is only needed for test_bug_926075 in test_re.py + if char and not self.istext: + char = chr(char[0]) + if char == "\\": + try: + c = self.string[self.index + 1] + except IndexError: + raise error("bogus escape (end of line)") + if not self.istext: + c = chr(c) + char = char + c + self.index = self.index + 
len(char) + self.next = char + def match(self, char, skip=1): + if char == self.next: + if skip: + self.__next() + return 1 + return 0 + def get(self): + this = self.next + self.__next() + return this + def getwhile(self, n, charset): + result = '' + for _ in range(n): + c = self.next + if c not in charset: + break + result += c + self.__next() + return result + def tell(self): + return self.index, self.next + def seek(self, index): + self.index, self.next = index + +# The following three functions are not used in this module anymore, but we keep +# them here (with DeprecationWarnings) for backwards compatibility. + +def isident(char): + import warnings + warnings.warn('sre_parse.isident() will be removed in 3.5', + DeprecationWarning, stacklevel=2) + return "a" <= char <= "z" or "A" <= char <= "Z" or char == "_" + +def isdigit(char): + import warnings + warnings.warn('sre_parse.isdigit() will be removed in 3.5', + DeprecationWarning, stacklevel=2) + return "0" <= char <= "9" + +def isname(name): + import warnings + warnings.warn('sre_parse.isname() will be removed in 3.5', + DeprecationWarning, stacklevel=2) + # check that group name is a valid string + if not isident(name[0]): + return False + for char in name[1:]: + if not isident(char) and not isdigit(char): + return False + return True + +def _class_escape(source, escape): + # handle escape code inside character class + code = ESCAPES.get(escape) + if code: + return code + code = CATEGORIES.get(escape) + if code and code[0] == IN: + return code + try: + c = escape[1:2] + if c == "x": + # hexadecimal escape (exactly two digits) + escape += source.getwhile(2, HEXDIGITS) + if len(escape) != 4: + raise ValueError + return LITERAL, int(escape[2:], 16) & 0xff + elif c == "u" and source.istext: + # unicode escape (exactly four digits) + escape += source.getwhile(4, HEXDIGITS) + if len(escape) != 6: + raise ValueError + return LITERAL, int(escape[2:], 16) + elif c == "U" and source.istext: + # unicode escape (exactly eight digits) + escape += source.getwhile(8, HEXDIGITS) + if len(escape) != 10: + raise ValueError + c = int(escape[2:], 16) + chr(c) # raise ValueError for invalid code + return LITERAL, c + elif c in OCTDIGITS: + # octal escape (up to three digits) + escape += source.getwhile(2, OCTDIGITS) + return LITERAL, int(escape[1:], 8) & 0xff + elif c in DIGITS: + raise ValueError + if len(escape) == 2: + return LITERAL, ord(escape[1]) + except ValueError: + pass + raise error("bogus escape: %s" % repr(escape)) + +def _escape(source, escape, state): + # handle escape code in expression + code = CATEGORIES.get(escape) + if code: + return code + code = ESCAPES.get(escape) + if code: + return code + try: + c = escape[1:2] + if c == "x": + # hexadecimal escape + escape += source.getwhile(2, HEXDIGITS) + if len(escape) != 4: + raise ValueError + return LITERAL, int(escape[2:], 16) & 0xff + elif c == "u" and source.istext: + # unicode escape (exactly four digits) + escape += source.getwhile(4, HEXDIGITS) + if len(escape) != 6: + raise ValueError + return LITERAL, int(escape[2:], 16) + elif c == "U" and source.istext: + # unicode escape (exactly eight digits) + escape += source.getwhile(8, HEXDIGITS) + if len(escape) != 10: + raise ValueError + c = int(escape[2:], 16) + chr(c) # raise ValueError for invalid code + return LITERAL, c + elif c == "0": + # octal escape + escape += source.getwhile(2, OCTDIGITS) + return LITERAL, int(escape[1:], 8) & 0xff + elif c in DIGITS: + # octal escape *or* decimal group reference (sigh) + if source.next in 
DIGITS: + escape = escape + source.get() + if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and + source.next in OCTDIGITS): + # got three octal digits; this is an octal escape + escape = escape + source.get() + return LITERAL, int(escape[1:], 8) & 0xff + # not an octal escape, so this is a group reference + group = int(escape[1:]) + if group < state.groups: + if not state.checkgroup(group): + raise error("cannot refer to open group") + if state.lookbehind: + import warnings + warnings.warn('group references in lookbehind ' + 'assertions are not supported', + RuntimeWarning) + return GROUPREF, group + raise ValueError + if len(escape) == 2: + return LITERAL, ord(escape[1]) + except ValueError: + pass + raise error("bogus escape: %s" % repr(escape)) + +def _parse_sub(source, state, nested=1): + # parse an alternation: a|b|c + + items = [] + itemsappend = items.append + sourcematch = source.match + while 1: + itemsappend(_parse(source, state)) + if sourcematch("|"): + continue + if not nested: + break + if not source.next or sourcematch(")", 0): + break + else: + raise error("pattern not properly closed") + + if len(items) == 1: + return items[0] + + subpattern = SubPattern(state) + subpatternappend = subpattern.append + + # check if all items share a common prefix + while 1: + prefix = None + for item in items: + if not item: + break + if prefix is None: + prefix = item[0] + elif item[0] != prefix: + break + else: + # all subitems start with a common "prefix". + # move it out of the branch + for item in items: + del item[0] + subpatternappend(prefix) + continue # check next one + break + + # check if the branch can be replaced by a character set + for item in items: + if len(item) != 1 or item[0][0] != LITERAL: + break + else: + # we can store this as a character set instead of a + # branch (the compiler may optimize this even more) + set = [] + setappend = set.append + for item in items: + setappend(item[0]) + subpatternappend((IN, set)) + return subpattern + + subpattern.append((BRANCH, (None, items))) + return subpattern + +def _parse_sub_cond(source, state, condgroup): + item_yes = _parse(source, state) + if source.match("|"): + item_no = _parse(source, state) + if source.match("|"): + raise error("conditional backref with more than two branches") + else: + item_no = None + if source.next and not source.match(")", 0): + raise error("pattern not properly closed") + subpattern = SubPattern(state) + subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no))) + return subpattern + +_PATTERNENDERS = set("|)") +_ASSERTCHARS = set("=!<") +_LOOKBEHINDASSERTCHARS = set("=!") +_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT]) + +def _parse(source, state): + # parse a simple pattern + subpattern = SubPattern(state) + + # precompute constants into local variables + subpatternappend = subpattern.append + sourceget = source.get + sourcematch = source.match + _len = len + PATTERNENDERS = _PATTERNENDERS + ASSERTCHARS = _ASSERTCHARS + LOOKBEHINDASSERTCHARS = _LOOKBEHINDASSERTCHARS + REPEATCODES = _REPEATCODES + + while 1: + + if source.next in PATTERNENDERS: + break # end of subpattern + this = sourceget() + if this is None: + break # end of pattern + + if state.flags & SRE_FLAG_VERBOSE: + # skip whitespace and comments + if this in WHITESPACE: + continue + if this == "#": + while 1: + this = sourceget() + if this in (None, "\n"): + break + continue + + if this and this[0] not in SPECIAL_CHARS: + subpatternappend((LITERAL, ord(this))) + + elif this == "[": + # character set + set = [] + 
setappend = set.append +## if sourcematch(":"): +## pass # handle character classes + if sourcematch("^"): + setappend((NEGATE, None)) + # check remaining characters + start = set[:] + while 1: + this = sourceget() + if this == "]" and set != start: + break + elif this and this[0] == "\\": + code1 = _class_escape(source, this) + elif this: + code1 = LITERAL, ord(this) + else: + raise error("unexpected end of regular expression") + if sourcematch("-"): + # potential range + this = sourceget() + if this == "]": + if code1[0] is IN: + code1 = code1[1][0] + setappend(code1) + setappend((LITERAL, ord("-"))) + break + elif this: + if this[0] == "\\": + code2 = _class_escape(source, this) + else: + code2 = LITERAL, ord(this) + if code1[0] != LITERAL or code2[0] != LITERAL: + raise error("bad character range") + lo = code1[1] + hi = code2[1] + if hi < lo: + raise error("bad character range") + setappend((RANGE, (lo, hi))) + else: + raise error("unexpected end of regular expression") + else: + if code1[0] is IN: + code1 = code1[1][0] + setappend(code1) + + # XXX: should move set optimization to compiler! + if _len(set)==1 and set[0][0] is LITERAL: + subpatternappend(set[0]) # optimization + elif _len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL: + subpatternappend((NOT_LITERAL, set[1][1])) # optimization + else: + # XXX: should add charmap optimization here + subpatternappend((IN, set)) + + elif this and this[0] in REPEAT_CHARS: + # repeat previous item + if this == "?": + min, max = 0, 1 + elif this == "*": + min, max = 0, MAXREPEAT + + elif this == "+": + min, max = 1, MAXREPEAT + elif this == "{": + if source.next == "}": + subpatternappend((LITERAL, ord(this))) + continue + here = source.tell() + min, max = 0, MAXREPEAT + lo = hi = "" + while source.next in DIGITS: + lo = lo + source.get() + if sourcematch(","): + while source.next in DIGITS: + hi = hi + sourceget() + else: + hi = lo + if not sourcematch("}"): + subpatternappend((LITERAL, ord(this))) + source.seek(here) + continue + if lo: + min = int(lo) + if min >= MAXREPEAT: + raise OverflowError("the repetition number is too large") + if hi: + max = int(hi) + if max >= MAXREPEAT: + raise OverflowError("the repetition number is too large") + if max < min: + raise error("bad repeat interval") + else: + raise error("not supported") + # figure out which item to repeat + if subpattern: + item = subpattern[-1:] + else: + item = None + if not item or (_len(item) == 1 and item[0][0] == AT): + raise error("nothing to repeat") + if item[0][0] in REPEATCODES: + raise error("multiple repeat") + if sourcematch("?"): + subpattern[-1] = (MIN_REPEAT, (min, max, item)) + else: + subpattern[-1] = (MAX_REPEAT, (min, max, item)) + + elif this == ".": + subpatternappend((ANY, None)) + + elif this == "(": + group = 1 + name = None + condgroup = None + if sourcematch("?"): + group = 0 + # options + if sourcematch("P"): + # python extensions + if sourcematch("<"): + # named group: skip forward to end of name + name = "" + while 1: + char = sourceget() + if char is None: + raise error("unterminated name") + if char == ">": + break + name = name + char + group = 1 + if not name: + raise error("missing group name") + if not name.isidentifier(): + raise error("bad character in group name %r" % name) + elif sourcematch("="): + # named backreference + name = "" + while 1: + char = sourceget() + if char is None: + raise error("unterminated name") + if char == ")": + break + name = name + char + if not name: + raise error("missing group name") + if not 
name.isidentifier(): + raise error("bad character in backref group name " + "%r" % name) + gid = state.groupdict.get(name) + if gid is None: + msg = "unknown group name: {0!r}".format(name) + raise error(msg) + if state.lookbehind: + import warnings + warnings.warn('group references in lookbehind ' + 'assertions are not supported', + RuntimeWarning) + subpatternappend((GROUPREF, gid)) + continue + else: + char = sourceget() + if char is None: + raise error("unexpected end of pattern") + raise error("unknown specifier: ?P%s" % char) + elif sourcematch(":"): + # non-capturing group + group = 2 + elif sourcematch("#"): + # comment + while 1: + if source.next is None or source.next == ")": + break + sourceget() + if not sourcematch(")"): + raise error("unbalanced parenthesis") + continue + elif source.next in ASSERTCHARS: + # lookahead assertions + char = sourceget() + dir = 1 + if char == "<": + if source.next not in LOOKBEHINDASSERTCHARS: + raise error("syntax error") + dir = -1 # lookbehind + char = sourceget() + state.lookbehind += 1 + p = _parse_sub(source, state) + if dir < 0: + state.lookbehind -= 1 + if not sourcematch(")"): + raise error("unbalanced parenthesis") + if char == "=": + subpatternappend((ASSERT, (dir, p))) + else: + subpatternappend((ASSERT_NOT, (dir, p))) + continue + elif sourcematch("("): + # conditional backreference group + condname = "" + while 1: + char = sourceget() + if char is None: + raise error("unterminated name") + if char == ")": + break + condname = condname + char + group = 2 + if not condname: + raise error("missing group name") + if condname.isidentifier(): + condgroup = state.groupdict.get(condname) + if condgroup is None: + msg = "unknown group name: {0!r}".format(condname) + raise error(msg) + else: + try: + condgroup = int(condname) + except ValueError: + raise error("bad character in group name") + if state.lookbehind: + import warnings + warnings.warn('group references in lookbehind ' + 'assertions are not supported', + RuntimeWarning) + else: + # flags + if not source.next in FLAGS: + raise error("unexpected end of pattern") + while source.next in FLAGS: + state.flags = state.flags | FLAGS[sourceget()] + if group: + # parse group contents + if group == 2: + # anonymous group + group = None + else: + group = state.opengroup(name) + if condgroup: + p = _parse_sub_cond(source, state, condgroup) + else: + p = _parse_sub(source, state) + if not sourcematch(")"): + raise error("unbalanced parenthesis") + if group is not None: + state.closegroup(group) + subpatternappend((SUBPATTERN, (group, p))) + else: + while 1: + char = sourceget() + if char is None: + raise error("unexpected end of pattern") + if char == ")": + break + raise error("unknown extension") + + elif this == "^": + subpatternappend((AT, AT_BEGINNING)) + + elif this == "$": + subpattern.append((AT, AT_END)) + + elif this and this[0] == "\\": + code = _escape(source, this, state) + subpatternappend(code) + + else: + raise error("parser error") + + return subpattern + +def fix_flags(src, flags): + # Check and fix flags according to the type of pattern (str or bytes) + if isinstance(src, str): + if not flags & SRE_FLAG_ASCII: + flags |= SRE_FLAG_UNICODE + elif flags & SRE_FLAG_UNICODE: + raise ValueError("ASCII and UNICODE flags are incompatible") + else: + if flags & SRE_FLAG_UNICODE: + raise ValueError("can't use UNICODE flag with a bytes pattern") + return flags + +def parse(str, flags=0, pattern=None): + # parse 're' pattern into list of (opcode, argument) tuples + + source = 
Tokenizer(str) + + if pattern is None: + pattern = Pattern() + pattern.flags = flags + pattern.str = str + + p = _parse_sub(source, pattern, 0) + p.pattern.flags = fix_flags(str, p.pattern.flags) + + tail = source.get() + if tail == ")": + raise error("unbalanced parenthesis") + elif tail: + raise error("bogus characters at end of regular expression") + + if flags & SRE_FLAG_DEBUG: + p.dump() + + if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE: + # the VERBOSE flag was switched on inside the pattern. to be + # on the safe side, we'll parse the whole thing again... + return parse(str, p.pattern.flags) + + return p + +def parse_template(source, pattern): + # parse 're' replacement string into list of literals and + # group references + s = Tokenizer(source) + sget = s.get + groups = [] + literals = [] + literal = [] + lappend = literal.append + def addgroup(index): + if literal: + literals.append(''.join(literal)) + del literal[:] + groups.append((len(literals), index)) + literals.append(None) + while True: + this = sget() + if this is None: + break # end of replacement string + if this[0] == "\\": + # group + c = this[1] + if c == "g": + name = "" + if s.match("<"): + while True: + char = sget() + if char is None: + raise error("unterminated group name") + if char == ">": + break + name += char + if not name: + raise error("missing group name") + try: + index = int(name) + if index < 0: + raise error("negative group number") + except ValueError: + if not name.isidentifier(): + raise error("bad character in group name") + try: + index = pattern.groupindex[name] + except KeyError: + msg = "unknown group name: {0!r}".format(name) + raise IndexError(msg) + addgroup(index) + elif c == "0": + if s.next in OCTDIGITS: + this += sget() + if s.next in OCTDIGITS: + this += sget() + lappend(chr(int(this[1:], 8) & 0xff)) + elif c in DIGITS: + isoctal = False + if s.next in DIGITS: + this += sget() + if (c in OCTDIGITS and this[2] in OCTDIGITS and + s.next in OCTDIGITS): + this += sget() + isoctal = True + lappend(chr(int(this[1:], 8) & 0xff)) + if not isoctal: + addgroup(int(this[1:])) + else: + try: + this = chr(ESCAPES[this][1]) + except KeyError: + pass + lappend(this) + else: + lappend(this) + if literal: + literals.append(''.join(literal)) + if not isinstance(source, str): + # The tokenizer implicitly decodes bytes objects as latin-1, we must + # therefore re-encode the final representation. + literals = [None if s is None else s.encode('latin-1') for s in literals] + return groups, literals + +def expand_template(template, match): + g = match.group + sep = match.string[:0] + groups, literals = template + literals = literals[:] + try: + for index, group in groups: + literals[index] = s = g(group) + if s is None: + raise error("unmatched group") + except IndexError: + raise error("invalid group reference") + return sep.join(literals) diff --git a/v1/flask/lib/python3.4/stat.py b/v1/flask/lib/python3.4/stat.py deleted file mode 120000 index 1e5c721..0000000 --- a/v1/flask/lib/python3.4/stat.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/stat.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/stat.py b/v1/flask/lib/python3.4/stat.py new file mode 100644 index 0000000..3eecc3e --- /dev/null +++ b/v1/flask/lib/python3.4/stat.py @@ -0,0 +1,155 @@ +"""Constants/functions for interpreting results of os.stat() and os.lstat(). 
+ +Suggested usage: from stat import * +""" + +# Indices for stat struct members in the tuple returned by os.stat() + +ST_MODE = 0 +ST_INO = 1 +ST_DEV = 2 +ST_NLINK = 3 +ST_UID = 4 +ST_GID = 5 +ST_SIZE = 6 +ST_ATIME = 7 +ST_MTIME = 8 +ST_CTIME = 9 + +# Extract bits from the mode + +def S_IMODE(mode): + """Return the portion of the file's mode that can be set by + os.chmod(). + """ + return mode & 0o7777 + +def S_IFMT(mode): + """Return the portion of the file's mode that describes the + file type. + """ + return mode & 0o170000 + +# Constants used as S_IFMT() for various file types +# (not all are implemented on all systems) + +S_IFDIR = 0o040000 # directory +S_IFCHR = 0o020000 # character device +S_IFBLK = 0o060000 # block device +S_IFREG = 0o100000 # regular file +S_IFIFO = 0o010000 # fifo (named pipe) +S_IFLNK = 0o120000 # symbolic link +S_IFSOCK = 0o140000 # socket file + +# Functions to test for each file type + +def S_ISDIR(mode): + """Return True if mode is from a directory.""" + return S_IFMT(mode) == S_IFDIR + +def S_ISCHR(mode): + """Return True if mode is from a character special device file.""" + return S_IFMT(mode) == S_IFCHR + +def S_ISBLK(mode): + """Return True if mode is from a block special device file.""" + return S_IFMT(mode) == S_IFBLK + +def S_ISREG(mode): + """Return True if mode is from a regular file.""" + return S_IFMT(mode) == S_IFREG + +def S_ISFIFO(mode): + """Return True if mode is from a FIFO (named pipe).""" + return S_IFMT(mode) == S_IFIFO + +def S_ISLNK(mode): + """Return True if mode is from a symbolic link.""" + return S_IFMT(mode) == S_IFLNK + +def S_ISSOCK(mode): + """Return True if mode is from a socket.""" + return S_IFMT(mode) == S_IFSOCK + +# Names for permission bits + +S_ISUID = 0o4000 # set UID bit +S_ISGID = 0o2000 # set GID bit +S_ENFMT = S_ISGID # file locking enforcement +S_ISVTX = 0o1000 # sticky bit +S_IREAD = 0o0400 # Unix V7 synonym for S_IRUSR +S_IWRITE = 0o0200 # Unix V7 synonym for S_IWUSR +S_IEXEC = 0o0100 # Unix V7 synonym for S_IXUSR +S_IRWXU = 0o0700 # mask for owner permissions +S_IRUSR = 0o0400 # read by owner +S_IWUSR = 0o0200 # write by owner +S_IXUSR = 0o0100 # execute by owner +S_IRWXG = 0o0070 # mask for group permissions +S_IRGRP = 0o0040 # read by group +S_IWGRP = 0o0020 # write by group +S_IXGRP = 0o0010 # execute by group +S_IRWXO = 0o0007 # mask for others (not in group) permissions +S_IROTH = 0o0004 # read by others +S_IWOTH = 0o0002 # write by others +S_IXOTH = 0o0001 # execute by others + +# Names for file flags + +UF_NODUMP = 0x00000001 # do not dump file +UF_IMMUTABLE = 0x00000002 # file may not be changed +UF_APPEND = 0x00000004 # file may only be appended to +UF_OPAQUE = 0x00000008 # directory is opaque when viewed through a union stack +UF_NOUNLINK = 0x00000010 # file may not be renamed or deleted +UF_COMPRESSED = 0x00000020 # OS X: file is hfs-compressed +UF_HIDDEN = 0x00008000 # OS X: file should not be displayed +SF_ARCHIVED = 0x00010000 # file may be archived +SF_IMMUTABLE = 0x00020000 # file may not be changed +SF_APPEND = 0x00040000 # file may only be appended to +SF_NOUNLINK = 0x00100000 # file may not be renamed or deleted +SF_SNAPSHOT = 0x00200000 # file is a snapshot file + + +_filemode_table = ( + ((S_IFLNK, "l"), + (S_IFREG, "-"), + (S_IFBLK, "b"), + (S_IFDIR, "d"), + (S_IFCHR, "c"), + (S_IFIFO, "p")), + + ((S_IRUSR, "r"),), + ((S_IWUSR, "w"),), + ((S_IXUSR|S_ISUID, "s"), + (S_ISUID, "S"), + (S_IXUSR, "x")), + + ((S_IRGRP, "r"),), + ((S_IWGRP, "w"),), + ((S_IXGRP|S_ISGID, "s"), + (S_ISGID, "S"), + 
(S_IXGRP, "x")), + + ((S_IROTH, "r"),), + ((S_IWOTH, "w"),), + ((S_IXOTH|S_ISVTX, "t"), + (S_ISVTX, "T"), + (S_IXOTH, "x")) +) + +def filemode(mode): + """Convert a file's mode to a string of the form '-rwxrwxrwx'.""" + perm = [] + for table in _filemode_table: + for bit, char in table: + if mode & bit == bit: + perm.append(char) + break + else: + perm.append("-") + return "".join(perm) + +# If available, use C implementation +try: + from _stat import * +except ImportError: + pass diff --git a/v1/flask/lib/python3.4/struct.py b/v1/flask/lib/python3.4/struct.py deleted file mode 120000 index d2bae1f..0000000 --- a/v1/flask/lib/python3.4/struct.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/struct.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/struct.py b/v1/flask/lib/python3.4/struct.py new file mode 100644 index 0000000..d6bba58 --- /dev/null +++ b/v1/flask/lib/python3.4/struct.py @@ -0,0 +1,15 @@ +__all__ = [ + # Functions + 'calcsize', 'pack', 'pack_into', 'unpack', 'unpack_from', + 'iter_unpack', + + # Classes + 'Struct', + + # Exceptions + 'error' + ] + +from _struct import * +from _struct import _clearcache +from _struct import __doc__ diff --git a/v1/flask/lib/python3.4/tarfile.py b/v1/flask/lib/python3.4/tarfile.py deleted file mode 120000 index cbc6c34..0000000 --- a/v1/flask/lib/python3.4/tarfile.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/tarfile.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/tarfile.py b/v1/flask/lib/python3.4/tarfile.py new file mode 100755 index 0000000..5f1a979 --- /dev/null +++ b/v1/flask/lib/python3.4/tarfile.py @@ -0,0 +1,2529 @@ +#!/usr/bin/env python3 +#------------------------------------------------------------------- +# tarfile.py +#------------------------------------------------------------------- +# Copyright (C) 2002 Lars Gustaebel +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation +# files (the "Software"), to deal in the Software without +# restriction, including without limitation the rights to use, +# copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following +# conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +"""Read from and write to tar format archives. +""" + +version = "0.9.0" +__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)" +__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $" +__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $" +__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend." 
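One note on the stat.py hunk above before the tarfile imports begin: `filemode()` walks `_filemode_table` column by column, appending one character per permission group, so its output can be sanity-checked against known mode values. A small usage sketch, using only the standard-library module this vendored file shadows (values traced by hand from the table):

    import stat
    assert stat.filemode(0o100644) == '-rw-r--r--'   # regular file
    assert stat.filemode(0o040755) == 'drwxr-xr-x'   # directory
    assert stat.S_ISDIR(0o040755) and not stat.S_ISREG(0o040755)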
+ +#--------- +# Imports +#--------- +from builtins import open as bltn_open +import sys +import os +import io +import shutil +import stat +import time +import struct +import copy +import re + +try: + import grp, pwd +except ImportError: + grp = pwd = None + +# os.symlink on Windows prior to 6.0 raises NotImplementedError +symlink_exception = (AttributeError, NotImplementedError) +try: + # OSError (winerror=1314) will be raised if the caller does not hold the + # SeCreateSymbolicLinkPrivilege privilege + symlink_exception += (OSError,) +except NameError: + pass + +# from tarfile import * +__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"] + +#--------------------------------------------------------- +# tar constants +#--------------------------------------------------------- +NUL = b"\0" # the null character +BLOCKSIZE = 512 # length of processing blocks +RECORDSIZE = BLOCKSIZE * 20 # length of records +GNU_MAGIC = b"ustar \0" # magic gnu tar string +POSIX_MAGIC = b"ustar\x0000" # magic posix tar string + +LENGTH_NAME = 100 # maximum length of a filename +LENGTH_LINK = 100 # maximum length of a linkname +LENGTH_PREFIX = 155 # maximum length of the prefix field + +REGTYPE = b"0" # regular file +AREGTYPE = b"\0" # regular file +LNKTYPE = b"1" # link (inside tarfile) +SYMTYPE = b"2" # symbolic link +CHRTYPE = b"3" # character special device +BLKTYPE = b"4" # block special device +DIRTYPE = b"5" # directory +FIFOTYPE = b"6" # fifo special device +CONTTYPE = b"7" # contiguous file + +GNUTYPE_LONGNAME = b"L" # GNU tar longname +GNUTYPE_LONGLINK = b"K" # GNU tar longlink +GNUTYPE_SPARSE = b"S" # GNU tar sparse file + +XHDTYPE = b"x" # POSIX.1-2001 extended header +XGLTYPE = b"g" # POSIX.1-2001 global header +SOLARIS_XHDTYPE = b"X" # Solaris extended header + +USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format +GNU_FORMAT = 1 # GNU tar format +PAX_FORMAT = 2 # POSIX.1-2001 (pax) format +DEFAULT_FORMAT = GNU_FORMAT + +#--------------------------------------------------------- +# tarfile constants +#--------------------------------------------------------- +# File types that tarfile supports: +SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE, + SYMTYPE, DIRTYPE, FIFOTYPE, + CONTTYPE, CHRTYPE, BLKTYPE, + GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, + GNUTYPE_SPARSE) + +# File types that will be treated as a regular file. +REGULAR_TYPES = (REGTYPE, AREGTYPE, + CONTTYPE, GNUTYPE_SPARSE) + +# File types that are part of the GNU tar format. +GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, + GNUTYPE_SPARSE) + +# Fields from a pax header that override a TarInfo attribute. +PAX_FIELDS = ("path", "linkpath", "size", "mtime", + "uid", "gid", "uname", "gname") + +# Fields from a pax header that are affected by hdrcharset. +PAX_NAME_FIELDS = {"path", "linkpath", "uname", "gname"} + +# Fields in a pax header that are numbers, all other fields +# are treated as strings. +PAX_NUMBER_FIELDS = { + "atime": float, + "ctime": float, + "mtime": float, + "uid": int, + "gid": int, + "size": int +} + +#--------------------------------------------------------- +# initialization +#--------------------------------------------------------- +if os.name in ("nt", "ce"): + ENCODING = "utf-8" +else: + ENCODING = sys.getfilesystemencoding() + +#--------------------------------------------------------- +# Some useful functions +#--------------------------------------------------------- + +def stn(s, length, encoding, errors): + """Convert a string to a null-terminated bytes object. 
+ """ + s = s.encode(encoding, errors) + return s[:length] + (length - len(s)) * NUL + +def nts(s, encoding, errors): + """Convert a null-terminated bytes object to a string. + """ + p = s.find(b"\0") + if p != -1: + s = s[:p] + return s.decode(encoding, errors) + +def nti(s): + """Convert a number field to a python number. + """ + # There are two possible encodings for a number field, see + # itn() below. + if s[0] in (0o200, 0o377): + n = 0 + for i in range(len(s) - 1): + n <<= 8 + n += s[i + 1] + if s[0] == 0o377: + n = -(256 ** (len(s) - 1) - n) + else: + try: + s = nts(s, "ascii", "strict") + n = int(s.strip() or "0", 8) + except ValueError: + raise InvalidHeaderError("invalid header") + return n + +def itn(n, digits=8, format=DEFAULT_FORMAT): + """Convert a python number to a number field. + """ + # POSIX 1003.1-1988 requires numbers to be encoded as a string of + # octal digits followed by a null-byte, this allows values up to + # (8**(digits-1))-1. GNU tar allows storing numbers greater than + # that if necessary. A leading 0o200 or 0o377 byte indicate this + # particular encoding, the following digits-1 bytes are a big-endian + # base-256 representation. This allows values up to (256**(digits-1))-1. + # A 0o200 byte indicates a positive number, a 0o377 byte a negative + # number. + if 0 <= n < 8 ** (digits - 1): + s = bytes("%0*o" % (digits - 1, int(n)), "ascii") + NUL + elif format == GNU_FORMAT and -256 ** (digits - 1) <= n < 256 ** (digits - 1): + if n >= 0: + s = bytearray([0o200]) + else: + s = bytearray([0o377]) + n = 256 ** digits + n + + for i in range(digits - 1): + s.insert(1, n & 0o377) + n >>= 8 + else: + raise ValueError("overflow in number field") + + return s + +def calc_chksums(buf): + """Calculate the checksum for a member's header by summing up all + characters except for the chksum field which is treated as if + it was filled with spaces. According to the GNU tar sources, + some tars (Sun and NeXT) calculate chksum with signed char, + which will be different if there are chars in the buffer with + the high bit set. So we calculate two checksums, unsigned and + signed. + """ + unsigned_chksum = 256 + sum(struct.unpack_from("148B8x356B", buf)) + signed_chksum = 256 + sum(struct.unpack_from("148b8x356b", buf)) + return unsigned_chksum, signed_chksum + +def copyfileobj(src, dst, length=None, exception=OSError): + """Copy length bytes from fileobj src to fileobj dst. + If length is None, copy the entire content. 
+ """ + if length == 0: + return + if length is None: + shutil.copyfileobj(src, dst) + return + + BUFSIZE = 16 * 1024 + blocks, remainder = divmod(length, BUFSIZE) + for b in range(blocks): + buf = src.read(BUFSIZE) + if len(buf) < BUFSIZE: + raise exception("unexpected end of data") + dst.write(buf) + + if remainder != 0: + buf = src.read(remainder) + if len(buf) < remainder: + raise exception("unexpected end of data") + dst.write(buf) + return + +def filemode(mode): + """Deprecated in this location; use stat.filemode.""" + import warnings + warnings.warn("deprecated in favor of stat.filemode", + DeprecationWarning, 2) + return stat.filemode(mode) + +def _safe_print(s): + encoding = getattr(sys.stdout, 'encoding', None) + if encoding is not None: + s = s.encode(encoding, 'backslashreplace').decode(encoding) + print(s, end=' ') + + +class TarError(Exception): + """Base exception.""" + pass +class ExtractError(TarError): + """General exception for extract errors.""" + pass +class ReadError(TarError): + """Exception for unreadable tar archives.""" + pass +class CompressionError(TarError): + """Exception for unavailable compression methods.""" + pass +class StreamError(TarError): + """Exception for unsupported operations on stream-like TarFiles.""" + pass +class HeaderError(TarError): + """Base exception for header errors.""" + pass +class EmptyHeaderError(HeaderError): + """Exception for empty headers.""" + pass +class TruncatedHeaderError(HeaderError): + """Exception for truncated headers.""" + pass +class EOFHeaderError(HeaderError): + """Exception for end of file headers.""" + pass +class InvalidHeaderError(HeaderError): + """Exception for invalid headers.""" + pass +class SubsequentHeaderError(HeaderError): + """Exception for missing and invalid extended headers.""" + pass + +#--------------------------- +# internal stream interface +#--------------------------- +class _LowLevelFile: + """Low-level file object. Supports reading and writing. + It is used instead of a regular file object for streaming + access. + """ + + def __init__(self, name, mode): + mode = { + "r": os.O_RDONLY, + "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC, + }[mode] + if hasattr(os, "O_BINARY"): + mode |= os.O_BINARY + self.fd = os.open(name, mode, 0o666) + + def close(self): + os.close(self.fd) + + def read(self, size): + return os.read(self.fd, size) + + def write(self, s): + os.write(self.fd, s) + +class _Stream: + """Class that serves as an adapter between TarFile and + a stream-like object. The stream-like object only + needs to have a read() or write() method and is accessed + blockwise. Use of gzip or bzip2 compression is possible. + A stream-like object could be for example: sys.stdin, + sys.stdout, a socket, a tape device etc. + + _Stream is intended to be used only internally. + """ + + def __init__(self, name, mode, comptype, fileobj, bufsize): + """Construct a _Stream object. 
+        """
+        self._extfileobj = True
+        if fileobj is None:
+            fileobj = _LowLevelFile(name, mode)
+            self._extfileobj = False
+
+        if comptype == '*':
+            # Enable transparent compression detection for the
+            # stream interface
+            fileobj = _StreamProxy(fileobj)
+            comptype = fileobj.getcomptype()
+
+        self.name = name or ""
+        self.mode = mode
+        self.comptype = comptype
+        self.fileobj = fileobj
+        self.bufsize = bufsize
+        self.buf = b""
+        self.pos = 0
+        self.closed = False
+
+        try:
+            if comptype == "gz":
+                try:
+                    import zlib
+                except ImportError:
+                    raise CompressionError("zlib module is not available")
+                self.zlib = zlib
+                self.crc = zlib.crc32(b"")
+                if mode == "r":
+                    self._init_read_gz()
+                    self.exception = zlib.error
+                else:
+                    self._init_write_gz()
+
+            elif comptype == "bz2":
+                try:
+                    import bz2
+                except ImportError:
+                    raise CompressionError("bz2 module is not available")
+                if mode == "r":
+                    self.dbuf = b""
+                    self.cmp = bz2.BZ2Decompressor()
+                    self.exception = OSError
+                else:
+                    self.cmp = bz2.BZ2Compressor()
+
+            elif comptype == "xz":
+                try:
+                    import lzma
+                except ImportError:
+                    raise CompressionError("lzma module is not available")
+                if mode == "r":
+                    self.dbuf = b""
+                    self.cmp = lzma.LZMADecompressor()
+                    self.exception = lzma.LZMAError
+                else:
+                    self.cmp = lzma.LZMACompressor()
+
+            elif comptype != "tar":
+                raise CompressionError("unknown compression type %r" % comptype)
+
+        except:
+            if not self._extfileobj:
+                self.fileobj.close()
+            self.closed = True
+            raise
+
+    def __del__(self):
+        if hasattr(self, "closed") and not self.closed:
+            self.close()
+
+    def _init_write_gz(self):
+        """Initialize for writing with gzip compression.
+        """
+        self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
+                                            -self.zlib.MAX_WBITS,
+                                            self.zlib.DEF_MEM_LEVEL,
+                                            0)
+        timestamp = struct.pack("<L", int(time.time()))
+        self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
+        if self.name.endswith(".gz"):
+            self.name = self.name[:-3]
+        # RFC1952 says we must use ISO-8859-1 for the FNAME field.
+        self.fileobj.write(self.name.encode("iso-8859-1", "replace") + NUL)
+
+    def write(self, s):
+        """Write string s to the stream.
+        """
+        if self.comptype == "gz":
+            self.crc = self.zlib.crc32(s, self.crc)
+        self.pos += len(s)
+        if self.comptype != "tar":
+            s = self.cmp.compress(s)
+        self.__write(s)
+
+    def __write(self, s):
+        """Write string s to the stream if a whole new block
+           is ready to be written.
+        """
+        self.buf += s
+        while len(self.buf) > self.bufsize:
+            self.fileobj.write(self.buf[:self.bufsize])
+            self.buf = self.buf[self.bufsize:]
+
+    def close(self):
+        """Close the _Stream object. No operation should be
+           done on it afterwards.
+        """
+        if self.closed:
+            return
+
+        self.closed = True
+        try:
+            if self.mode == "w" and self.comptype != "tar":
+                self.buf += self.cmp.flush()
+
+            if self.mode == "w" and self.buf:
+                self.fileobj.write(self.buf)
+                self.buf = b""
+                if self.comptype == "gz":
+                    # The native zlib crc is an unsigned 32-bit integer, but
+                    # the Python wrapper implicitly casts that to a signed C
+                    # long. So, on a 32-bit box self.crc may "look negative",
+                    # while the same crc on a 64-bit box may "look positive".
+                    # To avoid irksome warnings from the `struct` module, force
+                    # it to look positive on all boxes.
+                    self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff))
+                    self.fileobj.write(struct.pack("<L", self.pos & 0xffffffff))
+        finally:
+            if not self._extfileobj:
+                self.fileobj.close()
+
+    def _init_read_gz(self):
+        """Initialize for reading a gzip compressed fileobj.
+        """
+        self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
+        self.dbuf = b""
+
+        # taken from gzip.GzipFile with some alterations
+        if self.__read(2) != b"\037\213":
+            raise ReadError("not a gzip file")
+        if self.__read(1) != b"\010":
+            raise CompressionError("unsupported compression method")
+
+        flag = ord(self.__read(1))
+        self.__read(6)
+
+        if flag & 4:
+            xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
+            self.read(xlen)
+        if flag & 8:
+            while True:
+                s = self.__read(1)
+                if not s or s == NUL:
+                    break
+        if flag & 16:
+            while True:
+                s = self.__read(1)
+                if not s or s == NUL:
+                    break
+        if flag & 2:
+            self.__read(2)
+
+    def tell(self):
+        """Return the stream's file pointer position.
+        """
+        return self.pos
+
+    def seek(self, pos=0):
+        """Set the stream's file pointer to pos. Negative seeking
+           is forbidden.
+        """
+        if pos - self.pos >= 0:
+            blocks, remainder = divmod(pos - self.pos, self.bufsize)
+            for i in range(blocks):
+                self.read(self.bufsize)
+            self.read(remainder)
+        else:
+            raise StreamError("seeking backwards is not allowed")
+        return self.pos
+
+    def read(self, size=None):
+        """Return the next size number of bytes from the stream.
+           If size is not defined, return all bytes of the stream
+           up to EOF.
+        """
+        if size is None:
+            t = []
+            while True:
+                buf = self._read(self.bufsize)
+                if not buf:
+                    break
+                t.append(buf)
+            buf = b"".join(t)
+        else:
+            buf = self._read(size)
+        self.pos += len(buf)
+        return buf
+
+    def _read(self, size):
+        """Return size bytes from the stream. 
+ """ + if self.comptype == "tar": + return self.__read(size) + + c = len(self.dbuf) + while c < size: + buf = self.__read(self.bufsize) + if not buf: + break + try: + buf = self.cmp.decompress(buf) + except self.exception: + raise ReadError("invalid compressed data") + self.dbuf += buf + c += len(buf) + buf = self.dbuf[:size] + self.dbuf = self.dbuf[size:] + return buf + + def __read(self, size): + """Return size bytes from stream. If internal buffer is empty, + read another block from the stream. + """ + c = len(self.buf) + while c < size: + buf = self.fileobj.read(self.bufsize) + if not buf: + break + self.buf += buf + c += len(buf) + buf = self.buf[:size] + self.buf = self.buf[size:] + return buf +# class _Stream + +class _StreamProxy(object): + """Small proxy class that enables transparent compression + detection for the Stream interface (mode 'r|*'). + """ + + def __init__(self, fileobj): + self.fileobj = fileobj + self.buf = self.fileobj.read(BLOCKSIZE) + + def read(self, size): + self.read = self.fileobj.read + return self.buf + + def getcomptype(self): + if self.buf.startswith(b"\x1f\x8b\x08"): + return "gz" + elif self.buf[0:3] == b"BZh" and self.buf[4:10] == b"1AY&SY": + return "bz2" + elif self.buf.startswith((b"\x5d\x00\x00\x80", b"\xfd7zXZ")): + return "xz" + else: + return "tar" + + def close(self): + self.fileobj.close() +# class StreamProxy + +#------------------------ +# Extraction file object +#------------------------ +class _FileInFile(object): + """A thin wrapper around an existing file object that + provides a part of its data as an individual file + object. + """ + + def __init__(self, fileobj, offset, size, blockinfo=None): + self.fileobj = fileobj + self.offset = offset + self.size = size + self.position = 0 + self.name = getattr(fileobj, "name", None) + self.closed = False + + if blockinfo is None: + blockinfo = [(0, size)] + + # Construct a map with data and zero blocks. + self.map_index = 0 + self.map = [] + lastpos = 0 + realpos = self.offset + for offset, size in blockinfo: + if offset > lastpos: + self.map.append((False, lastpos, offset, None)) + self.map.append((True, offset, offset + size, realpos)) + realpos += size + lastpos = offset + size + if lastpos < self.size: + self.map.append((False, lastpos, self.size, None)) + + def flush(self): + pass + + def readable(self): + return True + + def writable(self): + return False + + def seekable(self): + return self.fileobj.seekable() + + def tell(self): + """Return the current file position. + """ + return self.position + + def seek(self, position, whence=io.SEEK_SET): + """Seek to a position in the file. + """ + if whence == io.SEEK_SET: + self.position = min(max(position, 0), self.size) + elif whence == io.SEEK_CUR: + if position < 0: + self.position = max(self.position + position, 0) + else: + self.position = min(self.position + position, self.size) + elif whence == io.SEEK_END: + self.position = max(min(self.size + position, self.size), 0) + else: + raise ValueError("Invalid argument") + return self.position + + def read(self, size=None): + """Read data from the file. 
+ """ + if size is None: + size = self.size - self.position + else: + size = min(size, self.size - self.position) + + buf = b"" + while size > 0: + while True: + data, start, stop, offset = self.map[self.map_index] + if start <= self.position < stop: + break + else: + self.map_index += 1 + if self.map_index == len(self.map): + self.map_index = 0 + length = min(size, stop - self.position) + if data: + self.fileobj.seek(offset + (self.position - start)) + b = self.fileobj.read(length) + if len(b) != length: + raise ReadError("unexpected end of data") + buf += b + else: + buf += NUL * length + size -= length + self.position += length + return buf + + def readinto(self, b): + buf = self.read(len(b)) + b[:len(buf)] = buf + return len(buf) + + def close(self): + self.closed = True +#class _FileInFile + +class ExFileObject(io.BufferedReader): + + def __init__(self, tarfile, tarinfo): + fileobj = _FileInFile(tarfile.fileobj, tarinfo.offset_data, + tarinfo.size, tarinfo.sparse) + super().__init__(fileobj) +#class ExFileObject + +#------------------ +# Exported Classes +#------------------ +class TarInfo(object): + """Informational class which holds the details about an + archive member given by a tar header block. + TarInfo objects are returned by TarFile.getmember(), + TarFile.getmembers() and TarFile.gettarinfo() and are + usually created internally. + """ + + __slots__ = ("name", "mode", "uid", "gid", "size", "mtime", + "chksum", "type", "linkname", "uname", "gname", + "devmajor", "devminor", + "offset", "offset_data", "pax_headers", "sparse", + "tarfile", "_sparse_structs", "_link_target") + + def __init__(self, name=""): + """Construct a TarInfo object. name is the optional name + of the member. + """ + self.name = name # member name + self.mode = 0o644 # file permissions + self.uid = 0 # user id + self.gid = 0 # group id + self.size = 0 # file size + self.mtime = 0 # modification time + self.chksum = 0 # header checksum + self.type = REGTYPE # member type + self.linkname = "" # link name + self.uname = "" # user name + self.gname = "" # group name + self.devmajor = 0 # device major number + self.devminor = 0 # device minor number + + self.offset = 0 # the tar header starts here + self.offset_data = 0 # the file's data starts here + + self.sparse = None # sparse member information + self.pax_headers = {} # pax header information + + # In pax headers the "name" and "linkname" field are called + # "path" and "linkpath". + def _getpath(self): + return self.name + def _setpath(self, name): + self.name = name + path = property(_getpath, _setpath) + + def _getlinkpath(self): + return self.linkname + def _setlinkpath(self, linkname): + self.linkname = linkname + linkpath = property(_getlinkpath, _setlinkpath) + + def __repr__(self): + return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self)) + + def get_info(self): + """Return the TarInfo's attributes as a dictionary. + """ + info = { + "name": self.name, + "mode": self.mode & 0o7777, + "uid": self.uid, + "gid": self.gid, + "size": self.size, + "mtime": self.mtime, + "chksum": self.chksum, + "type": self.type, + "linkname": self.linkname, + "uname": self.uname, + "gname": self.gname, + "devmajor": self.devmajor, + "devminor": self.devminor + } + + if info["type"] == DIRTYPE and not info["name"].endswith("/"): + info["name"] += "/" + + return info + + def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"): + """Return a tar header as a string of 512 byte blocks. 
+ """ + info = self.get_info() + + if format == USTAR_FORMAT: + return self.create_ustar_header(info, encoding, errors) + elif format == GNU_FORMAT: + return self.create_gnu_header(info, encoding, errors) + elif format == PAX_FORMAT: + return self.create_pax_header(info, encoding) + else: + raise ValueError("invalid format") + + def create_ustar_header(self, info, encoding, errors): + """Return the object as a ustar header block. + """ + info["magic"] = POSIX_MAGIC + + if len(info["linkname"]) > LENGTH_LINK: + raise ValueError("linkname is too long") + + if len(info["name"]) > LENGTH_NAME: + info["prefix"], info["name"] = self._posix_split_name(info["name"]) + + return self._create_header(info, USTAR_FORMAT, encoding, errors) + + def create_gnu_header(self, info, encoding, errors): + """Return the object as a GNU header block sequence. + """ + info["magic"] = GNU_MAGIC + + buf = b"" + if len(info["linkname"]) > LENGTH_LINK: + buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors) + + if len(info["name"]) > LENGTH_NAME: + buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors) + + return buf + self._create_header(info, GNU_FORMAT, encoding, errors) + + def create_pax_header(self, info, encoding): + """Return the object as a ustar header block. If it cannot be + represented this way, prepend a pax extended header sequence + with supplement information. + """ + info["magic"] = POSIX_MAGIC + pax_headers = self.pax_headers.copy() + + # Test string fields for values that exceed the field length or cannot + # be represented in ASCII encoding. + for name, hname, length in ( + ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK), + ("uname", "uname", 32), ("gname", "gname", 32)): + + if hname in pax_headers: + # The pax header has priority. + continue + + # Try to encode the string as ASCII. + try: + info[name].encode("ascii", "strict") + except UnicodeEncodeError: + pax_headers[hname] = info[name] + continue + + if len(info[name]) > length: + pax_headers[hname] = info[name] + + # Test number fields for values that exceed the field limit or values + # that like to be stored as float. + for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)): + if name in pax_headers: + # The pax header has priority. Avoid overflow. + info[name] = 0 + continue + + val = info[name] + if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float): + pax_headers[name] = str(val) + info[name] = 0 + + # Create a pax extended header if necessary. + if pax_headers: + buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding) + else: + buf = b"" + + return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace") + + @classmethod + def create_pax_global_header(cls, pax_headers): + """Return the object as a pax global header block sequence. + """ + return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf-8") + + def _posix_split_name(self, name): + """Split a name longer than 100 chars into a prefix + and a name part. + """ + prefix = name[:LENGTH_PREFIX + 1] + while prefix and prefix[-1] != "/": + prefix = prefix[:-1] + + name = name[len(prefix):] + prefix = prefix[:-1] + + if not prefix or len(name) > LENGTH_NAME: + raise ValueError("name is too long") + return prefix, name + + @staticmethod + def _create_header(info, format, encoding, errors): + """Return a header block. info is a dictionary with file + information, format must be one of the *_FORMAT constants. 
+ """ + parts = [ + stn(info.get("name", ""), 100, encoding, errors), + itn(info.get("mode", 0) & 0o7777, 8, format), + itn(info.get("uid", 0), 8, format), + itn(info.get("gid", 0), 8, format), + itn(info.get("size", 0), 12, format), + itn(info.get("mtime", 0), 12, format), + b" ", # checksum field + info.get("type", REGTYPE), + stn(info.get("linkname", ""), 100, encoding, errors), + info.get("magic", POSIX_MAGIC), + stn(info.get("uname", ""), 32, encoding, errors), + stn(info.get("gname", ""), 32, encoding, errors), + itn(info.get("devmajor", 0), 8, format), + itn(info.get("devminor", 0), 8, format), + stn(info.get("prefix", ""), 155, encoding, errors) + ] + + buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts)) + chksum = calc_chksums(buf[-BLOCKSIZE:])[0] + buf = buf[:-364] + bytes("%06o\0" % chksum, "ascii") + buf[-357:] + return buf + + @staticmethod + def _create_payload(payload): + """Return the string payload filled with zero bytes + up to the next 512 byte border. + """ + blocks, remainder = divmod(len(payload), BLOCKSIZE) + if remainder > 0: + payload += (BLOCKSIZE - remainder) * NUL + return payload + + @classmethod + def _create_gnu_long_header(cls, name, type, encoding, errors): + """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence + for name. + """ + name = name.encode(encoding, errors) + NUL + + info = {} + info["name"] = "././@LongLink" + info["type"] = type + info["size"] = len(name) + info["magic"] = GNU_MAGIC + + # create extended header + name blocks. + return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \ + cls._create_payload(name) + + @classmethod + def _create_pax_generic_header(cls, pax_headers, type, encoding): + """Return a POSIX.1-2008 extended or global header sequence + that contains a list of keyword, value pairs. The values + must be strings. + """ + # Check if one of the fields contains surrogate characters and thereby + # forces hdrcharset=BINARY, see _proc_pax() for more information. + binary = False + for keyword, value in pax_headers.items(): + try: + value.encode("utf-8", "strict") + except UnicodeEncodeError: + binary = True + break + + records = b"" + if binary: + # Put the hdrcharset field at the beginning of the header. + records += b"21 hdrcharset=BINARY\n" + + for keyword, value in pax_headers.items(): + keyword = keyword.encode("utf-8") + if binary: + # Try to restore the original byte representation of `value'. + # Needless to say, that the encoding must match the string. + value = value.encode(encoding, "surrogateescape") + else: + value = value.encode("utf-8") + + l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n' + n = p = 0 + while True: + n = l + len(str(p)) + if n == p: + break + p = n + records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n" + + # We use a hardcoded "././@PaxHeader" name like star does + # instead of the one that POSIX recommends. + info = {} + info["name"] = "././@PaxHeader" + info["type"] = type + info["size"] = len(records) + info["magic"] = POSIX_MAGIC + + # Create pax header + record blocks. + return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \ + cls._create_payload(records) + + @classmethod + def frombuf(cls, buf, encoding, errors): + """Construct a TarInfo object from a 512 byte bytes object. 
+ """ + if len(buf) == 0: + raise EmptyHeaderError("empty header") + if len(buf) != BLOCKSIZE: + raise TruncatedHeaderError("truncated header") + if buf.count(NUL) == BLOCKSIZE: + raise EOFHeaderError("end of file header") + + chksum = nti(buf[148:156]) + if chksum not in calc_chksums(buf): + raise InvalidHeaderError("bad checksum") + + obj = cls() + obj.name = nts(buf[0:100], encoding, errors) + obj.mode = nti(buf[100:108]) + obj.uid = nti(buf[108:116]) + obj.gid = nti(buf[116:124]) + obj.size = nti(buf[124:136]) + obj.mtime = nti(buf[136:148]) + obj.chksum = chksum + obj.type = buf[156:157] + obj.linkname = nts(buf[157:257], encoding, errors) + obj.uname = nts(buf[265:297], encoding, errors) + obj.gname = nts(buf[297:329], encoding, errors) + obj.devmajor = nti(buf[329:337]) + obj.devminor = nti(buf[337:345]) + prefix = nts(buf[345:500], encoding, errors) + + # Old V7 tar format represents a directory as a regular + # file with a trailing slash. + if obj.type == AREGTYPE and obj.name.endswith("/"): + obj.type = DIRTYPE + + # The old GNU sparse format occupies some of the unused + # space in the buffer for up to 4 sparse structures. + # Save the them for later processing in _proc_sparse(). + if obj.type == GNUTYPE_SPARSE: + pos = 386 + structs = [] + for i in range(4): + try: + offset = nti(buf[pos:pos + 12]) + numbytes = nti(buf[pos + 12:pos + 24]) + except ValueError: + break + structs.append((offset, numbytes)) + pos += 24 + isextended = bool(buf[482]) + origsize = nti(buf[483:495]) + obj._sparse_structs = (structs, isextended, origsize) + + # Remove redundant slashes from directories. + if obj.isdir(): + obj.name = obj.name.rstrip("/") + + # Reconstruct a ustar longname. + if prefix and obj.type not in GNU_TYPES: + obj.name = prefix + "/" + obj.name + return obj + + @classmethod + def fromtarfile(cls, tarfile): + """Return the next TarInfo object from TarFile object + tarfile. + """ + buf = tarfile.fileobj.read(BLOCKSIZE) + obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors) + obj.offset = tarfile.fileobj.tell() - BLOCKSIZE + return obj._proc_member(tarfile) + + #-------------------------------------------------------------------------- + # The following are methods that are called depending on the type of a + # member. The entry point is _proc_member() which can be overridden in a + # subclass to add custom _proc_*() methods. A _proc_*() method MUST + # implement the following + # operations: + # 1. Set self.offset_data to the position where the data blocks begin, + # if there is data that follows. + # 2. Set tarfile.offset to the position where the next member's header will + # begin. + # 3. Return self or another valid TarInfo object. + def _proc_member(self, tarfile): + """Choose the right processing method depending on + the type and call it. + """ + if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK): + return self._proc_gnulong(tarfile) + elif self.type == GNUTYPE_SPARSE: + return self._proc_sparse(tarfile) + elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE): + return self._proc_pax(tarfile) + else: + return self._proc_builtin(tarfile) + + def _proc_builtin(self, tarfile): + """Process a builtin type or an unknown type which + will be treated as a regular file. + """ + self.offset_data = tarfile.fileobj.tell() + offset = self.offset_data + if self.isreg() or self.type not in SUPPORTED_TYPES: + # Skip the following data blocks. + offset += self._block(self.size) + tarfile.offset = offset + + # Patch the TarInfo object with saved global + # header information. 
+ self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors) + + return self + + def _proc_gnulong(self, tarfile): + """Process the blocks that hold a GNU longname + or longlink member. + """ + buf = tarfile.fileobj.read(self._block(self.size)) + + # Fetch the next header and process it. + try: + next = self.fromtarfile(tarfile) + except HeaderError: + raise SubsequentHeaderError("missing or bad subsequent header") + + # Patch the TarInfo object from the next header with + # the longname information. + next.offset = self.offset + if self.type == GNUTYPE_LONGNAME: + next.name = nts(buf, tarfile.encoding, tarfile.errors) + elif self.type == GNUTYPE_LONGLINK: + next.linkname = nts(buf, tarfile.encoding, tarfile.errors) + + return next + + def _proc_sparse(self, tarfile): + """Process a GNU sparse header plus extra headers. + """ + # We already collected some sparse structures in frombuf(). + structs, isextended, origsize = self._sparse_structs + del self._sparse_structs + + # Collect sparse structures from extended header blocks. + while isextended: + buf = tarfile.fileobj.read(BLOCKSIZE) + pos = 0 + for i in range(21): + try: + offset = nti(buf[pos:pos + 12]) + numbytes = nti(buf[pos + 12:pos + 24]) + except ValueError: + break + if offset and numbytes: + structs.append((offset, numbytes)) + pos += 24 + isextended = bool(buf[504]) + self.sparse = structs + + self.offset_data = tarfile.fileobj.tell() + tarfile.offset = self.offset_data + self._block(self.size) + self.size = origsize + return self + + def _proc_pax(self, tarfile): + """Process an extended or global header as described in + POSIX.1-2008. + """ + # Read the header information. + buf = tarfile.fileobj.read(self._block(self.size)) + + # A pax header stores supplemental information for either + # the following file (extended) or all following files + # (global). + if self.type == XGLTYPE: + pax_headers = tarfile.pax_headers + else: + pax_headers = tarfile.pax_headers.copy() + + # Check if the pax header contains a hdrcharset field. This tells us + # the encoding of the path, linkpath, uname and gname fields. Normally, + # these fields are UTF-8 encoded but since POSIX.1-2008 tar + # implementations are allowed to store them as raw binary strings if + # the translation to UTF-8 fails. + match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf) + if match is not None: + pax_headers["hdrcharset"] = match.group(1).decode("utf-8") + + # For the time being, we don't care about anything other than "BINARY". + # The only other value that is currently allowed by the standard is + # "ISO-IR 10646 2000 UTF-8" in other words UTF-8. + hdrcharset = pax_headers.get("hdrcharset") + if hdrcharset == "BINARY": + encoding = tarfile.encoding + else: + encoding = "utf-8" + + # Parse pax header information. A record looks like that: + # "%d %s=%s\n" % (length, keyword, value). length is the size + # of the complete record including the length field itself and + # the newline. keyword and value are both UTF-8 encoded strings. + regex = re.compile(br"(\d+) ([^=]+)=") + pos = 0 + while True: + match = regex.match(buf, pos) + if not match: + break + + length, keyword = match.groups() + length = int(length) + value = buf[match.end(2) + 1:match.start(1) + length - 1] + + # Normally, we could just use "utf-8" as the encoding and "strict" + # as the error handler, but we better not take the risk. 
For + # example, GNU tar <= 1.23 is known to store filenames it cannot + # translate to UTF-8 as raw strings (unfortunately without a + # hdrcharset=BINARY header). + # We first try the strict standard encoding, and if that fails we + # fall back on the user's encoding and error handler. + keyword = self._decode_pax_field(keyword, "utf-8", "utf-8", + tarfile.errors) + if keyword in PAX_NAME_FIELDS: + value = self._decode_pax_field(value, encoding, tarfile.encoding, + tarfile.errors) + else: + value = self._decode_pax_field(value, "utf-8", "utf-8", + tarfile.errors) + + pax_headers[keyword] = value + pos += length + + # Fetch the next header. + try: + next = self.fromtarfile(tarfile) + except HeaderError: + raise SubsequentHeaderError("missing or bad subsequent header") + + # Process GNU sparse information. + if "GNU.sparse.map" in pax_headers: + # GNU extended sparse format version 0.1. + self._proc_gnusparse_01(next, pax_headers) + + elif "GNU.sparse.size" in pax_headers: + # GNU extended sparse format version 0.0. + self._proc_gnusparse_00(next, pax_headers, buf) + + elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0": + # GNU extended sparse format version 1.0. + self._proc_gnusparse_10(next, pax_headers, tarfile) + + if self.type in (XHDTYPE, SOLARIS_XHDTYPE): + # Patch the TarInfo object with the extended header info. + next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors) + next.offset = self.offset + + if "size" in pax_headers: + # If the extended header replaces the size field, + # we need to recalculate the offset where the next + # header starts. + offset = next.offset_data + if next.isreg() or next.type not in SUPPORTED_TYPES: + offset += next._block(next.size) + tarfile.offset = offset + + return next + + def _proc_gnusparse_00(self, next, pax_headers, buf): + """Process a GNU tar extended sparse header, version 0.0. + """ + offsets = [] + for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf): + offsets.append(int(match.group(1))) + numbytes = [] + for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf): + numbytes.append(int(match.group(1))) + next.sparse = list(zip(offsets, numbytes)) + + def _proc_gnusparse_01(self, next, pax_headers): + """Process a GNU tar extended sparse header, version 0.1. + """ + sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")] + next.sparse = list(zip(sparse[::2], sparse[1::2])) + + def _proc_gnusparse_10(self, next, pax_headers, tarfile): + """Process a GNU tar extended sparse header, version 1.0. + """ + fields = None + sparse = [] + buf = tarfile.fileobj.read(BLOCKSIZE) + fields, buf = buf.split(b"\n", 1) + fields = int(fields) + while len(sparse) < fields * 2: + if b"\n" not in buf: + buf += tarfile.fileobj.read(BLOCKSIZE) + number, buf = buf.split(b"\n", 1) + sparse.append(int(number)) + next.offset_data = tarfile.fileobj.tell() + next.sparse = list(zip(sparse[::2], sparse[1::2])) + + def _apply_pax_info(self, pax_headers, encoding, errors): + """Replace fields with supplemental information from a previous + pax extended or global header. 
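+        Numeric fields that fail to parse fall back to 0; a trailing
+        slash is stripped from "path" values.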
+ """ + for keyword, value in pax_headers.items(): + if keyword == "GNU.sparse.name": + setattr(self, "path", value) + elif keyword == "GNU.sparse.size": + setattr(self, "size", int(value)) + elif keyword == "GNU.sparse.realsize": + setattr(self, "size", int(value)) + elif keyword in PAX_FIELDS: + if keyword in PAX_NUMBER_FIELDS: + try: + value = PAX_NUMBER_FIELDS[keyword](value) + except ValueError: + value = 0 + if keyword == "path": + value = value.rstrip("/") + setattr(self, keyword, value) + + self.pax_headers = pax_headers.copy() + + def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors): + """Decode a single field from a pax record. + """ + try: + return value.decode(encoding, "strict") + except UnicodeDecodeError: + return value.decode(fallback_encoding, fallback_errors) + + def _block(self, count): + """Round up a byte count by BLOCKSIZE and return it, + e.g. _block(834) => 1024. + """ + blocks, remainder = divmod(count, BLOCKSIZE) + if remainder: + blocks += 1 + return blocks * BLOCKSIZE + + def isreg(self): + return self.type in REGULAR_TYPES + def isfile(self): + return self.isreg() + def isdir(self): + return self.type == DIRTYPE + def issym(self): + return self.type == SYMTYPE + def islnk(self): + return self.type == LNKTYPE + def ischr(self): + return self.type == CHRTYPE + def isblk(self): + return self.type == BLKTYPE + def isfifo(self): + return self.type == FIFOTYPE + def issparse(self): + return self.sparse is not None + def isdev(self): + return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE) +# class TarInfo + +class TarFile(object): + """The TarFile Class provides an interface to tar archives. + """ + + debug = 0 # May be set from 0 (no msgs) to 3 (all msgs) + + dereference = False # If true, add content of linked file to the + # tar file, else the link. + + ignore_zeros = False # If true, skips empty or invalid blocks and + # continues processing. + + errorlevel = 1 # If 0, fatal errors only appear in debug + # messages (if debug >= 0). If > 0, errors + # are passed to the caller as exceptions. + + format = DEFAULT_FORMAT # The format to use when creating an archive. + + encoding = ENCODING # Encoding for 8-bit character strings. + + errors = None # Error handler for unicode conversion. + + tarinfo = TarInfo # The default TarInfo class to use. + + fileobject = ExFileObject # The file-object for extractfile(). + + def __init__(self, name=None, mode="r", fileobj=None, format=None, + tarinfo=None, dereference=None, ignore_zeros=None, encoding=None, + errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None): + """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to + read from an existing archive, 'a' to append data to an existing + file or 'w' to create a new file overwriting an existing one. `mode' + defaults to 'r'. + If `fileobj' is given, it is used for reading or writing data. If it + can be determined, `mode' is overridden by `fileobj's mode. + `fileobj' is not closed, when TarFile is closed. + """ + modes = {"r": "rb", "a": "r+b", "w": "wb"} + if mode not in modes: + raise ValueError("mode must be 'r', 'a' or 'w'") + self.mode = mode + self._mode = modes[mode] + + if not fileobj: + if self.mode == "a" and not os.path.exists(name): + # Create nonexistent files in append mode. 
+ self.mode = "w" + self._mode = "wb" + fileobj = bltn_open(name, self._mode) + self._extfileobj = False + else: + if (name is None and hasattr(fileobj, "name") and + isinstance(fileobj.name, (str, bytes))): + name = fileobj.name + if hasattr(fileobj, "mode"): + self._mode = fileobj.mode + self._extfileobj = True + self.name = os.path.abspath(name) if name else None + self.fileobj = fileobj + + # Init attributes. + if format is not None: + self.format = format + if tarinfo is not None: + self.tarinfo = tarinfo + if dereference is not None: + self.dereference = dereference + if ignore_zeros is not None: + self.ignore_zeros = ignore_zeros + if encoding is not None: + self.encoding = encoding + self.errors = errors + + if pax_headers is not None and self.format == PAX_FORMAT: + self.pax_headers = pax_headers + else: + self.pax_headers = {} + + if debug is not None: + self.debug = debug + if errorlevel is not None: + self.errorlevel = errorlevel + + # Init datastructures. + self.closed = False + self.members = [] # list of members as TarInfo objects + self._loaded = False # flag if all members have been read + self.offset = self.fileobj.tell() + # current position in the archive file + self.inodes = {} # dictionary caching the inodes of + # archive members already added + + try: + if self.mode == "r": + self.firstmember = None + self.firstmember = self.next() + + if self.mode == "a": + # Move to the end of the archive, + # before the first empty block. + while True: + self.fileobj.seek(self.offset) + try: + tarinfo = self.tarinfo.fromtarfile(self) + self.members.append(tarinfo) + except EOFHeaderError: + self.fileobj.seek(self.offset) + break + except HeaderError as e: + raise ReadError(str(e)) + + if self.mode in "aw": + self._loaded = True + + if self.pax_headers: + buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy()) + self.fileobj.write(buf) + self.offset += len(buf) + except: + if not self._extfileobj: + self.fileobj.close() + self.closed = True + raise + + #-------------------------------------------------------------------------- + # Below are the classmethods which act as alternate constructors to the + # TarFile class. The open() method is the only one that is needed for + # public use; it is the "super"-constructor and is able to select an + # adequate "sub"-constructor for a particular compression using the mapping + # from OPEN_METH. + # + # This concept allows one to subclass TarFile without losing the comfort of + # the super-constructor. A sub-constructor is registered and made available + # by adding it to the mapping in OPEN_METH. + + @classmethod + def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs): + """Open a tar archive for reading, writing or appending. Return + an appropriate TarFile class. 
+ + mode: + 'r' or 'r:*' open for reading with transparent compression + 'r:' open for reading exclusively uncompressed + 'r:gz' open for reading with gzip compression + 'r:bz2' open for reading with bzip2 compression + 'r:xz' open for reading with lzma compression + 'a' or 'a:' open for appending, creating the file if necessary + 'w' or 'w:' open for writing without compression + 'w:gz' open for writing with gzip compression + 'w:bz2' open for writing with bzip2 compression + 'w:xz' open for writing with lzma compression + + 'r|*' open a stream of tar blocks with transparent compression + 'r|' open an uncompressed stream of tar blocks for reading + 'r|gz' open a gzip compressed stream of tar blocks + 'r|bz2' open a bzip2 compressed stream of tar blocks + 'r|xz' open an lzma compressed stream of tar blocks + 'w|' open an uncompressed stream for writing + 'w|gz' open a gzip compressed stream for writing + 'w|bz2' open a bzip2 compressed stream for writing + 'w|xz' open an lzma compressed stream for writing + """ + + if not name and not fileobj: + raise ValueError("nothing to open") + + if mode in ("r", "r:*"): + # Find out which *open() is appropriate for opening the file. + for comptype in cls.OPEN_METH: + func = getattr(cls, cls.OPEN_METH[comptype]) + if fileobj is not None: + saved_pos = fileobj.tell() + try: + return func(name, "r", fileobj, **kwargs) + except (ReadError, CompressionError) as e: + if fileobj is not None: + fileobj.seek(saved_pos) + continue + raise ReadError("file could not be opened successfully") + + elif ":" in mode: + filemode, comptype = mode.split(":", 1) + filemode = filemode or "r" + comptype = comptype or "tar" + + # Select the *open() function according to + # given compression. + if comptype in cls.OPEN_METH: + func = getattr(cls, cls.OPEN_METH[comptype]) + else: + raise CompressionError("unknown compression type %r" % comptype) + return func(name, filemode, fileobj, **kwargs) + + elif "|" in mode: + filemode, comptype = mode.split("|", 1) + filemode = filemode or "r" + comptype = comptype or "tar" + + if filemode not in ("r", "w"): + raise ValueError("mode must be 'r' or 'w'") + + stream = _Stream(name, filemode, comptype, fileobj, bufsize) + try: + t = cls(name, filemode, stream, **kwargs) + except: + stream.close() + raise + t._extfileobj = False + return t + + elif mode in ("a", "w"): + return cls.taropen(name, mode, fileobj, **kwargs) + + raise ValueError("undiscernible mode") + + @classmethod + def taropen(cls, name, mode="r", fileobj=None, **kwargs): + """Open uncompressed tar archive name for reading or writing. + """ + if mode not in ("r", "a", "w"): + raise ValueError("mode must be 'r', 'a' or 'w'") + return cls(name, mode, fileobj, **kwargs) + + @classmethod + def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): + """Open gzip compressed tar archive name for reading or writing. + Appending is not allowed. 
+ """ + if mode not in ("r", "w"): + raise ValueError("mode must be 'r' or 'w'") + + try: + import gzip + gzip.GzipFile + except (ImportError, AttributeError): + raise CompressionError("gzip module is not available") + + try: + fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj) + except OSError: + if fileobj is not None and mode == 'r': + raise ReadError("not a gzip file") + raise + + try: + t = cls.taropen(name, mode, fileobj, **kwargs) + except OSError: + fileobj.close() + if mode == 'r': + raise ReadError("not a gzip file") + raise + except: + fileobj.close() + raise + t._extfileobj = False + return t + + @classmethod + def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): + """Open bzip2 compressed tar archive name for reading or writing. + Appending is not allowed. + """ + if mode not in ("r", "w"): + raise ValueError("mode must be 'r' or 'w'.") + + try: + import bz2 + except ImportError: + raise CompressionError("bz2 module is not available") + + fileobj = bz2.BZ2File(fileobj or name, mode, + compresslevel=compresslevel) + + try: + t = cls.taropen(name, mode, fileobj, **kwargs) + except (OSError, EOFError): + fileobj.close() + if mode == 'r': + raise ReadError("not a bzip2 file") + raise + except: + fileobj.close() + raise + t._extfileobj = False + return t + + @classmethod + def xzopen(cls, name, mode="r", fileobj=None, preset=None, **kwargs): + """Open lzma compressed tar archive name for reading or writing. + Appending is not allowed. + """ + if mode not in ("r", "w"): + raise ValueError("mode must be 'r' or 'w'") + + try: + import lzma + except ImportError: + raise CompressionError("lzma module is not available") + + fileobj = lzma.LZMAFile(fileobj or name, mode, preset=preset) + + try: + t = cls.taropen(name, mode, fileobj, **kwargs) + except (lzma.LZMAError, EOFError): + fileobj.close() + if mode == 'r': + raise ReadError("not an lzma file") + raise + except: + fileobj.close() + raise + t._extfileobj = False + return t + + # All *open() methods are registered here. + OPEN_METH = { + "tar": "taropen", # uncompressed tar + "gz": "gzopen", # gzip compressed tar + "bz2": "bz2open", # bzip2 compressed tar + "xz": "xzopen" # lzma compressed tar + } + + #-------------------------------------------------------------------------- + # The public methods which TarFile provides: + + def close(self): + """Close the TarFile. In write-mode, two finishing zero blocks are + appended to the archive. + """ + if self.closed: + return + + self.closed = True + try: + if self.mode in "aw": + self.fileobj.write(NUL * (BLOCKSIZE * 2)) + self.offset += (BLOCKSIZE * 2) + # fill up the end with zero-blocks + # (like option -b20 for tar does) + blocks, remainder = divmod(self.offset, RECORDSIZE) + if remainder > 0: + self.fileobj.write(NUL * (RECORDSIZE - remainder)) + finally: + if not self._extfileobj: + self.fileobj.close() + + def getmember(self, name): + """Return a TarInfo object for member `name'. If `name' can not be + found in the archive, KeyError is raised. If a member occurs more + than once in the archive, its last occurrence is assumed to be the + most up-to-date version. + """ + tarinfo = self._getmember(name) + if tarinfo is None: + raise KeyError("filename %r not found" % name) + return tarinfo + + def getmembers(self): + """Return the members of the archive as a list of TarInfo objects. The + list has the same order as the members in the archive. 
+ """ + self._check() + if not self._loaded: # if we want to obtain a list of + self._load() # all members, we first have to + # scan the whole archive. + return self.members + + def getnames(self): + """Return the members of the archive as a list of their names. It has + the same order as the list returned by getmembers(). + """ + return [tarinfo.name for tarinfo in self.getmembers()] + + def gettarinfo(self, name=None, arcname=None, fileobj=None): + """Create a TarInfo object for either the file `name' or the file + object `fileobj' (using os.fstat on its file descriptor). You can + modify some of the TarInfo's attributes before you add it using + addfile(). If given, `arcname' specifies an alternative name for the + file in the archive. + """ + self._check("aw") + + # When fileobj is given, replace name by + # fileobj's real name. + if fileobj is not None: + name = fileobj.name + + # Building the name of the member in the archive. + # Backward slashes are converted to forward slashes, + # Absolute paths are turned to relative paths. + if arcname is None: + arcname = name + drv, arcname = os.path.splitdrive(arcname) + arcname = arcname.replace(os.sep, "/") + arcname = arcname.lstrip("/") + + # Now, fill the TarInfo object with + # information specific for the file. + tarinfo = self.tarinfo() + tarinfo.tarfile = self + + # Use os.stat or os.lstat, depending on platform + # and if symlinks shall be resolved. + if fileobj is None: + if hasattr(os, "lstat") and not self.dereference: + statres = os.lstat(name) + else: + statres = os.stat(name) + else: + statres = os.fstat(fileobj.fileno()) + linkname = "" + + stmd = statres.st_mode + if stat.S_ISREG(stmd): + inode = (statres.st_ino, statres.st_dev) + if not self.dereference and statres.st_nlink > 1 and \ + inode in self.inodes and arcname != self.inodes[inode]: + # Is it a hardlink to an already + # archived file? + type = LNKTYPE + linkname = self.inodes[inode] + else: + # The inode is added only if its valid. + # For win32 it is always 0. + type = REGTYPE + if inode[0]: + self.inodes[inode] = arcname + elif stat.S_ISDIR(stmd): + type = DIRTYPE + elif stat.S_ISFIFO(stmd): + type = FIFOTYPE + elif stat.S_ISLNK(stmd): + type = SYMTYPE + linkname = os.readlink(name) + elif stat.S_ISCHR(stmd): + type = CHRTYPE + elif stat.S_ISBLK(stmd): + type = BLKTYPE + else: + return None + + # Fill the TarInfo object with all + # information we can get. + tarinfo.name = arcname + tarinfo.mode = stmd + tarinfo.uid = statres.st_uid + tarinfo.gid = statres.st_gid + if type == REGTYPE: + tarinfo.size = statres.st_size + else: + tarinfo.size = 0 + tarinfo.mtime = statres.st_mtime + tarinfo.type = type + tarinfo.linkname = linkname + if pwd: + try: + tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0] + except KeyError: + pass + if grp: + try: + tarinfo.gname = grp.getgrgid(tarinfo.gid)[0] + except KeyError: + pass + + if type in (CHRTYPE, BLKTYPE): + if hasattr(os, "major") and hasattr(os, "minor"): + tarinfo.devmajor = os.major(statres.st_rdev) + tarinfo.devminor = os.minor(statres.st_rdev) + return tarinfo + + def list(self, verbose=True): + """Print a table of contents to sys.stdout. If `verbose' is False, only + the names of the members are printed. If it is True, an `ls -l'-like + output is produced. 
+ """ + self._check() + + for tarinfo in self: + if verbose: + _safe_print(stat.filemode(tarinfo.mode)) + _safe_print("%s/%s" % (tarinfo.uname or tarinfo.uid, + tarinfo.gname or tarinfo.gid)) + if tarinfo.ischr() or tarinfo.isblk(): + _safe_print("%10s" % + ("%d,%d" % (tarinfo.devmajor, tarinfo.devminor))) + else: + _safe_print("%10d" % tarinfo.size) + _safe_print("%d-%02d-%02d %02d:%02d:%02d" \ + % time.localtime(tarinfo.mtime)[:6]) + + _safe_print(tarinfo.name + ("/" if tarinfo.isdir() else "")) + + if verbose: + if tarinfo.issym(): + _safe_print("-> " + tarinfo.linkname) + if tarinfo.islnk(): + _safe_print("link to " + tarinfo.linkname) + print() + + def add(self, name, arcname=None, recursive=True, exclude=None, *, filter=None): + """Add the file `name' to the archive. `name' may be any type of file + (directory, fifo, symbolic link, etc.). If given, `arcname' + specifies an alternative name for the file in the archive. + Directories are added recursively by default. This can be avoided by + setting `recursive' to False. `exclude' is a function that should + return True for each filename to be excluded. `filter' is a function + that expects a TarInfo object argument and returns the changed + TarInfo object, if it returns None the TarInfo object will be + excluded from the archive. + """ + self._check("aw") + + if arcname is None: + arcname = name + + # Exclude pathnames. + if exclude is not None: + import warnings + warnings.warn("use the filter argument instead", + DeprecationWarning, 2) + if exclude(name): + self._dbg(2, "tarfile: Excluded %r" % name) + return + + # Skip if somebody tries to archive the archive... + if self.name is not None and os.path.abspath(name) == self.name: + self._dbg(2, "tarfile: Skipped %r" % name) + return + + self._dbg(1, name) + + # Create a TarInfo object from the file. + tarinfo = self.gettarinfo(name, arcname) + + if tarinfo is None: + self._dbg(1, "tarfile: Unsupported type %r" % name) + return + + # Change or exclude the TarInfo object. + if filter is not None: + tarinfo = filter(tarinfo) + if tarinfo is None: + self._dbg(2, "tarfile: Excluded %r" % name) + return + + # Append the tar header and data to the archive. + if tarinfo.isreg(): + with bltn_open(name, "rb") as f: + self.addfile(tarinfo, f) + + elif tarinfo.isdir(): + self.addfile(tarinfo) + if recursive: + for f in os.listdir(name): + self.add(os.path.join(name, f), os.path.join(arcname, f), + recursive, exclude, filter=filter) + + else: + self.addfile(tarinfo) + + def addfile(self, tarinfo, fileobj=None): + """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is + given, tarinfo.size bytes are read from it and added to the archive. + You can create TarInfo objects using gettarinfo(). + On Windows platforms, `fileobj' should always be opened with mode + 'rb' to avoid irritation about the file size. + """ + self._check("aw") + + tarinfo = copy.copy(tarinfo) + + buf = tarinfo.tobuf(self.format, self.encoding, self.errors) + self.fileobj.write(buf) + self.offset += len(buf) + + # If there's data to follow, append it. 
+ if fileobj is not None: + copyfileobj(fileobj, self.fileobj, tarinfo.size) + blocks, remainder = divmod(tarinfo.size, BLOCKSIZE) + if remainder > 0: + self.fileobj.write(NUL * (BLOCKSIZE - remainder)) + blocks += 1 + self.offset += blocks * BLOCKSIZE + + self.members.append(tarinfo) + + def extractall(self, path=".", members=None): + """Extract all members from the archive to the current working + directory and set owner, modification time and permissions on + directories afterwards. `path' specifies a different directory + to extract to. `members' is optional and must be a subset of the + list returned by getmembers(). + """ + directories = [] + + if members is None: + members = self + + for tarinfo in members: + if tarinfo.isdir(): + # Extract directories with a safe mode. + directories.append(tarinfo) + tarinfo = copy.copy(tarinfo) + tarinfo.mode = 0o700 + # Do not set_attrs directories, as we will do that further down + self.extract(tarinfo, path, set_attrs=not tarinfo.isdir()) + + # Reverse sort directories. + directories.sort(key=lambda a: a.name) + directories.reverse() + + # Set correct owner, mtime and filemode on directories. + for tarinfo in directories: + dirpath = os.path.join(path, tarinfo.name) + try: + self.chown(tarinfo, dirpath) + self.utime(tarinfo, dirpath) + self.chmod(tarinfo, dirpath) + except ExtractError as e: + if self.errorlevel > 1: + raise + else: + self._dbg(1, "tarfile: %s" % e) + + def extract(self, member, path="", set_attrs=True): + """Extract a member from the archive to the current working directory, + using its full name. Its file information is extracted as accurately + as possible. `member' may be a filename or a TarInfo object. You can + specify a different directory using `path'. File attributes (owner, + mtime, mode) are set unless `set_attrs' is False. + """ + self._check("r") + + if isinstance(member, str): + tarinfo = self.getmember(member) + else: + tarinfo = member + + # Prepare the link target for makelink(). + if tarinfo.islnk(): + tarinfo._link_target = os.path.join(path, tarinfo.linkname) + + try: + self._extract_member(tarinfo, os.path.join(path, tarinfo.name), + set_attrs=set_attrs) + except OSError as e: + if self.errorlevel > 0: + raise + else: + if e.filename is None: + self._dbg(1, "tarfile: %s" % e.strerror) + else: + self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename)) + except ExtractError as e: + if self.errorlevel > 1: + raise + else: + self._dbg(1, "tarfile: %s" % e) + + def extractfile(self, member): + """Extract a member from the archive as a file object. `member' may be + a filename or a TarInfo object. If `member' is a regular file or a + link, an io.BufferedReader object is returned. Otherwise, None is + returned. + """ + self._check("r") + + if isinstance(member, str): + tarinfo = self.getmember(member) + else: + tarinfo = member + + if tarinfo.isreg() or tarinfo.type not in SUPPORTED_TYPES: + # Members with unknown types are treated as regular files. + return self.fileobject(self, tarinfo) + + elif tarinfo.islnk() or tarinfo.issym(): + if isinstance(self.fileobj, _Stream): + # A small but ugly workaround for the case that someone tries + # to extract a (sym)link as a file-object from a non-seekable + # stream of tar blocks. + raise StreamError("cannot extract (sym)link as file object") + else: + # A (sym)link's file object is its target's file object. 
+ return self.extractfile(self._find_link_target(tarinfo)) + else: + # If there's no data associated with the member (directory, chrdev, + # blkdev, etc.), return None instead of a file object. + return None + + def _extract_member(self, tarinfo, targetpath, set_attrs=True): + """Extract the TarInfo object tarinfo to a physical + file called targetpath. + """ + # Fetch the TarInfo object for the given name + # and build the destination pathname, replacing + # forward slashes to platform specific separators. + targetpath = targetpath.rstrip("/") + targetpath = targetpath.replace("/", os.sep) + + # Create all upper directories. + upperdirs = os.path.dirname(targetpath) + if upperdirs and not os.path.exists(upperdirs): + # Create directories that are not part of the archive with + # default permissions. + os.makedirs(upperdirs) + + if tarinfo.islnk() or tarinfo.issym(): + self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname)) + else: + self._dbg(1, tarinfo.name) + + if tarinfo.isreg(): + self.makefile(tarinfo, targetpath) + elif tarinfo.isdir(): + self.makedir(tarinfo, targetpath) + elif tarinfo.isfifo(): + self.makefifo(tarinfo, targetpath) + elif tarinfo.ischr() or tarinfo.isblk(): + self.makedev(tarinfo, targetpath) + elif tarinfo.islnk() or tarinfo.issym(): + self.makelink(tarinfo, targetpath) + elif tarinfo.type not in SUPPORTED_TYPES: + self.makeunknown(tarinfo, targetpath) + else: + self.makefile(tarinfo, targetpath) + + if set_attrs: + self.chown(tarinfo, targetpath) + if not tarinfo.issym(): + self.chmod(tarinfo, targetpath) + self.utime(tarinfo, targetpath) + + #-------------------------------------------------------------------------- + # Below are the different file methods. They are called via + # _extract_member() when extract() is called. They can be replaced in a + # subclass to implement other functionality. + + def makedir(self, tarinfo, targetpath): + """Make a directory called targetpath. + """ + try: + # Use a safe mode for the directory, the real mode is set + # later in _extract_member(). + os.mkdir(targetpath, 0o700) + except FileExistsError: + pass + + def makefile(self, tarinfo, targetpath): + """Make a file called targetpath. + """ + source = self.fileobj + source.seek(tarinfo.offset_data) + with bltn_open(targetpath, "wb") as target: + if tarinfo.sparse is not None: + for offset, size in tarinfo.sparse: + target.seek(offset) + copyfileobj(source, target, size, ReadError) + else: + copyfileobj(source, target, tarinfo.size, ReadError) + target.seek(tarinfo.size) + target.truncate() + + def makeunknown(self, tarinfo, targetpath): + """Make a file from a TarInfo object with an unknown type + at targetpath. + """ + self.makefile(tarinfo, targetpath) + self._dbg(1, "tarfile: Unknown file type %r, " \ + "extracted as regular file." % tarinfo.type) + + def makefifo(self, tarinfo, targetpath): + """Make a fifo called targetpath. + """ + if hasattr(os, "mkfifo"): + os.mkfifo(targetpath) + else: + raise ExtractError("fifo not supported by system") + + def makedev(self, tarinfo, targetpath): + """Make a character or block device called targetpath. + """ + if not hasattr(os, "mknod") or not hasattr(os, "makedev"): + raise ExtractError("special devices not supported by system") + + mode = tarinfo.mode + if tarinfo.isblk(): + mode |= stat.S_IFBLK + else: + mode |= stat.S_IFCHR + + os.mknod(targetpath, mode, + os.makedev(tarinfo.devmajor, tarinfo.devminor)) + + def makelink(self, tarinfo, targetpath): + """Make a (symbolic) link called targetpath. 
If it cannot be created + (platform limitation), we try to make a copy of the referenced file + instead of a link. + """ + try: + # For systems that support symbolic and hard links. + if tarinfo.issym(): + os.symlink(tarinfo.linkname, targetpath) + else: + # See extract(). + if os.path.exists(tarinfo._link_target): + os.link(tarinfo._link_target, targetpath) + else: + self._extract_member(self._find_link_target(tarinfo), + targetpath) + except symlink_exception: + try: + self._extract_member(self._find_link_target(tarinfo), + targetpath) + except KeyError: + raise ExtractError("unable to resolve link inside archive") + + def chown(self, tarinfo, targetpath): + """Set owner of targetpath according to tarinfo. + """ + if pwd and hasattr(os, "geteuid") and os.geteuid() == 0: + # We have to be root to do so. + try: + g = grp.getgrnam(tarinfo.gname)[2] + except KeyError: + g = tarinfo.gid + try: + u = pwd.getpwnam(tarinfo.uname)[2] + except KeyError: + u = tarinfo.uid + try: + if tarinfo.issym() and hasattr(os, "lchown"): + os.lchown(targetpath, u, g) + else: + os.chown(targetpath, u, g) + except OSError as e: + raise ExtractError("could not change owner") + + def chmod(self, tarinfo, targetpath): + """Set file permissions of targetpath according to tarinfo. + """ + if hasattr(os, 'chmod'): + try: + os.chmod(targetpath, tarinfo.mode) + except OSError as e: + raise ExtractError("could not change mode") + + def utime(self, tarinfo, targetpath): + """Set modification time of targetpath according to tarinfo. + """ + if not hasattr(os, 'utime'): + return + try: + os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime)) + except OSError as e: + raise ExtractError("could not change modification time") + + #-------------------------------------------------------------------------- + def next(self): + """Return the next member of the archive as a TarInfo object, when + TarFile is opened for reading. Return None if there is no more + available. + """ + self._check("ra") + if self.firstmember is not None: + m = self.firstmember + self.firstmember = None + return m + + # Advance the file pointer. + if self.offset != self.fileobj.tell(): + self.fileobj.seek(self.offset - 1) + if not self.fileobj.read(1): + raise ReadError("unexpected end of data") + + # Read the next block. + tarinfo = None + while True: + try: + tarinfo = self.tarinfo.fromtarfile(self) + except EOFHeaderError as e: + if self.ignore_zeros: + self._dbg(2, "0x%X: %s" % (self.offset, e)) + self.offset += BLOCKSIZE + continue + except InvalidHeaderError as e: + if self.ignore_zeros: + self._dbg(2, "0x%X: %s" % (self.offset, e)) + self.offset += BLOCKSIZE + continue + elif self.offset == 0: + raise ReadError(str(e)) + except EmptyHeaderError: + if self.offset == 0: + raise ReadError("empty file") + except TruncatedHeaderError as e: + if self.offset == 0: + raise ReadError(str(e)) + except SubsequentHeaderError as e: + raise ReadError(str(e)) + break + + if tarinfo is not None: + self.members.append(tarinfo) + else: + self._loaded = True + + return tarinfo + + #-------------------------------------------------------------------------- + # Little helper methods: + + def _getmember(self, name, tarinfo=None, normalize=False): + """Find an archive member by name from bottom to top. + If tarinfo is given, it is used as the starting point. + """ + # Ensure that all members have been loaded. + members = self.getmembers() + + # Limit the member search list up to tarinfo. 
+ if tarinfo is not None: + members = members[:members.index(tarinfo)] + + if normalize: + name = os.path.normpath(name) + + for member in reversed(members): + if normalize: + member_name = os.path.normpath(member.name) + else: + member_name = member.name + + if name == member_name: + return member + + def _load(self): + """Read through the entire archive file and look for readable + members. + """ + while True: + tarinfo = self.next() + if tarinfo is None: + break + self._loaded = True + + def _check(self, mode=None): + """Check if TarFile is still open, and if the operation's mode + corresponds to TarFile's mode. + """ + if self.closed: + raise OSError("%s is closed" % self.__class__.__name__) + if mode is not None and self.mode not in mode: + raise OSError("bad operation for mode %r" % self.mode) + + def _find_link_target(self, tarinfo): + """Find the target member of a symlink or hardlink member in the + archive. + """ + if tarinfo.issym(): + # Always search the entire archive. + linkname = "/".join(filter(None, (os.path.dirname(tarinfo.name), tarinfo.linkname))) + limit = None + else: + # Search the archive before the link, because a hard link is + # just a reference to an already archived file. + linkname = tarinfo.linkname + limit = tarinfo + + member = self._getmember(linkname, tarinfo=limit, normalize=True) + if member is None: + raise KeyError("linkname %r not found" % linkname) + return member + + def __iter__(self): + """Provide an iterator object. + """ + if self._loaded: + return iter(self.members) + else: + return TarIter(self) + + def _dbg(self, level, msg): + """Write debugging output to sys.stderr. + """ + if level <= self.debug: + print(msg, file=sys.stderr) + + def __enter__(self): + self._check() + return self + + def __exit__(self, type, value, traceback): + if type is None: + self.close() + else: + # An exception occurred. We must not call close() because + # it would try to write end-of-archive blocks and padding. + if not self._extfileobj: + self.fileobj.close() + self.closed = True +# class TarFile + +class TarIter: + """Iterator Class. + + for tarinfo in TarFile(...): + suite... + """ + + def __init__(self, tarfile): + """Construct a TarIter object. + """ + self.tarfile = tarfile + self.index = 0 + def __iter__(self): + """Return iterator object. + """ + return self + def __next__(self): + """Return the next item using TarFile's next() method. + When all members have been read, set TarFile as _loaded. + """ + # Fix for SF #1100429: Under rare circumstances it can + # happen that getmembers() is called during iteration, + # which will cause TarIter to stop prematurely. + + if self.index == 0 and self.tarfile.firstmember is not None: + tarinfo = self.tarfile.next() + elif self.index < len(self.tarfile.members): + tarinfo = self.tarfile.members[self.index] + elif not self.tarfile._loaded: + tarinfo = self.tarfile.next() + if not tarinfo: + self.tarfile._loaded = True + raise StopIteration + else: + raise StopIteration + self.index += 1 + return tarinfo + +#-------------------- +# exported functions +#-------------------- +def is_tarfile(name): + """Return True if name points to a tar archive that we + are able to handle, else return False. + """ + try: + t = open(name) + t.close() + return True + except TarError: + return False + +open = TarFile.open + + +def main(): + import argparse + + description = 'A simple command line interface for tarfile module.' 
+    parser = argparse.ArgumentParser(description=description)
+    parser.add_argument('-v', '--verbose', action='store_true', default=False,
+                        help='Verbose output')
+    group = parser.add_mutually_exclusive_group()
+    group.add_argument('-l', '--list', metavar='<tarfile>',
+                       help='Show listing of a tarfile')
+    group.add_argument('-e', '--extract', nargs='+',
+                       metavar=('<tarfile>', '<output_dir>'),
+                       help='Extract tarfile into target dir')
+    group.add_argument('-c', '--create', nargs='+',
+                       metavar=('<name>', '<file>'),
+                       help='Create tarfile from sources')
+    group.add_argument('-t', '--test', metavar='<tarfile>',
+                       help='Test if a tarfile is valid')
+    args = parser.parse_args()
+
+    if args.test:
+        src = args.test
+        if is_tarfile(src):
+            with open(src, 'r') as tar:
+                tar.getmembers()
+                print(tar.getmembers(), file=sys.stderr)
+            if args.verbose:
+                print('{!r} is a tar archive.'.format(src))
+        else:
+            parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
+
+    elif args.list:
+        src = args.list
+        if is_tarfile(src):
+            with TarFile.open(src, 'r:*') as tf:
+                tf.list(verbose=args.verbose)
+        else:
+            parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
+
+    elif args.extract:
+        if len(args.extract) == 1:
+            src = args.extract[0]
+            curdir = os.curdir
+        elif len(args.extract) == 2:
+            src, curdir = args.extract
+        else:
+            parser.exit(1, parser.format_help())
+
+        if is_tarfile(src):
+            with TarFile.open(src, 'r:*') as tf:
+                tf.extractall(path=curdir)
+            if args.verbose:
+                if curdir == '.':
+                    msg = '{!r} file is extracted.'.format(src)
+                else:
+                    msg = ('{!r} file is extracted '
+                           'into {!r} directory.').format(src, curdir)
+                print(msg)
+        else:
+            parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
+
+    elif args.create:
+        tar_name = args.create.pop(0)
+        _, ext = os.path.splitext(tar_name)
+        compressions = {
+            # gz
+            '.gz': 'gz',
+            '.tgz': 'gz',
+            # xz
+            '.xz': 'xz',
+            '.txz': 'xz',
+            # bz2
+            '.bz2': 'bz2',
+            '.tbz': 'bz2',
+            '.tbz2': 'bz2',
+            '.tb2': 'bz2',
+        }
+        tar_mode = 'w:' + compressions[ext] if ext in compressions else 'w'
+        tar_files = args.create
+
+        with TarFile.open(tar_name, tar_mode) as tf:
+            for file_name in tar_files:
+                tf.add(file_name)
+
+        if args.verbose:
+            print('{!r} file created.'.format(tar_name))
+
+    else:
+        parser.exit(1, parser.format_help())
+
+if __name__ == '__main__':
+    main()
diff --git a/v1/flask/lib/python3.4/tempfile.py b/v1/flask/lib/python3.4/tempfile.py
deleted file mode 120000
index d75f89e..0000000
--- a/v1/flask/lib/python3.4/tempfile.py
+++ /dev/null
@@ -1 +0,0 @@
-/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/tempfile.py
\ No newline at end of file
diff --git a/v1/flask/lib/python3.4/tempfile.py b/v1/flask/lib/python3.4/tempfile.py
new file mode 100644
index 0000000..0537228
--- /dev/null
+++ b/v1/flask/lib/python3.4/tempfile.py
@@ -0,0 +1,713 @@
+"""Temporary files.
+
+This module provides generic, low- and high-level interfaces for
+creating temporary files and directories. All of the interfaces
+provided by this module can be used without fear of race conditions
+except for 'mktemp'. 'mktemp' is subject to race conditions and
+should not be used; it is provided for backward compatibility only.
+
+This module also provides some data items to the user:
+
+  TMP_MAX  - maximum number of names that will be tried before
+             giving up.
+  tempdir  - If this is set to a string before the first use of
+             any routine from this module, it will be considered as
+             another candidate location to store temporary files.
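+
+A short illustrative sketch:
+
+    import tempfile
+    with tempfile.TemporaryFile() as f:
+        f.write(b"hello")
+        f.seek(0)
+        assert f.read() == b"hello"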
+""" + +__all__ = [ + "NamedTemporaryFile", "TemporaryFile", # high level safe interfaces + "SpooledTemporaryFile", "TemporaryDirectory", + "mkstemp", "mkdtemp", # low level safe interfaces + "mktemp", # deprecated unsafe interface + "TMP_MAX", "gettempprefix", # constants + "tempdir", "gettempdir" + ] + + +# Imports. + +import functools as _functools +import warnings as _warnings +import io as _io +import os as _os +import shutil as _shutil +import errno as _errno +from random import Random as _Random +import weakref as _weakref + +try: + import _thread +except ImportError: + import _dummy_thread as _thread +_allocate_lock = _thread.allocate_lock + +_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL +if hasattr(_os, 'O_NOFOLLOW'): + _text_openflags |= _os.O_NOFOLLOW + +_bin_openflags = _text_openflags +if hasattr(_os, 'O_BINARY'): + _bin_openflags |= _os.O_BINARY + +if hasattr(_os, 'TMP_MAX'): + TMP_MAX = _os.TMP_MAX +else: + TMP_MAX = 10000 + +# Although it does not have an underscore for historical reasons, this +# variable is an internal implementation detail (see issue 10354). +template = "tmp" + +# Internal routines. + +_once_lock = _allocate_lock() + +if hasattr(_os, "lstat"): + _stat = _os.lstat +elif hasattr(_os, "stat"): + _stat = _os.stat +else: + # Fallback. All we need is something that raises OSError if the + # file doesn't exist. + def _stat(fn): + fd = _os.open(fn, _os.O_RDONLY) + _os.close(fd) + +def _exists(fn): + try: + _stat(fn) + except OSError: + return False + else: + return True + +class _RandomNameSequence: + """An instance of _RandomNameSequence generates an endless + sequence of unpredictable strings which can safely be incorporated + into file names. Each string is six characters long. Multiple + threads can safely use the same instance at the same time. + + _RandomNameSequence is an iterator.""" + + characters = "abcdefghijklmnopqrstuvwxyz0123456789_" + + @property + def rng(self): + cur_pid = _os.getpid() + if cur_pid != getattr(self, '_rng_pid', None): + self._rng = _Random() + self._rng_pid = cur_pid + return self._rng + + def __iter__(self): + return self + + def __next__(self): + c = self.characters + choose = self.rng.choice + letters = [choose(c) for dummy in range(8)] + return ''.join(letters) + +def _candidate_tempdir_list(): + """Generate a list of candidate temporary directories which + _get_default_tempdir will try.""" + + dirlist = [] + + # First, try the environment. + for envname in 'TMPDIR', 'TEMP', 'TMP': + dirname = _os.getenv(envname) + if dirname: dirlist.append(dirname) + + # Failing that, try OS-specific locations. + if _os.name == 'nt': + dirlist.extend([ r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ]) + else: + dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ]) + + # As a last resort, the current directory. + try: + dirlist.append(_os.getcwd()) + except (AttributeError, OSError): + dirlist.append(_os.curdir) + + return dirlist + +def _get_default_tempdir(): + """Calculate the default directory to use for temporary files. + This routine should be called exactly once. + + We determine whether or not a candidate temp dir is usable by + trying to create and write to a file in that directory. If this + is successful, the test file is deleted. To prevent denial of + service, the name of the test file must be randomized.""" + + namer = _RandomNameSequence() + dirlist = _candidate_tempdir_list() + + for dir in dirlist: + if dir != _os.curdir: + dir = _os.path.abspath(dir) + # Try only a few names per directory. 
+ for seq in range(100): + name = next(namer) + filename = _os.path.join(dir, name) + try: + fd = _os.open(filename, _bin_openflags, 0o600) + try: + try: + with _io.open(fd, 'wb', closefd=False) as fp: + fp.write(b'blat') + finally: + _os.close(fd) + finally: + _os.unlink(filename) + return dir + except FileExistsError: + pass + except PermissionError: + # This exception is thrown when a directory with the chosen name + # already exists on windows. + if (_os.name == 'nt' and _os.path.isdir(dir) and + _os.access(dir, _os.W_OK)): + continue + break # no point trying more names in this directory + except OSError: + break # no point trying more names in this directory + raise FileNotFoundError(_errno.ENOENT, + "No usable temporary directory found in %s" % + dirlist) + +_name_sequence = None + +def _get_candidate_names(): + """Common setup sequence for all user-callable interfaces.""" + + global _name_sequence + if _name_sequence is None: + _once_lock.acquire() + try: + if _name_sequence is None: + _name_sequence = _RandomNameSequence() + finally: + _once_lock.release() + return _name_sequence + + +def _mkstemp_inner(dir, pre, suf, flags): + """Code common to mkstemp, TemporaryFile, and NamedTemporaryFile.""" + + names = _get_candidate_names() + + for seq in range(TMP_MAX): + name = next(names) + file = _os.path.join(dir, pre + name + suf) + try: + fd = _os.open(file, flags, 0o600) + return (fd, _os.path.abspath(file)) + except FileExistsError: + continue # try again + except PermissionError: + # This exception is thrown when a directory with the chosen name + # already exists on windows. + if (_os.name == 'nt' and _os.path.isdir(dir) and + _os.access(dir, _os.W_OK)): + continue + else: + raise + + raise FileExistsError(_errno.EEXIST, + "No usable temporary file name found") + + +# User visible interfaces. + +def gettempprefix(): + """Accessor for tempdir.template.""" + return template + +tempdir = None + +def gettempdir(): + """Accessor for tempfile.tempdir.""" + global tempdir + if tempdir is None: + _once_lock.acquire() + try: + if tempdir is None: + tempdir = _get_default_tempdir() + finally: + _once_lock.release() + return tempdir + +def mkstemp(suffix="", prefix=template, dir=None, text=False): + """User-callable function to create and return a unique temporary + file. The return value is a pair (fd, name) where fd is the + file descriptor returned by os.open, and name is the filename. + + If 'suffix' is specified, the file name will end with that suffix, + otherwise there will be no suffix. + + If 'prefix' is specified, the file name will begin with that prefix, + otherwise a default prefix is used. + + If 'dir' is specified, the file will be created in that directory, + otherwise a default directory is used. + + If 'text' is specified and true, the file is opened in text + mode. Else (the default) the file is opened in binary mode. On + some operating systems, this makes no difference. + + The file is readable and writable only by the creating user ID. + If the operating system uses permission bits to indicate whether a + file is executable, the file is executable by no one. The file + descriptor is not inherited by children of this process. + + Caller is responsible for deleting the file when done with it. 
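+
+    Illustrative use (with os imported by the caller):
+
+        fd, name = mkstemp(suffix=".log")
+        os.write(fd, b"hello")
+        os.close(fd)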
+ """ + + if dir is None: + dir = gettempdir() + + if text: + flags = _text_openflags + else: + flags = _bin_openflags + + return _mkstemp_inner(dir, prefix, suffix, flags) + + +def mkdtemp(suffix="", prefix=template, dir=None): + """User-callable function to create and return a unique temporary + directory. The return value is the pathname of the directory. + + Arguments are as for mkstemp, except that the 'text' argument is + not accepted. + + The directory is readable, writable, and searchable only by the + creating user. + + Caller is responsible for deleting the directory when done with it. + """ + + if dir is None: + dir = gettempdir() + + names = _get_candidate_names() + + for seq in range(TMP_MAX): + name = next(names) + file = _os.path.join(dir, prefix + name + suffix) + try: + _os.mkdir(file, 0o700) + return file + except FileExistsError: + continue # try again + except PermissionError: + # This exception is thrown when a directory with the chosen name + # already exists on windows. + if (_os.name == 'nt' and _os.path.isdir(dir) and + _os.access(dir, _os.W_OK)): + continue + else: + raise + + raise FileExistsError(_errno.EEXIST, + "No usable temporary directory name found") + +def mktemp(suffix="", prefix=template, dir=None): + """User-callable function to return a unique temporary file name. The + file is not created. + + Arguments are as for mkstemp, except that the 'text' argument is + not accepted. + + This function is unsafe and should not be used. The file name + refers to a file that did not exist at some point, but by the time + you get around to creating it, someone else may have beaten you to + the punch. + """ + +## from warnings import warn as _warn +## _warn("mktemp is a potential security risk to your program", +## RuntimeWarning, stacklevel=2) + + if dir is None: + dir = gettempdir() + + names = _get_candidate_names() + for seq in range(TMP_MAX): + name = next(names) + file = _os.path.join(dir, prefix + name + suffix) + if not _exists(file): + return file + + raise FileExistsError(_errno.EEXIST, + "No usable temporary filename found") + + +class _TemporaryFileCloser: + """A separate object allowing proper closing of a temporary file's + underlying file object, without adding a __del__ method to the + temporary file.""" + + file = None # Set here since __del__ checks it + close_called = False + + def __init__(self, file, name, delete=True): + self.file = file + self.name = name + self.delete = delete + + # NT provides delete-on-close as a primitive, so we don't need + # the wrapper to do anything special. We still use it so that + # file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile. + if _os.name != 'nt': + # Cache the unlinker so we don't get spurious errors at + # shutdown when the module-level "os" is None'd out. Note + # that this must be referenced as self.unlink, because the + # name TemporaryFileWrapper may also get None'd out before + # __del__ is called. + + def close(self, unlink=_os.unlink): + if not self.close_called and self.file is not None: + self.close_called = True + try: + self.file.close() + finally: + if self.delete: + unlink(self.name) + + # Need to ensure the file is deleted on __del__ + def __del__(self): + self.close() + + else: + def close(self): + if not self.close_called: + self.close_called = True + self.file.close() + + +class _TemporaryFileWrapper: + """Temporary file wrapper + + This class provides a wrapper around files opened for + temporary use. 
In particular, it seeks to automatically + remove the file when it is no longer needed. + """ + + def __init__(self, file, name, delete=True): + self.file = file + self.name = name + self.delete = delete + self._closer = _TemporaryFileCloser(file, name, delete) + + def __getattr__(self, name): + # Attribute lookups are delegated to the underlying file + # and cached for non-numeric results + # (i.e. methods are cached, closed and friends are not) + file = self.__dict__['file'] + a = getattr(file, name) + if hasattr(a, '__call__'): + func = a + @_functools.wraps(func) + def func_wrapper(*args, **kwargs): + return func(*args, **kwargs) + # Avoid closing the file as long as the wrapper is alive, + # see issue #18879. + func_wrapper._closer = self._closer + a = func_wrapper + if not isinstance(a, int): + setattr(self, name, a) + return a + + # The underlying __enter__ method returns the wrong object + # (self.file) so override it to return the wrapper + def __enter__(self): + self.file.__enter__() + return self + + # Need to trap __exit__ as well to ensure the file gets + # deleted when used in a with statement + def __exit__(self, exc, value, tb): + result = self.file.__exit__(exc, value, tb) + self.close() + return result + + def close(self): + """ + Close the temporary file, possibly deleting it. + """ + self._closer.close() + + # iter() doesn't use __getattr__ to find the __iter__ method + def __iter__(self): + # Don't return iter(self.file), but yield from it to avoid closing + # file as long as it's being used as iterator (see issue #23700). We + # can't use 'yield from' here because iter(file) returns the file + # object itself, which has a close method, and thus the file would get + # closed when the generator is finalized, due to PEP380 semantics. + for line in self.file: + yield line + + +def NamedTemporaryFile(mode='w+b', buffering=-1, encoding=None, + newline=None, suffix="", prefix=template, + dir=None, delete=True): + """Create and return a temporary file. + Arguments: + 'prefix', 'suffix', 'dir' -- as for mkstemp. + 'mode' -- the mode argument to io.open (default "w+b"). + 'buffering' -- the buffer size argument to io.open (default -1). + 'encoding' -- the encoding argument to io.open (default None) + 'newline' -- the newline argument to io.open (default None) + 'delete' -- whether the file is deleted on close (default True). + The file is created as mkstemp() would do it. + + Returns an object with a file-like interface; the name of the file + is accessible as file.name. The file will be automatically deleted + when it is closed unless the 'delete' argument is set to False. + """ + + if dir is None: + dir = gettempdir() + + flags = _bin_openflags + + # Setting O_TEMPORARY in the flags causes the OS to delete + # the file when it is closed. This is only supported by Windows. + if _os.name == 'nt' and delete: + flags |= _os.O_TEMPORARY + + (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags) + try: + file = _io.open(fd, mode, buffering=buffering, + newline=newline, encoding=encoding) + + return _TemporaryFileWrapper(file, name, delete) + except Exception: + _os.close(fd) + raise + +if _os.name != 'posix' or _os.sys.platform == 'cygwin': + # On non-POSIX and Cygwin systems, assume that we cannot unlink a file + # while it is open. + TemporaryFile = NamedTemporaryFile + +else: + def TemporaryFile(mode='w+b', buffering=-1, encoding=None, + newline=None, suffix="", prefix=template, + dir=None): + """Create and return a temporary file. 
+ Arguments: + 'prefix', 'suffix', 'dir' -- as for mkstemp. + 'mode' -- the mode argument to io.open (default "w+b"). + 'buffering' -- the buffer size argument to io.open (default -1). + 'encoding' -- the encoding argument to io.open (default None) + 'newline' -- the newline argument to io.open (default None) + The file is created as mkstemp() would do it. + + Returns an object with a file-like interface. The file has no + name, and will cease to exist when it is closed. + """ + + if dir is None: + dir = gettempdir() + + flags = _bin_openflags + + (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags) + try: + _os.unlink(name) + return _io.open(fd, mode, buffering=buffering, + newline=newline, encoding=encoding) + except: + _os.close(fd) + raise + +class SpooledTemporaryFile: + """Temporary file wrapper, specialized to switch from BytesIO + or StringIO to a real file when it exceeds a certain size or + when a fileno is needed. + """ + _rolled = False + + def __init__(self, max_size=0, mode='w+b', buffering=-1, + encoding=None, newline=None, + suffix="", prefix=template, dir=None): + if 'b' in mode: + self._file = _io.BytesIO() + else: + # Setting newline="\n" avoids newline translation; + # this is important because otherwise on Windows we'd + # get double newline translation upon rollover(). + self._file = _io.StringIO(newline="\n") + self._max_size = max_size + self._rolled = False + self._TemporaryFileArgs = {'mode': mode, 'buffering': buffering, + 'suffix': suffix, 'prefix': prefix, + 'encoding': encoding, 'newline': newline, + 'dir': dir} + + def _check(self, file): + if self._rolled: return + max_size = self._max_size + if max_size and file.tell() > max_size: + self.rollover() + + def rollover(self): + if self._rolled: return + file = self._file + newfile = self._file = TemporaryFile(**self._TemporaryFileArgs) + del self._TemporaryFileArgs + + newfile.write(file.getvalue()) + newfile.seek(file.tell(), 0) + + self._rolled = True + + # The method caching trick from NamedTemporaryFile + # won't work here, because _file may change from a + # BytesIO/StringIO instance to a real file. So we list + # all the methods directly. 
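+
+    # A rough usage sketch (illustrative only): data stays in an in-memory
+    # buffer until max_size is exceeded, then spools to a real file.
+    #
+    #     with SpooledTemporaryFile(max_size=1024) as f:
+    #         f.write(b'x' * 512)       # still a BytesIO under the hood
+    #         assert not f._rolled
+    #         f.write(b'x' * 1024)      # crosses max_size -> rollover()
+    #         assert f._rolled          # now backed by TemporaryFile
+    #
+    # Calling fileno() also forces a rollover, because only a real file
+    # can supply a file descriptor.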
+ + # Context management protocol + def __enter__(self): + if self._file.closed: + raise ValueError("Cannot enter context with closed file") + return self + + def __exit__(self, exc, value, tb): + self._file.close() + + # file protocol + def __iter__(self): + return self._file.__iter__() + + def close(self): + self._file.close() + + @property + def closed(self): + return self._file.closed + + @property + def encoding(self): + try: + return self._file.encoding + except AttributeError: + if 'b' in self._TemporaryFileArgs['mode']: + raise + return self._TemporaryFileArgs['encoding'] + + def fileno(self): + self.rollover() + return self._file.fileno() + + def flush(self): + self._file.flush() + + def isatty(self): + return self._file.isatty() + + @property + def mode(self): + try: + return self._file.mode + except AttributeError: + return self._TemporaryFileArgs['mode'] + + @property + def name(self): + try: + return self._file.name + except AttributeError: + return None + + @property + def newlines(self): + try: + return self._file.newlines + except AttributeError: + if 'b' in self._TemporaryFileArgs['mode']: + raise + return self._TemporaryFileArgs['newline'] + + def read(self, *args): + return self._file.read(*args) + + def readline(self, *args): + return self._file.readline(*args) + + def readlines(self, *args): + return self._file.readlines(*args) + + def seek(self, *args): + self._file.seek(*args) + + @property + def softspace(self): + return self._file.softspace + + def tell(self): + return self._file.tell() + + def truncate(self, size=None): + if size is None: + self._file.truncate() + else: + if size > self._max_size: + self.rollover() + self._file.truncate(size) + + def write(self, s): + file = self._file + rv = file.write(s) + self._check(file) + return rv + + def writelines(self, iterable): + file = self._file + rv = file.writelines(iterable) + self._check(file) + return rv + + +class TemporaryDirectory(object): + """Create and return a temporary directory. This has the same + behavior as mkdtemp but can be used as a context manager. For + example: + + with TemporaryDirectory() as tmpdir: + ... + + Upon exiting the context, the directory and everything contained + in it are removed. + """ + + def __init__(self, suffix="", prefix=template, dir=None): + self.name = mkdtemp(suffix, prefix, dir) + self._finalizer = _weakref.finalize( + self, self._cleanup, self.name, + warn_message="Implicitly cleaning up {!r}".format(self)) + + @classmethod + def _cleanup(cls, name, warn_message): + _shutil.rmtree(name) + _warnings.warn(warn_message, ResourceWarning) + + + def __repr__(self): + return "<{} {!r}>".format(self.__class__.__name__, self.name) + + def __enter__(self): + return self.name + + def __exit__(self, exc, value, tb): + self.cleanup() + + def cleanup(self): + if self._finalizer.detach(): + _shutil.rmtree(self.name) diff --git a/v1/flask/lib/python3.4/token.py b/v1/flask/lib/python3.4/token.py deleted file mode 120000 index f62225a..0000000 --- a/v1/flask/lib/python3.4/token.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/token.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/token.py b/v1/flask/lib/python3.4/token.py new file mode 100644 index 0000000..7470c8c --- /dev/null +++ b/v1/flask/lib/python3.4/token.py @@ -0,0 +1,140 @@ +"""Token constants (from "token.h").""" + +__all__ = ['tok_name', 'ISTERMINAL', 'ISNONTERMINAL', 'ISEOF'] + +# This file is automatically generated; please don't muck it up! 
+# +# To update the symbols in this file, 'cd' to the top directory of +# the python source tree after building the interpreter and run: +# +# ./python Lib/token.py + +#--start constants-- +ENDMARKER = 0 +NAME = 1 +NUMBER = 2 +STRING = 3 +NEWLINE = 4 +INDENT = 5 +DEDENT = 6 +LPAR = 7 +RPAR = 8 +LSQB = 9 +RSQB = 10 +COLON = 11 +COMMA = 12 +SEMI = 13 +PLUS = 14 +MINUS = 15 +STAR = 16 +SLASH = 17 +VBAR = 18 +AMPER = 19 +LESS = 20 +GREATER = 21 +EQUAL = 22 +DOT = 23 +PERCENT = 24 +LBRACE = 25 +RBRACE = 26 +EQEQUAL = 27 +NOTEQUAL = 28 +LESSEQUAL = 29 +GREATEREQUAL = 30 +TILDE = 31 +CIRCUMFLEX = 32 +LEFTSHIFT = 33 +RIGHTSHIFT = 34 +DOUBLESTAR = 35 +PLUSEQUAL = 36 +MINEQUAL = 37 +STAREQUAL = 38 +SLASHEQUAL = 39 +PERCENTEQUAL = 40 +AMPEREQUAL = 41 +VBAREQUAL = 42 +CIRCUMFLEXEQUAL = 43 +LEFTSHIFTEQUAL = 44 +RIGHTSHIFTEQUAL = 45 +DOUBLESTAREQUAL = 46 +DOUBLESLASH = 47 +DOUBLESLASHEQUAL = 48 +AT = 49 +RARROW = 50 +ELLIPSIS = 51 +OP = 52 +ERRORTOKEN = 53 +N_TOKENS = 54 +NT_OFFSET = 256 +#--end constants-- + +tok_name = {value: name + for name, value in globals().items() + if isinstance(value, int) and not name.startswith('_')} +__all__.extend(tok_name.values()) + +def ISTERMINAL(x): + return x < NT_OFFSET + +def ISNONTERMINAL(x): + return x >= NT_OFFSET + +def ISEOF(x): + return x == ENDMARKER + + +def _main(): + import re + import sys + args = sys.argv[1:] + inFileName = args and args[0] or "Include/token.h" + outFileName = "Lib/token.py" + if len(args) > 1: + outFileName = args[1] + try: + fp = open(inFileName) + except OSError as err: + sys.stdout.write("I/O error: %s\n" % str(err)) + sys.exit(1) + lines = fp.read().split("\n") + fp.close() + prog = re.compile( + "#define[ \t][ \t]*([A-Z0-9][A-Z0-9_]*)[ \t][ \t]*([0-9][0-9]*)", + re.IGNORECASE) + tokens = {} + for line in lines: + match = prog.match(line) + if match: + name, val = match.group(1, 2) + val = int(val) + tokens[val] = name # reverse so we can sort them... + keys = sorted(tokens.keys()) + # load the output skeleton from the target: + try: + fp = open(outFileName) + except OSError as err: + sys.stderr.write("I/O error: %s\n" % str(err)) + sys.exit(2) + format = fp.read().split("\n") + fp.close() + try: + start = format.index("#--start constants--") + 1 + end = format.index("#--end constants--") + except ValueError: + sys.stderr.write("target does not contain format markers") + sys.exit(3) + lines = [] + for val in keys: + lines.append("%s = %d" % (tokens[val], val)) + format[start:end] = lines + try: + fp = open(outFileName, 'w') + except OSError as err: + sys.stderr.write("I/O error: %s\n" % str(err)) + sys.exit(4) + fp.write("\n".join(format)) + fp.close() + + +if __name__ == "__main__": + _main() diff --git a/v1/flask/lib/python3.4/tokenize.py b/v1/flask/lib/python3.4/tokenize.py deleted file mode 120000 index bf9b071..0000000 --- a/v1/flask/lib/python3.4/tokenize.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/tokenize.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/tokenize.py b/v1/flask/lib/python3.4/tokenize.py new file mode 100644 index 0000000..4d93a83 --- /dev/null +++ b/v1/flask/lib/python3.4/tokenize.py @@ -0,0 +1,712 @@ +"""Tokenization help for Python programs. + +tokenize(readline) is a generator that breaks a stream of bytes into +Python tokens. It decodes the bytes according to PEP-0263 for +determining source file encoding. + +It accepts a readline-like method which is called repeatedly to get the +next line of input (or b"" for EOF). 
It generates 5-tuples with these
+members:
+
+    the token type (see token.py)
+    the token (a string)
+    the starting (row, column) indices of the token (a 2-tuple of ints)
+    the ending (row, column) indices of the token (a 2-tuple of ints)
+    the original line (string)
+
+It is designed to match the working of the Python tokenizer exactly, except
+that it produces COMMENT tokens for comments and gives type OP for all
+operators.  Additionally, all token lists start with an ENCODING token
+which tells you which encoding was used to decode the bytes stream.
+"""
+
+__author__ = 'Ka-Ping Yee <ping@lfw.org>'
+__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
+               'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
+               'Michael Foord')
+from builtins import open as _builtin_open
+from codecs import lookup, BOM_UTF8
+import collections
+from io import TextIOWrapper
+from itertools import chain
+import re
+import sys
+from token import *
+
+cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
+blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
+
+import token
+__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
+                           "NL", "untokenize", "ENCODING", "TokenInfo"]
+del token
+
+COMMENT = N_TOKENS
+tok_name[COMMENT] = 'COMMENT'
+NL = N_TOKENS + 1
+tok_name[NL] = 'NL'
+ENCODING = N_TOKENS + 2
+tok_name[ENCODING] = 'ENCODING'
+N_TOKENS += 3
+EXACT_TOKEN_TYPES = {
+    '(': LPAR,
+    ')': RPAR,
+    '[': LSQB,
+    ']': RSQB,
+    ':': COLON,
+    ',': COMMA,
+    ';': SEMI,
+    '+': PLUS,
+    '-': MINUS,
+    '*': STAR,
+    '/': SLASH,
+    '|': VBAR,
+    '&': AMPER,
+    '<': LESS,
+    '>': GREATER,
+    '=': EQUAL,
+    '.': DOT,
+    '%': PERCENT,
+    '{': LBRACE,
+    '}': RBRACE,
+    '==': EQEQUAL,
+    '!=': NOTEQUAL,
+    '<=': LESSEQUAL,
+    '>=': GREATEREQUAL,
+    '~': TILDE,
+    '^': CIRCUMFLEX,
+    '<<': LEFTSHIFT,
+    '>>': RIGHTSHIFT,
+    '**': DOUBLESTAR,
+    '+=': PLUSEQUAL,
+    '-=': MINEQUAL,
+    '*=': STAREQUAL,
+    '/=': SLASHEQUAL,
+    '%=': PERCENTEQUAL,
+    '&=': AMPEREQUAL,
+    '|=': VBAREQUAL,
+    '^=': CIRCUMFLEXEQUAL,
+    '<<=': LEFTSHIFTEQUAL,
+    '>>=': RIGHTSHIFTEQUAL,
+    '**=': DOUBLESTAREQUAL,
+    '//': DOUBLESLASH,
+    '//=': DOUBLESLASHEQUAL,
+    '@': AT
+}
+
+class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
+    def __repr__(self):
+        annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
+        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
+                self._replace(type=annotated_type))
+
+    @property
+    def exact_type(self):
+        if self.type == OP and self.string in EXACT_TOKEN_TYPES:
+            return EXACT_TOKEN_TYPES[self.string]
+        else:
+            return self.type
+
+def group(*choices): return '(' + '|'.join(choices) + ')'
+def any(*choices): return group(*choices) + '*'
+def maybe(*choices): return group(*choices) + '?'
+
+# Note: we use unicode matching for names ("\w") but ascii matching for
+# number literals.
+Whitespace = r'[ \f\t]*'
+Comment = r'#[^\r\n]*'
+Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
+Name = r'\w+'
+
+Hexnumber = r'0[xX][0-9a-fA-F]+'
+Binnumber = r'0[bB][01]+'
+Octnumber = r'0[oO][0-7]+'
+Decnumber = r'(?:0+|[1-9][0-9]*)'
+Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
+Exponent = r'[eE][-+]?[0-9]+'
+Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
+Expfloat = r'[0-9]+' + Exponent
+Floatnumber = group(Pointfloat, Expfloat)
+Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]')
+Number = group(Imagnumber, Floatnumber, Intnumber)
+
+StringPrefix = r'(?:[bB][rR]?|[rR][bB]?|[uU])?'
+
+# Tail end of ' string.
+Single = r"[^'\\]*(?:\\.[^'\\]*)*'" +# Tail end of " string. +Double = r'[^"\\]*(?:\\.[^"\\]*)*"' +# Tail end of ''' string. +Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" +# Tail end of """ string. +Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' +Triple = group(StringPrefix + "'''", StringPrefix + '"""') +# Single-line ' or " string. +String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'", + StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"') + +# Because of leftmost-then-longest match semantics, be sure to put the +# longest operators first (e.g., if = came before ==, == would get +# recognized as two instances of =). +Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=", + r"//=?", r"->", + r"[+\-*/%&|^=<>]=?", + r"~") + +Bracket = '[][(){}]' +Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]') +Funny = group(Operator, Bracket, Special) + +PlainToken = group(Number, Funny, String, Name) +Token = Ignore + PlainToken + +# First (or only) line of ' or " string. +ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" + + group("'", r'\\\r?\n'), + StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' + + group('"', r'\\\r?\n')) +PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple) +PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) + +def _compile(expr): + return re.compile(expr, re.UNICODE) + +endpats = {"'": Single, '"': Double, + "'''": Single3, '"""': Double3, + "r'''": Single3, 'r"""': Double3, + "b'''": Single3, 'b"""': Double3, + "R'''": Single3, 'R"""': Double3, + "B'''": Single3, 'B"""': Double3, + "br'''": Single3, 'br"""': Double3, + "bR'''": Single3, 'bR"""': Double3, + "Br'''": Single3, 'Br"""': Double3, + "BR'''": Single3, 'BR"""': Double3, + "rb'''": Single3, 'rb"""': Double3, + "Rb'''": Single3, 'Rb"""': Double3, + "rB'''": Single3, 'rB"""': Double3, + "RB'''": Single3, 'RB"""': Double3, + "u'''": Single3, 'u"""': Double3, + "R'''": Single3, 'R"""': Double3, + "U'''": Single3, 'U"""': Double3, + 'r': None, 'R': None, 'b': None, 'B': None, + 'u': None, 'U': None} + +triple_quoted = {} +for t in ("'''", '"""', + "r'''", 'r"""', "R'''", 'R"""', + "b'''", 'b"""', "B'''", 'B"""', + "br'''", 'br"""', "Br'''", 'Br"""', + "bR'''", 'bR"""', "BR'''", 'BR"""', + "rb'''", 'rb"""', "rB'''", 'rB"""', + "Rb'''", 'Rb"""', "RB'''", 'RB"""', + "u'''", 'u"""', "U'''", 'U"""', + ): + triple_quoted[t] = t +single_quoted = {} +for t in ("'", '"', + "r'", 'r"', "R'", 'R"', + "b'", 'b"', "B'", 'B"', + "br'", 'br"', "Br'", 'Br"', + "bR'", 'bR"', "BR'", 'BR"' , + "rb'", 'rb"', "rB'", 'rB"', + "Rb'", 'Rb"', "RB'", 'RB"' , + "u'", 'u"', "U'", 'U"', + ): + single_quoted[t] = t + +tabsize = 8 + +class TokenError(Exception): pass + +class StopTokenizing(Exception): pass + + +class Untokenizer: + + def __init__(self): + self.tokens = [] + self.prev_row = 1 + self.prev_col = 0 + self.encoding = None + + def add_whitespace(self, start): + row, col = start + if row < self.prev_row or row == self.prev_row and col < self.prev_col: + raise ValueError("start ({},{}) precedes previous end ({},{})" + .format(row, col, self.prev_row, self.prev_col)) + row_offset = row - self.prev_row + if row_offset: + self.tokens.append("\\\n" * row_offset) + self.prev_col = 0 + col_offset = col - self.prev_col + if col_offset: + self.tokens.append(" " * col_offset) + + def untokenize(self, iterable): + it = iter(iterable) + indents = [] + startline = False + for t in it: + if len(t) == 2: + self.compat(t, it) + break + tok_type, token, start, end, line = t + if tok_type == ENCODING: + self.encoding 
= token + continue + if tok_type == ENDMARKER: + break + if tok_type == INDENT: + indents.append(token) + continue + elif tok_type == DEDENT: + indents.pop() + self.prev_row, self.prev_col = end + continue + elif tok_type in (NEWLINE, NL): + startline = True + elif startline and indents: + indent = indents[-1] + if start[1] >= len(indent): + self.tokens.append(indent) + self.prev_col = len(indent) + startline = False + self.add_whitespace(start) + self.tokens.append(token) + self.prev_row, self.prev_col = end + if tok_type in (NEWLINE, NL): + self.prev_row += 1 + self.prev_col = 0 + return "".join(self.tokens) + + def compat(self, token, iterable): + indents = [] + toks_append = self.tokens.append + startline = token[0] in (NEWLINE, NL) + prevstring = False + + for tok in chain([token], iterable): + toknum, tokval = tok[:2] + if toknum == ENCODING: + self.encoding = tokval + continue + + if toknum in (NAME, NUMBER): + tokval += ' ' + + # Insert a space between two consecutive strings + if toknum == STRING: + if prevstring: + tokval = ' ' + tokval + prevstring = True + else: + prevstring = False + + if toknum == INDENT: + indents.append(tokval) + continue + elif toknum == DEDENT: + indents.pop() + continue + elif toknum in (NEWLINE, NL): + startline = True + elif startline and indents: + toks_append(indents[-1]) + startline = False + toks_append(tokval) + + +def untokenize(iterable): + """Transform tokens back into Python source code. + It returns a bytes object, encoded using the ENCODING + token, which is the first token sequence output by tokenize. + + Each element returned by the iterable must be a token sequence + with at least two elements, a token number and token value. If + only two tokens are passed, the resulting output is poor. + + Round-trip invariant for full input: + Untokenized source will match input source exactly + + Round-trip invariant for limited intput: + # Output bytes will tokenize the back to the input + t1 = [tok[:2] for tok in tokenize(f.readline)] + newcode = untokenize(t1) + readline = BytesIO(newcode).readline + t2 = [tok[:2] for tok in tokenize(readline)] + assert t1 == t2 + """ + ut = Untokenizer() + out = ut.untokenize(iterable) + if ut.encoding is not None: + out = out.encode(ut.encoding) + return out + + +def _get_normal_name(orig_enc): + """Imitates get_normal_name in tokenizer.c.""" + # Only care about the first 12 characters. + enc = orig_enc[:12].lower().replace("_", "-") + if enc == "utf-8" or enc.startswith("utf-8-"): + return "utf-8" + if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ + enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): + return "iso-8859-1" + return orig_enc + +def detect_encoding(readline): + """ + The detect_encoding() function is used to detect the encoding that should + be used to decode a Python source file. It requires one argument, readline, + in the same way as the tokenize() generator. + + It will call readline a maximum of twice, and return the encoding used + (as a string) and a list of any lines (left as bytes) it has read in. + + It detects the encoding from the presence of a utf-8 bom or an encoding + cookie as specified in pep-0263. If both a bom and a cookie are present, + but disagree, a SyntaxError will be raised. If the encoding cookie is an + invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, + 'utf-8-sig' is returned. + + If no encoding is specified, then the default of 'utf-8' will be returned. 
+ """ + try: + filename = readline.__self__.name + except AttributeError: + filename = None + bom_found = False + encoding = None + default = 'utf-8' + def read_or_stop(): + try: + return readline() + except StopIteration: + return b'' + + def find_cookie(line): + try: + # Decode as UTF-8. Either the line is an encoding declaration, + # in which case it should be pure ASCII, or it must be UTF-8 + # per default encoding. + line_string = line.decode('utf-8') + except UnicodeDecodeError: + msg = "invalid or missing encoding declaration" + if filename is not None: + msg = '{} for {!r}'.format(msg, filename) + raise SyntaxError(msg) + + match = cookie_re.match(line_string) + if not match: + return None + encoding = _get_normal_name(match.group(1)) + try: + codec = lookup(encoding) + except LookupError: + # This behaviour mimics the Python interpreter + if filename is None: + msg = "unknown encoding: " + encoding + else: + msg = "unknown encoding for {!r}: {}".format(filename, + encoding) + raise SyntaxError(msg) + + if bom_found: + if encoding != 'utf-8': + # This behaviour mimics the Python interpreter + if filename is None: + msg = 'encoding problem: utf-8' + else: + msg = 'encoding problem for {!r}: utf-8'.format(filename) + raise SyntaxError(msg) + encoding += '-sig' + return encoding + + first = read_or_stop() + if first.startswith(BOM_UTF8): + bom_found = True + first = first[3:] + default = 'utf-8-sig' + if not first: + return default, [] + + encoding = find_cookie(first) + if encoding: + return encoding, [first] + if not blank_re.match(first): + return default, [first] + + second = read_or_stop() + if not second: + return default, [first] + + encoding = find_cookie(second) + if encoding: + return encoding, [first, second] + + return default, [first, second] + + +def open(filename): + """Open a file in read only mode using the encoding detected by + detect_encoding(). + """ + buffer = _builtin_open(filename, 'rb') + try: + encoding, lines = detect_encoding(buffer.readline) + buffer.seek(0) + text = TextIOWrapper(buffer, encoding, line_buffering=True) + text.mode = 'r' + return text + except: + buffer.close() + raise + + +def tokenize(readline): + """ + The tokenize() generator requires one argment, readline, which + must be a callable object which provides the same interface as the + readline() method of built-in file objects. Each call to the function + should return one line of input as bytes. Alternately, readline + can be a callable function terminating with StopIteration: + readline = open(myfile, 'rb').__next__ # Example of alternate readline + + The generator produces 5-tuples with these members: the token type; the + token string; a 2-tuple (srow, scol) of ints specifying the row and + column where the token begins in the source; a 2-tuple (erow, ecol) of + ints specifying the row and column where the token ends in the source; + and the line on which the token was found. The line passed is the + logical line; continuation lines are included. + + The first token sequence will always be an ENCODING token + which tells you which encoding was used to decode the bytes stream. + """ + # This import is here to avoid problems when the itertools module is not + # built yet and tokenize is imported. 
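+    # A minimal usage sketch (illustrative only; 'example.py' is a
+    # hypothetical file):
+    #
+    #     with _builtin_open('example.py', 'rb') as f:
+    #         for tok in tokenize(f.readline):
+    #             print(tok.type, tok.string, tok.start, tok.end)
+    #
+    # The first tuple yielded is always the ENCODING token.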
+ from itertools import chain, repeat + encoding, consumed = detect_encoding(readline) + rl_gen = iter(readline, b"") + empty = repeat(b"") + return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding) + + +def _tokenize(readline, encoding): + lnum = parenlev = continued = 0 + numchars = '0123456789' + contstr, needcont = '', 0 + contline = None + indents = [0] + + if encoding is not None: + if encoding == "utf-8-sig": + # BOM will already have been stripped. + encoding = "utf-8" + yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '') + while True: # loop over lines in stream + try: + line = readline() + except StopIteration: + line = b'' + + if encoding is not None: + line = line.decode(encoding) + lnum += 1 + pos, max = 0, len(line) + + if contstr: # continued string + if not line: + raise TokenError("EOF in multi-line string", strstart) + endmatch = endprog.match(line) + if endmatch: + pos = end = endmatch.end(0) + yield TokenInfo(STRING, contstr + line[:end], + strstart, (lnum, end), contline + line) + contstr, needcont = '', 0 + contline = None + elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': + yield TokenInfo(ERRORTOKEN, contstr + line, + strstart, (lnum, len(line)), contline) + contstr = '' + contline = None + continue + else: + contstr = contstr + line + contline = contline + line + continue + + elif parenlev == 0 and not continued: # new statement + if not line: break + column = 0 + while pos < max: # measure leading whitespace + if line[pos] == ' ': + column += 1 + elif line[pos] == '\t': + column = (column//tabsize + 1)*tabsize + elif line[pos] == '\f': + column = 0 + else: + break + pos += 1 + if pos == max: + break + + if line[pos] in '#\r\n': # skip comments or blank lines + if line[pos] == '#': + comment_token = line[pos:].rstrip('\r\n') + nl_pos = pos + len(comment_token) + yield TokenInfo(COMMENT, comment_token, + (lnum, pos), (lnum, pos + len(comment_token)), line) + yield TokenInfo(NL, line[nl_pos:], + (lnum, nl_pos), (lnum, len(line)), line) + else: + yield TokenInfo((NL, COMMENT)[line[pos] == '#'], line[pos:], + (lnum, pos), (lnum, len(line)), line) + continue + + if column > indents[-1]: # count indents or dedents + indents.append(column) + yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line) + while column < indents[-1]: + if column not in indents: + raise IndentationError( + "unindent does not match any outer indentation level", + ("", lnum, pos, line)) + indents = indents[:-1] + yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line) + + else: # continued statement + if not line: + raise TokenError("EOF in multi-line statement", (lnum, 0)) + continued = 0 + + while pos < max: + pseudomatch = _compile(PseudoToken).match(line, pos) + if pseudomatch: # scan for tokens + start, end = pseudomatch.span(1) + spos, epos, pos = (lnum, start), (lnum, end), end + if start == end: + continue + token, initial = line[start:end], line[start] + + if (initial in numchars or # ordinary number + (initial == '.' and token != '.' 
and token != '...')): + yield TokenInfo(NUMBER, token, spos, epos, line) + elif initial in '\r\n': + yield TokenInfo(NL if parenlev > 0 else NEWLINE, + token, spos, epos, line) + elif initial == '#': + assert not token.endswith("\n") + yield TokenInfo(COMMENT, token, spos, epos, line) + elif token in triple_quoted: + endprog = _compile(endpats[token]) + endmatch = endprog.match(line, pos) + if endmatch: # all on one line + pos = endmatch.end(0) + token = line[start:pos] + yield TokenInfo(STRING, token, spos, (lnum, pos), line) + else: + strstart = (lnum, start) # multiple lines + contstr = line[start:] + contline = line + break + elif initial in single_quoted or \ + token[:2] in single_quoted or \ + token[:3] in single_quoted: + if token[-1] == '\n': # continued string + strstart = (lnum, start) + endprog = _compile(endpats[initial] or + endpats[token[1]] or + endpats[token[2]]) + contstr, needcont = line[start:], 1 + contline = line + break + else: # ordinary string + yield TokenInfo(STRING, token, spos, epos, line) + elif initial.isidentifier(): # ordinary name + yield TokenInfo(NAME, token, spos, epos, line) + elif initial == '\\': # continued stmt + continued = 1 + else: + if initial in '([{': + parenlev += 1 + elif initial in ')]}': + parenlev -= 1 + yield TokenInfo(OP, token, spos, epos, line) + else: + yield TokenInfo(ERRORTOKEN, line[pos], + (lnum, pos), (lnum, pos+1), line) + pos += 1 + + for indent in indents[1:]: # pop remaining indent levels + yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '') + yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '') + + +# An undocumented, backwards compatible, API for all the places in the standard +# library that expect to be able to use tokenize with strings +def generate_tokens(readline): + return _tokenize(readline, None) + +def main(): + import argparse + + # Helper error handling routines + def perror(message): + print(message, file=sys.stderr) + + def error(message, filename=None, location=None): + if location: + args = (filename,) + location + (message,) + perror("%s:%d:%d: error: %s" % args) + elif filename: + perror("%s: error: %s" % (filename, message)) + else: + perror("error: %s" % message) + sys.exit(1) + + # Parse the arguments and options + parser = argparse.ArgumentParser(prog='python -m tokenize') + parser.add_argument(dest='filename', nargs='?', + metavar='filename.py', + help='the file to tokenize; defaults to stdin') + parser.add_argument('-e', '--exact', dest='exact', action='store_true', + help='display token names using the exact type') + args = parser.parse_args() + + try: + # Tokenize the input + if args.filename: + filename = args.filename + with _builtin_open(filename, 'rb') as f: + tokens = list(tokenize(f.readline)) + else: + filename = "" + tokens = _tokenize(sys.stdin.readline, None) + + # Output the tokenization + for token in tokens: + token_type = token.type + if args.exact: + token_type = token.exact_type + token_range = "%d,%d-%d,%d:" % (token.start + token.end) + print("%-20s%-15s%-15r" % + (token_range, tok_name[token_type], token.string)) + except IndentationError as err: + line, column = err.args[1][1:3] + error(err.args[0], filename, (line, column)) + except TokenError as err: + line, column = err.args[1] + error(err.args[0], filename, (line, column)) + except SyntaxError as err: + error(err, filename) + except OSError as err: + error(err) + except KeyboardInterrupt: + print("interrupted\n") + except Exception as err: + perror("unexpected error: %s" % err) + raise + +if __name__ == "__main__": + 
main() diff --git a/v1/flask/lib/python3.4/types.py b/v1/flask/lib/python3.4/types.py deleted file mode 120000 index 8f7b2cc..0000000 --- a/v1/flask/lib/python3.4/types.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/types.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/types.py b/v1/flask/lib/python3.4/types.py new file mode 100644 index 0000000..4fb2def --- /dev/null +++ b/v1/flask/lib/python3.4/types.py @@ -0,0 +1,161 @@ +""" +Define names for built-in types that aren't directly accessible as a builtin. +""" +import sys + +# Iterators in Python aren't a matter of type but of protocol. A large +# and changing number of builtin types implement *some* flavor of +# iterator. Don't check the type! Use hasattr to check for both +# "__iter__" and "__next__" attributes instead. + +def _f(): pass +FunctionType = type(_f) +LambdaType = type(lambda: None) # Same as FunctionType +CodeType = type(_f.__code__) +MappingProxyType = type(type.__dict__) +SimpleNamespace = type(sys.implementation) + +def _g(): + yield 1 +GeneratorType = type(_g()) + +class _C: + def _m(self): pass +MethodType = type(_C()._m) + +BuiltinFunctionType = type(len) +BuiltinMethodType = type([].append) # Same as BuiltinFunctionType + +ModuleType = type(sys) + +try: + raise TypeError +except TypeError: + tb = sys.exc_info()[2] + TracebackType = type(tb) + FrameType = type(tb.tb_frame) + tb = None; del tb + +# For Jython, the following two types are identical +GetSetDescriptorType = type(FunctionType.__code__) +MemberDescriptorType = type(FunctionType.__globals__) + +del sys, _f, _g, _C, # Not for export + + +# Provide a PEP 3115 compliant mechanism for class creation +def new_class(name, bases=(), kwds=None, exec_body=None): + """Create a class object dynamically using the appropriate metaclass.""" + meta, ns, kwds = prepare_class(name, bases, kwds) + if exec_body is not None: + exec_body(ns) + return meta(name, bases, ns, **kwds) + +def prepare_class(name, bases=(), kwds=None): + """Call the __prepare__ method of the appropriate metaclass. + + Returns (metaclass, namespace, kwds) as a 3-tuple + + *metaclass* is the appropriate metaclass + *namespace* is the prepared class namespace + *kwds* is an updated copy of the passed in kwds argument with any + 'metaclass' entry removed. If no kwds argument is passed in, this will + be an empty dict. + """ + if kwds is None: + kwds = {} + else: + kwds = dict(kwds) # Don't alter the provided mapping + if 'metaclass' in kwds: + meta = kwds.pop('metaclass') + else: + if bases: + meta = type(bases[0]) + else: + meta = type + if isinstance(meta, type): + # when meta is a type, we first determine the most-derived metaclass + # instead of invoking the initial candidate directly + meta = _calculate_meta(meta, bases) + if hasattr(meta, '__prepare__'): + ns = meta.__prepare__(name, bases, **kwds) + else: + ns = {} + return meta, ns, kwds + +def _calculate_meta(meta, bases): + """Calculate the most derived metaclass.""" + winner = meta + for base in bases: + base_meta = type(base) + if issubclass(winner, base_meta): + continue + if issubclass(base_meta, winner): + winner = base_meta + continue + # else: + raise TypeError("metaclass conflict: " + "the metaclass of a derived class " + "must be a (non-strict) subclass " + "of the metaclasses of all its bases") + return winner + +class DynamicClassAttribute: + """Route attribute access on a class to __getattr__. 
+ + This is a descriptor, used to define attributes that act differently when + accessed through an instance and through a class. Instance access remains + normal, but access to an attribute through a class will be routed to the + class's __getattr__ method; this is done by raising AttributeError. + + This allows one to have properties active on an instance, and have virtual + attributes on the class with the same name (see Enum for an example). + + """ + def __init__(self, fget=None, fset=None, fdel=None, doc=None): + self.fget = fget + self.fset = fset + self.fdel = fdel + # next two lines make DynamicClassAttribute act the same as property + self.__doc__ = doc or fget.__doc__ + self.overwrite_doc = doc is None + # support for abstract methods + self.__isabstractmethod__ = bool(getattr(fget, '__isabstractmethod__', False)) + + def __get__(self, instance, ownerclass=None): + if instance is None: + if self.__isabstractmethod__: + return self + raise AttributeError() + elif self.fget is None: + raise AttributeError("unreadable attribute") + return self.fget(instance) + + def __set__(self, instance, value): + if self.fset is None: + raise AttributeError("can't set attribute") + self.fset(instance, value) + + def __delete__(self, instance): + if self.fdel is None: + raise AttributeError("can't delete attribute") + self.fdel(instance) + + def getter(self, fget): + fdoc = fget.__doc__ if self.overwrite_doc else None + result = type(self)(fget, self.fset, self.fdel, fdoc or self.__doc__) + result.overwrite_doc = self.overwrite_doc + return result + + def setter(self, fset): + result = type(self)(self.fget, fset, self.fdel, self.__doc__) + result.overwrite_doc = self.overwrite_doc + return result + + def deleter(self, fdel): + result = type(self)(self.fget, self.fset, fdel, self.__doc__) + result.overwrite_doc = self.overwrite_doc + return result + + +__all__ = [n for n in globals() if n[:1] != '_'] diff --git a/v1/flask/lib/python3.4/warnings.py b/v1/flask/lib/python3.4/warnings.py deleted file mode 120000 index c464a50..0000000 --- a/v1/flask/lib/python3.4/warnings.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/warnings.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/warnings.py b/v1/flask/lib/python3.4/warnings.py new file mode 100644 index 0000000..70d087e --- /dev/null +++ b/v1/flask/lib/python3.4/warnings.py @@ -0,0 +1,410 @@ +"""Python part of the warnings subsystem.""" + +import sys + +__all__ = ["warn", "warn_explicit", "showwarning", + "formatwarning", "filterwarnings", "simplefilter", + "resetwarnings", "catch_warnings"] + + +def showwarning(message, category, filename, lineno, file=None, line=None): + """Hook to write a warning to a file; replace if you like.""" + if file is None: + file = sys.stderr + if file is None: + # sys.stderr is None when run with pythonw.exe - warnings get lost + return + try: + file.write(formatwarning(message, category, filename, lineno, line)) + except OSError: + pass # the file (probably stderr) is invalid - this warning gets lost. 
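+
+# A minimal sketch of swapping in a custom hook, as the docstring above
+# invites ("replace if you like").  Illustrative only: nothing in this
+# module calls this function, and catch_warnings (defined further down)
+# packages the same idea with proper save/restore of module state.
+def _showwarning_sketch():
+    global showwarning
+    collected = []
+    original = showwarning
+    def collect(message, category, filename, lineno, file=None, line=None):
+        collected.append((category.__name__, str(message), filename, lineno))
+    showwarning = collect            # route warnings into the list
+    try:
+        warn("disk is nearly full")  # recorded instead of printed
+    finally:
+        showwarning = original       # always restore the default hook
+    return collected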
+ +def formatwarning(message, category, filename, lineno, line=None): + """Function to format a warning the standard way.""" + import linecache + s = "%s:%s: %s: %s\n" % (filename, lineno, category.__name__, message) + line = linecache.getline(filename, lineno) if line is None else line + if line: + line = line.strip() + s += " %s\n" % line + return s + +def filterwarnings(action, message="", category=Warning, module="", lineno=0, + append=False): + """Insert an entry into the list of warnings filters (at the front). + + 'action' -- one of "error", "ignore", "always", "default", "module", + or "once" + 'message' -- a regex that the warning message must match + 'category' -- a class that the warning must be a subclass of + 'module' -- a regex that the module name must match + 'lineno' -- an integer line number, 0 matches all warnings + 'append' -- if true, append to the list of filters + """ + import re + assert action in ("error", "ignore", "always", "default", "module", + "once"), "invalid action: %r" % (action,) + assert isinstance(message, str), "message must be a string" + assert isinstance(category, type), "category must be a class" + assert issubclass(category, Warning), "category must be a Warning subclass" + assert isinstance(module, str), "module must be a string" + assert isinstance(lineno, int) and lineno >= 0, \ + "lineno must be an int >= 0" + item = (action, re.compile(message, re.I), category, + re.compile(module), lineno) + if append: + filters.append(item) + else: + filters.insert(0, item) + _filters_mutated() + +def simplefilter(action, category=Warning, lineno=0, append=False): + """Insert a simple entry into the list of warnings filters (at the front). + + A simple filter matches all modules and messages. + 'action' -- one of "error", "ignore", "always", "default", "module", + or "once" + 'category' -- a class that the warning must be a subclass of + 'lineno' -- an integer line number, 0 matches all warnings + 'append' -- if true, append to the list of filters + """ + assert action in ("error", "ignore", "always", "default", "module", + "once"), "invalid action: %r" % (action,) + assert isinstance(lineno, int) and lineno >= 0, \ + "lineno must be an int >= 0" + item = (action, None, category, None, lineno) + if append: + filters.append(item) + else: + filters.insert(0, item) + _filters_mutated() + +def resetwarnings(): + """Clear the list of warning filters, so that no filters are active.""" + filters[:] = [] + _filters_mutated() + +class _OptionError(Exception): + """Exception used by option processing helpers.""" + pass + +# Helper to process -W options passed via sys.warnoptions +def _processoptions(args): + for arg in args: + try: + _setoption(arg) + except _OptionError as msg: + print("Invalid -W option ignored:", msg, file=sys.stderr) + +# Helper for _processoptions() +def _setoption(arg): + import re + parts = arg.split(':') + if len(parts) > 5: + raise _OptionError("too many fields (max 5): %r" % (arg,)) + while len(parts) < 5: + parts.append('') + action, message, category, module, lineno = [s.strip() + for s in parts] + action = _getaction(action) + message = re.escape(message) + category = _getcategory(category) + module = re.escape(module) + if module: + module = module + '$' + if lineno: + try: + lineno = int(lineno) + if lineno < 0: + raise ValueError + except (ValueError, OverflowError): + raise _OptionError("invalid lineno %r" % (lineno,)) + else: + lineno = 0 + filterwarnings(action, message, category, module, lineno) + +# Helper for _setoption() +def 
_getaction(action): + if not action: + return "default" + if action == "all": return "always" # Alias + for a in ('default', 'always', 'ignore', 'module', 'once', 'error'): + if a.startswith(action): + return a + raise _OptionError("invalid action: %r" % (action,)) + +# Helper for _setoption() +def _getcategory(category): + import re + if not category: + return Warning + if re.match("^[a-zA-Z0-9_]+$", category): + try: + cat = eval(category) + except NameError: + raise _OptionError("unknown warning category: %r" % (category,)) + else: + i = category.rfind(".") + module = category[:i] + klass = category[i+1:] + try: + m = __import__(module, None, None, [klass]) + except ImportError: + raise _OptionError("invalid module name: %r" % (module,)) + try: + cat = getattr(m, klass) + except AttributeError: + raise _OptionError("unknown warning category: %r" % (category,)) + if not issubclass(cat, Warning): + raise _OptionError("invalid warning category: %r" % (category,)) + return cat + + +# Code typically replaced by _warnings +def warn(message, category=None, stacklevel=1): + """Issue a warning, or maybe ignore it or raise an exception.""" + # Check if message is already a Warning object + if isinstance(message, Warning): + category = message.__class__ + # Check category argument + if category is None: + category = UserWarning + assert issubclass(category, Warning) + # Get context information + try: + caller = sys._getframe(stacklevel) + except ValueError: + globals = sys.__dict__ + lineno = 1 + else: + globals = caller.f_globals + lineno = caller.f_lineno + if '__name__' in globals: + module = globals['__name__'] + else: + module = "" + filename = globals.get('__file__') + if filename: + fnl = filename.lower() + if fnl.endswith((".pyc", ".pyo")): + filename = filename[:-1] + else: + if module == "__main__": + try: + filename = sys.argv[0] + except AttributeError: + # embedded interpreters don't have sys.argv, see bug #839151 + filename = '__main__' + if not filename: + filename = module + registry = globals.setdefault("__warningregistry__", {}) + warn_explicit(message, category, filename, lineno, module, registry, + globals) + +def warn_explicit(message, category, filename, lineno, + module=None, registry=None, module_globals=None): + lineno = int(lineno) + if module is None: + module = filename or "" + if module[-3:].lower() == ".py": + module = module[:-3] # XXX What about leading pathname? + if registry is None: + registry = {} + if registry.get('version', 0) != _filters_version: + registry.clear() + registry['version'] = _filters_version + if isinstance(message, Warning): + text = str(message) + category = message.__class__ + else: + text = message + message = category(message) + key = (text, category, lineno) + # Quick test for common case + if registry.get(key): + return + # Search the filters + for item in filters: + action, msg, cat, mod, ln = item + if ((msg is None or msg.match(text)) and + issubclass(category, cat) and + (mod is None or mod.match(module)) and + (ln == 0 or lineno == ln)): + break + else: + action = defaultaction + # Early exit actions + if action == "ignore": + registry[key] = 1 + return + + # Prime the linecache for formatting, in case the + # "file" is actually in a zipfile or something. 
+ import linecache + linecache.getlines(filename, module_globals) + + if action == "error": + raise message + # Other actions + if action == "once": + registry[key] = 1 + oncekey = (text, category) + if onceregistry.get(oncekey): + return + onceregistry[oncekey] = 1 + elif action == "always": + pass + elif action == "module": + registry[key] = 1 + altkey = (text, category, 0) + if registry.get(altkey): + return + registry[altkey] = 1 + elif action == "default": + registry[key] = 1 + else: + # Unrecognized actions are errors + raise RuntimeError( + "Unrecognized action (%r) in warnings.filters:\n %s" % + (action, item)) + if not callable(showwarning): + raise TypeError("warnings.showwarning() must be set to a " + "function or method") + # Print message and context + showwarning(message, category, filename, lineno) + + +class WarningMessage(object): + + """Holds the result of a single showwarning() call.""" + + _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file", + "line") + + def __init__(self, message, category, filename, lineno, file=None, + line=None): + local_values = locals() + for attr in self._WARNING_DETAILS: + setattr(self, attr, local_values[attr]) + self._category_name = category.__name__ if category else None + + def __str__(self): + return ("{message : %r, category : %r, filename : %r, lineno : %s, " + "line : %r}" % (self.message, self._category_name, + self.filename, self.lineno, self.line)) + + +class catch_warnings(object): + + """A context manager that copies and restores the warnings filter upon + exiting the context. + + The 'record' argument specifies whether warnings should be captured by a + custom implementation of warnings.showwarning() and be appended to a list + returned by the context manager. Otherwise None is returned by the context + manager. The objects appended to the list are arguments whose attributes + mirror the arguments to showwarning(). + + The 'module' argument is to specify an alternative module to the module + named 'warnings' and imported under that name. This argument is only useful + when testing the warnings module itself. + + """ + + def __init__(self, *, record=False, module=None): + """Specify whether to record warnings and if an alternative module + should be used other than sys.modules['warnings']. + + For compatibility with Python 3.0, please consider all arguments to be + keyword-only. 
+ + """ + self._record = record + self._module = sys.modules['warnings'] if module is None else module + self._entered = False + + def __repr__(self): + args = [] + if self._record: + args.append("record=True") + if self._module is not sys.modules['warnings']: + args.append("module=%r" % self._module) + name = type(self).__name__ + return "%s(%s)" % (name, ", ".join(args)) + + def __enter__(self): + if self._entered: + raise RuntimeError("Cannot enter %r twice" % self) + self._entered = True + self._filters = self._module.filters + self._module.filters = self._filters[:] + self._module._filters_mutated() + self._showwarning = self._module.showwarning + if self._record: + log = [] + def showwarning(*args, **kwargs): + log.append(WarningMessage(*args, **kwargs)) + self._module.showwarning = showwarning + return log + else: + return None + + def __exit__(self, *exc_info): + if not self._entered: + raise RuntimeError("Cannot exit %r without entering first" % self) + self._module.filters = self._filters + self._module._filters_mutated() + self._module.showwarning = self._showwarning + + +# filters contains a sequence of filter 5-tuples +# The components of the 5-tuple are: +# - an action: error, ignore, always, default, module, or once +# - a compiled regex that must match the warning message +# - a class representing the warning category +# - a compiled regex that must match the module that is being warned +# - a line number for the line being warning, or 0 to mean any line +# If either if the compiled regexs are None, match anything. +_warnings_defaults = False +try: + from _warnings import (filters, _defaultaction, _onceregistry, + warn, warn_explicit, _filters_mutated) + defaultaction = _defaultaction + onceregistry = _onceregistry + _warnings_defaults = True + +except ImportError: + filters = [] + defaultaction = "default" + onceregistry = {} + + _filters_version = 1 + + def _filters_mutated(): + global _filters_version + _filters_version += 1 + + +# Module initialization +_processoptions(sys.warnoptions) +if not _warnings_defaults: + silence = [ImportWarning, PendingDeprecationWarning] + silence.append(DeprecationWarning) + for cls in silence: + simplefilter("ignore", category=cls) + bytes_warning = sys.flags.bytes_warning + if bytes_warning > 1: + bytes_action = "error" + elif bytes_warning: + bytes_action = "default" + else: + bytes_action = "ignore" + simplefilter(bytes_action, category=BytesWarning, append=1) + # resource usage warnings are enabled by default in pydebug mode + if hasattr(sys, 'gettotalrefcount'): + resource_action = "always" + else: + resource_action = "ignore" + simplefilter(resource_action, category=ResourceWarning, append=1) + +del _warnings_defaults diff --git a/v1/flask/lib/python3.4/weakref.py b/v1/flask/lib/python3.4/weakref.py deleted file mode 120000 index 1c3eeb4..0000000 --- a/v1/flask/lib/python3.4/weakref.py +++ /dev/null @@ -1 +0,0 @@ -/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/weakref.py \ No newline at end of file diff --git a/v1/flask/lib/python3.4/weakref.py b/v1/flask/lib/python3.4/weakref.py new file mode 100644 index 0000000..5d09497 --- /dev/null +++ b/v1/flask/lib/python3.4/weakref.py @@ -0,0 +1,603 @@ +"""Weak reference support for Python. 
+ +This module is an implementation of PEP 205: + +http://www.python.org/dev/peps/pep-0205/ +""" + +# Naming convention: Variables named "wr" are weak reference objects; +# they are called this instead of "ref" to avoid name collisions with +# the module-global ref() function imported from _weakref. + +from _weakref import ( + getweakrefcount, + getweakrefs, + ref, + proxy, + CallableProxyType, + ProxyType, + ReferenceType) + +from _weakrefset import WeakSet, _IterationGuard + +import collections # Import after _weakref to avoid circular import. +import sys +import itertools + +ProxyTypes = (ProxyType, CallableProxyType) + +__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs", + "WeakKeyDictionary", "ReferenceType", "ProxyType", + "CallableProxyType", "ProxyTypes", "WeakValueDictionary", + "WeakSet", "WeakMethod", "finalize"] + + +class WeakMethod(ref): + """ + A custom `weakref.ref` subclass which simulates a weak reference to + a bound method, working around the lifetime problem of bound methods. + """ + + __slots__ = "_func_ref", "_meth_type", "_alive", "__weakref__" + + def __new__(cls, meth, callback=None): + try: + obj = meth.__self__ + func = meth.__func__ + except AttributeError: + raise TypeError("argument should be a bound method, not {}" + .format(type(meth))) from None + def _cb(arg): + # The self-weakref trick is needed to avoid creating a reference + # cycle. + self = self_wr() + if self._alive: + self._alive = False + if callback is not None: + callback(self) + self = ref.__new__(cls, obj, _cb) + self._func_ref = ref(func, _cb) + self._meth_type = type(meth) + self._alive = True + self_wr = ref(self) + return self + + def __call__(self): + obj = super().__call__() + func = self._func_ref() + if obj is None or func is None: + return None + return self._meth_type(func, obj) + + def __eq__(self, other): + if isinstance(other, WeakMethod): + if not self._alive or not other._alive: + return self is other + return ref.__eq__(self, other) and self._func_ref == other._func_ref + return False + + def __ne__(self, other): + if isinstance(other, WeakMethod): + if not self._alive or not other._alive: + return self is not other + return ref.__ne__(self, other) or self._func_ref != other._func_ref + return True + + __hash__ = ref.__hash__ + + +class WeakValueDictionary(collections.MutableMapping): + """Mapping class that references values weakly. + + Entries in the dictionary will be discarded when no strong + reference to the value exists anymore + """ + # We inherit the constructor without worrying about the input + # dictionary; since it uses our .update() method, we get the right + # checks (if the other dictionary is a WeakValueDictionary, + # objects are unwrapped on the way out, and we always wrap on the + # way in). 
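+    #
+    # A short usage sketch (illustrative only):
+    #
+    #     class Widget: pass
+    #
+    #     cache = WeakValueDictionary()
+    #     w = Widget()
+    #     cache['current'] = w
+    #     assert cache['current'] is w   # values come back unwrapped
+    #     del w                          # last strong reference gone...
+    #     'current' in cache             # ...False once the weakref clears
+    #
+    # Values must be weakly referenceable: storing e.g. a plain int or str
+    # raises TypeError.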
+
+
+class WeakValueDictionary(collections.MutableMapping):
+    """Mapping class that references values weakly.
+
+    Entries in the dictionary will be discarded when no strong
+    reference to the value exists anymore
+    """
+    # We inherit the constructor without worrying about the input
+    # dictionary; since it uses our .update() method, we get the right
+    # checks (if the other dictionary is a WeakValueDictionary,
+    # objects are unwrapped on the way out, and we always wrap on the
+    # way in).
+
+    def __init__(*args, **kw):
+        if not args:
+            raise TypeError("descriptor '__init__' of 'WeakValueDictionary' "
+                            "object needs an argument")
+        self, *args = args
+        if len(args) > 1:
+            raise TypeError('expected at most 1 arguments, got %d' % len(args))
+        def remove(wr, selfref=ref(self)):
+            self = selfref()
+            if self is not None:
+                if self._iterating:
+                    self._pending_removals.append(wr.key)
+                else:
+                    del self.data[wr.key]
+        self._remove = remove
+        # A list of keys to be removed
+        self._pending_removals = []
+        self._iterating = set()
+        self.data = d = {}
+        self.update(*args, **kw)
+
+    def _commit_removals(self):
+        l = self._pending_removals
+        d = self.data
+        # We shouldn't encounter any KeyError, because this method should
+        # always be called *before* mutating the dict.
+        while l:
+            del d[l.pop()]
+
+    def __getitem__(self, key):
+        o = self.data[key]()
+        if o is None:
+            raise KeyError(key)
+        else:
+            return o
+
+    def __delitem__(self, key):
+        if self._pending_removals:
+            self._commit_removals()
+        del self.data[key]
+
+    def __len__(self):
+        return len(self.data) - len(self._pending_removals)
+
+    def __contains__(self, key):
+        try:
+            o = self.data[key]()
+        except KeyError:
+            return False
+        return o is not None
+
+    def __repr__(self):
+        return "<WeakValueDictionary at %s>" % id(self)
+
+    def __setitem__(self, key, value):
+        if self._pending_removals:
+            self._commit_removals()
+        self.data[key] = KeyedRef(value, self._remove, key)
+
+    def copy(self):
+        new = WeakValueDictionary()
+        for key, wr in self.data.items():
+            o = wr()
+            if o is not None:
+                new[key] = o
+        return new
+
+    __copy__ = copy
+
+    def __deepcopy__(self, memo):
+        from copy import deepcopy
+        new = self.__class__()
+        for key, wr in self.data.items():
+            o = wr()
+            if o is not None:
+                new[deepcopy(key, memo)] = o
+        return new
+
+    def get(self, key, default=None):
+        try:
+            wr = self.data[key]
+        except KeyError:
+            return default
+        else:
+            o = wr()
+            if o is None:
+                # This should only happen
+                return default
+            else:
+                return o
+
+    def items(self):
+        with _IterationGuard(self):
+            for k, wr in self.data.items():
+                v = wr()
+                if v is not None:
+                    yield k, v
+
+    def keys(self):
+        with _IterationGuard(self):
+            for k, wr in self.data.items():
+                if wr() is not None:
+                    yield k
+
+    __iter__ = keys
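
The value-weak behaviour above is easiest to see with a throwaway class
(plain ints and strings may be interned or cached, so they make poor demo
values). A minimal sketch, again assuming CPython's prompt collection:

    import weakref

    class Image:
        def __init__(self, name):
            self.name = name

    cache = weakref.WeakValueDictionary()
    img = Image("logo.png")
    cache["logo"] = img

    assert cache["logo"] is img
    del img                       # last strong reference goes away
    assert "logo" not in cache    # entry was discarded automatically
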
+
+    def itervaluerefs(self):
+        """Return an iterator that yields the weak references to the values.
+
+        The references are not guaranteed to be 'live' at the time
+        they are used, so the result of calling the references needs
+        to be checked before being used. This can be used to avoid
+        creating references that will cause the garbage collector to
+        keep the values around longer than needed.
+
+        """
+        with _IterationGuard(self):
+            yield from self.data.values()
+
+    def values(self):
+        with _IterationGuard(self):
+            for wr in self.data.values():
+                obj = wr()
+                if obj is not None:
+                    yield obj
+
+    def popitem(self):
+        if self._pending_removals:
+            self._commit_removals()
+        while True:
+            key, wr = self.data.popitem()
+            o = wr()
+            if o is not None:
+                return key, o
+
+    def pop(self, key, *args):
+        if self._pending_removals:
+            self._commit_removals()
+        try:
+            o = self.data.pop(key)()
+        except KeyError:
+            if args:
+                return args[0]
+            raise
+        if o is None:
+            raise KeyError(key)
+        else:
+            return o
+
+    def setdefault(self, key, default=None):
+        try:
+            wr = self.data[key]
+        except KeyError:
+            if self._pending_removals:
+                self._commit_removals()
+            self.data[key] = KeyedRef(default, self._remove, key)
+            return default
+        else:
+            return wr()
+
+    def update(*args, **kwargs):
+        if not args:
+            raise TypeError("descriptor 'update' of 'WeakValueDictionary' "
+                            "object needs an argument")
+        self, *args = args
+        if len(args) > 1:
+            raise TypeError('expected at most 1 arguments, got %d' % len(args))
+        dict = args[0] if args else None
+        if self._pending_removals:
+            self._commit_removals()
+        d = self.data
+        if dict is not None:
+            if not hasattr(dict, "items"):
+                dict = type({})(dict)
+            for key, o in dict.items():
+                d[key] = KeyedRef(o, self._remove, key)
+        if len(kwargs):
+            self.update(kwargs)
+
+    def valuerefs(self):
+        """Return a list of weak references to the values.
+
+        The references are not guaranteed to be 'live' at the time
+        they are used, so the result of calling the references needs
+        to be checked before being used. This can be used to avoid
+        creating references that will cause the garbage collector to
+        keep the values around longer than needed.
+
+        """
+        return list(self.data.values())
+
+
+class KeyedRef(ref):
+    """Specialized reference that includes a key corresponding to the value.
+
+    This is used in the WeakValueDictionary to avoid having to create
+    a function object for each key stored in the mapping. A shared
+    callback object can use the 'key' attribute of a KeyedRef instead
+    of getting a reference to the key from an enclosing scope.
+
+    """
+
+    __slots__ = "key",
+
+    def __new__(type, ob, callback, key):
+        self = ref.__new__(type, ob, callback)
+        self.key = key
+        return self
+
+    def __init__(self, ob, callback, key):
+        super().__init__(ob, callback)
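
The key-weak counterpart defined next is the usual tool for attaching
metadata to objects you do not own, without keeping them alive. A minimal
sketch, assuming a throwaway Window class:

    import weakref

    class Window:
        pass

    extra = weakref.WeakKeyDictionary()
    w = Window()
    extra[w] = {"dirty": True}        # annotate without touching Window itself

    assert extra[w]["dirty"]
    del w                             # the annotation dies with the window
    assert len(extra) == 0
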
+
+
+class WeakKeyDictionary(collections.MutableMapping):
+    """ Mapping class that references keys weakly.
+
+    Entries in the dictionary will be discarded when there is no
+    longer a strong reference to the key. This can be used to
+    associate additional data with an object owned by other parts of
+    an application without adding attributes to those objects. This
+    can be especially useful with objects that override attribute
+    accesses.
+    """
+
+    def __init__(self, dict=None):
+        self.data = {}
+        def remove(k, selfref=ref(self)):
+            self = selfref()
+            if self is not None:
+                if self._iterating:
+                    self._pending_removals.append(k)
+                else:
+                    del self.data[k]
+        self._remove = remove
+        # A list of dead weakrefs (keys to be removed)
+        self._pending_removals = []
+        self._iterating = set()
+        self._dirty_len = False
+        if dict is not None:
+            self.update(dict)
+
+    def _commit_removals(self):
+        # NOTE: We don't need to call this method before mutating the dict,
+        # because a dead weakref never compares equal to a live weakref,
+        # even if they happened to refer to equal objects.
+        # However, it means keys may already have been removed.
+        l = self._pending_removals
+        d = self.data
+        while l:
+            try:
+                del d[l.pop()]
+            except KeyError:
+                pass
+
+    def _scrub_removals(self):
+        d = self.data
+        self._pending_removals = [k for k in self._pending_removals if k in d]
+        self._dirty_len = False
+
+    def __delitem__(self, key):
+        self._dirty_len = True
+        del self.data[ref(key)]
+
+    def __getitem__(self, key):
+        return self.data[ref(key)]
+
+    def __len__(self):
+        if self._dirty_len and self._pending_removals:
+            # self._pending_removals may still contain keys which were
+            # explicitly removed, we have to scrub them (see issue #21173).
+            self._scrub_removals()
+        return len(self.data) - len(self._pending_removals)
+
+    def __repr__(self):
+        return "<WeakKeyDictionary at %s>" % id(self)
+
+    def __setitem__(self, key, value):
+        self.data[ref(key, self._remove)] = value
+
+    def copy(self):
+        new = WeakKeyDictionary()
+        for key, value in self.data.items():
+            o = key()
+            if o is not None:
+                new[o] = value
+        return new
+
+    __copy__ = copy
+
+    def __deepcopy__(self, memo):
+        from copy import deepcopy
+        new = self.__class__()
+        for key, value in self.data.items():
+            o = key()
+            if o is not None:
+                new[o] = deepcopy(value, memo)
+        return new
+
+    def get(self, key, default=None):
+        return self.data.get(ref(key),default)
+
+    def __contains__(self, key):
+        try:
+            wr = ref(key)
+        except TypeError:
+            return False
+        return wr in self.data
+
+    def items(self):
+        with _IterationGuard(self):
+            for wr, value in self.data.items():
+                key = wr()
+                if key is not None:
+                    yield key, value
+
+    def keys(self):
+        with _IterationGuard(self):
+            for wr in self.data:
+                obj = wr()
+                if obj is not None:
+                    yield obj
+
+    __iter__ = keys
+
+    def values(self):
+        with _IterationGuard(self):
+            for wr, value in self.data.items():
+                if wr() is not None:
+                    yield value
+
+    def keyrefs(self):
+        """Return a list of weak references to the keys.
+
+        The references are not guaranteed to be 'live' at the time
+        they are used, so the result of calling the references needs
+        to be checked before being used. This can be used to avoid
+        creating references that will cause the garbage collector to
+        keep the keys around longer than needed.
+
+        """
+        return list(self.data)
+
+    def popitem(self):
+        self._dirty_len = True
+        while True:
+            key, value = self.data.popitem()
+            o = key()
+            if o is not None:
+                return o, value
+
+    def pop(self, key, *args):
+        self._dirty_len = True
+        return self.data.pop(ref(key), *args)
+
+    def setdefault(self, key, default=None):
+        return self.data.setdefault(ref(key, self._remove),default)
+
+    def update(self, dict=None, **kwargs):
+        d = self.data
+        if dict is not None:
+            if not hasattr(dict, "items"):
+                dict = type({})(dict)
+            for key, value in dict.items():
+                d[ref(key, self._remove)] = value
+        if len(kwargs):
+            self.update(kwargs)
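
The finalize class that follows is the modern alternative to __del__ for
cleanup that must run exactly once, either at collection or at interpreter
exit. A minimal sketch, assuming a throwaway temporary directory:

    import os
    import shutil
    import tempfile
    import weakref

    class Session:
        def __init__(self):
            self.workdir = tempfile.mkdtemp()
            # remove the directory when the Session is collected,
            # or at interpreter exit if it is still alive then
            self._finalizer = weakref.finalize(self, shutil.rmtree, self.workdir)

    s = Session()
    path = s.workdir
    del s                             # under CPython the finalizer fires here
    assert not os.path.exists(path)
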
+
+
+class finalize:
+    """Class for finalization of weakrefable objects
+
+    finalize(obj, func, *args, **kwargs) returns a callable finalizer
+    object which will be called when obj is garbage collected. The
+    first time the finalizer is called it evaluates func(*args, **kwargs)
+    and returns the result. After this the finalizer is dead, and
+    calling it just returns None.
+
+    When the program exits any remaining finalizers for which the
+    atexit attribute is true will be run in reverse order of creation.
+    By default atexit is true.
+    """
+
+    # Finalizer objects don't have any state of their own. They are
+    # just used as keys to lookup _Info objects in the registry. This
+    # ensures that they cannot be part of a ref-cycle.
+
+    __slots__ = ()
+    _registry = {}
+    _shutdown = False
+    _index_iter = itertools.count()
+    _dirty = False
+    _registered_with_atexit = False
+
+    class _Info:
+        __slots__ = ("weakref", "func", "args", "kwargs", "atexit", "index")
+
+    def __init__(self, obj, func, *args, **kwargs):
+        if not self._registered_with_atexit:
+            # We may register the exit function more than once because
+            # of a thread race, but that is harmless
+            import atexit
+            atexit.register(self._exitfunc)
+            finalize._registered_with_atexit = True
+        info = self._Info()
+        info.weakref = ref(obj, self)
+        info.func = func
+        info.args = args
+        info.kwargs = kwargs or None
+        info.atexit = True
+        info.index = next(self._index_iter)
+        self._registry[self] = info
+        finalize._dirty = True
+
+    def __call__(self, _=None):
+        """If alive then mark as dead and return func(*args, **kwargs);
+        otherwise return None"""
+        info = self._registry.pop(self, None)
+        if info and not self._shutdown:
+            return info.func(*info.args, **(info.kwargs or {}))
+
+    def detach(self):
+        """If alive then mark as dead and return (obj, func, args, kwargs);
+        otherwise return None"""
+        info = self._registry.get(self)
+        obj = info and info.weakref()
+        if obj is not None and self._registry.pop(self, None):
+            return (obj, info.func, info.args, info.kwargs or {})
+
+    def peek(self):
+        """If alive then return (obj, func, args, kwargs);
+        otherwise return None"""
+        info = self._registry.get(self)
+        obj = info and info.weakref()
+        if obj is not None:
+            return (obj, info.func, info.args, info.kwargs or {})
+
+    @property
+    def alive(self):
+        """Whether finalizer is alive"""
+        return self in self._registry
+
+    @property
+    def atexit(self):
+        """Whether finalizer should be called at exit"""
+        info = self._registry.get(self)
+        return bool(info) and info.atexit
+
+    @atexit.setter
+    def atexit(self, value):
+        info = self._registry.get(self)
+        if info:
+            info.atexit = bool(value)
+
+    def __repr__(self):
+        info = self._registry.get(self)
+        obj = info and info.weakref()
+        if obj is None:
+            return '<%s object at %#x; dead>' % (type(self).__name__, id(self))
+        else:
+            return '<%s object at %#x; for %r at %#x>' % \
+                (type(self).__name__, id(self), type(obj).__name__, id(obj))
+
+    @classmethod
+    def _select_for_exit(cls):
+        # Return live finalizers marked for exit, oldest first
+        L = [(f,i) for (f,i) in cls._registry.items() if i.atexit]
+        L.sort(key=lambda item:item[1].index)
+        return [f for (f,i) in L]
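
A finalizer registered this way can also be inspected or cancelled before it
fires; detach(), defined above, is the cancel operation. A short sketch of
those states:

    import weakref

    class Resource:
        pass

    r = Resource()
    f = weakref.finalize(r, print, "closing resource")

    assert f.alive
    assert f.peek()[0] is r           # (obj, func, args, kwargs), still alive
    f.detach()                        # cancel: the callback will never run now
    assert not f.alive
    del r                             # nothing is printed
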
+
+    @classmethod
+    def _exitfunc(cls):
+        # At shutdown invoke finalizers for which atexit is true.
+        # This is called once all other non-daemonic threads have
+        # been joined.
+        reenable_gc = False
+        try:
+            if cls._registry:
+                import gc
+                if gc.isenabled():
+                    reenable_gc = True
+                    gc.disable()
+                pending = None
+                while True:
+                    if pending is None or finalize._dirty:
+                        pending = cls._select_for_exit()
+                        finalize._dirty = False
+                    if not pending:
+                        break
+                    f = pending.pop()
+                    try:
+                        # gc is disabled, so (assuming no daemonic
+                        # threads) the following is the only line in
+                        # this function which might trigger creation
+                        # of a new finalizer
+                        f()
+                    except Exception:
+                        sys.excepthook(*sys.exc_info())
+                    assert f not in cls._registry
+        finally:
+            # prevent any more finalizers from executing during shutdown
+            finalize._shutdown = True
+            if reenable_gc:
+                gc.enable()
diff --git a/v1/plexMovies.py b/v1/plexMovies.py
new file mode 100755
index 0000000..24d3323
--- /dev/null
+++ b/v1/plexMovies.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# @Author: KevinMidboe
+# @Date: 2017-01-28 23:21:22
+# @Last Modified by:   KevinMidboe
+# @Last Modified time: 2017-02-06 11:58:31
+
+from os import system
+import xml.etree.ElementTree as ET
+
+import sys
+
+from time import time
+
+def getLibraryXML():
+    # Every call dumps the Plex library XML to a local file named xmlMovieLib.xml
+    system('curl --silent http://10.0.0.41:32400/library/sections/1/all > xmlMovieLib.xml')
+    # XML parsing: build a tree and return its root node
+    try:
+        parser = ET.parse('xmlMovieLib.xml')
+        xmlTreeRoot = parser.getroot()
+        return xmlTreeRoot
+
+    except ET.ParseError:
+        return None
+
+def getMovieExistance():
+    pass
+
+def getSpecificMovieInfo(movieTitle, movieYear=None):
+    xmlTreeRoot = getLibraryXML()
+
+    try:
+        treeSize = int(xmlTreeRoot.get('size'))
+    except TypeError:
+        return None
+
+    if (treeSize > 0):
+        for video in xmlTreeRoot.findall('Video'):
+            if video.get('title') == movieTitle:
+                title = movieTitle
+                year = video.get('year')
+                if movieYear is None or movieYear == year:
+                    mediaInfo = video.find('Media')
+                    bitrate = mediaInfo.get('bitrate')
+                    width = mediaInfo.get('width')
+                    height = mediaInfo.get('height')
+
+                    return { 'title':title, 'year': year, 'bitrate':bitrate,
+                             'width':width, 'height':height }
+                else:
+                    # field: 404?
+                    return { 'Error': 'Movie matching that year does not exist, did '\
+                             'you mean ' + title + ' (' + year + ')?'}
+
+    # No match found: fall through and return None implicitly
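
Shelling out to curl and re-parsing a scratch file on every call works, but
the same fetch can be done in-process. A minimal sketch of an alternative
getLibraryXML, assuming the third-party requests package is available and
the Plex server address stays hard-coded:

    import requests
    import xml.etree.ElementTree as ET

    def getLibraryXML():
        try:
            resp = requests.get('http://10.0.0.41:32400/library/sections/1/all',
                                timeout=5)
            resp.raise_for_status()
            # Parse straight from the response body; no temp file needed
            return ET.fromstring(resp.content)
        except (requests.RequestException, ET.ParseError):
            return None
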
+
+def plexMovies(xmlTreeRoot, query='title'):
+    # The root node, named MediaContainer, has a size attribute that holds
+    # the number of entries in the container. If this is '0' the library is
+    # empty and there is nothing to compute.
+    if (xmlTreeRoot.get('size') != '0'):
+        # Goes through all the 'Video' elements in MediaContainer
+        for video in xmlTreeRoot.findall('Video'):
+            if query=='title' or query=='year':
+                result = video.get(query)
+                print(result)
+
+            elif query=='bitrate' or query=='width' or query=='height':
+                mediaInfo = video.find('Media')
+                result = mediaInfo.get(query)
+                print(result)
+
+if __name__ == '__main__':
+    # Query: !title, !year, bitrate, width, height
+    start_time = time()
+    # xmlTreeRoot = getLibraryXML()
+    # plexMovies(xmlTreeRoot)
+
+    print(getSpecificMovieInfo('10 Cloverfield Lane'))
+    print("--- %s seconds ---" % (time() - start_time))
\ No newline at end of file
diff --git a/v1/uptime.py b/v1/uptime.py
index 1ff020f..5ab8e3e 100755
--- a/v1/uptime.py
+++ b/v1/uptime.py
@@ -3,7 +3,7 @@
 # @Author: KevinMidboe
 # @Date: 2017-01-27 19:48:42
 # @Last Modified by:   KevinMidboe
-# @Last Modified time: 2017-01-31 23:13:20
+# @Last Modified time: 2017-02-03 12:33:51
 
 # TODO add better error handling to return statements
diff --git a/v1/xmlMovieLib.xml b/v1/xmlMovieLib.xml
new file mode 100644
index 0000000..3566909
--- /dev/null
+++ b/v1/xmlMovieLib.xml
@@ -0,0 +1,9718 @@
[9,718 added lines of Plex MediaContainer XML (the cached library dump) omitted; the markup did not survive extraction.]
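
For reference, getSpecificMovieInfo only relies on a small part of that
dump's shape: a MediaContainer root with a size attribute, Video children
carrying title and year, and a nested Media element with bitrate, width and
height. A self-contained sketch with a hypothetical one-movie library:

    import xml.etree.ElementTree as ET

    sample = '''<MediaContainer size="1">
      <Video title="10 Cloverfield Lane" year="2016">
        <Media bitrate="8000" width="1920" height="1080" />
      </Video>
    </MediaContainer>'''

    root = ET.fromstring(sample)
    video = root.findall('Video')[0]
    media = video.find('Media')
    print(video.get('title'), video.get('year'), media.get('bitrate'))
    # prints: 10 Cloverfield Lane 2016 8000
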