mirror of
				https://github.com/KevinMidboe/Node-Com-Handler.git
				synced 2025-10-29 17:50:27 +00:00 
			
		
		
		
	
							
								
								
									
										62
									
								
								v1/app.py
									
									
									
									
									
								
							
							
						
						
									
										62
									
								
								v1/app.py
									
									
									
									
									
								
							| @@ -5,12 +5,16 @@ | |||||||
| from flask import Flask, jsonify, make_response, request, url_for, abort | from flask import Flask, jsonify, make_response, request, url_for, abort | ||||||
| from flask_httpauth import HTTPBasicAuth | from flask_httpauth import HTTPBasicAuth | ||||||
| from json import loads, dumps | from json import loads, dumps | ||||||
|  | import requests | ||||||
|  |  | ||||||
| from werkzeug.security import generate_password_hash, \ | from werkzeug.security import generate_password_hash, \ | ||||||
| 	check_password_hash | 	check_password_hash | ||||||
|  |  | ||||||
| from diskusage import diskUsage | from diskusage import diskUsage | ||||||
| from uptime import timeSinceBoot | from uptime import timeSinceBoot | ||||||
|  | from cpuTemp import getCpuTemp | ||||||
|  |  | ||||||
|  | from plexMovies import getSpecificMovieInfo | ||||||
|  |  | ||||||
| app = Flask(__name__, static_url_path = "") | app = Flask(__name__, static_url_path = "") | ||||||
| auth = HTTPBasicAuth() | auth = HTTPBasicAuth() | ||||||
| @@ -22,6 +26,8 @@ users = { | |||||||
| 	"test": "test" | 	"test": "test" | ||||||
| } | } | ||||||
|  |  | ||||||
|  | tmdbBaseURL = "https://api.themoviedb.org/3/" | ||||||
|  |  | ||||||
| # Flask function for checking password sent with http request | # Flask function for checking password sent with http request | ||||||
| # @auth.verify_password | # @auth.verify_password | ||||||
| # def verify_password(email, password): | # def verify_password(email, password): | ||||||
| @@ -64,10 +70,10 @@ def bad_request(error): | |||||||
| @app.route('/api/v1/disks', methods=['GET']) | @app.route('/api/v1/disks', methods=['GET']) | ||||||
| @auth.login_required | @auth.login_required | ||||||
| def get_diskUsage(): | def get_diskUsage(): | ||||||
| 	try: |  | ||||||
| 	returningDiskUsage = diskUsage(request.args.get('dir')) | 	returningDiskUsage = diskUsage(request.args.get('dir')) | ||||||
|  | 	if returningDiskUsage != None: | ||||||
| 		return jsonify(returningDiskUsage) | 		return jsonify(returningDiskUsage) | ||||||
| 	except: | 	else: | ||||||
| 		abort(404) | 		abort(404) | ||||||
|  |  | ||||||
|  |  | ||||||
| @@ -79,6 +85,56 @@ def get_uptimes(): | |||||||
| 	except: | 	except: | ||||||
| 		abort(404) | 		abort(404) | ||||||
|  |  | ||||||
|  | @app.route('/api/v1/temps', methods=['GET']) | ||||||
|  | def get_temps(): | ||||||
|  | 	cpuTemp = getCpuTemp() | ||||||
|  | 	if cpuTemp != None: | ||||||
|  | 		return jsonify( {"Avg cpu temp": cpuTemp} ) | ||||||
|  | 	else: | ||||||
|  | 		return jsonify( {"Error":"Temp reading not supported for host machine."} ) | ||||||
|  |  | ||||||
|  | # TODO PLEX | ||||||
|  | # Search, watching, +photo | ||||||
|  | @app.route('/api/v1/plex/request', methods=['GET']) | ||||||
|  | def get_movieRequest(): | ||||||
|  | 	if (request.args.get("query") != None): | ||||||
|  | 		requestType = "search/multi?" | ||||||
|  | 		requestAPI = "api_key=" + "9fa154f5355c37a1b9b57ac06e7d6712" | ||||||
|  | 		requestQuery = "&query=" + str(request.args.get('query')) | ||||||
|  | 		requestLanguage = "&language=en.US" | ||||||
|  |  | ||||||
|  | 		url = tmdbBaseURL + requestType + requestAPI + requestQuery + requestLanguage | ||||||
|  | 		# url = "https://api.themoviedb.org/3/search/multi?include_adult=false&query=home%20alone&language=en-US&api_key=9fa154f5355c37a1b9b57ac06e7d6712" | ||||||
|  |  | ||||||
|  | 		payload = "{}" | ||||||
|  | 		response = requests.request("GET", url, data=payload) | ||||||
|  |  | ||||||
|  | 		print(response.text) | ||||||
|  | 		return response.text | ||||||
|  |  | ||||||
|  | 	else: return jsonify ({ "Error": "Query not defined." }) | ||||||
|  |  | ||||||
|  | @app.route('/api/v1/plex/movies', methods=['GET']) | ||||||
|  | @auth.login_required | ||||||
|  | def getPlexMovies(): | ||||||
|  | 	title = request.args.get('title') | ||||||
|  |  | ||||||
|  | 	movieInfo = getSpecificMovieInfo(title) | ||||||
|  | 	if movieInfo != None: | ||||||
|  | 		return jsonify(movieInfo) | ||||||
|  |  | ||||||
|  | 	abort(500) | ||||||
|  |  | ||||||
|  | @app.route('/api/v1/plex/watchings', methods=['GET']) | ||||||
|  | @auth.login_required | ||||||
|  | def getPlexWatchings(): | ||||||
|  | 	r = requests.get('http://10.0.0.41:32400/status/sessions') | ||||||
|  |  | ||||||
|  | 	return r.text | ||||||
|  | 	movieInfo = getSpecificMovieInfo(title) | ||||||
|  | 	if movieInfo != None: | ||||||
|  | 		return jsonify(movieInfo) | ||||||
|  |  | ||||||
|  |  | ||||||
| @app.route('/api/v1/uptimes/duration', methods=['GET']) | @app.route('/api/v1/uptimes/duration', methods=['GET']) | ||||||
| @auth.login_required | @auth.login_required | ||||||
| @@ -101,4 +157,4 @@ def get_uptimesLoad(): | |||||||
|  |  | ||||||
|  |  | ||||||
| if __name__ == '__main__': | if __name__ == '__main__': | ||||||
| 	app.run(host='0.0.0.0', port=63588) | 	app.run(port=63590, debug=True) | ||||||
|   | |||||||
| @@ -1,9 +0,0 @@ | |||||||
| import linuxcpureader |  | ||||||
|  |  | ||||||
| def main(): |  | ||||||
| 	cpu = linuxcpureader.LinuxCpuTemperatureReader() |  | ||||||
| 	print(cpu) |  | ||||||
| 	print(cpu.get_reader()) |  | ||||||
| 	print(', '.join("%s: %s" % item for item in cpu.items())) |  | ||||||
|  |  | ||||||
| main()  |  | ||||||
| @@ -1,15 +1,35 @@ | |||||||
| #!/usr/bin/env python3 | #!/usr/bin/env python3 | ||||||
| # -*- coding: utf-8 -*- | # -*- coding: utf-8 -*- | ||||||
| # @Author: KevinMidboe | import psutil | ||||||
| # @Date:   2017-01-28 13:56:48 |  | ||||||
| # @Last Modified by:   KevinMidboe |  | ||||||
| # @Last Modified time: 2017-01-28 13:58:35 |  | ||||||
|  |  | ||||||
| from pyspectator.processor import Cpu | def getCpuTemp(): | ||||||
| from time import sleep | 	# Check if sensors_temperatures exists | ||||||
|  | 	try: | ||||||
|  | 		# Define cpu as function of sensors_temperatures | ||||||
|  | 		cpu = psutil.sensors_temperatures() | ||||||
|  | 	except AttributeError: | ||||||
|  | 		error = "'sensors_temperatures' is not supported in this verison of psutil or your OS." | ||||||
|  | 		print(error) | ||||||
|  | 		return None | ||||||
|  |  | ||||||
| cpu = Cpu(monitoring_latency=1) | 	# Array for temps for each core. | ||||||
| with cpu: | 	curCpuTemps = [] | ||||||
| 	for _ in range(8): | 	# Itterate through all cores of coretemps  | ||||||
| 		cpu.load, cpu.temperature | 	for temp in cpu['coretemp']: | ||||||
| 		sleep(1.1) | 		curCpuTemps.append(temp[1]) # Append to list | ||||||
|  | 		print(temp[0]+': '+str(temp[1])) # Print output | ||||||
|  | 	 | ||||||
|  | 	# Check if len of curCpuTemps is something so not to  | ||||||
|  | 	# calculate on a empty list | ||||||
|  | 	if len(curCpuTemps) > 0: | ||||||
|  | 		# Compute avg of curCpuTemps | ||||||
|  | 		avgCpuTemps = sum(curCpuTemps)/len(curCpuTemps) | ||||||
|  | 		return avgCpuTemps | ||||||
|  | 		print("Avg: " + str(avgCpuTemps)) | ||||||
|  | 	else: | ||||||
|  | 		print("Couldn't get cpu temp. (division by zero)") | ||||||
|  | 		return None | ||||||
|  |  | ||||||
|  |  | ||||||
|  | if __name__ == "__main__": | ||||||
|  | 	print(getCpuTemp()) | ||||||
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/__future__.py |  | ||||||
							
								
								
									
										134
									
								
								v1/flask/lib/python3.4/__future__.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										134
									
								
								v1/flask/lib/python3.4/__future__.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,134 @@ | |||||||
|  | """Record of phased-in incompatible language changes. | ||||||
|  |  | ||||||
|  | Each line is of the form: | ||||||
|  |  | ||||||
|  |     FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease "," | ||||||
|  |                               CompilerFlag ")" | ||||||
|  |  | ||||||
|  | where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples | ||||||
|  | of the same form as sys.version_info: | ||||||
|  |  | ||||||
|  |     (PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int | ||||||
|  |      PY_MINOR_VERSION, # the 1; an int | ||||||
|  |      PY_MICRO_VERSION, # the 0; an int | ||||||
|  |      PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string | ||||||
|  |      PY_RELEASE_SERIAL # the 3; an int | ||||||
|  |     ) | ||||||
|  |  | ||||||
|  | OptionalRelease records the first release in which | ||||||
|  |  | ||||||
|  |     from __future__ import FeatureName | ||||||
|  |  | ||||||
|  | was accepted. | ||||||
|  |  | ||||||
|  | In the case of MandatoryReleases that have not yet occurred, | ||||||
|  | MandatoryRelease predicts the release in which the feature will become part | ||||||
|  | of the language. | ||||||
|  |  | ||||||
|  | Else MandatoryRelease records when the feature became part of the language; | ||||||
|  | in releases at or after that, modules no longer need | ||||||
|  |  | ||||||
|  |     from __future__ import FeatureName | ||||||
|  |  | ||||||
|  | to use the feature in question, but may continue to use such imports. | ||||||
|  |  | ||||||
|  | MandatoryRelease may also be None, meaning that a planned feature got | ||||||
|  | dropped. | ||||||
|  |  | ||||||
|  | Instances of class _Feature have two corresponding methods, | ||||||
|  | .getOptionalRelease() and .getMandatoryRelease(). | ||||||
|  |  | ||||||
|  | CompilerFlag is the (bitfield) flag that should be passed in the fourth | ||||||
|  | argument to the builtin function compile() to enable the feature in | ||||||
|  | dynamically compiled code.  This flag is stored in the .compiler_flag | ||||||
|  | attribute on _Future instances.  These values must match the appropriate | ||||||
|  | #defines of CO_xxx flags in Include/compile.h. | ||||||
|  |  | ||||||
|  | No feature line is ever to be deleted from this file. | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | all_feature_names = [ | ||||||
|  |     "nested_scopes", | ||||||
|  |     "generators", | ||||||
|  |     "division", | ||||||
|  |     "absolute_import", | ||||||
|  |     "with_statement", | ||||||
|  |     "print_function", | ||||||
|  |     "unicode_literals", | ||||||
|  |     "barry_as_FLUFL", | ||||||
|  | ] | ||||||
|  |  | ||||||
|  | __all__ = ["all_feature_names"] + all_feature_names | ||||||
|  |  | ||||||
|  | # The CO_xxx symbols are defined here under the same names used by | ||||||
|  | # compile.h, so that an editor search will find them here.  However, | ||||||
|  | # they're not exported in __all__, because they don't really belong to | ||||||
|  | # this module. | ||||||
|  | CO_NESTED            = 0x0010   # nested_scopes | ||||||
|  | CO_GENERATOR_ALLOWED = 0        # generators (obsolete, was 0x1000) | ||||||
|  | CO_FUTURE_DIVISION   = 0x2000   # division | ||||||
|  | CO_FUTURE_ABSOLUTE_IMPORT = 0x4000 # perform absolute imports by default | ||||||
|  | CO_FUTURE_WITH_STATEMENT  = 0x8000   # with statement | ||||||
|  | CO_FUTURE_PRINT_FUNCTION  = 0x10000   # print function | ||||||
|  | CO_FUTURE_UNICODE_LITERALS = 0x20000 # unicode string literals | ||||||
|  | CO_FUTURE_BARRY_AS_BDFL = 0x40000 | ||||||
|  |  | ||||||
|  | class _Feature: | ||||||
|  |     def __init__(self, optionalRelease, mandatoryRelease, compiler_flag): | ||||||
|  |         self.optional = optionalRelease | ||||||
|  |         self.mandatory = mandatoryRelease | ||||||
|  |         self.compiler_flag = compiler_flag | ||||||
|  |  | ||||||
|  |     def getOptionalRelease(self): | ||||||
|  |         """Return first release in which this feature was recognized. | ||||||
|  |  | ||||||
|  |         This is a 5-tuple, of the same form as sys.version_info. | ||||||
|  |         """ | ||||||
|  |  | ||||||
|  |         return self.optional | ||||||
|  |  | ||||||
|  |     def getMandatoryRelease(self): | ||||||
|  |         """Return release in which this feature will become mandatory. | ||||||
|  |  | ||||||
|  |         This is a 5-tuple, of the same form as sys.version_info, or, if | ||||||
|  |         the feature was dropped, is None. | ||||||
|  |         """ | ||||||
|  |  | ||||||
|  |         return self.mandatory | ||||||
|  |  | ||||||
|  |     def __repr__(self): | ||||||
|  |         return "_Feature" + repr((self.optional, | ||||||
|  |                                   self.mandatory, | ||||||
|  |                                   self.compiler_flag)) | ||||||
|  |  | ||||||
|  | nested_scopes = _Feature((2, 1, 0, "beta",  1), | ||||||
|  |                          (2, 2, 0, "alpha", 0), | ||||||
|  |                          CO_NESTED) | ||||||
|  |  | ||||||
|  | generators = _Feature((2, 2, 0, "alpha", 1), | ||||||
|  |                       (2, 3, 0, "final", 0), | ||||||
|  |                       CO_GENERATOR_ALLOWED) | ||||||
|  |  | ||||||
|  | division = _Feature((2, 2, 0, "alpha", 2), | ||||||
|  |                     (3, 0, 0, "alpha", 0), | ||||||
|  |                     CO_FUTURE_DIVISION) | ||||||
|  |  | ||||||
|  | absolute_import = _Feature((2, 5, 0, "alpha", 1), | ||||||
|  |                            (3, 0, 0, "alpha", 0), | ||||||
|  |                            CO_FUTURE_ABSOLUTE_IMPORT) | ||||||
|  |  | ||||||
|  | with_statement = _Feature((2, 5, 0, "alpha", 1), | ||||||
|  |                           (2, 6, 0, "alpha", 0), | ||||||
|  |                           CO_FUTURE_WITH_STATEMENT) | ||||||
|  |  | ||||||
|  | print_function = _Feature((2, 6, 0, "alpha", 2), | ||||||
|  |                           (3, 0, 0, "alpha", 0), | ||||||
|  |                           CO_FUTURE_PRINT_FUNCTION) | ||||||
|  |  | ||||||
|  | unicode_literals = _Feature((2, 6, 0, "alpha", 2), | ||||||
|  |                             (3, 0, 0, "alpha", 0), | ||||||
|  |                             CO_FUTURE_UNICODE_LITERALS) | ||||||
|  |  | ||||||
|  | barry_as_FLUFL = _Feature((3, 1, 0, "alpha", 2), | ||||||
|  |                          (3, 9, 0, "alpha", 0), | ||||||
|  |                          CO_FUTURE_BARRY_AS_BDFL) | ||||||
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/_bootlocale.py |  | ||||||
							
								
								
									
										34
									
								
								v1/flask/lib/python3.4/_bootlocale.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										34
									
								
								v1/flask/lib/python3.4/_bootlocale.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,34 @@ | |||||||
|  | """A minimal subset of the locale module used at interpreter startup | ||||||
|  | (imported by the _io module), in order to reduce startup time. | ||||||
|  |  | ||||||
|  | Don't import directly from third-party code; use the `locale` module instead! | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | import sys | ||||||
|  | import _locale | ||||||
|  |  | ||||||
|  | if sys.platform.startswith("win"): | ||||||
|  |     def getpreferredencoding(do_setlocale=True): | ||||||
|  |         return _locale._getdefaultlocale()[1] | ||||||
|  | else: | ||||||
|  |     try: | ||||||
|  |         _locale.CODESET | ||||||
|  |     except AttributeError: | ||||||
|  |         def getpreferredencoding(do_setlocale=True): | ||||||
|  |             # This path for legacy systems needs the more complex | ||||||
|  |             # getdefaultlocale() function, import the full locale module. | ||||||
|  |             import locale | ||||||
|  |             return locale.getpreferredencoding(do_setlocale) | ||||||
|  |     else: | ||||||
|  |         def getpreferredencoding(do_setlocale=True): | ||||||
|  |             assert not do_setlocale | ||||||
|  |             result = _locale.nl_langinfo(_locale.CODESET) | ||||||
|  |             if not result and sys.platform == 'darwin': | ||||||
|  |                 # nl_langinfo can return an empty string | ||||||
|  |                 # when the setting has an invalid value. | ||||||
|  |                 # Default to UTF-8 in that case because | ||||||
|  |                 # UTF-8 is the default charset on OSX and | ||||||
|  |                 # returning nothing will crash the | ||||||
|  |                 # interpreter. | ||||||
|  |                 result = 'UTF-8' | ||||||
|  |             return result | ||||||
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/_collections_abc.py |  | ||||||
							
								
								
									
										748
									
								
								v1/flask/lib/python3.4/_collections_abc.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										748
									
								
								v1/flask/lib/python3.4/_collections_abc.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,748 @@ | |||||||
|  | # Copyright 2007 Google, Inc. All Rights Reserved. | ||||||
|  | # Licensed to PSF under a Contributor Agreement. | ||||||
|  |  | ||||||
|  | """Abstract Base Classes (ABCs) for collections, according to PEP 3119. | ||||||
|  |  | ||||||
|  | Unit tests are in test_collections. | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from abc import ABCMeta, abstractmethod | ||||||
|  | import sys | ||||||
|  |  | ||||||
|  | __all__ = ["Hashable", "Iterable", "Iterator", | ||||||
|  |            "Sized", "Container", "Callable", | ||||||
|  |            "Set", "MutableSet", | ||||||
|  |            "Mapping", "MutableMapping", | ||||||
|  |            "MappingView", "KeysView", "ItemsView", "ValuesView", | ||||||
|  |            "Sequence", "MutableSequence", | ||||||
|  |            "ByteString", | ||||||
|  |            ] | ||||||
|  |  | ||||||
|  | # This module has been renamed from collections.abc to _collections_abc to | ||||||
|  | # speed up interpreter startup. Some of the types such as MutableMapping are | ||||||
|  | # required early but collections module imports a lot of other modules. | ||||||
|  | # See issue #19218 | ||||||
|  | __name__ = "collections.abc" | ||||||
|  |  | ||||||
|  | # Private list of types that we want to register with the various ABCs | ||||||
|  | # so that they will pass tests like: | ||||||
|  | #       it = iter(somebytearray) | ||||||
|  | #       assert isinstance(it, Iterable) | ||||||
|  | # Note:  in other implementations, these types many not be distinct | ||||||
|  | # and they make have their own implementation specific types that | ||||||
|  | # are not included on this list. | ||||||
|  | bytes_iterator = type(iter(b'')) | ||||||
|  | bytearray_iterator = type(iter(bytearray())) | ||||||
|  | #callable_iterator = ??? | ||||||
|  | dict_keyiterator = type(iter({}.keys())) | ||||||
|  | dict_valueiterator = type(iter({}.values())) | ||||||
|  | dict_itemiterator = type(iter({}.items())) | ||||||
|  | list_iterator = type(iter([])) | ||||||
|  | list_reverseiterator = type(iter(reversed([]))) | ||||||
|  | range_iterator = type(iter(range(0))) | ||||||
|  | set_iterator = type(iter(set())) | ||||||
|  | str_iterator = type(iter("")) | ||||||
|  | tuple_iterator = type(iter(())) | ||||||
|  | zip_iterator = type(iter(zip())) | ||||||
|  | ## views ## | ||||||
|  | dict_keys = type({}.keys()) | ||||||
|  | dict_values = type({}.values()) | ||||||
|  | dict_items = type({}.items()) | ||||||
|  | ## misc ## | ||||||
|  | mappingproxy = type(type.__dict__) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | ### ONE-TRICK PONIES ### | ||||||
|  |  | ||||||
|  | class Hashable(metaclass=ABCMeta): | ||||||
|  |  | ||||||
|  |     __slots__ = () | ||||||
|  |  | ||||||
|  |     @abstractmethod | ||||||
|  |     def __hash__(self): | ||||||
|  |         return 0 | ||||||
|  |  | ||||||
|  |     @classmethod | ||||||
|  |     def __subclasshook__(cls, C): | ||||||
|  |         if cls is Hashable: | ||||||
|  |             for B in C.__mro__: | ||||||
|  |                 if "__hash__" in B.__dict__: | ||||||
|  |                     if B.__dict__["__hash__"]: | ||||||
|  |                         return True | ||||||
|  |                     break | ||||||
|  |         return NotImplemented | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class Iterable(metaclass=ABCMeta): | ||||||
|  |  | ||||||
|  |     __slots__ = () | ||||||
|  |  | ||||||
|  |     @abstractmethod | ||||||
|  |     def __iter__(self): | ||||||
|  |         while False: | ||||||
|  |             yield None | ||||||
|  |  | ||||||
|  |     @classmethod | ||||||
|  |     def __subclasshook__(cls, C): | ||||||
|  |         if cls is Iterable: | ||||||
|  |             if any("__iter__" in B.__dict__ for B in C.__mro__): | ||||||
|  |                 return True | ||||||
|  |         return NotImplemented | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class Iterator(Iterable): | ||||||
|  |  | ||||||
|  |     __slots__ = () | ||||||
|  |  | ||||||
|  |     @abstractmethod | ||||||
|  |     def __next__(self): | ||||||
|  |         'Return the next item from the iterator. When exhausted, raise StopIteration' | ||||||
|  |         raise StopIteration | ||||||
|  |  | ||||||
|  |     def __iter__(self): | ||||||
|  |         return self | ||||||
|  |  | ||||||
|  |     @classmethod | ||||||
|  |     def __subclasshook__(cls, C): | ||||||
|  |         if cls is Iterator: | ||||||
|  |             if (any("__next__" in B.__dict__ for B in C.__mro__) and | ||||||
|  |                 any("__iter__" in B.__dict__ for B in C.__mro__)): | ||||||
|  |                 return True | ||||||
|  |         return NotImplemented | ||||||
|  |  | ||||||
|  | Iterator.register(bytes_iterator) | ||||||
|  | Iterator.register(bytearray_iterator) | ||||||
|  | #Iterator.register(callable_iterator) | ||||||
|  | Iterator.register(dict_keyiterator) | ||||||
|  | Iterator.register(dict_valueiterator) | ||||||
|  | Iterator.register(dict_itemiterator) | ||||||
|  | Iterator.register(list_iterator) | ||||||
|  | Iterator.register(list_reverseiterator) | ||||||
|  | Iterator.register(range_iterator) | ||||||
|  | Iterator.register(set_iterator) | ||||||
|  | Iterator.register(str_iterator) | ||||||
|  | Iterator.register(tuple_iterator) | ||||||
|  | Iterator.register(zip_iterator) | ||||||
|  |  | ||||||
|  | class Sized(metaclass=ABCMeta): | ||||||
|  |  | ||||||
|  |     __slots__ = () | ||||||
|  |  | ||||||
|  |     @abstractmethod | ||||||
|  |     def __len__(self): | ||||||
|  |         return 0 | ||||||
|  |  | ||||||
|  |     @classmethod | ||||||
|  |     def __subclasshook__(cls, C): | ||||||
|  |         if cls is Sized: | ||||||
|  |             if any("__len__" in B.__dict__ for B in C.__mro__): | ||||||
|  |                 return True | ||||||
|  |         return NotImplemented | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class Container(metaclass=ABCMeta): | ||||||
|  |  | ||||||
|  |     __slots__ = () | ||||||
|  |  | ||||||
|  |     @abstractmethod | ||||||
|  |     def __contains__(self, x): | ||||||
|  |         return False | ||||||
|  |  | ||||||
|  |     @classmethod | ||||||
|  |     def __subclasshook__(cls, C): | ||||||
|  |         if cls is Container: | ||||||
|  |             if any("__contains__" in B.__dict__ for B in C.__mro__): | ||||||
|  |                 return True | ||||||
|  |         return NotImplemented | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class Callable(metaclass=ABCMeta): | ||||||
|  |  | ||||||
|  |     __slots__ = () | ||||||
|  |  | ||||||
|  |     @abstractmethod | ||||||
|  |     def __call__(self, *args, **kwds): | ||||||
|  |         return False | ||||||
|  |  | ||||||
|  |     @classmethod | ||||||
|  |     def __subclasshook__(cls, C): | ||||||
|  |         if cls is Callable: | ||||||
|  |             if any("__call__" in B.__dict__ for B in C.__mro__): | ||||||
|  |                 return True | ||||||
|  |         return NotImplemented | ||||||
|  |  | ||||||
|  |  | ||||||
|  | ### SETS ### | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class Set(Sized, Iterable, Container): | ||||||
|  |  | ||||||
|  |     """A set is a finite, iterable container. | ||||||
|  |  | ||||||
|  |     This class provides concrete generic implementations of all | ||||||
|  |     methods except for __contains__, __iter__ and __len__. | ||||||
|  |  | ||||||
|  |     To override the comparisons (presumably for speed, as the | ||||||
|  |     semantics are fixed), redefine __le__ and __ge__, | ||||||
|  |     then the other operations will automatically follow suit. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     __slots__ = () | ||||||
|  |  | ||||||
|  |     def __le__(self, other): | ||||||
|  |         if not isinstance(other, Set): | ||||||
|  |             return NotImplemented | ||||||
|  |         if len(self) > len(other): | ||||||
|  |             return False | ||||||
|  |         for elem in self: | ||||||
|  |             if elem not in other: | ||||||
|  |                 return False | ||||||
|  |         return True | ||||||
|  |  | ||||||
|  |     def __lt__(self, other): | ||||||
|  |         if not isinstance(other, Set): | ||||||
|  |             return NotImplemented | ||||||
|  |         return len(self) < len(other) and self.__le__(other) | ||||||
|  |  | ||||||
|  |     def __gt__(self, other): | ||||||
|  |         if not isinstance(other, Set): | ||||||
|  |             return NotImplemented | ||||||
|  |         return len(self) > len(other) and self.__ge__(other) | ||||||
|  |  | ||||||
|  |     def __ge__(self, other): | ||||||
|  |         if not isinstance(other, Set): | ||||||
|  |             return NotImplemented | ||||||
|  |         if len(self) < len(other): | ||||||
|  |             return False | ||||||
|  |         for elem in other: | ||||||
|  |             if elem not in self: | ||||||
|  |                 return False | ||||||
|  |         return True | ||||||
|  |  | ||||||
|  |     def __eq__(self, other): | ||||||
|  |         if not isinstance(other, Set): | ||||||
|  |             return NotImplemented | ||||||
|  |         return len(self) == len(other) and self.__le__(other) | ||||||
|  |  | ||||||
|  |     @classmethod | ||||||
|  |     def _from_iterable(cls, it): | ||||||
|  |         '''Construct an instance of the class from any iterable input. | ||||||
|  |  | ||||||
|  |         Must override this method if the class constructor signature | ||||||
|  |         does not accept an iterable for an input. | ||||||
|  |         ''' | ||||||
|  |         return cls(it) | ||||||
|  |  | ||||||
|  |     def __and__(self, other): | ||||||
|  |         if not isinstance(other, Iterable): | ||||||
|  |             return NotImplemented | ||||||
|  |         return self._from_iterable(value for value in other if value in self) | ||||||
|  |  | ||||||
|  |     __rand__ = __and__ | ||||||
|  |  | ||||||
|  |     def isdisjoint(self, other): | ||||||
|  |         'Return True if two sets have a null intersection.' | ||||||
|  |         for value in other: | ||||||
|  |             if value in self: | ||||||
|  |                 return False | ||||||
|  |         return True | ||||||
|  |  | ||||||
|  |     def __or__(self, other): | ||||||
|  |         if not isinstance(other, Iterable): | ||||||
|  |             return NotImplemented | ||||||
|  |         chain = (e for s in (self, other) for e in s) | ||||||
|  |         return self._from_iterable(chain) | ||||||
|  |  | ||||||
|  |     __ror__ = __or__ | ||||||
|  |  | ||||||
|  |     def __sub__(self, other): | ||||||
|  |         if not isinstance(other, Set): | ||||||
|  |             if not isinstance(other, Iterable): | ||||||
|  |                 return NotImplemented | ||||||
|  |             other = self._from_iterable(other) | ||||||
|  |         return self._from_iterable(value for value in self | ||||||
|  |                                    if value not in other) | ||||||
|  |  | ||||||
|  |     def __rsub__(self, other): | ||||||
|  |         if not isinstance(other, Set): | ||||||
|  |             if not isinstance(other, Iterable): | ||||||
|  |                 return NotImplemented | ||||||
|  |             other = self._from_iterable(other) | ||||||
|  |         return self._from_iterable(value for value in other | ||||||
|  |                                    if value not in self) | ||||||
|  |  | ||||||
|  |     def __xor__(self, other): | ||||||
|  |         if not isinstance(other, Set): | ||||||
|  |             if not isinstance(other, Iterable): | ||||||
|  |                 return NotImplemented | ||||||
|  |             other = self._from_iterable(other) | ||||||
|  |         return (self - other) | (other - self) | ||||||
|  |  | ||||||
|  |     __rxor__ = __xor__ | ||||||
|  |  | ||||||
|  |     def _hash(self): | ||||||
|  |         """Compute the hash value of a set. | ||||||
|  |  | ||||||
|  |         Note that we don't define __hash__: not all sets are hashable. | ||||||
|  |         But if you define a hashable set type, its __hash__ should | ||||||
|  |         call this function. | ||||||
|  |  | ||||||
|  |         This must be compatible __eq__. | ||||||
|  |  | ||||||
|  |         All sets ought to compare equal if they contain the same | ||||||
|  |         elements, regardless of how they are implemented, and | ||||||
|  |         regardless of the order of the elements; so there's not much | ||||||
|  |         freedom for __eq__ or __hash__.  We match the algorithm used | ||||||
|  |         by the built-in frozenset type. | ||||||
|  |         """ | ||||||
|  |         MAX = sys.maxsize | ||||||
|  |         MASK = 2 * MAX + 1 | ||||||
|  |         n = len(self) | ||||||
|  |         h = 1927868237 * (n + 1) | ||||||
|  |         h &= MASK | ||||||
|  |         for x in self: | ||||||
|  |             hx = hash(x) | ||||||
|  |             h ^= (hx ^ (hx << 16) ^ 89869747)  * 3644798167 | ||||||
|  |             h &= MASK | ||||||
|  |         h = h * 69069 + 907133923 | ||||||
|  |         h &= MASK | ||||||
|  |         if h > MAX: | ||||||
|  |             h -= MASK + 1 | ||||||
|  |         if h == -1: | ||||||
|  |             h = 590923713 | ||||||
|  |         return h | ||||||
|  |  | ||||||
|  | Set.register(frozenset) | ||||||
|  |  | ||||||
|  |  | ||||||
class MutableSet(Set):
    """A mutable set is a finite, iterable container.

    This class provides concrete generic implementations of all
    methods except for __contains__, __iter__, __len__,
    add(), and discard().

    To override the comparisons (presumably for speed, as the
    semantics are fixed), all you have to do is redefine __le__ and
    then the other operations will automatically follow suit.
    """

    __slots__ = ()

    @abstractmethod
    def add(self, value):
        """Add an element."""
        raise NotImplementedError

    @abstractmethod
    def discard(self, value):
        """Remove an element.  Do not raise an exception if absent."""
        raise NotImplementedError

    def remove(self, value):
        """Remove an element. If not a member, raise a KeyError."""
        if value not in self:
            raise KeyError(value)
        self.discard(value)

    def pop(self):
        """Return the popped value.  Raise KeyError if empty."""
        # Grab an arbitrary element through the iterator protocol;
        # an immediately-exhausted iterator means the set is empty.
        it = iter(self)
        try:
            value = next(it)
        except StopIteration:
            raise KeyError
        self.discard(value)
        return value

    def clear(self):
        """This is slow (creates N new iterators!) but effective."""
        try:
            while True:
                self.pop()
        except KeyError:
            pass

    def __ior__(self, it):
        # In-place union: add every element of the other iterable.
        for value in it:
            self.add(value)
        return self

    def __iand__(self, it):
        # In-place intersection: iterate a freshly built difference set
        # so we never mutate the collection we are iterating over.
        for value in (self - it):
            self.discard(value)
        return self

    def __ixor__(self, it):
        # In-place symmetric difference.
        if it is self:
            # x ^= x always yields the empty set.
            self.clear()
        else:
            if not isinstance(it, Set):
                # Snapshot arbitrary iterables (may be one-shot iterators)
                # before toggling membership below.
                it = self._from_iterable(it)
            for value in it:
                if value in self:
                    self.discard(value)
                else:
                    self.add(value)
        return self

    def __isub__(self, it):
        # In-place difference.
        if it is self:
            # x -= x empties the set.
            self.clear()
        else:
            for value in it:
                self.discard(value)
        return self

MutableSet.register(set)  # built-in set is a virtual subclass
|  |  | ||||||
|  |  | ||||||
|  | ### MAPPINGS ### | ||||||
|  |  | ||||||
|  |  | ||||||
class Mapping(Sized, Iterable, Container):

    __slots__ = ()

    """A Mapping is a generic container for associating key/value
    pairs.

    This class provides concrete generic implementations of all
    methods except for __getitem__, __iter__, and __len__.

    """

    @abstractmethod
    def __getitem__(self, key):
        raise KeyError

    def get(self, key, default=None):
        'D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.'
        try:
            return self[key]
        except KeyError:
            return default

    def __contains__(self, key):
        # Membership is defined in terms of __getitem__ (EAFP style).
        try:
            self[key]
        except KeyError:
            return False
        else:
            return True

    def keys(self):
        "D.keys() -> a set-like object providing a view on D's keys"
        return KeysView(self)

    def items(self):
        "D.items() -> a set-like object providing a view on D's items"
        return ItemsView(self)

    def values(self):
        "D.values() -> an object providing a view on D's values"
        return ValuesView(self)

    def __eq__(self, other):
        # Mappings compare equal iff they hold the same key/value pairs;
        # comparison against non-mappings is delegated to the other operand.
        if not isinstance(other, Mapping):
            return NotImplemented
        return dict(self.items()) == dict(other.items())

Mapping.register(mappingproxy)
|  |  | ||||||
|  |  | ||||||
class MappingView(Sized):
    """Common machinery for KeysView/ItemsView/ValuesView: hold a
    reference to the underlying mapping and delegate sizing to it."""

    def __init__(self, mapping):
        # Subclasses read self._mapping for iteration and membership.
        self._mapping = mapping

    def __len__(self):
        # A view is exactly as large as the mapping it wraps.
        return len(self._mapping)

    def __repr__(self):
        return '{0.__class__.__name__}({0._mapping!r})'.format(self)
|  |  | ||||||
|  |  | ||||||
class KeysView(MappingView, Set):
    """Set-like, read-only view over a mapping's keys."""

    @classmethod
    def _from_iterable(cls, it):
        # Results of set operations on this view are plain sets.
        return set(it)

    def __contains__(self, key):
        return key in self._mapping

    def __iter__(self):
        for key in self._mapping:
            yield key

KeysView.register(dict_keys)
|  |  | ||||||
|  |  | ||||||
class ItemsView(MappingView, Set):
    """Set-like, read-only view over a mapping's (key, value) pairs."""

    @classmethod
    def _from_iterable(cls, it):
        # Results of set operations on this view are plain sets.
        return set(it)

    def __contains__(self, item):
        key, value = item
        try:
            stored = self._mapping[key]
        except KeyError:
            return False
        return stored == value

    def __iter__(self):
        for key in self._mapping:
            yield (key, self._mapping[key])

ItemsView.register(dict_items)
|  |  | ||||||
|  |  | ||||||
class ValuesView(MappingView):
    """Read-only view over a mapping's values."""

    def __contains__(self, value):
        # Linear scan: values are not indexed anywhere.
        return any(value == self._mapping[key] for key in self._mapping)

    def __iter__(self):
        for key in self._mapping:
            yield self._mapping[key]

ValuesView.register(dict_values)
|  |  | ||||||
|  |  | ||||||
class MutableMapping(Mapping):

    __slots__ = ()

    """A MutableMapping is a generic container for associating
    key/value pairs.

    This class provides concrete generic implementations of all
    methods except for __getitem__, __setitem__, __delitem__,
    __iter__, and __len__.

    """

    @abstractmethod
    def __setitem__(self, key, value):
        raise KeyError

    @abstractmethod
    def __delitem__(self, key):
        raise KeyError

    # Private sentinel: lets pop() distinguish "no default supplied"
    # from an explicit default of None.
    __marker = object()

    def pop(self, key, default=__marker):
        '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
          If key is not found, d is returned if given, otherwise KeyError is raised.
        '''
        try:
            value = self[key]
        except KeyError:
            if default is self.__marker:
                raise
            return default
        else:
            del self[key]
            return value

    def popitem(self):
        '''D.popitem() -> (k, v), remove and return some (key, value) pair
           as a 2-tuple; but raise KeyError if D is empty.
        '''
        # An exhausted iterator means the mapping is empty.
        try:
            key = next(iter(self))
        except StopIteration:
            raise KeyError
        value = self[key]
        del self[key]
        return key, value

    def clear(self):
        'D.clear() -> None.  Remove all items from D.'
        # popitem() until the mapping signals empty via KeyError.
        try:
            while True:
                self.popitem()
        except KeyError:
            pass

    def update(*args, **kwds):
        ''' D.update([E, ]**F) -> None.  Update D from mapping/iterable E and F.
            If E present and has a .keys() method, does:     for k in E: D[k] = E[k]
            If E present and lacks .keys() method, does:     for (k, v) in E: D[k] = v
            In either case, this is followed by: for k, v in F.items(): D[k] = v
        '''
        # ``self`` is pulled out of *args manually so a caller may pass
        # a keyword argument literally named "self" through **kwds.
        if not args:
            raise TypeError("descriptor 'update' of 'MutableMapping' object "
                            "needs an argument")
        self, *args = args
        if len(args) > 1:
            raise TypeError('update expected at most 1 arguments, got %d' %
                            len(args))
        if args:
            other = args[0]
            if isinstance(other, Mapping):
                for key in other:
                    self[key] = other[key]
            elif hasattr(other, "keys"):
                # Duck-typed mapping: anything exposing .keys().
                for key in other.keys():
                    self[key] = other[key]
            else:
                # Plain iterable of (key, value) pairs.
                for key, value in other:
                    self[key] = value
        for key, value in kwds.items():
            self[key] = value

    def setdefault(self, key, default=None):
        'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D'
        try:
            return self[key]
        except KeyError:
            self[key] = default
        return default

MutableMapping.register(dict)
|  |  | ||||||
|  |  | ||||||
|  | ### SEQUENCES ### | ||||||
|  |  | ||||||
|  |  | ||||||
class Sequence(Sized, Iterable, Container):

    """All the operations on a read-only sequence.

    Concrete subclasses must override __new__ or __init__,
    __getitem__, and __len__.
    """

    __slots__ = ()

    @abstractmethod
    def __getitem__(self, index):
        raise IndexError

    def __iter__(self):
        # Probe successive indices until __getitem__ signals the end.
        pos = 0
        try:
            while True:
                yield self[pos]
                pos += 1
        except IndexError:
            return

    def __contains__(self, value):
        return any(item == value for item in self)

    def __reversed__(self):
        for pos in range(len(self) - 1, -1, -1):
            yield self[pos]

    def index(self, value):
        '''S.index(value) -> integer -- return first index of value.
           Raises ValueError if the value is not present.
        '''
        for pos, item in enumerate(self):
            if item == value:
                return pos
        raise ValueError

    def count(self, value):
        'S.count(value) -> integer -- return number of occurrences of value'
        return sum(item == value for item in self)

Sequence.register(tuple)
Sequence.register(str)
Sequence.register(range)
Sequence.register(memoryview)
|  |  | ||||||
|  |  | ||||||
class ByteString(Sequence):

    """Common ABC unifying the built-in bytes and bytearray types.

    XXX Should add all their methods.
    """

    __slots__ = ()

ByteString.register(bytes)
ByteString.register(bytearray)
|  |  | ||||||
|  |  | ||||||
class MutableSequence(Sequence):

    __slots__ = ()

    """All the operations on a read-write sequence.

    Concrete subclasses must provide __new__ or __init__,
    __getitem__, __setitem__, __delitem__, __len__, and insert().

    """

    @abstractmethod
    def __setitem__(self, index, value):
        raise IndexError

    @abstractmethod
    def __delitem__(self, index):
        raise IndexError

    @abstractmethod
    def insert(self, index, value):
        'S.insert(index, value) -- insert value before index'
        raise IndexError

    def append(self, value):
        'S.append(value) -- append value to the end of the sequence'
        self.insert(len(self), value)

    def clear(self):
        'S.clear() -> None -- remove all items from S'
        # Pop until the sequence reports empty via IndexError.
        try:
            while True:
                self.pop()
        except IndexError:
            pass

    def reverse(self):
        'S.reverse() -- reverse *IN PLACE*'
        size = len(self)
        for left in range(size // 2):
            right = size - left - 1
            self[left], self[right] = self[right], self[left]

    def extend(self, values):
        'S.extend(iterable) -- extend sequence by appending elements from the iterable'
        for item in values:
            self.append(item)

    def pop(self, index=-1):
        '''S.pop([index]) -> item -- remove and return item at index (default last).
           Raise IndexError if list is empty or index is out of range.
        '''
        item = self[index]
        del self[index]
        return item

    def remove(self, value):
        '''S.remove(value) -- remove first occurrence of value.
           Raise ValueError if the value is not present.
        '''
        del self[self.index(value)]

    def __iadd__(self, values):
        self.extend(values)
        return self

MutableSequence.register(list)
MutableSequence.register(bytearray)  # Multiply inheriting, see ByteString
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/_dummy_thread.py |  | ||||||
							
								
								
									
										155
									
								
								v1/flask/lib/python3.4/_dummy_thread.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										155
									
								
								v1/flask/lib/python3.4/_dummy_thread.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,155 @@ | |||||||
|  | """Drop-in replacement for the thread module. | ||||||
|  |  | ||||||
|  | Meant to be used as a brain-dead substitute so that threaded code does | ||||||
|  | not need to be rewritten for when the thread module is not present. | ||||||
|  |  | ||||||
|  | Suggested usage is:: | ||||||
|  |  | ||||||
|  |     try: | ||||||
|  |         import _thread | ||||||
|  |     except ImportError: | ||||||
|  |         import _dummy_thread as _thread | ||||||
|  |  | ||||||
|  | """ | ||||||
|  | # Exports only things specified by thread documentation; | ||||||
|  | # skipping obsolete synonyms allocate(), start_new(), exit_thread(). | ||||||
|  | __all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock', | ||||||
|  |            'interrupt_main', 'LockType'] | ||||||
|  |  | ||||||
|  | # A dummy value | ||||||
|  | TIMEOUT_MAX = 2**31 | ||||||
|  |  | ||||||
|  | # NOTE: this module can be imported early in the extension building process, | ||||||
|  | # and so top level imports of other modules should be avoided.  Instead, all | ||||||
|  | # imports are done when needed on a function-by-function basis.  Since threads | ||||||
|  | # are disabled, the import lock should not be an issue anyway (??). | ||||||
|  |  | ||||||
|  | error = RuntimeError | ||||||
|  |  | ||||||
def start_new_thread(function, args, kwargs={}):
    """Dummy implementation of _thread.start_new_thread().

    Compatibility is maintained by making sure that ``args`` is a
    tuple and ``kwargs`` is a dictionary.  If an exception is raised
    and it is SystemExit (which can be done by _thread.exit()) it is
    caught and nothing is done; all other exceptions are printed out
    by using traceback.print_exc().

    If the executed function calls interrupt_main the KeyboardInterrupt will be
    raised when the function returns.

    """
    # NOTE(review): exact type checks (not isinstance) — presumably to
    # mirror the real _thread module's validation; confirm before relaxing.
    if type(args) != type(tuple()):
        raise TypeError("2nd arg must be a tuple")
    if type(kwargs) != type(dict()):
        raise TypeError("3rd arg must be a dict")
    global _main
    # Mark that we are "inside a thread" so interrupt_main() defers its
    # KeyboardInterrupt instead of raising immediately.
    _main = False
    try:
        function(*args, **kwargs)
    except SystemExit:
        # _thread.exit() terminates a thread by raising SystemExit.
        pass
    except:
        # Any other exception is reported, not propagated, matching the
        # behavior of a real thread dying with a traceback.
        import traceback
        traceback.print_exc()
    _main = True
    global _interrupt
    if _interrupt:
        # interrupt_main() ran while the function executed; deliver now.
        _interrupt = False
        raise KeyboardInterrupt
|  |  | ||||||
def exit():
    """Terminate the (only) thread by raising SystemExit, mirroring
    _thread.exit()."""
    raise SystemExit()
|  |  | ||||||
def get_ident():
    """Dummy implementation of _thread.get_ident().

    Since this module should only be used when _threadmodule is not
    available, the current process is the sole thread of execution,
    so a fixed identifier can safely be returned.
    """
    ident = -1
    return ident
|  |  | ||||||
def allocate_lock():
    """Dummy implementation of _thread.allocate_lock(): hand back a
    fresh (non-blocking) dummy lock."""
    lock = LockType()
    return lock
|  |  | ||||||
def stack_size(size=None):
    """Dummy implementation of _thread.stack_size().

    Querying (size is None) reports 0; attempting to set a size is
    unsupported and raises ``error``.
    """
    if size is None:
        return 0
    raise error("setting thread stack size not supported")
|  |  | ||||||
def _set_sentinel():
    """Dummy implementation of _thread._set_sentinel(): a fresh dummy
    lock stands in for the thread-exit sentinel."""
    sentinel = LockType()
    return sentinel
|  |  | ||||||
class LockType(object):
    """Dummy stand-in for _thread.LockType.

    The lock's state lives in ``self.locked_status``, a plain boolean.
    Instances should not be pickled: an unpickled lock used with the
    real _thread module would lack atomic methods.
    """

    def __init__(self):
        self.locked_status = False

    def acquire(self, waitflag=None, timeout=-1):
        """Dummy implementation of acquire().

        A blocking call (waitflag omitted or truthy) always "succeeds":
        the status is set and True returned.  A non-blocking call only
        succeeds when the lock is currently free; otherwise it sleeps
        for ``timeout`` seconds (if positive) and reports failure.
        This keeps threading.Condition's assertions satisfied.
        """
        blocking = waitflag is None or waitflag
        if blocking:
            self.locked_status = True
            return True
        if not self.locked_status:
            self.locked_status = True
            return True
        if timeout > 0:
            import time
            time.sleep(timeout)
        return False

    __enter__ = acquire

    def __exit__(self, typ, val, tb):
        self.release()

    def release(self):
        """Release the dummy lock; raise ``error`` if it is not held."""
        # XXX Perhaps shouldn't actually bother to test?  Could lead
        #     to problems for complex, threaded code.
        if not self.locked_status:
            raise error
        self.locked_status = False
        return True

    def locked(self):
        """Report whether the lock is currently held."""
        return self.locked_status
|  |  | ||||||
# Set when interrupt_main() is called from inside a "thread".
_interrupt = False
# True whenever execution is not inside a start_new_thread() call.
_main = True

def interrupt_main():
    """Set _interrupt flag to True to have start_new_thread raise
    KeyboardInterrupt upon exiting."""
    global _interrupt
    if _main:
        # Not inside a "thread": deliver the interrupt immediately.
        raise KeyboardInterrupt
    _interrupt = True
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/_weakrefset.py |  | ||||||
							
								
								
									
										196
									
								
								v1/flask/lib/python3.4/_weakrefset.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										196
									
								
								v1/flask/lib/python3.4/_weakrefset.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,196 @@ | |||||||
# Access WeakSet through the weakref module.
# This code is separated-out because it is needed
# by abc.py to load everything else at startup.

from _weakref import ref

# Public API of this helper module.
__all__ = ['WeakSet']
|  |  | ||||||
|  |  | ||||||
class _IterationGuard:
    # Registers itself with the weak container for the duration of a
    # ``with`` block; the container is expected to delay removals while
    # any guard is active and to flush them via _commit_removals() once
    # the last guard exits.
    # This technique should be relatively thread-safe (since sets are).

    def __init__(self, weakcontainer):
        # Only a weak reference is held, to avoid a reference cycle.
        self.weakcontainer = ref(weakcontainer)

    def __enter__(self):
        container = self.weakcontainer()
        if container is not None:
            container._iterating.add(self)
        return self

    def __exit__(self, e, t, b):
        container = self.weakcontainer()
        if container is None:
            return
        active = container._iterating
        active.remove(self)
        if not active:
            container._commit_removals()
|  |  | ||||||
|  |  | ||||||
class WeakSet:
    """A set that holds weak references to its elements.

    Each element is stored as a weakref whose callback removes it from
    the underlying set when the referent is garbage-collected.
    Removals requested while iteration is in progress are buffered in
    _pending_removals and applied afterwards (see _IterationGuard).
    """

    def __init__(self, data=None):
        self.data = set()
        def _remove(item, selfref=ref(self)):
            # Weakref callback; ``item`` is the dead reference object.
            # ``selfref`` is weak so the callback itself does not keep
            # this WeakSet alive.
            self = selfref()
            if self is not None:
                if self._iterating:
                    # Iteration in progress: defer the removal.
                    self._pending_removals.append(item)
                else:
                    self.data.discard(item)
        self._remove = _remove
        # A list of keys to be removed
        self._pending_removals = []
        # Currently active _IterationGuard instances.
        self._iterating = set()
        if data is not None:
            self.update(data)

    def _commit_removals(self):
        # Flush removals deferred during iteration.
        l = self._pending_removals
        discard = self.data.discard
        while l:
            discard(l.pop())

    def __iter__(self):
        with _IterationGuard(self):
            for itemref in self.data:
                item = itemref()
                if item is not None:
                    # Caveat: the iterator will keep a strong reference to
                    # `item` until it is resumed or closed.
                    yield item

    def __len__(self):
        # Deferred removals are still inside self.data; subtract them.
        return len(self.data) - len(self._pending_removals)

    def __contains__(self, item):
        try:
            wr = ref(item)
        except TypeError:
            # Objects that cannot be weakly referenced are never members.
            return False
        return wr in self.data

    def __reduce__(self):
        # Pickle as (class, (list of live elements,), instance dict).
        return (self.__class__, (list(self),),
                getattr(self, '__dict__', None))

    def add(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.add(ref(item, self._remove))

    def clear(self):
        if self._pending_removals:
            self._commit_removals()
        self.data.clear()

    def copy(self):
        return self.__class__(self)

    def pop(self):
        if self._pending_removals:
            self._commit_removals()
        while True:
            try:
                itemref = self.data.pop()
            except KeyError:
                raise KeyError('pop from empty WeakSet')
            item = itemref()
            if item is not None:
                # Skip refs whose referent died but whose callback has
                # not run yet.
                return item

    def remove(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.remove(ref(item))

    def discard(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.discard(ref(item))

    def update(self, other):
        if self._pending_removals:
            self._commit_removals()
        for element in other:
            self.add(element)

    def __ior__(self, other):
        self.update(other)
        return self

    def difference(self, other):
        newset = self.copy()
        newset.difference_update(other)
        return newset
    __sub__ = difference

    def difference_update(self, other):
        self.__isub__(other)
    def __isub__(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            self.data.clear()
        else:
            self.data.difference_update(ref(item) for item in other)
        return self

    def intersection(self, other):
        return self.__class__(item for item in other if item in self)
    __and__ = intersection

    def intersection_update(self, other):
        self.__iand__(other)
    def __iand__(self, other):
        if self._pending_removals:
            self._commit_removals()
        self.data.intersection_update(ref(item) for item in other)
        return self

    def issubset(self, other):
        return self.data.issubset(ref(item) for item in other)
    __le__ = issubset

    def __lt__(self, other):
        return self.data < set(ref(item) for item in other)

    def issuperset(self, other):
        return self.data.issuperset(ref(item) for item in other)
    __ge__ = issuperset

    def __gt__(self, other):
        return self.data > set(ref(item) for item in other)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.data == set(ref(item) for item in other)

    def symmetric_difference(self, other):
        newset = self.copy()
        newset.symmetric_difference_update(other)
        return newset
    __xor__ = symmetric_difference

    def symmetric_difference_update(self, other):
        self.__ixor__(other)
    def __ixor__(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            self.data.clear()
        else:
            # New refs get the removal callback so added elements are
            # also cleaned up when they die.
            self.data.symmetric_difference_update(ref(item, self._remove) for item in other)
        return self

    def union(self, other):
        return self.__class__(e for s in (self, other) for e in s)
    __or__ = union

    def isdisjoint(self, other):
        return len(self.intersection(other)) == 0
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/abc.py |  | ||||||
							
								
								
									
										248
									
								
								v1/flask/lib/python3.4/abc.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										248
									
								
								v1/flask/lib/python3.4/abc.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,248 @@ | |||||||
|  | # Copyright 2007 Google, Inc. All Rights Reserved. | ||||||
|  | # Licensed to PSF under a Contributor Agreement. | ||||||
|  |  | ||||||
|  | """Abstract Base Classes (ABCs) according to PEP 3119.""" | ||||||
|  |  | ||||||
|  | from _weakrefset import WeakSet | ||||||
|  |  | ||||||
|  |  | ||||||
def abstractmethod(funcobj):
    """Mark *funcobj* as an abstract method and return it unchanged.

    Requires that the metaclass is ABCMeta or derived from it.  A
    class whose metaclass derives from ABCMeta cannot be instantiated
    until every abstract method has been overridden; the abstract
    methods themselves remain callable through the usual 'super'
    mechanisms.

    Usage:

        class C(metaclass=ABCMeta):
            @abstractmethod
            def my_abstract_method(self, ...):
                ...
    """
    # ABCMeta.__new__ collects names whose value carries this flag.
    setattr(funcobj, '__isabstractmethod__', True)
    return funcobj
|  |  | ||||||
|  |  | ||||||
class abstractclassmethod(classmethod):
    """
    A decorator indicating abstract classmethods.

    Similar to abstractmethod.

    Usage:

        class C(metaclass=ABCMeta):
            @abstractclassmethod
            def my_abstract_classmethod(cls, ...):
                ...

    'abstractclassmethod' is deprecated. Use 'classmethod' with
    'abstractmethod' instead.
    """

    # Flag read by ABCMeta when collecting abstract names.
    __isabstractmethod__ = True

    def __init__(self, callable):
        # Also flag the wrapped function so the abstractness survives
        # whether ABCMeta inspects the descriptor or the function itself.
        callable.__isabstractmethod__ = True
        super().__init__(callable)
|  |  | ||||||
|  |  | ||||||
class abstractstaticmethod(staticmethod):
    """
    A decorator indicating abstract staticmethods.

    Similar to abstractmethod.

    Usage:

        class C(metaclass=ABCMeta):
            @abstractstaticmethod
            def my_abstract_staticmethod(...):
                ...

    'abstractstaticmethod' is deprecated. Use 'staticmethod' with
    'abstractmethod' instead.
    """

    # Flag read by ABCMeta when collecting abstract names.
    __isabstractmethod__ = True

    def __init__(self, callable):
        # Also flag the wrapped function so the abstractness survives
        # whether ABCMeta inspects the descriptor or the function itself.
        callable.__isabstractmethod__ = True
        super().__init__(callable)
|  |  | ||||||
|  |  | ||||||
class abstractproperty(property):
    """
    A decorator indicating abstract properties.

    Requires that the metaclass is ABCMeta or derived from it.  A
    class that has a metaclass derived from ABCMeta cannot be
    instantiated unless all of its abstract properties are overridden.
    The abstract properties can be called using any of the normal
    'super' call mechanisms.

    Usage:

        class C(metaclass=ABCMeta):
            @abstractproperty
            def my_abstract_property(self):
                ...

    This defines a read-only property; you can also define a read-write
    abstract property using the 'long' form of property declaration:

        class C(metaclass=ABCMeta):
            def getx(self): ...
            def setx(self, value): ...
            x = abstractproperty(getx, setx)

    'abstractproperty' is deprecated. Use 'property' with 'abstractmethod'
    instead.
    """

    # Flagging the descriptor itself is enough here; property accessors are
    # looked up on the descriptor, not on the wrapped functions.
    __isabstractmethod__ = True
|  |  | ||||||
|  |  | ||||||
class ABCMeta(type):

    """Metaclass for defining Abstract Base Classes (ABCs).

    Use this metaclass to create an ABC.  An ABC can be subclassed
    directly, and then acts as a mix-in class.  You can also register
    unrelated concrete classes (even built-in classes) and unrelated
    ABCs as 'virtual subclasses' -- these and their descendants will
    be considered subclasses of the registering ABC by the built-in
    issubclass() function, but the registering ABC won't show up in
    their MRO (Method Resolution Order) nor will method
    implementations defined by the registering ABC be callable (not
    even via super()).

    """

    # A global counter that is incremented each time a class is
    # registered as a virtual subclass of anything.  It forces the
    # negative cache to be cleared before its next use.
    # Note: this counter is private. Use `abc.get_cache_token()` for
    #       external code.
    _abc_invalidation_counter = 0

    def __new__(mcls, name, bases, namespace):
        """Create the class, then record its abstract-method set and set up
        the per-class registration/caching bookkeeping."""
        cls = super().__new__(mcls, name, bases, namespace)
        # Compute set of abstract method names: names defined abstract here...
        abstracts = {name
                     for name, value in namespace.items()
                     if getattr(value, "__isabstractmethod__", False)}
        # ...plus inherited abstract names that this class did not override
        # with a concrete implementation.
        for base in bases:
            for name in getattr(base, "__abstractmethods__", set()):
                value = getattr(cls, name, None)
                if getattr(value, "__isabstractmethod__", False):
                    abstracts.add(name)
        cls.__abstractmethods__ = frozenset(abstracts)
        # Set up inheritance registry.  WeakSets so registration/caching
        # never keeps a registered class alive.
        cls._abc_registry = WeakSet()
        cls._abc_cache = WeakSet()
        cls._abc_negative_cache = WeakSet()
        cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        return cls

    def register(cls, subclass):
        """Register a virtual subclass of an ABC.

        Returns the subclass, to allow usage as a class decorator.
        """
        if not isinstance(subclass, type):
            raise TypeError("Can only register classes")
        if issubclass(subclass, cls):
            return subclass  # Already a subclass
        # Subtle: test for cycles *after* testing for "already a subclass";
        # this means we allow X.register(X) and interpret it as a no-op.
        if issubclass(cls, subclass):
            # This would create a cycle, which is bad for the algorithm below
            raise RuntimeError("Refusing to create an inheritance cycle")
        cls._abc_registry.add(subclass)
        ABCMeta._abc_invalidation_counter += 1  # Invalidate negative cache
        return subclass

    def _dump_registry(cls, file=None):
        """Debug helper to print the ABC registry."""
        print("Class: %s.%s" % (cls.__module__, cls.__name__), file=file)
        print("Inv.counter: %s" % ABCMeta._abc_invalidation_counter, file=file)
        for name in sorted(cls.__dict__.keys()):
            if name.startswith("_abc_"):
                value = getattr(cls, name)
                print("%s: %r" % (name, value), file=file)

    def __instancecheck__(cls, instance):
        """Override for isinstance(instance, cls)."""
        # Inline the cache checking
        subclass = instance.__class__
        if subclass in cls._abc_cache:
            return True
        subtype = type(instance)
        if subtype is subclass:
            # Negative cache is only trusted while its version matches the
            # global counter (i.e. no register() calls since it was filled).
            if (cls._abc_negative_cache_version ==
                ABCMeta._abc_invalidation_counter and
                subclass in cls._abc_negative_cache):
                return False
            # Fall back to the subclass check.
            return cls.__subclasscheck__(subclass)
        # __class__ and type() disagree (e.g. a proxy object): accept if
        # either one passes the subclass check.
        return any(cls.__subclasscheck__(c) for c in {subclass, subtype})

    def __subclasscheck__(cls, subclass):
        """Override for issubclass(subclass, cls)."""
        # Check cache
        if subclass in cls._abc_cache:
            return True
        # Check negative cache; may have to invalidate
        if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
            # Invalidate the negative cache
            cls._abc_negative_cache = WeakSet()
            cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        elif subclass in cls._abc_negative_cache:
            return False
        # Check the subclass hook: a class-defined __subclasshook__ gets the
        # first word and its verdict is cached.
        ok = cls.__subclasshook__(subclass)
        if ok is not NotImplemented:
            assert isinstance(ok, bool)
            if ok:
                cls._abc_cache.add(subclass)
            else:
                cls._abc_negative_cache.add(subclass)
            return ok
        # Check if it's a direct subclass
        if cls in getattr(subclass, '__mro__', ()):
            cls._abc_cache.add(subclass)
            return True
        # Check if it's a subclass of a registered class (recursive)
        for rcls in cls._abc_registry:
            if issubclass(subclass, rcls):
                cls._abc_cache.add(subclass)
                return True
        # Check if it's a subclass of a subclass (recursive)
        for scls in cls.__subclasses__():
            if issubclass(subclass, scls):
                cls._abc_cache.add(subclass)
                return True
        # No dice; update negative cache
        cls._abc_negative_cache.add(subclass)
        return False
|  |  | ||||||
|  |  | ||||||
class ABC(metaclass=ABCMeta):
    """Helper class that provides a standard way to create an ABC using
    inheritance.
    """
    # Subclassing ABC is equivalent to writing metaclass=ABCMeta directly.
    pass
|  |  | ||||||
|  |  | ||||||
def get_cache_token():
    """Returns the current ABC cache token.

    The token is an opaque object (supporting equality testing) identifying the
    current version of the ABC cache for virtual subclasses. The token changes
    with every call to ``register()`` on any ABC.
    """
    # The counter is bumped by ABCMeta.register(); exposing it lets callers
    # (e.g. functools.singledispatch) invalidate their own caches.
    return ABCMeta._abc_invalidation_counter
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/base64.py |  | ||||||
							
								
								
									
										602
									
								
								v1/flask/lib/python3.4/base64.py
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										602
									
								
								v1/flask/lib/python3.4/base64.py
									
									
									
									
									
										Executable file
									
								
							| @@ -0,0 +1,602 @@ | |||||||
|  | #! /usr/bin/env python3 | ||||||
|  |  | ||||||
|  | """Base16, Base32, Base64 (RFC 3548), Base85 and Ascii85 data encodings""" | ||||||
|  |  | ||||||
|  | # Modified 04-Oct-1995 by Jack Jansen to use binascii module | ||||||
|  | # Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support | ||||||
|  | # Modified 22-May-2007 by Guido van Rossum to use bytes everywhere | ||||||
|  |  | ||||||
|  | import re | ||||||
|  | import struct | ||||||
|  | import binascii | ||||||
|  |  | ||||||
|  |  | ||||||
# Public API: legacy RFC 1521 helpers plus the generalized RFC 3548 codecs
# and the Base85/Ascii85 encodings.
__all__ = [
    # Legacy interface exports traditional RFC 1521 Base64 encodings
    'encode', 'decode', 'encodebytes', 'decodebytes',
    # Generalized interface for other encodings
    'b64encode', 'b64decode', 'b32encode', 'b32decode',
    'b16encode', 'b16decode',
    # Base85 and Ascii85 encodings
    'b85encode', 'b85decode', 'a85encode', 'a85decode',
    # Standard Base64 encoding
    'standard_b64encode', 'standard_b64decode',
    # Some common Base64 alternatives.  As referenced by RFC 3458, see thread
    # starting at:
    #
    # http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html
    'urlsafe_b64encode', 'urlsafe_b64decode',
    ]


bytes_types = (bytes, bytearray)  # Types acceptable as binary data
|  |  | ||||||
def _bytes_from_decode_data(s):
    """Coerce *s* (bytes-like or pure-ASCII str) to bytes for decoding.

    Raises ValueError for non-ASCII strings and TypeError for objects
    that do not support the buffer protocol.
    """
    # Fast path: already binary data.
    if isinstance(s, bytes_types):
        return s
    if isinstance(s, str):
        try:
            return s.encode('ascii')
        except UnicodeEncodeError:
            raise ValueError('string argument should contain only ASCII characters')
    # Last resort: anything exposing the buffer protocol.
    try:
        return memoryview(s).tobytes()
    except TypeError:
        raise TypeError("argument should be a bytes-like object or ASCII "
                        "string, not %r" % s.__class__.__name__) from None
|  |  | ||||||
|  |  | ||||||
|  | # Base64 encoding/decoding uses binascii | ||||||
|  |  | ||||||
def b64encode(s, altchars=None):
    """Encode a byte string using Base64 and return the encoded bytes.

    s is the byte string to encode.  Optional altchars must be a byte
    string of length 2 which specifies an alternative alphabet for the
    '+' and '/' characters.  This allows an application to
    e.g. generate url or filesystem safe Base64 strings.
    """
    # b2a_base64 appends a newline; drop it.
    encoded = binascii.b2a_base64(s)[:-1]
    if altchars is None:
        return encoded
    assert len(altchars) == 2, repr(altchars)
    table = bytes.maketrans(b'+/', altchars)
    return encoded.translate(table)
|  |  | ||||||
|  |  | ||||||
def b64decode(s, altchars=None, validate=False):
    """Decode a Base64 encoded byte string and return the decoded bytes.

    s is the byte string to decode.  Optional altchars must be a
    string of length 2 which specifies the alternative alphabet used
    instead of the '+' and '/' characters.

    A binascii.Error is raised if s is incorrectly padded.

    If validate is False (the default), non-base64-alphabet characters are
    discarded prior to the padding check.  If validate is True,
    non-base64-alphabet characters in the input result in a binascii.Error.
    """
    data = _bytes_from_decode_data(s)
    if altchars is not None:
        # Normalize the alternative alphabet back to the standard '+/'.
        alt = _bytes_from_decode_data(altchars)
        assert len(alt) == 2, repr(alt)
        data = data.translate(bytes.maketrans(alt, b'+/'))
    if validate:
        if not re.match(b'^[A-Za-z0-9+/]*={0,2}$', data):
            raise binascii.Error('Non-base64 digit found')
    return binascii.a2b_base64(data)
|  |  | ||||||
|  |  | ||||||
def standard_b64encode(s):
    """Encode a byte string using the standard Base64 alphabet.

    s is the byte string to encode.  The encoded byte string is returned.
    """
    # Equivalent to b64encode with no alternative characters.
    return b64encode(s)
|  |  | ||||||
def standard_b64decode(s):
    """Decode a byte string encoded with the standard Base64 alphabet.

    s is the byte string to decode.  The decoded byte string is
    returned.  binascii.Error is raised if the input is incorrectly
    padded or if there are non-alphabet characters present in the
    input.
    """
    # Equivalent to b64decode with no alternative characters.
    return b64decode(s)
|  |  | ||||||
|  |  | ||||||
# Translation tables mapping between the standard and the URL/filesystem-safe
# Base64 alphabets ('+/' <-> '-_').
_urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_')
_urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/')
|  |  | ||||||
def urlsafe_b64encode(s):
    """Encode a byte string using a url-safe Base64 alphabet.

    s is the byte string to encode.  The encoded byte string is
    returned.  The alphabet uses '-' instead of '+' and '_' instead of
    '/'.
    """
    # Standard encoding followed by a one-pass alphabet substitution.
    return b64encode(s).translate(_urlsafe_encode_translation)
|  |  | ||||||
def urlsafe_b64decode(s):
    """Decode a byte string encoded with the url-safe Base64 alphabet.

    s is the byte string to decode.  The decoded byte string is
    returned.  binascii.Error is raised if the input is incorrectly
    padded or if there are non-alphabet characters present in the
    input.

    The alphabet uses '-' instead of '+' and '_' instead of '/'.
    """
    s = _bytes_from_decode_data(s)
    # Map the url-safe alphabet back to the standard one before decoding.
    s = s.translate(_urlsafe_decode_translation)
    return b64decode(s)
|  |  | ||||||
|  |  | ||||||
|  |  | ||||||
# Base32 encoding/decoding must be done in Python (binascii has no helper).
_b32alphabet = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'
_b32tab2 = None  # lazily built: all two-character digit combinations
_b32rev = None   # lazily built: reverse lookup, alphabet byte -> 5-bit value
|  |  | ||||||
def b32encode(s):
    """Encode a byte string using Base32.

    s is the byte string to encode.  The encoded byte string is returned.
    """
    global _b32tab2
    # Delay the initialization of the table to not waste memory
    # if the function is never called
    if _b32tab2 is None:
        b32tab = [bytes((i,)) for i in _b32alphabet]
        # Pairwise table: two output characters per lookup halves the number
        # of indexing operations in the hot loop below.
        _b32tab2 = [a + b for a in b32tab for b in b32tab]
        b32tab = None

    if not isinstance(s, bytes_types):
        s = memoryview(s).tobytes()
    leftover = len(s) % 5
    # Pad the last quantum with zero bits if necessary
    if leftover:
        s = s + bytes(5 - leftover)  # Don't use += !
    encoded = bytearray()
    from_bytes = int.from_bytes
    b32tab2 = _b32tab2
    # Each 5-byte quantum (40 bits) maps to eight 5-bit Base32 digits,
    # emitted as four 2-character table lookups.
    for i in range(0, len(s), 5):
        c = from_bytes(s[i: i + 5], 'big')
        encoded += (b32tab2[c >> 30] +           # bits 1 - 10
                    b32tab2[(c >> 20) & 0x3ff] + # bits 11 - 20
                    b32tab2[(c >> 10) & 0x3ff] + # bits 21 - 30
                    b32tab2[c & 0x3ff]           # bits 31 - 40
                   )
    # Adjust for any leftover partial quanta: overwrite the digits that
    # encode only padding zeros with '=' characters (RFC 3548 section 5).
    if leftover == 1:
        encoded[-6:] = b'======'
    elif leftover == 2:
        encoded[-4:] = b'===='
    elif leftover == 3:
        encoded[-3:] = b'==='
    elif leftover == 4:
        encoded[-1:] = b'='
    return bytes(encoded)
|  |  | ||||||
def b32decode(s, casefold=False, map01=None):
    """Decode a Base32 encoded byte string.

    s is the byte string to decode.  Optional casefold is a flag
    specifying whether a lowercase alphabet is acceptable as input.
    For security purposes, the default is False.

    RFC 3548 allows for optional mapping of the digit 0 (zero) to the
    letter O (oh), and for optional mapping of the digit 1 (one) to
    either the letter I (eye) or letter L (el).  The optional argument
    map01 when not None, specifies which letter the digit 1 should be
    mapped to (when map01 is not None, the digit 0 is always mapped to
    the letter O).  For security purposes the default is None, so that
    0 and 1 are not allowed in the input.

    The decoded byte string is returned.  binascii.Error is raised if
    the input is incorrectly padded or if there are non-alphabet
    characters present in the input.
    """
    global _b32rev
    # Delay the initialization of the table to not waste memory
    # if the function is never called
    if _b32rev is None:
        _b32rev = {v: k for k, v in enumerate(_b32alphabet)}
    s = _bytes_from_decode_data(s)
    if len(s) % 8:
        raise binascii.Error('Incorrect padding')
    # Handle section 2.4 zero and one mapping.  The flag map01 will be either
    # False, or the character to map the digit 1 (one) to.  It should be
    # either L (el) or I (eye).
    if map01 is not None:
        map01 = _bytes_from_decode_data(map01)
        assert len(map01) == 1, repr(map01)
        s = s.translate(bytes.maketrans(b'01', b'O' + map01))
    if casefold:
        s = s.upper()
    # Strip off pad characters from the right.  We need to count the pad
    # characters because this will tell us how many null bytes to remove from
    # the end of the decoded string.
    l = len(s)
    s = s.rstrip(b'=')
    padchars = l - len(s)
    # Validate the padding up front: only 0, 1, 3, 4 or 6 '=' characters can
    # terminate a well-formed Base32 stream.  Checking here (rather than after
    # the decode loop, as before) also fixes a crash on inputs consisting
    # solely of padding (e.g. b'========'): the loop below would never run,
    # and referencing 'acc' afterwards raised UnboundLocalError instead of
    # the documented binascii.Error.
    if padchars not in {0, 1, 3, 4, 6}:
        raise binascii.Error('Incorrect padding')
    # Now decode the full quanta
    decoded = bytearray()
    b32rev = _b32rev
    for i in range(0, len(s), 8):
        quanta = s[i: i + 8]
        acc = 0
        try:
            for c in quanta:
                acc = (acc << 5) + b32rev[c]
        except KeyError:
            raise binascii.Error('Non-base32 digit found') from None
        decoded += acc.to_bytes(5, 'big')
    # Process the last, partial quanta.  Each pad character stands for 5
    # missing input bits, so only (43 - 5*padchars) // 8 of the final five
    # bytes are fully specified; discard the rest.
    if padchars:
        acc <<= 5 * padchars
        last = acc.to_bytes(5, 'big')
        leftover = (43 - 5 * padchars) // 8
        decoded[-5:] = last[:leftover]
    return bytes(decoded)
|  |  | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns | ||||||
|  | # lowercase.  The RFC also recommends against accepting input case | ||||||
|  | # insensitively. | ||||||
def b16encode(s):
    """Encode a byte string using Base16 (uppercase hexadecimal).

    s is the byte string to encode.  The encoded byte string is returned.
    """
    # hexlify() emits lowercase digits; RFC 3548 specifies uppercase.
    hexed = binascii.hexlify(s)
    return hexed.upper()
|  |  | ||||||
|  |  | ||||||
def b16decode(s, casefold=False):
    """Decode a Base16 encoded byte string.

    s is the byte string to decode.  Optional casefold is a flag
    specifying whether a lowercase alphabet is acceptable as input.
    For security purposes, the default is False.

    The decoded byte string is returned.  binascii.Error is raised if
    s were incorrectly padded or if there are non-alphabet characters
    present in the string.
    """
    s = _bytes_from_decode_data(s)
    # Uppercase first (when allowed) so validation below only needs to
    # accept the canonical uppercase alphabet.
    if casefold:
        s = s.upper()
    if re.search(b'[^0-9A-F]', s):
        raise binascii.Error('Non-base16 digit found')
    return binascii.unhexlify(s)
|  |  | ||||||
|  | # | ||||||
|  | # Ascii85 encoding/decoding | ||||||
|  | # | ||||||
|  |  | ||||||
_a85chars = None   # lazily built: single-byte Ascii85 digits (chr 33..117)
_a85chars2 = None  # lazily built: all two-digit combinations
_A85START = b"<~"  # Adobe-style framing prefix
_A85END = b"~>"    # Adobe-style framing suffix
|  |  | ||||||
def _85encode(b, chars, chars2, pad=False, foldnuls=False, foldspaces=False):
    # Helper function for a85encode and b85encode.  Encodes *b* in base 85
    # using the digit tables *chars* (single digits) and *chars2*
    # (precomputed digit pairs, so each 32-bit word needs 3 lookups not 5).
    if not isinstance(b, bytes_types):
        b = memoryview(b).tobytes()

    # Zero-pad the input to a multiple of 4 bytes; each 32-bit big-endian
    # word becomes five base-85 digits.
    padding = (-len(b)) % 4
    if padding:
        b = b + b'\0' * padding
    words = struct.Struct('!%dI' % (len(b) // 4)).unpack(b)

    # 614125 == 85**3, 7225 == 85**2: peel digits off the word high-to-low.
    chunks = [b'z' if foldnuls and not word else
              b'y' if foldspaces and word == 0x20202020 else
              (chars2[word // 614125] +
               chars2[word // 85 % 7225] +
               chars[word % 85])
              for word in words]

    if padding and not pad:
        # Undo the zero-padding: a folded 'z' must first be expanded back to
        # five explicit digits, then one output digit is dropped per pad byte.
        if chunks[-1] == b'z':
            chunks[-1] = chars[0] * 5
        chunks[-1] = chunks[-1][:-padding]

    return b''.join(chunks)
|  |  | ||||||
def a85encode(b, *, foldspaces=False, wrapcol=0, pad=False, adobe=False):
    """Encode a byte string using Ascii85.

    b is the byte string to encode. The encoded byte string is returned.

    foldspaces is an optional flag that uses the special short sequence 'y'
    instead of 4 consecutive spaces (ASCII 0x20) as supported by 'btoa'. This
    feature is not supported by the "standard" Adobe encoding.

    wrapcol controls whether the output should have newline ('\\n') characters
    added to it. If this is non-zero, each output line will be at most this
    many characters long.

    pad controls whether the input string is padded to a multiple of 4 before
    encoding. Note that the btoa implementation always pads.

    adobe controls whether the encoded byte sequence is framed with <~ and ~>,
    which is used by the Adobe implementation.
    """
    global _a85chars, _a85chars2
    # Delay the initialization of tables to not waste memory
    # if the function is never called
    if _a85chars is None:
        _a85chars = [bytes((i,)) for i in range(33, 118)]
        _a85chars2 = [(a + b) for a in _a85chars for b in _a85chars]

    result = _85encode(b, _a85chars, _a85chars2, pad, True, foldspaces)

    if adobe:
        result = _A85START + result
    if wrapcol:
        # The Adobe frame characters count toward line length, so a wrap
        # column below the frame width is bumped up to fit it.
        wrapcol = max(2 if adobe else 1, wrapcol)
        chunks = [result[i: i + wrapcol]
                  for i in range(0, len(result), wrapcol)]
        if adobe:
            # Ensure the closing '~>' appended below fits on the last line.
            if len(chunks[-1]) + 2 > wrapcol:
                chunks.append(b'')
        result = b'\n'.join(chunks)
    if adobe:
        result += _A85END

    return result
|  |  | ||||||
def a85decode(b, *, foldspaces=False, adobe=False, ignorechars=b' \t\n\r\v'):
    """Decode an Ascii85 encoded byte string.

    s is the byte string to decode.

    foldspaces is a flag that specifies whether the 'y' short sequence should be
    accepted as shorthand for 4 consecutive spaces (ASCII 0x20). This feature is
    not supported by the "standard" Adobe encoding.

    adobe controls whether the input sequence is in Adobe Ascii85 format (i.e.
    is framed with <~ and ~>).

    ignorechars should be a byte string containing characters to ignore from the
    input. This should only contain whitespace characters, and by default
    contains all whitespace characters in ASCII.
    """
    b = _bytes_from_decode_data(b)
    if adobe:
        if not (b.startswith(_A85START) and b.endswith(_A85END)):
            raise ValueError("Ascii85 encoded byte sequences must be bracketed "
                             "by {!r} and {!r}".format(_A85START, _A85END))
        b = b[2:-2] # Strip off start/end markers
    #
    # We have to go through this stepwise, so as to ignore spaces and handle
    # special short sequences
    #
    # Bind hot-loop helpers to locals so the byte-at-a-time loop below
    # avoids repeated attribute lookups.
    packI = struct.Struct('!I').pack
    decoded = []
    decoded_append = decoded.append
    curr = []
    curr_append = curr.append
    curr_clear = curr.clear
    # Append four 'u' digits (the highest Ascii85 digit) so a trailing
    # partial group still forms a complete 5-tuple; the surplus output
    # bytes are stripped after the loop via `padding`.
    for x in b + b'u' * 4:
        if b'!'[0] <= x <= b'u'[0]:
            curr_append(x)
            if len(curr) == 5:
                # Fold the five base-85 digits into one 32-bit word.
                acc = 0
                for x in curr:
                    acc = 85 * acc + (x - 33)
                try:
                    decoded_append(packI(acc))
                except struct.error:
                    # acc > 0xFFFFFFFF: the 5-tuple encodes more than 32 bits.
                    raise ValueError('Ascii85 overflow') from None
                curr_clear()
        elif x == b'z'[0]:
            # 'z' is shorthand for four NUL bytes, valid only between groups.
            if curr:
                raise ValueError('z inside Ascii85 5-tuple')
            decoded_append(b'\0\0\0\0')
        elif foldspaces and x == b'y'[0]:
            if curr:
                raise ValueError('y inside Ascii85 5-tuple')
            decoded_append(b'\x20\x20\x20\x20')
        elif x in ignorechars:
            # Skip whitespace
            continue
        else:
            raise ValueError('Non-Ascii85 digit found: %c' % x)

    result = b''.join(decoded)
    # Drop the output bytes that were produced by the 'u' padding digits.
    padding = 4 - len(curr)
    if padding:
        # Throw away the extra padding
        result = result[:-padding]
    return result
|  |  | ||||||
# The following code is originally taken (with permission) from Mercurial

# The 85-character alphabet used by this base85 variant (digits, letters,
# then selected punctuation).
_b85alphabet = (b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                b"abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~")
# Encode/decode lookup tables, built lazily on first use by
# b85encode()/b85decode() so importing the module stays cheap.
_b85chars = None
_b85chars2 = None
_b85dec = None
|  |  | ||||||
def b85encode(b, pad=False):
    """Encode an ASCII-encoded byte array in base85 format.

    If pad is true, the input is padded with "\\0" so its length is a multiple of
    4 characters before encoding.
    """
    global _b85chars, _b85chars2
    # Build the 1- and 2-character translation tables lazily so that
    # merely importing the module costs no table memory.
    if _b85chars is None:
        _b85chars = [bytes((code,)) for code in _b85alphabet]
        _b85chars2 = [first + second
                      for first in _b85chars
                      for second in _b85chars]
    return _85encode(b, _b85chars, _b85chars2, pad)
|  |  | ||||||
def b85decode(b):
    """Decode base85-encoded byte array"""
    global _b85dec
    # Delay the initialization of tables to not waste memory
    # if the function is never called
    if _b85dec is None:
        # Map each alphabet byte to its digit value; every other byte
        # stays None so invalid input is detectable below.
        _b85dec = [None] * 256
        for i, c in enumerate(_b85alphabet):
            _b85dec[c] = i

    b = _bytes_from_decode_data(b)
    # Pad short trailing groups with '~' (the last, i.e. highest, digit of
    # _b85alphabet); the corresponding surplus bytes are stripped at the end.
    padding = (-len(b)) % 5
    b = b + b'~' * padding
    out = []
    packI = struct.Struct('!I').pack
    for i in range(0, len(b), 5):
        chunk = b[i:i + 5]
        acc = 0
        try:
            for c in chunk:
                # _b85dec[c] is None for non-alphabet bytes, so this
                # raises TypeError, handled below for a precise message.
                acc = acc * 85 + _b85dec[c]
        except TypeError:
            # Re-scan the chunk to report the exact offending position.
            for j, c in enumerate(chunk):
                if _b85dec[c] is None:
                    raise ValueError('bad base85 character at position %d'
                                    % (i + j)) from None
            raise
        try:
            out.append(packI(acc))
        except struct.error:
            # acc does not fit in 32 bits: the 5-digit group is invalid.
            raise ValueError('base85 overflow in hunk starting at byte %d'
                             % i) from None

    result = b''.join(out)
    if padding:
        result = result[:-padding]
    return result
|  |  | ||||||
|  | # Legacy interface.  This code could be cleaned up since I don't believe | ||||||
|  | # binascii has any line length limitations.  It just doesn't seem worth it | ||||||
|  | # though.  The files should be opened in binary mode. | ||||||
|  |  | ||||||
MAXLINESIZE = 76 # Excluding the CRLF
# 3 raw bytes become 4 base64 characters, so a 76-char line carries 57 bytes.
MAXBINSIZE = (MAXLINESIZE//4)*3
|  |  | ||||||
def encode(input, output):
    """Encode a file; input and output are binary files."""
    while True:
        chunk = input.read(MAXBINSIZE)
        if not chunk:
            break
        # Top the chunk up to a full MAXBINSIZE so every emitted line
        # (except possibly the last) carries the maximum payload, even
        # when the source returns short reads.
        while len(chunk) < MAXBINSIZE:
            more = input.read(MAXBINSIZE - len(chunk))
            if not more:
                break
            chunk += more
        output.write(binascii.b2a_base64(chunk))
|  |  | ||||||
|  |  | ||||||
def decode(input, output):
    """Decode a file; input and output are binary files."""
    # readline() returns b'' at EOF, which terminates the iterator.
    for line in iter(input.readline, b''):
        output.write(binascii.a2b_base64(line))
|  |  | ||||||
def _input_type_check(s):
    """Raise TypeError unless s is a 1-D buffer of single-byte elements."""
    try:
        view = memoryview(s)
    except TypeError as err:
        raise TypeError("expected bytes-like object, not %s"
                        % s.__class__.__name__) from err
    if view.format not in ('c', 'b', 'B'):
        raise TypeError("expected single byte elements, not %r from %s" %
                                          (view.format, s.__class__.__name__))
    if view.ndim != 1:
        raise TypeError("expected 1-D data, not %d-D data from %s" %
                                          (view.ndim, s.__class__.__name__))
|  |  | ||||||
|  |  | ||||||
def encodebytes(s):
    """Encode a bytestring into a bytestring containing multiple lines
    of base-64 data."""
    _input_type_check(s)
    # Slice the input into MAXBINSIZE-byte chunks; each chunk encodes to
    # one newline-terminated base64 line.
    return b"".join(binascii.b2a_base64(s[pos:pos + MAXBINSIZE])
                    for pos in range(0, len(s), MAXBINSIZE))
|  |  | ||||||
def encodestring(s):
    """Legacy alias of encodebytes()."""
    import warnings
    # stacklevel 2 attributes the warning to the caller, not this wrapper.
    warnings.warn("encodestring() is a deprecated alias, use encodebytes()",
                  DeprecationWarning, 2)
    return encodebytes(s)
|  |  | ||||||
|  |  | ||||||
def decodebytes(s):
    """Decode a bytestring of base-64 data into a bytestring."""
    # Validate the buffer shape first for a clearer error than binascii's.
    _input_type_check(s)
    return binascii.a2b_base64(s)
|  |  | ||||||
def decodestring(s):
    """Legacy alias of decodebytes()."""
    import warnings
    # stacklevel 2 attributes the warning to the caller, not this wrapper.
    warnings.warn("decodestring() is a deprecated alias, use decodebytes()",
                  DeprecationWarning, 2)
    return decodebytes(s)
|  |  | ||||||
|  |  | ||||||
|  | # Usable as a script... | ||||||
def main():
    """Small main program"""
    import sys, getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'deut')
    except getopt.error as msg:
        # Send the usage text to stderr by redirecting stdout.
        sys.stdout = sys.stderr
        print(msg)
        print("""usage: %s [-d|-e|-u|-t] [file|-]
        -d, -u: decode
        -e: encode (default)
        -t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0])
        sys.exit(2)
    # Default action is encoding; when several options are given,
    # the last one wins (-t short-circuits and runs the self-test).
    func = encode
    for o, a in opts:
        if o == '-e': func = encode
        if o == '-d': func = decode
        if o == '-u': func = decode
        if o == '-t': test(); return
    # A file argument of '-' (or none) means read from stdin.
    if args and args[0] != '-':
        with open(args[0], 'rb') as f:
            func(f, sys.stdout.buffer)
    else:
        func(sys.stdin.buffer, sys.stdout.buffer)
|  |  | ||||||
|  |  | ||||||
def test():
    """Round-trip a sample string through encodebytes/decodebytes,
    printing each stage."""
    original = b"Aladdin:open sesame"
    print(repr(original))
    encoded = encodebytes(original)
    print(repr(encoded))
    decoded = decodebytes(encoded)
    print(repr(decoded))
    assert original == decoded
|  |  | ||||||
|  |  | ||||||
# Run the command-line interface only when executed directly.
if __name__ == '__main__':
    main()
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/bisect.py |  | ||||||
							
								
								
									
										92
									
								
								v1/flask/lib/python3.4/bisect.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										92
									
								
								v1/flask/lib/python3.4/bisect.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,92 @@ | |||||||
|  | """Bisection algorithms.""" | ||||||
|  |  | ||||||
def insort_right(a, x, lo=0, hi=None):
    """Insert item x in list a, and keep it sorted assuming a is sorted.

    If x is already in a, insert it to the right of the rightmost x.

    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched.
    """

    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    # Binary search for the first index whose element compares greater
    # than x; inserting there lands x after any equal elements.
    while lo < hi:
        middle = (lo + hi) // 2
        if x < a[middle]:
            hi = middle
        else:
            lo = middle + 1
    a.insert(lo, x)

insort = insort_right   # backward compatibility
|  |  | ||||||
def bisect_right(a, x, lo=0, hi=None):
    """Return the index where to insert item x in list a, assuming a is sorted.

    The return value i is such that all e in a[:i] have e <= x, and all e in
    a[i:] have e > x.  So if x already appears in the list, a.insert(x) will
    insert just after the rightmost x already there.

    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched.
    """

    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    # Classic binary search; only the `<` comparison is used, so x needs
    # to support nothing beyond __lt__ against the list elements.
    while lo < hi:
        middle = (lo + hi) // 2
        if x < a[middle]:
            hi = middle
        else:
            lo = middle + 1
    return lo

bisect = bisect_right   # backward compatibility
|  |  | ||||||
def insort_left(a, x, lo=0, hi=None):
    """Insert item x in list a, and keep it sorted assuming a is sorted.

    If x is already in a, insert it to the left of the leftmost x.

    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched.
    """

    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    # Binary search for the first index whose element is not less than
    # x; inserting there lands x before any equal elements.
    while lo < hi:
        middle = (lo + hi) // 2
        if a[middle] < x:
            lo = middle + 1
        else:
            hi = middle
    a.insert(lo, x)
|  |  | ||||||
|  |  | ||||||
def bisect_left(a, x, lo=0, hi=None):
    """Return the index where to insert item x in list a, assuming a is sorted.

    The return value i is such that all e in a[:i] have e < x, and all e in
    a[i:] have e >= x.  So if x already appears in the list, a.insert(x) will
    insert just before the leftmost x already there.

    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched.
    """

    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    # Classic binary search; only `<` between list elements and x is used.
    while lo < hi:
        middle = (lo + hi) // 2
        if a[middle] < x:
            lo = middle + 1
        else:
            hi = middle
    return lo
|  |  | ||||||
# Overwrite above definitions with a fast C implementation
# (the pure-Python versions above remain as the fallback).
try:
    from _bisect import *
except ImportError:
    pass
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/codecs.py |  | ||||||
							
								
								
									
										1105
									
								
								v1/flask/lib/python3.4/codecs.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										1105
									
								
								v1/flask/lib/python3.4/codecs.py
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/copy.py |  | ||||||
							
								
								
									
										333
									
								
								v1/flask/lib/python3.4/copy.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										333
									
								
								v1/flask/lib/python3.4/copy.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,333 @@ | |||||||
|  | """Generic (shallow and deep) copying operations. | ||||||
|  |  | ||||||
|  | Interface summary: | ||||||
|  |  | ||||||
|  |         import copy | ||||||
|  |  | ||||||
|  |         x = copy.copy(y)        # make a shallow copy of y | ||||||
|  |         x = copy.deepcopy(y)    # make a deep copy of y | ||||||
|  |  | ||||||
|  | For module specific errors, copy.Error is raised. | ||||||
|  |  | ||||||
|  | The difference between shallow and deep copying is only relevant for | ||||||
|  | compound objects (objects that contain other objects, like lists or | ||||||
|  | class instances). | ||||||
|  |  | ||||||
|  | - A shallow copy constructs a new compound object and then (to the | ||||||
|  |   extent possible) inserts *the same objects* into it that the | ||||||
|  |   original contains. | ||||||
|  |  | ||||||
|  | - A deep copy constructs a new compound object and then, recursively, | ||||||
|  |   inserts *copies* into it of the objects found in the original. | ||||||
|  |  | ||||||
|  | Two problems often exist with deep copy operations that don't exist | ||||||
|  | with shallow copy operations: | ||||||
|  |  | ||||||
|  |  a) recursive objects (compound objects that, directly or indirectly, | ||||||
|  |     contain a reference to themselves) may cause a recursive loop | ||||||
|  |  | ||||||
|  |  b) because deep copy copies *everything* it may copy too much, e.g. | ||||||
|  |     administrative data structures that should be shared even between | ||||||
|  |     copies | ||||||
|  |  | ||||||
|  | Python's deep copy operation avoids these problems by: | ||||||
|  |  | ||||||
|  |  a) keeping a table of objects already copied during the current | ||||||
|  |     copying pass | ||||||
|  |  | ||||||
|  |  b) letting user-defined classes override the copying operation or the | ||||||
|  |     set of components copied | ||||||
|  |  | ||||||
|  | This version does not copy types like module, class, function, method, | ||||||
|  | nor stack trace, stack frame, nor file, socket, window, nor array, nor | ||||||
|  | any similar types. | ||||||
|  |  | ||||||
|  | Classes can use the same interfaces to control copying that they use | ||||||
|  | to control pickling: they can define methods called __getinitargs__(), | ||||||
|  | __getstate__() and __setstate__().  See the documentation for module | ||||||
|  | "pickle" for information on these methods. | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | import types | ||||||
|  | import weakref | ||||||
|  | from copyreg import dispatch_table | ||||||
|  | import builtins | ||||||
|  |  | ||||||
class Error(Exception):
    """Raised when an object cannot be copied."""
    pass
error = Error   # backward compatibility
|  |  | ||||||
# org.python.core is only available on Jython; on other interpreters the
# sentinel stays None and the PyStringMap branches below are skipped.
try:
    from org.python.core import PyStringMap
except ImportError:
    PyStringMap = None

__all__ = ["Error", "copy", "deepcopy"]
|  |  | ||||||
def copy(x):
    """Shallow copy operation on arbitrary Python objects.

    See the module's __doc__ string for more info.
    """

    cls = type(x)

    # 1. Exact-type dispatch table (built below) handles the common
    #    built-in types without any attribute lookups on x.
    copier = _copy_dispatch.get(cls)
    if copier:
        return copier(x)

    # 2. Classes (instances of type) are treated as immutable.
    try:
        issc = issubclass(cls, type)
    except TypeError: # cls is not a class
        issc = False
    if issc:
        # treat it as a regular class:
        return _copy_immutable(x)

    # 3. A user-defined __copy__ hook on the class wins next.
    copier = getattr(cls, "__copy__", None)
    if copier:
        return copier(x)

    # 4. Fall back to the pickle reduction protocol: copyreg's
    #    dispatch_table, then __reduce_ex__, then __reduce__.
    reductor = dispatch_table.get(cls)
    if reductor:
        rv = reductor(x)
    else:
        reductor = getattr(x, "__reduce_ex__", None)
        if reductor:
            rv = reductor(2)
        else:
            reductor = getattr(x, "__reduce__", None)
            if reductor:
                rv = reductor()
            else:
                raise Error("un(shallow)copyable object of type %s" % cls)

    # Rebuild a new object from the reduction tuple (deep=0: shallow).
    return _reconstruct(x, rv, 0)
|  |  | ||||||
|  |  | ||||||
# Populate the shallow-copy dispatch table; `d` is a short-lived alias
# deleted at the end of this section.
_copy_dispatch = d = {}

def _copy_immutable(x):
    # Immutable objects are their own shallow copy.
    return x
for t in (type(None), int, float, bool, str, tuple,
          bytes, frozenset, type, range,
          types.BuiltinFunctionType, type(Ellipsis),
          types.FunctionType, weakref.ref):
    d[t] = _copy_immutable
t = getattr(types, "CodeType", None)
if t is not None:
    d[t] = _copy_immutable
# getattr-probe types that may not exist on every interpreter/version.
for name in ("complex", "unicode"):
    t = getattr(builtins, name, None)
    if t is not None:
        d[t] = _copy_immutable

def _copy_with_constructor(x):
    # Containers whose constructor accepts an instance of the same type.
    return type(x)(x)
for t in (list, dict, set):
    d[t] = _copy_with_constructor

def _copy_with_copy_method(x):
    return x.copy()
if PyStringMap is not None:
    d[PyStringMap] = _copy_with_copy_method

del d
|  |  | ||||||
def deepcopy(x, memo=None, _nil=[]):
    """Deep copy operation on arbitrary Python objects.

    See the module's __doc__ string for more info.
    """

    if memo is None:
        memo = {}

    # memo maps id(original) -> copy; a hit means x was already copied
    # during this pass (handles shared references and cycles).
    d = id(x)
    y = memo.get(d, _nil)
    if y is not _nil:
        return y

    cls = type(x)

    # Same dispatch order as copy(): exact-type table, then classes,
    # then __deepcopy__, then the pickle reduction protocol.
    copier = _deepcopy_dispatch.get(cls)
    if copier:
        y = copier(x, memo)
    else:
        try:
            issc = issubclass(cls, type)
        except TypeError: # cls is not a class (old Boost; see SF #502085)
            issc = 0
        if issc:
            y = _deepcopy_atomic(x, memo)
        else:
            copier = getattr(x, "__deepcopy__", None)
            if copier:
                y = copier(memo)
            else:
                reductor = dispatch_table.get(cls)
                if reductor:
                    rv = reductor(x)
                else:
                    reductor = getattr(x, "__reduce_ex__", None)
                    if reductor:
                        rv = reductor(2)
                    else:
                        reductor = getattr(x, "__reduce__", None)
                        if reductor:
                            rv = reductor()
                        else:
                            raise Error(
                                "un(deep)copyable object of type %s" % cls)
                y = _reconstruct(x, rv, 1, memo)

    # If is its own copy, don't memoize.
    if y is not x:
        memo[d] = y
        _keep_alive(x, memo) # Make sure x lives at least as long as d
    return y
|  |  | ||||||
# Populate the deep-copy dispatch table; `d` is a short-lived alias
# deleted later in the module.
_deepcopy_dispatch = d = {}

def _deepcopy_atomic(x, memo):
    # Atomic/immutable objects are returned unchanged.
    return x
d[type(None)] = _deepcopy_atomic
d[type(Ellipsis)] = _deepcopy_atomic
d[int] = _deepcopy_atomic
d[float] = _deepcopy_atomic
d[bool] = _deepcopy_atomic
# complex/CodeType may be missing on some interpreters; probe defensively.
try:
    d[complex] = _deepcopy_atomic
except NameError:
    pass
d[bytes] = _deepcopy_atomic
d[str] = _deepcopy_atomic
try:
    d[types.CodeType] = _deepcopy_atomic
except AttributeError:
    pass
d[type] = _deepcopy_atomic
d[range] = _deepcopy_atomic
d[types.BuiltinFunctionType] = _deepcopy_atomic
d[types.FunctionType] = _deepcopy_atomic
d[weakref.ref] = _deepcopy_atomic
|  |  | ||||||
def _deepcopy_list(x, memo):
    """Deep-copy a list."""
    copy_of = []
    # Register the (still empty) copy before recursing so a list that
    # contains itself maps back to copy_of instead of recursing forever.
    memo[id(x)] = copy_of
    copy_of.extend(deepcopy(item, memo) for item in x)
    return copy_of
d[list] = _deepcopy_list
|  |  | ||||||
def _deepcopy_tuple(x, memo):
    # Copy the elements first; the tuple itself cannot be registered in
    # memo before it exists, so cycles are resolved via the elements.
    y = []
    for a in x:
        y.append(deepcopy(a, memo))
    # We're not going to put the tuple in the memo, but it's still important we
    # check for it, in case the tuple contains recursive mutable structures.
    try:
        return memo[id(x)]
    except KeyError:
        pass
    # If every element copied to itself, reuse the original tuple; only
    # build a new tuple when at least one element actually changed.
    for i in range(len(x)):
        if x[i] is not y[i]:
            y = tuple(y)
            break
    else:
        y = x
    return y
d[tuple] = _deepcopy_tuple
|  |  | ||||||
def _deepcopy_dict(x, memo):
    """Deep-copy a dict, copying both keys and values."""
    result = {}
    # Memoize the empty copy first so self-referential dicts terminate.
    memo[id(x)] = result
    for key, value in x.items():
        result[deepcopy(key, memo)] = deepcopy(value, memo)
    return result
d[dict] = _deepcopy_dict
if PyStringMap is not None:
    d[PyStringMap] = _deepcopy_dict
|  |  | ||||||
def _deepcopy_method(x, memo): # Copy instance methods
    # Rebind the same function to a deep copy of the bound instance.
    return type(x)(x.__func__, deepcopy(x.__self__, memo))
_deepcopy_dispatch[types.MethodType] = _deepcopy_method
|  |  | ||||||
def _keep_alive(x, memo):
    """Keeps a reference to the object x in the memo.

    Because we remember objects by their id, we have
    to assure that possibly temporary objects are kept
    alive by referencing them.
    We store a reference at the id of the memo, which should
    normally not be used unless someone tries to deepcopy
    the memo itself...
    """
    bucket = memo.get(id(memo))
    if bucket is None:
        # First object kept alive for this memo: create the bucket.
        memo[id(memo)] = [x]
    else:
        bucket.append(x)
|  |  | ||||||
def _reconstruct(x, info, deep, memo=None):
    # info is the value returned by a __reduce__-style call:
    # either a string (the object is copied by reference) or a tuple
    # (callable, args[, state[, listiter[, dictiter]]]).
    if isinstance(info, str):
        return x
    assert isinstance(info, tuple)
    if memo is None:
        memo = {}
    n = len(info)
    assert n in (2, 3, 4, 5)
    callable, args = info[:2]
    if n > 2:
        state = info[2]
    else:
        state = None
    if n > 3:
        listiter = info[3]
    else:
        listiter = None
    if n > 4:
        dictiter = info[4]
    else:
        dictiter = None
    if deep:
        args = deepcopy(args, memo)
    y = callable(*args)
    # Memoize before restoring state so cycles through state resolve to y.
    memo[id(x)] = y

    if state is not None:
        if deep:
            state = deepcopy(state, memo)
        if hasattr(y, '__setstate__'):
            y.__setstate__(state)
        else:
            # Protocol-2 style: state may be (dict_state, slot_state).
            if isinstance(state, tuple) and len(state) == 2:
                state, slotstate = state
            else:
                slotstate = None
            if state is not None:
                y.__dict__.update(state)
            if slotstate is not None:
                for key, value in slotstate.items():
                    setattr(y, key, value)

    # Replay list-like and dict-like contents, copying items when deep.
    if listiter is not None:
        for item in listiter:
            if deep:
                item = deepcopy(item, memo)
            y.append(item)
    if dictiter is not None:
        for key, value in dictiter:
            if deep:
                key = deepcopy(key, memo)
                value = deepcopy(value, memo)
            y[key] = value
    return y
|  |  | ||||||
# Drop the table-building temporaries from the module namespace.
del d

del types

# Helper for instance creation without calling __init__
class _EmptyClass:
    pass
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/copyreg.py |  | ||||||
							
								
								
									
										202
									
								
								v1/flask/lib/python3.4/copyreg.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										202
									
								
								v1/flask/lib/python3.4/copyreg.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,202 @@ | |||||||
|  | """Helper to provide extensibility for pickle. | ||||||
|  |  | ||||||
|  | This is only useful to add pickle support for extension types defined in | ||||||
|  | C, not for instances of user-defined classes. | ||||||
|  | """ | ||||||
|  |  | ||||||
__all__ = ["pickle", "constructor",
           "add_extension", "remove_extension", "clear_extension_cache"]

# Maps a type to its registered reduction function (filled by pickle()
# below); also imported and consulted by the copy module.
dispatch_table = {}
|  |  | ||||||
def pickle(ob_type, pickle_function, constructor_ob=None):
    """Register pickle_function as the reduction function for ob_type."""
    if not callable(pickle_function):
        raise TypeError("reduction functions must be callable")
    dispatch_table[ob_type] = pickle_function

    # The constructor_ob function is a vestige of safe for unpickling.
    # There is no reason for the caller to pass it anymore.
    if constructor_ob is not None:
        constructor(constructor_ob)
|  |  | ||||||
def constructor(object):
    """Validate that *object* is callable (kept for backward compatibility)."""
    if callable(object):
        return
    raise TypeError("constructors must be callable")
|  |  | ||||||
# Example: provide pickling support for complex numbers.

# Probe for the complex builtin so this section is a no-op on
# interpreters that lack it.
try:
    complex
except NameError:
    pass
else:

    def pickle_complex(c):
        # Reduce a complex number to its constructor and (real, imag) args.
        return complex, (c.real, c.imag)

    pickle(complex, pickle_complex, complex)
|  |  | ||||||
|  | # Support for pickling new-style objects | ||||||
|  |  | ||||||
def _reconstructor(cls, base, state):
    """Rebuild an instance of cls reduced with (base, state) by _reduce_ex
    (pickle protocols 0 and 1)."""
    if base is object:
        # No base state to restore; a bare __new__ suffices.
        return object.__new__(cls)
    obj = base.__new__(cls, state)
    # Only invoke base.__init__ when the base actually overrides it.
    if base.__init__ != object.__init__:
        base.__init__(obj, state)
    return obj
|  |  | ||||||
|  | _HEAPTYPE = 1<<9 | ||||||
|  |  | ||||||
|  | # Python code for object.__reduce_ex__ for protocols 0 and 1 | ||||||
|  |  | ||||||
def _reduce_ex(self, proto):
    # Pure-Python implementation of object.__reduce_ex__ for pickle
    # protocols 0 and 1: returns (_reconstructor, args[, instance dict]).
    assert proto < 2
    # Find the most-derived C-level (non-heap) base class; that is the
    # type whose value must be captured to rebuild the C layout.
    for base in self.__class__.__mro__:
        if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE:
            break
    else:
        base = object # not really reachable
    if base is object:
        state = None
    else:
        if base is self.__class__:
            raise TypeError("can't pickle %s objects" % base.__name__)
        # Convert to the C base type to capture its internal value.
        state = base(self)
    args = (self.__class__, base, state)
    try:
        getstate = self.__getstate__
    except AttributeError:
        # Slots have no __dict__ entry, so without __getstate__ their
        # contents would be silently lost -- refuse to pickle.
        if getattr(self, "__slots__", None):
            raise TypeError("a class that defines __slots__ without "
                            "defining __getstate__ cannot be pickled")
        try:
            dict = self.__dict__  # NOTE: intentionally shadows builtin 'dict'
        except AttributeError:
            dict = None
    else:
        dict = getstate()
    if dict:
        return _reconstructor, args, dict
    else:
        return _reconstructor, args
|  |  | ||||||
|  | # Helper for __reduce_ex__ protocol 2 | ||||||
|  |  | ||||||
def __newobj__(cls, *args):
    # Helper for __reduce_ex__ protocol 2: recreate an instance by
    # calling cls.__new__ with positional arguments only.
    return cls.__new__(cls, *args)
|  |  | ||||||
def __newobj_ex__(cls, args, kwargs):
    """Used by pickle protocol 4, instead of __newobj__ to allow classes with
    keyword-only arguments to be pickled correctly.
    """
    # args is star-unpacked and kwargs double-star-unpacked, so they must
    # be an iterable and a mapping respectively.
    return cls.__new__(cls, *args, **kwargs)
|  |  | ||||||
|  | def _slotnames(cls): | ||||||
|  |     """Return a list of slot names for a given class. | ||||||
|  |  | ||||||
|  |     This needs to find slots defined by the class and its bases, so we | ||||||
|  |     can't simply return the __slots__ attribute.  We must walk down | ||||||
|  |     the Method Resolution Order and concatenate the __slots__ of each | ||||||
|  |     class found there.  (This assumes classes don't modify their | ||||||
|  |     __slots__ attribute to misrepresent their slots after the class is | ||||||
|  |     defined.) | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     # Get the value from a cache in the class if possible | ||||||
|  |     names = cls.__dict__.get("__slotnames__") | ||||||
|  |     if names is not None: | ||||||
|  |         return names | ||||||
|  |  | ||||||
|  |     # Not cached -- calculate the value | ||||||
|  |     names = [] | ||||||
|  |     if not hasattr(cls, "__slots__"): | ||||||
|  |         # This class has no slots | ||||||
|  |         pass | ||||||
|  |     else: | ||||||
|  |         # Slots found -- gather slot names from all base classes | ||||||
|  |         for c in cls.__mro__: | ||||||
|  |             if "__slots__" in c.__dict__: | ||||||
|  |                 slots = c.__dict__['__slots__'] | ||||||
|  |                 # if class has a single slot, it can be given as a string | ||||||
|  |                 if isinstance(slots, str): | ||||||
|  |                     slots = (slots,) | ||||||
|  |                 for name in slots: | ||||||
|  |                     # special descriptors | ||||||
|  |                     if name in ("__dict__", "__weakref__"): | ||||||
|  |                         continue | ||||||
|  |                     # mangled names | ||||||
|  |                     elif name.startswith('__') and not name.endswith('__'): | ||||||
|  |                         names.append('_%s%s' % (c.__name__, name)) | ||||||
|  |                     else: | ||||||
|  |                         names.append(name) | ||||||
|  |  | ||||||
|  |     # Cache the outcome in the class if at all possible | ||||||
|  |     try: | ||||||
|  |         cls.__slotnames__ = names | ||||||
|  |     except: | ||||||
|  |         pass # But don't die if we can't | ||||||
|  |  | ||||||
|  |     return names | ||||||
|  |  | ||||||
|  | # A registry of extension codes.  This is an ad-hoc compression | ||||||
|  | # mechanism.  Whenever a global reference to <module>, <name> is about | ||||||
|  | # to be pickled, the (<module>, <name>) tuple is looked up here to see | ||||||
|  | # if it is a registered extension code for it.  Extension codes are | ||||||
|  | # universal, so that the meaning of a pickle does not depend on | ||||||
|  | # context.  (There are also some codes reserved for local use that | ||||||
|  | # don't have this restriction.)  Codes are positive ints; 0 is | ||||||
|  | # reserved. | ||||||
|  |  | ||||||
# The three dicts below are kept mutually consistent by add_extension()
# and remove_extension().
_extension_registry = {}                # key -> code
_inverted_registry = {}                 # code -> key
_extension_cache = {}                   # code -> object
# Don't ever rebind those names:  pickling grabs a reference to them when
# it's initialized, and won't see a rebinding.
|  |  | ||||||
|  | def add_extension(module, name, code): | ||||||
|  |     """Register an extension code.""" | ||||||
|  |     code = int(code) | ||||||
|  |     if not 1 <= code <= 0x7fffffff: | ||||||
|  |         raise ValueError("code out of range") | ||||||
|  |     key = (module, name) | ||||||
|  |     if (_extension_registry.get(key) == code and | ||||||
|  |         _inverted_registry.get(code) == key): | ||||||
|  |         return # Redundant registrations are benign | ||||||
|  |     if key in _extension_registry: | ||||||
|  |         raise ValueError("key %s is already registered with code %s" % | ||||||
|  |                          (key, _extension_registry[key])) | ||||||
|  |     if code in _inverted_registry: | ||||||
|  |         raise ValueError("code %s is already in use for key %s" % | ||||||
|  |                          (code, _inverted_registry[code])) | ||||||
|  |     _extension_registry[key] = code | ||||||
|  |     _inverted_registry[code] = key | ||||||
|  |  | ||||||
|  | def remove_extension(module, name, code): | ||||||
|  |     """Unregister an extension code.  For testing only.""" | ||||||
|  |     key = (module, name) | ||||||
|  |     if (_extension_registry.get(key) != code or | ||||||
|  |         _inverted_registry.get(code) != key): | ||||||
|  |         raise ValueError("key %s is not registered with code %s" % | ||||||
|  |                          (key, code)) | ||||||
|  |     del _extension_registry[key] | ||||||
|  |     del _inverted_registry[code] | ||||||
|  |     if code in _extension_cache: | ||||||
|  |         del _extension_cache[code] | ||||||
|  |  | ||||||
def clear_extension_cache():
    """Drop all memoized extension objects (the code -> object cache)."""
    _extension_cache.clear()
|  |  | ||||||
|  | # Standard extension code assignments | ||||||
|  |  | ||||||
|  | # Reserved ranges | ||||||
|  |  | ||||||
|  | # First  Last Count  Purpose | ||||||
|  | #     1   127   127  Reserved for Python standard library | ||||||
|  | #   128   191    64  Reserved for Zope | ||||||
|  | #   192   239    48  Reserved for 3rd parties | ||||||
|  | #   240   255    16  Reserved for private use (will never be assigned) | ||||||
|  | #   256   Inf   Inf  Reserved for future assignment | ||||||
|  |  | ||||||
|  | # Extension codes are assigned by the Python Software Foundation. | ||||||
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/fnmatch.py |  | ||||||
							
								
								
									
										109
									
								
								v1/flask/lib/python3.4/fnmatch.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										109
									
								
								v1/flask/lib/python3.4/fnmatch.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,109 @@ | |||||||
|  | """Filename matching with shell patterns. | ||||||
|  |  | ||||||
|  | fnmatch(FILENAME, PATTERN) matches according to the local convention. | ||||||
|  | fnmatchcase(FILENAME, PATTERN) always takes case in account. | ||||||
|  |  | ||||||
|  | The functions operate by translating the pattern into a regular | ||||||
|  | expression.  They cache the compiled regular expressions for speed. | ||||||
|  |  | ||||||
|  | The function translate(PATTERN) returns a regular expression | ||||||
|  | corresponding to PATTERN.  (It does not compile it.) | ||||||
|  | """ | ||||||
|  | import os | ||||||
|  | import posixpath | ||||||
|  | import re | ||||||
|  | import functools | ||||||
|  |  | ||||||
|  | __all__ = ["filter", "fnmatch", "fnmatchcase", "translate"] | ||||||
|  |  | ||||||
def fnmatch(name, pat):
    """Test whether FILENAME matches PATTERN.

    Patterns are Unix shell style:

    *       matches everything
    ?       matches any single character
    [seq]   matches any character in seq
    [!seq]  matches any char not in seq

    An initial period in FILENAME is not special.
    Both FILENAME and PATTERN are first case-normalized
    if the operating system requires it.
    If you don't want this, use fnmatchcase(FILENAME, PATTERN).
    """
    # Normalize case on both sides (a no-op on POSIX), then delegate to
    # the case-sensitive matcher.
    return fnmatchcase(os.path.normcase(name), os.path.normcase(pat))
|  |  | ||||||
|  | @functools.lru_cache(maxsize=256, typed=True) | ||||||
|  | def _compile_pattern(pat): | ||||||
|  |     if isinstance(pat, bytes): | ||||||
|  |         pat_str = str(pat, 'ISO-8859-1') | ||||||
|  |         res_str = translate(pat_str) | ||||||
|  |         res = bytes(res_str, 'ISO-8859-1') | ||||||
|  |     else: | ||||||
|  |         res = translate(pat) | ||||||
|  |     return re.compile(res).match | ||||||
|  |  | ||||||
def filter(names, pat):
    """Return the subset of the list NAMES that match PAT."""
    pat = os.path.normcase(pat)
    match = _compile_pattern(pat)
    if os.path is posixpath:
        # normcase on posix is a no-op, so skip it inside the loop.
        return [name for name in names if match(name)]
    return [name for name in names
            if match(os.path.normcase(name))]
|  |  | ||||||
def fnmatchcase(name, pat):
    """Test whether FILENAME matches PATTERN, including case.

    This is a version of fnmatch() which doesn't case-normalize
    its arguments.
    """
    matcher = _compile_pattern(pat)
    return matcher(name) is not None
|  |  | ||||||
|  |  | ||||||
def translate(pat):
    """Translate a shell PATTERN to a regular expression.

    There is no way to quote meta-characters.
    """

    i, n = 0, len(pat)
    res = ''
    while i < n:
        c = pat[i]
        i = i+1
        if c == '*':
            res = res + '.*'
        elif c == '?':
            res = res + '.'
        elif c == '[':
            # Scan for the closing ']' of a character class; '!' right
            # after '[' negates, and a ']' immediately after '[' (or '[!')
            # is a literal member of the class.
            j = i
            if j < n and pat[j] == '!':
                j = j+1
            if j < n and pat[j] == ']':
                j = j+1
            while j < n and pat[j] != ']':
                j = j+1
            if j >= n:
                # Unterminated class: treat '[' as a literal character.
                res = res + '\\['
            else:
                stuff = pat[i:j].replace('\\','\\\\')
                i = j+1
                if stuff[0] == '!':
                    stuff = '^' + stuff[1:]
                elif stuff[0] == '^':
                    stuff = '\\' + stuff
                res = '%s[%s]' % (res, stuff)
        else:
            res = res + re.escape(c)
    # Bug fix: the original returned res + '\Z(?ms)', placing inline
    # flags at the END of the pattern.  re deprecated that placement and
    # rejects it since Python 3.11, so the produced regex would no longer
    # compile.  Scope the DOTALL flag in a local group instead (the MULTILINE
    # flag was a no-op here: the pattern contains no '^'/'$'); matching
    # semantics are unchanged.
    return r'(?s:%s)\Z' % res
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/functools.py |  | ||||||
							
								
								
									
										735
									
								
								v1/flask/lib/python3.4/functools.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										735
									
								
								v1/flask/lib/python3.4/functools.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,735 @@ | |||||||
|  | """functools.py - Tools for working with functions and callable objects | ||||||
|  | """ | ||||||
|  | # Python module wrapper for _functools C module | ||||||
|  | # to allow utilities written in Python to be added | ||||||
|  | # to the functools module. | ||||||
|  | # Written by Nick Coghlan <ncoghlan at gmail.com>, | ||||||
|  | # Raymond Hettinger <python at rcn.com>, | ||||||
|  | # and Łukasz Langa <lukasz at langa.pl>. | ||||||
|  | #   Copyright (C) 2006-2013 Python Software Foundation. | ||||||
|  | # See C source code for _functools credits/copyright | ||||||
|  |  | ||||||
__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES',
           'total_ordering', 'cmp_to_key', 'lru_cache', 'reduce', 'partial',
           'partialmethod', 'singledispatch']

try:
    from _functools import reduce
except ImportError:
    pass
from abc import get_cache_token
from collections import namedtuple
from types import MappingProxyType
from weakref import WeakKeyDictionary
try:
    from _thread import RLock
except:
    # Builds without threads: provide a no-op stand-in so code that uses
    # RLock as a context manager can run unchanged.
    class RLock:
        'Dummy reentrant lock for builds without threads'
        def __enter__(self): pass
        def __exit__(self, exctype, excinst, exctb): pass
|  |  | ||||||
|  |  | ||||||
|  | ################################################################################ | ||||||
|  | ### update_wrapper() and wraps() decorator | ||||||
|  | ################################################################################ | ||||||
|  |  | ||||||
|  | # update_wrapper() and wraps() are tools to help write | ||||||
|  | # wrapper functions that can handle naive introspection | ||||||
|  |  | ||||||
WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__qualname__', '__doc__',
                       '__annotations__')
WRAPPER_UPDATES = ('__dict__',)
def update_wrapper(wrapper,
                   wrapped,
                   assigned = WRAPPER_ASSIGNMENTS,
                   updated = WRAPPER_UPDATES):
    """Make *wrapper* look like the *wrapped* function.

       wrapper is the function to be updated
       wrapped is the original function
       assigned is a tuple naming the attributes assigned directly
       from the wrapped function to the wrapper function (defaults to
       functools.WRAPPER_ASSIGNMENTS)
       updated is a tuple naming the attributes of the wrapper that
       are updated with the corresponding attribute from the wrapped
       function (defaults to functools.WRAPPER_UPDATES)
    """
    for attr in assigned:
        try:
            copied = getattr(wrapped, attr)
        except AttributeError:
            # Some callables lack e.g. __qualname__; just skip those.
            continue
        setattr(wrapper, attr, copied)
    for attr in updated:
        getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
    # Issue #17482: set __wrapped__ last so a '__wrapped__' entry copied
    # via __dict__ above doesn't survive.
    wrapper.__wrapped__ = wrapped
    # Return the wrapper so this can be used as a decorator via partial().
    return wrapper
|  |  | ||||||
def wraps(wrapped,
          assigned = WRAPPER_ASSIGNMENTS,
          updated = WRAPPER_UPDATES):
    """Decorator factory to apply update_wrapper() to a wrapper function

       Returns a decorator that invokes update_wrapper() with the decorated
       function as the wrapper argument and the arguments to wraps() as the
       remaining arguments. Default arguments are as for update_wrapper().
       This is a convenience function to simplify applying partial() to
       update_wrapper().
    """
    # The returned partial object IS the decorator: calling it with the
    # wrapper function invokes update_wrapper(wrapper, wrapped=..., ...).
    return partial(update_wrapper, wrapped=wrapped,
                   assigned=assigned, updated=updated)
|  |  | ||||||
|  |  | ||||||
|  | ################################################################################ | ||||||
|  | ### total_ordering class decorator | ||||||
|  | ################################################################################ | ||||||
|  |  | ||||||
|  | # The total ordering functions all invoke the root magic method directly | ||||||
|  | # rather than using the corresponding operator.  This avoids possible | ||||||
|  | # infinite recursion that could occur when the operator dispatch logic | ||||||
|  | # detects a NotImplemented result and then calls a reflected method. | ||||||
|  |  | ||||||
# Each helper below derives one rich-comparison operator from another,
# propagating NotImplemented where the result would otherwise be inverted,
# so reflected operations on the other operand still get a chance.
def _gt_from_lt(self, other):
    'Return a > b.  Computed by @total_ordering from (not a < b) and (a != b).'
    op_result = self.__lt__(other)
    if op_result is NotImplemented:
        return NotImplemented
    return not op_result and self != other

def _le_from_lt(self, other):
    'Return a <= b.  Computed by @total_ordering from (a < b) or (a == b).'
    op_result = self.__lt__(other)
    return op_result or self == other

def _ge_from_lt(self, other):
    'Return a >= b.  Computed by @total_ordering from (not a < b).'
    op_result = self.__lt__(other)
    if op_result is NotImplemented:
        return NotImplemented
    return not op_result

def _ge_from_le(self, other):
    'Return a >= b.  Computed by @total_ordering from (not a <= b) or (a == b).'
    op_result = self.__le__(other)
    if op_result is NotImplemented:
        return NotImplemented
    return not op_result or self == other

def _lt_from_le(self, other):
    'Return a < b.  Computed by @total_ordering from (a <= b) and (a != b).'
    op_result = self.__le__(other)
    if op_result is NotImplemented:
        return NotImplemented
    return op_result and self != other

def _gt_from_le(self, other):
    'Return a > b.  Computed by @total_ordering from (not a <= b).'
    op_result = self.__le__(other)
    if op_result is NotImplemented:
        return NotImplemented
    return not op_result

def _lt_from_gt(self, other):
    'Return a < b.  Computed by @total_ordering from (not a > b) and (a != b).'
    op_result = self.__gt__(other)
    if op_result is NotImplemented:
        return NotImplemented
    return not op_result and self != other

def _ge_from_gt(self, other):
    'Return a >= b.  Computed by @total_ordering from (a > b) or (a == b).'
    op_result = self.__gt__(other)
    return op_result or self == other

def _le_from_gt(self, other):
    'Return a <= b.  Computed by @total_ordering from (not a > b).'
    op_result = self.__gt__(other)
    if op_result is NotImplemented:
        return NotImplemented
    return not op_result

def _le_from_ge(self, other):
    'Return a <= b.  Computed by @total_ordering from (not a >= b) or (a == b).'
    op_result = self.__ge__(other)
    if op_result is NotImplemented:
        return NotImplemented
    return not op_result or self == other

def _gt_from_ge(self, other):
    'Return a > b.  Computed by @total_ordering from (a >= b) and (a != b).'
    op_result = self.__ge__(other)
    if op_result is NotImplemented:
        return NotImplemented
    return op_result and self != other

def _lt_from_ge(self, other):
    'Return a < b.  Computed by @total_ordering from (not a >= b).'
    op_result = self.__ge__(other)
    if op_result is NotImplemented:
        return NotImplemented
    return not op_result
|  |  | ||||||
def total_ordering(cls):
    """Class decorator that fills in missing ordering methods"""
    convert = {
        '__lt__': [('__gt__', _gt_from_lt),
                   ('__le__', _le_from_lt),
                   ('__ge__', _ge_from_lt)],
        '__le__': [('__ge__', _ge_from_le),
                   ('__lt__', _lt_from_le),
                   ('__gt__', _gt_from_le)],
        '__gt__': [('__lt__', _lt_from_gt),
                   ('__ge__', _ge_from_gt),
                   ('__le__', _le_from_gt)],
        '__ge__': [('__le__', _le_from_ge),
                   ('__gt__', _gt_from_ge),
                   ('__lt__', _lt_from_ge)]
    }
    # User-defined comparisons are the ones that differ from object's.
    defined = [op for op in convert
               if getattr(cls, op, None) is not getattr(object, op, None)]
    if not defined:
        raise ValueError('must define at least one ordering operation: < > <= >=')
    anchor = max(defined)       # prefer __lt__ to __le__ to __gt__ to __ge__
    for opname, opfunc in convert[anchor]:
        if opname in defined:
            continue            # never clobber a user-supplied method
        opfunc.__name__ = opname
        setattr(cls, opname, opfunc)
    return cls
|  |  | ||||||
|  |  | ||||||
|  | ################################################################################ | ||||||
|  | ### cmp_to_key() function converter | ||||||
|  | ################################################################################ | ||||||
|  |  | ||||||
def cmp_to_key(mycmp):
    """Convert a cmp= function into a key= function"""
    class _Key(object):
        # Wraps one value; all comparisons delegate to mycmp's sign.
        __slots__ = ['obj']
        def __init__(self, obj):
            self.obj = obj
        def __lt__(self, other):
            return mycmp(self.obj, other.obj) < 0
        def __le__(self, other):
            return mycmp(self.obj, other.obj) <= 0
        def __eq__(self, other):
            return mycmp(self.obj, other.obj) == 0
        def __ne__(self, other):
            return mycmp(self.obj, other.obj) != 0
        def __gt__(self, other):
            return mycmp(self.obj, other.obj) > 0
        def __ge__(self, other):
            return mycmp(self.obj, other.obj) >= 0
        __hash__ = None  # mutually-comparable wrappers are not hashable
    return _Key
|  |  | ||||||
# Prefer the C implementation when available; the pure-Python version
# above remains as the fallback.
try:
    from _functools import cmp_to_key
except ImportError:
    pass
|  |  | ||||||
|  |  | ||||||
|  | ################################################################################ | ||||||
|  | ### partial() argument application | ||||||
|  | ################################################################################ | ||||||
|  |  | ||||||
|  | # Purely functional, no descriptor behaviour | ||||||
def partial(func, *args, **keywords):
    """New function with partial application of the given arguments
    and keywords.
    """
    def inner(*more_args, **more_keywords):
        # Frozen keywords first, call-time keywords override them.
        merged = keywords.copy()
        merged.update(more_keywords)
        return func(*(args + more_args), **merged)
    # Expose the frozen call spec, mirroring the C implementation.
    inner.func = func
    inner.args = args
    inner.keywords = keywords
    return inner
|  |  | ||||||
# Prefer the C implementation when available; the pure-Python version
# above remains as the fallback.
try:
    from _functools import partial
except ImportError:
    pass
|  |  | ||||||
|  | # Descriptor version | ||||||
# Descriptor version
class partialmethod(object):
    """Method descriptor with partial application of the given arguments
    and keywords.

    Supports wrapping existing descriptors and handles non-descriptor
    callables as instance methods.
    """

    def __init__(self, func, *args, **keywords):
        if not callable(func) and not hasattr(func, "__get__"):
            raise TypeError("{!r} is not callable or a descriptor"
                                 .format(func))

        # func could be a descriptor like classmethod which isn't callable,
        # so we can't inherit from partial (it verifies func is callable)
        if isinstance(func, partialmethod):
            # flattening is mandatory in order to place cls/self before all
            # other arguments
            # it's also more efficient since only one function will be called
            self.func = func.func
            self.args = func.args + args
            self.keywords = func.keywords.copy()
            self.keywords.update(keywords)
        else:
            self.func = func
            self.args = args
            self.keywords = keywords

    def __repr__(self):
        args = ", ".join(map(repr, self.args))
        keywords = ", ".join("{}={!r}".format(k, v)
                                 for k, v in self.keywords.items())
        format_string = "{module}.{cls}({func}, {args}, {keywords})"
        return format_string.format(module=self.__class__.__module__,
                                    cls=self.__class__.__name__,
                                    func=self.func,
                                    args=args,
                                    keywords=keywords)

    def _make_unbound_method(self):
        # Fallback path: treat self.func as a plain callable and inject the
        # receiver (cls or self) as its first positional argument.
        def _method(*args, **keywords):
            call_keywords = self.keywords.copy()
            call_keywords.update(keywords)
            cls_or_self, *rest = args
            call_args = (cls_or_self,) + self.args + tuple(rest)
            return self.func(*call_args, **call_keywords)
        _method.__isabstractmethod__ = self.__isabstractmethod__
        _method._partialmethod = self
        return _method

    def __get__(self, obj, cls):
        # First give the wrapped object's own descriptor protocol a chance
        # (handles classmethod/staticmethod/functions alike).
        get = getattr(self.func, "__get__", None)
        result = None
        if get is not None:
            new_func = get(obj, cls)
            if new_func is not self.func:
                # Assume __get__ returning something new indicates the
                # creation of an appropriate callable
                result = partial(new_func, *self.args, **self.keywords)
                try:
                    result.__self__ = new_func.__self__
                except AttributeError:
                    pass
        if result is None:
            # If the underlying descriptor didn't do anything, treat this
            # like an instance method
            result = self._make_unbound_method().__get__(obj, cls)
        return result

    @property
    def __isabstractmethod__(self):
        # Mirror the wrapped callable's abstractness for abc support.
        return getattr(self.func, "__isabstractmethod__", False)
|  |  | ||||||
|  |  | ||||||
|  | ################################################################################ | ||||||
|  | ### LRU Cache function decorator | ||||||
|  | ################################################################################ | ||||||
|  |  | ||||||
|  | _CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"]) | ||||||
|  |  | ||||||
|  | class _HashedSeq(list): | ||||||
|  |     """ This class guarantees that hash() will be called no more than once | ||||||
|  |         per element.  This is important because the lru_cache() will hash | ||||||
|  |         the key multiple times on a cache miss. | ||||||
|  |  | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     __slots__ = 'hashvalue' | ||||||
|  |  | ||||||
|  |     def __init__(self, tup, hash=hash): | ||||||
|  |         self[:] = tup | ||||||
|  |         self.hashvalue = hash(tup) | ||||||
|  |  | ||||||
|  |     def __hash__(self): | ||||||
|  |         return self.hashvalue | ||||||
|  |  | ||||||
def _make_key(args, kwds, typed,
             kwd_mark = (object(),),
             fasttypes = {int, str, frozenset, type(None)},
             sorted=sorted, tuple=tuple, type=type, len=len):
    """Make a cache key from optionally typed positional and keyword arguments

    The key is constructed in a way that is flat as possible rather than
    as a nested structure that would take more memory.

    If there is only a single argument and its data type is known to cache
    its hash value, then that argument is returned without a wrapper.  This
    saves space and improves lookup speed.

    """
    # The extra default arguments bind helpers once at definition time:
    # kwd_mark is a unique separator between positional and keyword parts,
    # and the builtin rebindings make hot-path name lookups local.
    key = args
    if kwds:
        sorted_items = sorted(kwds.items())   # deterministic kwarg ordering
        key += kwd_mark
        for item in sorted_items:
            key += item
    if typed:
        # Append argument types so e.g. f(3) and f(3.0) get distinct keys.
        key += tuple(type(v) for v in args)
        if kwds:
            key += tuple(type(v) for k, v in sorted_items)
    elif len(key) == 1 and type(key[0]) in fasttypes:
        # Single argument of a type known to cache its own hash:
        # return it unwrapped to save space and speed up lookup.
        return key[0]
    return _HashedSeq(key)
|  |  | ||||||
def lru_cache(maxsize=128, typed=False):
    """Least-recently-used cache decorator.

    If *maxsize* is set to None, the LRU features are disabled and the cache
    can grow without bound.

    If *typed* is True, arguments of different types will be cached separately.
    For example, f(3.0) and f(3) will be treated as distinct calls with
    distinct results.

    Arguments to the cached function must be hashable.

    View the cache statistics named tuple (hits, misses, maxsize, currsize)
    with f.cache_info().  Clear the cache and statistics with f.cache_clear().
    Access the underlying function with f.__wrapped__.

    See:  http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used

    """

    # Users should only access the lru_cache through its public API:
    #       cache_info, cache_clear, and f.__wrapped__
    # The internals of the lru_cache are encapsulated for thread safety and
    # to allow the implementation to change (including a possible C version).

    # Early detection of an erroneous call to @lru_cache without any arguments
    # resulting in the inner function being passed to maxsize instead of an
    # integer or None.
    if maxsize is not None and not isinstance(maxsize, int):
        raise TypeError('Expected maxsize to be an integer or None')

    # Constants shared by all lru cache instances:
    sentinel = object()          # unique object used to signal cache misses
    make_key = _make_key         # build a key from the function arguments
    PREV, NEXT, KEY, RESULT = 0, 1, 2, 3   # names for the link fields

    def decorating_function(user_function):
        cache = {}
        hits = misses = 0
        full = False
        cache_get = cache.get    # bound method to lookup a key or return None
        lock = RLock()           # because linkedlist updates aren't threadsafe
        root = []                # root of the circular doubly linked list
        root[:] = [root, root, None, None]     # initialize by pointing to self
        # Invariant: root[NEXT] is the oldest (least recently used) link and
        # root[PREV] is the newest; links are 4-lists [prev, next, key, result].

        if maxsize == 0:

            def wrapper(*args, **kwds):
                # No caching -- just a statistics update after a successful call
                nonlocal misses
                result = user_function(*args, **kwds)
                misses += 1
                return result

        elif maxsize is None:

            def wrapper(*args, **kwds):
                # Simple caching without ordering or size limit
                nonlocal hits, misses
                key = make_key(args, kwds, typed)
                result = cache_get(key, sentinel)
                if result is not sentinel:
                    hits += 1
                    return result
                result = user_function(*args, **kwds)
                cache[key] = result
                misses += 1
                return result

        else:

            def wrapper(*args, **kwds):
                # Size limited caching that tracks accesses by recency
                nonlocal root, hits, misses, full
                key = make_key(args, kwds, typed)
                with lock:
                    link = cache_get(key)
                    if link is not None:
                        # Move the link to the front of the circular queue
                        link_prev, link_next, _key, result = link
                        link_prev[NEXT] = link_next
                        link_next[PREV] = link_prev
                        last = root[PREV]
                        last[NEXT] = root[PREV] = link
                        link[PREV] = last
                        link[NEXT] = root
                        hits += 1
                        return result
                # The lock is deliberately released while the (possibly slow)
                # user function runs, so other threads may race this key.
                result = user_function(*args, **kwds)
                with lock:
                    if key in cache:
                        # Getting here means that this same key was added to the
                        # cache while the lock was released.  Since the link
                        # update is already done, we need only return the
                        # computed result and update the count of misses.
                        pass
                    elif full:
                        # Use the old root to store the new key and result.
                        oldroot = root
                        oldroot[KEY] = key
                        oldroot[RESULT] = result
                        # Empty the oldest link and make it the new root.
                        # Keep a reference to the old key and old result to
                        # prevent their ref counts from going to zero during the
                        # update. That will prevent potentially arbitrary object
                        # clean-up code (i.e. __del__) from running while we're
                        # still adjusting the links.
                        root = oldroot[NEXT]
                        oldkey = root[KEY]
                        oldresult = root[RESULT]
                        root[KEY] = root[RESULT] = None
                        # Now update the cache dictionary.
                        del cache[oldkey]
                        # Save the potentially reentrant cache[key] assignment
                        # for last, after the root and links have been put in
                        # a consistent state.
                        cache[key] = oldroot
                    else:
                        # Put result in a new link at the front of the queue.
                        last = root[PREV]
                        link = [last, root, key, result]
                        last[NEXT] = root[PREV] = cache[key] = link
                        full = (len(cache) >= maxsize)
                    misses += 1
                return result

        def cache_info():
            """Report cache statistics"""
            with lock:
                return _CacheInfo(hits, misses, maxsize, len(cache))

        def cache_clear():
            """Clear the cache and cache statistics"""
            nonlocal hits, misses, full
            with lock:
                cache.clear()
                root[:] = [root, root, None, None]
                hits = misses = 0
                full = False

        wrapper.cache_info = cache_info
        wrapper.cache_clear = cache_clear
        return update_wrapper(wrapper, user_function)

    return decorating_function
|  |  | ||||||
|  |  | ||||||
|  | ################################################################################ | ||||||
|  | ### singledispatch() - single-dispatch generic function decorator | ||||||
|  | ################################################################################ | ||||||
|  |  | ||||||
def _c3_merge(sequences):
    """Merges MROs in *sequences* to a single MRO using the C3 algorithm.

    Adapted from http://www.python.org/download/releases/2.3/mro/.

    NOTE: the inner lists of *sequences* are consumed (mutated) in place.

    """
    result = []
    while True:
        sequences = [s for s in sequences if s]   # purge empty sequences
        if not sequences:
            return result
        for s1 in sequences:   # find merge candidates among seq heads
            candidate = s1[0]
            for s2 in sequences:
                if candidate in s2[1:]:
                    candidate = None
                    break      # reject the current head, it appears later
            else:
                break
        if candidate is None:
            # No head is acceptable: the input orderings contradict each other.
            raise RuntimeError("Inconsistent hierarchy")
        result.append(candidate)
        # remove the chosen candidate
        for seq in sequences:
            if seq[0] == candidate:
                del seq[0]
|  |  | ||||||
def _c3_mro(cls, abcs=None):
    """Computes the method resolution order using extended C3 linearization.

    If no *abcs* are given, the algorithm works exactly like the built-in C3
    linearization used for method resolution.

    If given, *abcs* is a list of abstract base classes that should be inserted
    into the resulting MRO. Unrelated ABCs are ignored and don't end up in the
    result. The algorithm inserts ABCs where their functionality is introduced,
    i.e. issubclass(cls, abc) returns True for the class itself but returns
    False for all its direct base classes. Implicit ABCs for a given class
    (either registered or inferred from the presence of a special method like
    __len__) are inserted directly after the last ABC explicitly listed in the
    MRO of said class. If two implicit ABCs end up next to each other in the
    resulting MRO, their ordering depends on the order of types in *abcs*.

    """
    # Find the index just past the last explicit base that is itself an ABC;
    # implicit ABCs will be inserted at that boundary.
    for i, base in enumerate(reversed(cls.__bases__)):
        if hasattr(base, '__abstractmethods__'):
            boundary = len(cls.__bases__) - i
            break   # Bases up to the last explicit ABC are considered first.
    else:
        boundary = 0
    abcs = list(abcs) if abcs else []
    explicit_bases = list(cls.__bases__[:boundary])
    abstract_bases = []
    other_bases = list(cls.__bases__[boundary:])
    for base in abcs:
        if issubclass(cls, base) and not any(
                issubclass(b, base) for b in cls.__bases__
            ):
            # If *cls* is the class that introduces behaviour described by
            # an ABC *base*, insert said ABC to its MRO.
            abstract_bases.append(base)
    for base in abstract_bases:
        # Each inserted ABC is claimed by exactly one class in the hierarchy.
        abcs.remove(base)
    explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases]
    abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases]
    other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases]
    return _c3_merge(
        [[cls]] +
        explicit_c3_mros + abstract_c3_mros + other_c3_mros +
        [explicit_bases] + [abstract_bases] + [other_bases]
    )
|  |  | ||||||
def _compose_mro(cls, types):
    """Calculates the method resolution order for a given class *cls*.

    Includes relevant abstract base classes (with their respective bases) from
    the *types* iterable. Uses a modified C3 linearization algorithm.

    """
    bases = set(cls.__mro__)
    # Remove entries which are already present in the __mro__ or unrelated.
    def is_related(typ):
        return (typ not in bases and hasattr(typ, '__mro__')
                                 and issubclass(cls, typ))
    types = [n for n in types if is_related(n)]
    # Remove entries which are strict bases of other entries (they will end up
    # in the MRO anyway.
    def is_strict_base(typ):
        for other in types:
            if typ != other and typ in other.__mro__:
                return True
        return False
    types = [n for n in types if not is_strict_base(n)]
    # Subclasses of the ABCs in *types* which are also implemented by
    # *cls* can be used to stabilize ABC ordering.
    type_set = set(types)
    mro = []
    for typ in types:
        found = []
        for sub in typ.__subclasses__():
            if sub not in bases and issubclass(cls, sub):
                found.append([s for s in sub.__mro__ if s in type_set])
        if not found:
            # No mediating subclass: keep the ABC itself in the order.
            mro.append(typ)
            continue
        # Favor subclasses with the biggest number of useful bases
        found.sort(key=len, reverse=True)
        for sub in found:
            for subcls in sub:
                if subcls not in mro:
                    mro.append(subcls)
    return _c3_mro(cls, abcs=mro)
|  |  | ||||||
def _find_impl(cls, registry):
    """Returns the best matching implementation from *registry* for type *cls*.

    Where there is no registered implementation for a specific type, its method
    resolution order is used to find a more generic implementation.

    Note: if *registry* does not contain an implementation for the base
    *object* type, this function may return None.

    Raises RuntimeError when two unrelated implicit ABCs match equally well.

    """
    mro = _compose_mro(cls, registry.keys())
    match = None
    for t in mro:
        if match is not None:
            # If *match* is an implicit ABC but there is another unrelated,
            # equally matching implicit ABC, refuse the temptation to guess.
            if (t in registry and t not in cls.__mro__
                              and match not in cls.__mro__
                              and not issubclass(match, t)):
                raise RuntimeError("Ambiguous dispatch: {} or {}".format(
                    match, t))
            break
        if t in registry:
            match = t
    return registry.get(match)
|  |  | ||||||
def singledispatch(func):
    """Single-dispatch generic function decorator.

    Transforms a function into a generic function, which can have different
    behaviours depending upon the type of its first argument. The decorated
    function acts as the default implementation, and additional
    implementations can be registered using the register() attribute of the
    generic function.

    """
    registry = {}                          # type -> implementation
    dispatch_cache = WeakKeyDictionary()   # memoized dispatch decisions
    cache_token = None                     # ABC cache token, set lazily below

    def dispatch(cls):
        """generic_func.dispatch(cls) -> <function implementation>

        Runs the dispatch algorithm to return the best available implementation
        for the given *cls* registered on *generic_func*.

        """
        nonlocal cache_token
        if cache_token is not None:
            # An ABC is registered: invalidate the memoized decisions whenever
            # the ABC registration state has changed since the last dispatch.
            current_token = get_cache_token()
            if cache_token != current_token:
                dispatch_cache.clear()
                cache_token = current_token
        try:
            impl = dispatch_cache[cls]
        except KeyError:
            try:
                impl = registry[cls]
            except KeyError:
                impl = _find_impl(cls, registry)
            dispatch_cache[cls] = impl
        return impl

    def register(cls, func=None):
        """generic_func.register(cls, func) -> func

        Registers a new implementation for the given *cls* on a *generic_func*.

        """
        nonlocal cache_token
        if func is None:
            # Support usage as a decorator: @generic_func.register(SomeType)
            return lambda f: register(cls, f)
        registry[cls] = func
        if cache_token is None and hasattr(cls, '__abstractmethods__'):
            cache_token = get_cache_token()
        dispatch_cache.clear()
        return func

    def wrapper(*args, **kw):
        # Dispatch solely on the runtime class of the first positional argument.
        return dispatch(args[0].__class__)(*args, **kw)

    registry[object] = func
    wrapper.register = register
    wrapper.dispatch = dispatch
    wrapper.registry = MappingProxyType(registry)   # read-only public view
    wrapper._clear_cache = dispatch_cache.clear
    update_wrapper(wrapper, func)
    return wrapper
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/genericpath.py |  | ||||||
							
								
								
									
										132
									
								
								v1/flask/lib/python3.4/genericpath.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										132
									
								
								v1/flask/lib/python3.4/genericpath.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,132 @@ | |||||||
|  | """ | ||||||
|  | Path operations common to more than one OS | ||||||
|  | Do not use directly.  The OS specific modules import the appropriate | ||||||
|  | functions from this module themselves. | ||||||
|  | """ | ||||||
|  | import os | ||||||
|  | import stat | ||||||
|  |  | ||||||
|  | __all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime', | ||||||
|  |            'getsize', 'isdir', 'isfile', 'samefile', 'sameopenfile', | ||||||
|  |            'samestat'] | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Does a path exist? | ||||||
|  | # This is false for dangling symbolic links on systems that support them. | ||||||
def exists(path):
    """Test whether a path exists.  Returns False for broken symbolic links"""
    try:
        os.stat(path)
        return True
    except OSError:
        return False
|  |  | ||||||
|  |  | ||||||
|  | # This follows symbolic links, so both islink() and isdir() can be true | ||||||
|  | # for the same path on systems that support symlinks | ||||||
def isfile(path):
    """Test whether a path is a regular file"""
    try:
        mode = os.stat(path).st_mode
    except OSError:
        return False
    return stat.S_ISREG(mode)
|  |  | ||||||
|  |  | ||||||
|  | # Is a path a directory? | ||||||
|  | # This follows symbolic links, so both islink() and isdir() | ||||||
|  | # can be true for the same path on systems that support symlinks | ||||||
def isdir(s):
    """Return true if the pathname refers to an existing directory."""
    try:
        mode = os.stat(s).st_mode
    except OSError:
        return False
    return stat.S_ISDIR(mode)
|  |  | ||||||
|  |  | ||||||
def getsize(filename):
    """Return the size of a file, reported by os.stat()."""
    st = os.stat(filename)
    return st.st_size
|  |  | ||||||
|  |  | ||||||
def getmtime(filename):
    """Return the last modification time of a file, reported by os.stat()."""
    st = os.stat(filename)
    return st.st_mtime
|  |  | ||||||
|  |  | ||||||
def getatime(filename):
    """Return the last access time of a file, reported by os.stat()."""
    st = os.stat(filename)
    return st.st_atime
|  |  | ||||||
|  |  | ||||||
def getctime(filename):
    """Return the metadata change time of a file, reported by os.stat()."""
    st = os.stat(filename)
    return st.st_ctime
|  |  | ||||||
|  |  | ||||||
|  | # Return the longest prefix of all list elements. | ||||||
def commonprefix(m):
    "Given a list of pathnames, returns the longest common leading component"
    if not m:
        return ''
    # The lexicographic minimum and maximum differ first where ANY pair of
    # strings differs, so comparing just these two suffices.
    lo = min(m)
    hi = max(m)
    prefix_len = 0
    for a, b in zip(lo, hi):
        if a != b:
            break
        prefix_len += 1
    return lo[:prefix_len]
|  |  | ||||||
|  | # Are two stat buffers (obtained from stat, fstat or lstat) | ||||||
|  | # describing the same file? | ||||||
def samestat(s1, s2):
    """Test whether two stat buffers reference the same file"""
    same_inode = s1.st_ino == s2.st_ino
    same_device = s1.st_dev == s2.st_dev
    return same_inode and same_device
|  |  | ||||||
|  |  | ||||||
|  | # Are two filenames really pointing to the same file? | ||||||
def samefile(f1, f2):
    """Test whether two pathnames reference the same actual file"""
    # Same file <=> same (device, inode) pair per os.stat().
    st1 = os.stat(f1)
    st2 = os.stat(f2)
    return st1.st_ino == st2.st_ino and st1.st_dev == st2.st_dev
|  |  | ||||||
|  |  | ||||||
|  | # Are two open files really referencing the same file? | ||||||
|  | # (Not necessarily the same file descriptor!) | ||||||
def sameopenfile(fp1, fp2):
    """Test whether two open file objects reference the same file"""
    # Same file <=> same (device, inode) pair per os.fstat().
    st1 = os.fstat(fp1)
    st2 = os.fstat(fp2)
    return st1.st_ino == st2.st_ino and st1.st_dev == st2.st_dev
|  |  | ||||||
|  |  | ||||||
|  | # Split a path in root and extension. | ||||||
|  | # The extension is everything starting at the last dot in the last | ||||||
|  | # pathname component; the root is everything before that. | ||||||
|  | # It is always true that root + ext == p. | ||||||
|  |  | ||||||
|  | # Generic implementation of splitext, to be parametrized with | ||||||
|  | # the separators | ||||||
def _splitext(p, sep, altsep, extsep):
    """Split the extension from a pathname.

    Extension is everything from the last dot to the end, ignoring
    leading dots.  Returns "(root, ext)"; ext may be empty."""
    # NOTE: This code must work for text and bytes strings.

    # Locate the start of the last path component (the rightmost separator,
    # considering both sep and, when given, altsep).
    sepIndex = p.rfind(sep)
    if altsep:
        altsepIndex = p.rfind(altsep)
        sepIndex = max(sepIndex, altsepIndex)

    dotIndex = p.rfind(extsep)
    if dotIndex > sepIndex:
        # skip all leading dots
        # A dot-run at the start of the final component (e.g. ".bashrc",
        # "..hidden") is part of the name, not an extension marker.
        filenameIndex = sepIndex + 1
        while filenameIndex < dotIndex:
            # One-character slice (not indexing) so the comparison against
            # extsep works identically for str and bytes inputs.
            if p[filenameIndex:filenameIndex+1] != extsep:
                return p[:dotIndex], p[dotIndex:]
            filenameIndex += 1

    # No extension: p[:0] yields an empty value of the same type as p.
    return p, p[:0]
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/hashlib.py |  | ||||||
							
								
								
									
										217
									
								
								v1/flask/lib/python3.4/hashlib.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										217
									
								
								v1/flask/lib/python3.4/hashlib.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,217 @@ | |||||||
|  | #.  Copyright (C) 2005-2010   Gregory P. Smith (greg@krypto.org) | ||||||
|  | #  Licensed to PSF under a Contributor Agreement. | ||||||
|  | # | ||||||
|  |  | ||||||
|  | __doc__ = """hashlib module - A common interface to many hash functions. | ||||||
|  |  | ||||||
|  | new(name, data=b'') - returns a new hash object implementing the | ||||||
|  |                       given hash function; initializing the hash | ||||||
|  |                       using the given binary data. | ||||||
|  |  | ||||||
|  | Named constructor functions are also available, these are faster | ||||||
|  | than using new(name): | ||||||
|  |  | ||||||
|  | md5(), sha1(), sha224(), sha256(), sha384(), and sha512() | ||||||
|  |  | ||||||
|  | More algorithms may be available on your platform but the above are guaranteed | ||||||
|  | to exist.  See the algorithms_guaranteed and algorithms_available attributes | ||||||
|  | to find out what algorithm names can be passed to new(). | ||||||
|  |  | ||||||
|  | NOTE: If you want the adler32 or crc32 hash functions they are available in | ||||||
|  | the zlib module. | ||||||
|  |  | ||||||
|  | Choose your hash function wisely.  Some have known collision weaknesses. | ||||||
|  | sha384 and sha512 will be slow on 32 bit platforms. | ||||||
|  |  | ||||||
|  | Hash objects have these methods: | ||||||
|  |  - update(arg): Update the hash object with the bytes in arg. Repeated calls | ||||||
|  |                 are equivalent to a single call with the concatenation of all | ||||||
|  |                 the arguments. | ||||||
|  |  - digest():    Return the digest of the bytes passed to the update() method | ||||||
|  |                 so far. | ||||||
|  |  - hexdigest(): Like digest() except the digest is returned as a unicode | ||||||
|  |                 object of double length, containing only hexadecimal digits. | ||||||
|  |  - copy():      Return a copy (clone) of the hash object. This can be used to | ||||||
|  |                 efficiently compute the digests of strings that share a common | ||||||
|  |                 initial substring. | ||||||
|  |  | ||||||
|  | For example, to obtain the digest of the string 'Nobody inspects the | ||||||
|  | spammish repetition': | ||||||
|  |  | ||||||
|  |     >>> import hashlib | ||||||
|  |     >>> m = hashlib.md5() | ||||||
|  |     >>> m.update(b"Nobody inspects") | ||||||
|  |     >>> m.update(b" the spammish repetition") | ||||||
|  |     >>> m.digest() | ||||||
|  |     b'\\xbbd\\x9c\\x83\\xdd\\x1e\\xa5\\xc9\\xd9\\xde\\xc9\\xa1\\x8d\\xf0\\xff\\xe9' | ||||||
|  |  | ||||||
|  | More condensed: | ||||||
|  |  | ||||||
|  |     >>> hashlib.sha224(b"Nobody inspects the spammish repetition").hexdigest() | ||||||
|  |     'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2' | ||||||
|  |  | ||||||
|  | """ | ||||||
|  |  | ||||||
# This tuple and __get_builtin_constructor() must be modified if a new
# always available algorithm is added.
__always_supported = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')

algorithms_guaranteed = set(__always_supported)
algorithms_available = set(__always_supported)

# NOTE(review): 'pbkdf2_hmac' is exported here but defined further down this
# module (outside this excerpt) — verify against the full file.
__all__ = __always_supported + ('new', 'algorithms_guaranteed',
                                'algorithms_available', 'pbkdf2_hmac')


# Per-name cache of builtin constructors, filled lazily by
# __get_builtin_constructor().
__builtin_constructor_cache = {}
|  |  | ||||||
def __get_builtin_constructor(name):
    """Return the builtin (non-OpenSSL) constructor for hash *name*.

    Lazily imports the private C accelerator modules (_md5, _sha1, _sha256,
    _sha512) and memoizes their constructors in __builtin_constructor_cache.
    Raises ValueError for unknown or unavailable hash names.
    """
    cache = __builtin_constructor_cache
    constructor = cache.get(name)
    if constructor is not None:
        return constructor
    try:
        if name in ('SHA1', 'sha1'):
            import _sha1
            cache['SHA1'] = cache['sha1'] = _sha1.sha1
        elif name in ('MD5', 'md5'):
            import _md5
            cache['MD5'] = cache['md5'] = _md5.md5
        elif name in ('SHA256', 'sha256', 'SHA224', 'sha224'):
            import _sha256
            cache['SHA224'] = cache['sha224'] = _sha256.sha224
            cache['SHA256'] = cache['sha256'] = _sha256.sha256
        elif name in ('SHA512', 'sha512', 'SHA384', 'sha384'):
            import _sha512
            cache['SHA384'] = cache['sha384'] = _sha512.sha384
            cache['SHA512'] = cache['sha512'] = _sha512.sha512
    except ImportError:
        pass  # no extension module, this hash is unsupported.

    # Re-check the cache: the import branch above may have populated it.
    constructor = cache.get(name)
    if constructor is not None:
        return constructor

    raise ValueError('unsupported hash type ' + name)
|  |  | ||||||
|  |  | ||||||
|  | def __get_openssl_constructor(name): | ||||||
|  |     try: | ||||||
|  |         f = getattr(_hashlib, 'openssl_' + name) | ||||||
|  |         # Allow the C module to raise ValueError.  The function will be | ||||||
|  |         # defined but the hash not actually available thanks to OpenSSL. | ||||||
|  |         f() | ||||||
|  |         # Use the C function directly (very fast) | ||||||
|  |         return f | ||||||
|  |     except (AttributeError, ValueError): | ||||||
|  |         return __get_builtin_constructor(name) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def __py_new(name, data=b''): | ||||||
|  |     """new(name, data=b'') - Return a new hashing object using the named algorithm; | ||||||
|  |     optionally initialized with data (which must be bytes). | ||||||
|  |     """ | ||||||
|  |     return __get_builtin_constructor(name)(data) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def __hash_new(name, data=b''): | ||||||
|  |     """new(name, data=b'') - Return a new hashing object using the named algorithm; | ||||||
|  |     optionally initialized with data (which must be bytes). | ||||||
|  |     """ | ||||||
|  |     try: | ||||||
|  |         return _hashlib.new(name, data) | ||||||
|  |     except ValueError: | ||||||
|  |         # If the _hashlib module (OpenSSL) doesn't support the named | ||||||
|  |         # hash, try using our builtin implementations. | ||||||
|  |         # This allows for SHA224/256 and SHA384/512 support even though | ||||||
|  |         # the OpenSSL library prior to 0.9.8 doesn't provide them. | ||||||
|  |         return __get_builtin_constructor(name)(data) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | try: | ||||||
|  |     import _hashlib | ||||||
|  |     new = __hash_new | ||||||
|  |     __get_hash = __get_openssl_constructor | ||||||
|  |     algorithms_available = algorithms_available.union( | ||||||
|  |             _hashlib.openssl_md_meth_names) | ||||||
|  | except ImportError: | ||||||
|  |     new = __py_new | ||||||
|  |     __get_hash = __get_builtin_constructor | ||||||
|  |  | ||||||
|  | try: | ||||||
|  |     # OpenSSL's PKCS5_PBKDF2_HMAC requires OpenSSL 1.0+ with HMAC and SHA | ||||||
|  |     from _hashlib import pbkdf2_hmac | ||||||
|  | except ImportError: | ||||||
|  |     _trans_5C = bytes((x ^ 0x5C) for x in range(256)) | ||||||
|  |     _trans_36 = bytes((x ^ 0x36) for x in range(256)) | ||||||
|  |  | ||||||
|  |     def pbkdf2_hmac(hash_name, password, salt, iterations, dklen=None): | ||||||
|  |         """Password based key derivation function 2 (PKCS #5 v2.0) | ||||||
|  |  | ||||||
|  |         This Python implementations based on the hmac module about as fast | ||||||
|  |         as OpenSSL's PKCS5_PBKDF2_HMAC for short passwords and much faster | ||||||
|  |         for long passwords. | ||||||
|  |         """ | ||||||
|  |         if not isinstance(hash_name, str): | ||||||
|  |             raise TypeError(hash_name) | ||||||
|  |  | ||||||
|  |         if not isinstance(password, (bytes, bytearray)): | ||||||
|  |             password = bytes(memoryview(password)) | ||||||
|  |         if not isinstance(salt, (bytes, bytearray)): | ||||||
|  |             salt = bytes(memoryview(salt)) | ||||||
|  |  | ||||||
|  |         # Fast inline HMAC implementation | ||||||
|  |         inner = new(hash_name) | ||||||
|  |         outer = new(hash_name) | ||||||
|  |         blocksize = getattr(inner, 'block_size', 64) | ||||||
|  |         if len(password) > blocksize: | ||||||
|  |             password = new(hash_name, password).digest() | ||||||
|  |         password = password + b'\x00' * (blocksize - len(password)) | ||||||
|  |         inner.update(password.translate(_trans_36)) | ||||||
|  |         outer.update(password.translate(_trans_5C)) | ||||||
|  |  | ||||||
|  |         def prf(msg, inner=inner, outer=outer): | ||||||
|  |             # PBKDF2_HMAC uses the password as key. We can re-use the same | ||||||
|  |             # digest objects and just update copies to skip initialization. | ||||||
|  |             icpy = inner.copy() | ||||||
|  |             ocpy = outer.copy() | ||||||
|  |             icpy.update(msg) | ||||||
|  |             ocpy.update(icpy.digest()) | ||||||
|  |             return ocpy.digest() | ||||||
|  |  | ||||||
|  |         if iterations < 1: | ||||||
|  |             raise ValueError(iterations) | ||||||
|  |         if dklen is None: | ||||||
|  |             dklen = outer.digest_size | ||||||
|  |         if dklen < 1: | ||||||
|  |             raise ValueError(dklen) | ||||||
|  |  | ||||||
|  |         dkey = b'' | ||||||
|  |         loop = 1 | ||||||
|  |         from_bytes = int.from_bytes | ||||||
|  |         while len(dkey) < dklen: | ||||||
|  |             prev = prf(salt + loop.to_bytes(4, 'big')) | ||||||
|  |             # endianess doesn't matter here as long to / from use the same | ||||||
|  |             rkey = int.from_bytes(prev, 'big') | ||||||
|  |             for i in range(iterations - 1): | ||||||
|  |                 prev = prf(prev) | ||||||
|  |                 # rkey = rkey ^ prev | ||||||
|  |                 rkey ^= from_bytes(prev, 'big') | ||||||
|  |             loop += 1 | ||||||
|  |             dkey += rkey.to_bytes(inner.digest_size, 'big') | ||||||
|  |  | ||||||
|  |         return dkey[:dklen] | ||||||
|  |  | ||||||
|  |  | ||||||
|  | for __func_name in __always_supported: | ||||||
|  |     # try them all, some may not work due to the OpenSSL | ||||||
|  |     # version not supporting that algorithm. | ||||||
|  |     try: | ||||||
|  |         globals()[__func_name] = __get_hash(__func_name) | ||||||
|  |     except ValueError: | ||||||
|  |         import logging | ||||||
|  |         logging.exception('code for hash %s was not found.', __func_name) | ||||||
|  |  | ||||||
|  | # Cleanup locals() | ||||||
|  | del __always_supported, __func_name, __get_hash | ||||||
|  | del __py_new, __hash_new, __get_openssl_constructor | ||||||
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/heapq.py |  | ||||||
							
								
								
									
										476
									
								
								v1/flask/lib/python3.4/heapq.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										476
									
								
								v1/flask/lib/python3.4/heapq.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,476 @@ | |||||||
|  | """Heap queue algorithm (a.k.a. priority queue). | ||||||
|  |  | ||||||
|  | Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for | ||||||
|  | all k, counting elements from 0.  For the sake of comparison, | ||||||
|  | non-existing elements are considered to be infinite.  The interesting | ||||||
|  | property of a heap is that a[0] is always its smallest element. | ||||||
|  |  | ||||||
|  | Usage: | ||||||
|  |  | ||||||
|  | heap = []            # creates an empty heap | ||||||
|  | heappush(heap, item) # pushes a new item on the heap | ||||||
|  | item = heappop(heap) # pops the smallest item from the heap | ||||||
|  | item = heap[0]       # smallest item on the heap without popping it | ||||||
|  | heapify(x)           # transforms list into a heap, in-place, in linear time | ||||||
|  | item = heapreplace(heap, item) # pops and returns smallest item, and adds | ||||||
|  |                                # new item; the heap size is unchanged | ||||||
|  |  | ||||||
|  | Our API differs from textbook heap algorithms as follows: | ||||||
|  |  | ||||||
|  | - We use 0-based indexing.  This makes the relationship between the | ||||||
|  |   index for a node and the indexes for its children slightly less | ||||||
|  |   obvious, but is more suitable since Python uses 0-based indexing. | ||||||
|  |  | ||||||
|  | - Our heappop() method returns the smallest item, not the largest. | ||||||
|  |  | ||||||
|  | These two make it possible to view the heap as a regular Python list | ||||||
|  | without surprises: heap[0] is the smallest item, and heap.sort() | ||||||
|  | maintains the heap invariant! | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | # Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger | ||||||
|  |  | ||||||
|  | __about__ = """Heap queues | ||||||
|  |  | ||||||
|  | [explanation by François Pinard] | ||||||
|  |  | ||||||
|  | Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for | ||||||
|  | all k, counting elements from 0.  For the sake of comparison, | ||||||
|  | non-existing elements are considered to be infinite.  The interesting | ||||||
|  | property of a heap is that a[0] is always its smallest element. | ||||||
|  |  | ||||||
|  | The strange invariant above is meant to be an efficient memory | ||||||
|  | representation for a tournament.  The numbers below are `k', not a[k]: | ||||||
|  |  | ||||||
|  |                                    0 | ||||||
|  |  | ||||||
|  |                   1                                 2 | ||||||
|  |  | ||||||
|  |           3               4                5               6 | ||||||
|  |  | ||||||
|  |       7       8       9       10      11      12      13      14 | ||||||
|  |  | ||||||
|  |     15 16   17 18   19 20   21 22   23 24   25 26   27 28   29 30 | ||||||
|  |  | ||||||
|  |  | ||||||
|  | In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'.  In | ||||||
|  | an usual binary tournament we see in sports, each cell is the winner | ||||||
|  | over the two cells it tops, and we can trace the winner down the tree | ||||||
|  | to see all opponents s/he had.  However, in many computer applications | ||||||
|  | of such tournaments, we do not need to trace the history of a winner. | ||||||
|  | To be more memory efficient, when a winner is promoted, we try to | ||||||
|  | replace it by something else at a lower level, and the rule becomes | ||||||
|  | that a cell and the two cells it tops contain three different items, | ||||||
|  | but the top cell "wins" over the two topped cells. | ||||||
|  |  | ||||||
|  | If this heap invariant is protected at all time, index 0 is clearly | ||||||
|  | the overall winner.  The simplest algorithmic way to remove it and | ||||||
|  | find the "next" winner is to move some loser (let's say cell 30 in the | ||||||
|  | diagram above) into the 0 position, and then percolate this new 0 down | ||||||
|  | the tree, exchanging values, until the invariant is re-established. | ||||||
|  | This is clearly logarithmic on the total number of items in the tree. | ||||||
|  | By iterating over all items, you get an O(n ln n) sort. | ||||||
|  |  | ||||||
|  | A nice feature of this sort is that you can efficiently insert new | ||||||
|  | items while the sort is going on, provided that the inserted items are | ||||||
|  | not "better" than the last 0'th element you extracted.  This is | ||||||
|  | especially useful in simulation contexts, where the tree holds all | ||||||
|  | incoming events, and the "win" condition means the smallest scheduled | ||||||
|  | time.  When an event schedule other events for execution, they are | ||||||
|  | scheduled into the future, so they can easily go into the heap.  So, a | ||||||
|  | heap is a good structure for implementing schedulers (this is what I | ||||||
|  | used for my MIDI sequencer :-). | ||||||
|  |  | ||||||
|  | Various structures for implementing schedulers have been extensively | ||||||
|  | studied, and heaps are good for this, as they are reasonably speedy, | ||||||
|  | the speed is almost constant, and the worst case is not much different | ||||||
|  | than the average case.  However, there are other representations which | ||||||
|  | are more efficient overall, yet the worst cases might be terrible. | ||||||
|  |  | ||||||
|  | Heaps are also very useful in big disk sorts.  You most probably all | ||||||
|  | know that a big sort implies producing "runs" (which are pre-sorted | ||||||
|  | sequences, which size is usually related to the amount of CPU memory), | ||||||
|  | followed by a merging passes for these runs, which merging is often | ||||||
|  | very cleverly organised[1].  It is very important that the initial | ||||||
|  | sort produces the longest runs possible.  Tournaments are a good way | ||||||
|  | to that.  If, using all the memory available to hold a tournament, you | ||||||
|  | replace and percolate items that happen to fit the current run, you'll | ||||||
|  | produce runs which are twice the size of the memory for random input, | ||||||
|  | and much better for input fuzzily ordered. | ||||||
|  |  | ||||||
|  | Moreover, if you output the 0'th item on disk and get an input which | ||||||
|  | may not fit in the current tournament (because the value "wins" over | ||||||
|  | the last output value), it cannot fit in the heap, so the size of the | ||||||
|  | heap decreases.  The freed memory could be cleverly reused immediately | ||||||
|  | for progressively building a second heap, which grows at exactly the | ||||||
|  | same rate the first heap is melting.  When the first heap completely | ||||||
|  | vanishes, you switch heaps and start a new run.  Clever and quite | ||||||
|  | effective! | ||||||
|  |  | ||||||
|  | In a word, heaps are useful memory structures to know.  I use them in | ||||||
|  | a few applications, and I think it is good to keep a `heap' module | ||||||
|  | around. :-) | ||||||
|  |  | ||||||
|  | -------------------- | ||||||
|  | [1] The disk balancing algorithms which are current, nowadays, are | ||||||
|  | more annoying than clever, and this is a consequence of the seeking | ||||||
|  | capabilities of the disks.  On devices which cannot seek, like big | ||||||
|  | tape drives, the story was quite different, and one had to be very | ||||||
|  | clever to ensure (far in advance) that each tape movement will be the | ||||||
|  | most effective possible (that is, will best participate at | ||||||
|  | "progressing" the merge).  Some tapes were even able to read | ||||||
|  | backwards, and this was also used to avoid the rewinding time. | ||||||
|  | Believe me, real good tape sorts were quite spectacular to watch! | ||||||
|  | From all times, sorting has always been a Great Art! :-) | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | __all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge', | ||||||
|  |            'nlargest', 'nsmallest', 'heappushpop'] | ||||||
|  |  | ||||||
|  | from itertools import islice, count, tee, chain | ||||||
|  |  | ||||||
|  | def heappush(heap, item): | ||||||
|  |     """Push item onto heap, maintaining the heap invariant.""" | ||||||
|  |     heap.append(item) | ||||||
|  |     _siftdown(heap, 0, len(heap)-1) | ||||||
|  |  | ||||||
|  | def heappop(heap): | ||||||
|  |     """Pop the smallest item off the heap, maintaining the heap invariant.""" | ||||||
|  |     lastelt = heap.pop()    # raises appropriate IndexError if heap is empty | ||||||
|  |     if heap: | ||||||
|  |         returnitem = heap[0] | ||||||
|  |         heap[0] = lastelt | ||||||
|  |         _siftup(heap, 0) | ||||||
|  |     else: | ||||||
|  |         returnitem = lastelt | ||||||
|  |     return returnitem | ||||||
|  |  | ||||||
|  | def heapreplace(heap, item): | ||||||
|  |     """Pop and return the current smallest value, and add the new item. | ||||||
|  |  | ||||||
|  |     This is more efficient than heappop() followed by heappush(), and can be | ||||||
|  |     more appropriate when using a fixed-size heap.  Note that the value | ||||||
|  |     returned may be larger than item!  That constrains reasonable uses of | ||||||
|  |     this routine unless written as part of a conditional replacement: | ||||||
|  |  | ||||||
|  |         if item > heap[0]: | ||||||
|  |             item = heapreplace(heap, item) | ||||||
|  |     """ | ||||||
|  |     returnitem = heap[0]    # raises appropriate IndexError if heap is empty | ||||||
|  |     heap[0] = item | ||||||
|  |     _siftup(heap, 0) | ||||||
|  |     return returnitem | ||||||
|  |  | ||||||
|  | def heappushpop(heap, item): | ||||||
|  |     """Fast version of a heappush followed by a heappop.""" | ||||||
|  |     if heap and heap[0] < item: | ||||||
|  |         item, heap[0] = heap[0], item | ||||||
|  |         _siftup(heap, 0) | ||||||
|  |     return item | ||||||
|  |  | ||||||
|  | def heapify(x): | ||||||
|  |     """Transform list into a heap, in-place, in O(len(x)) time.""" | ||||||
|  |     n = len(x) | ||||||
|  |     # Transform bottom-up.  The largest index there's any point to looking at | ||||||
|  |     # is the largest with a child index in-range, so must have 2*i + 1 < n, | ||||||
|  |     # or i < (n-1)/2.  If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so | ||||||
|  |     # j-1 is the largest, which is n//2 - 1.  If n is odd = 2*j+1, this is | ||||||
|  |     # (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1. | ||||||
|  |     for i in reversed(range(n//2)): | ||||||
|  |         _siftup(x, i) | ||||||
|  |  | ||||||
|  | def _heappushpop_max(heap, item): | ||||||
|  |     """Maxheap version of a heappush followed by a heappop.""" | ||||||
|  |     if heap and item < heap[0]: | ||||||
|  |         item, heap[0] = heap[0], item | ||||||
|  |         _siftup_max(heap, 0) | ||||||
|  |     return item | ||||||
|  |  | ||||||
|  | def _heapify_max(x): | ||||||
|  |     """Transform list into a maxheap, in-place, in O(len(x)) time.""" | ||||||
|  |     n = len(x) | ||||||
|  |     for i in reversed(range(n//2)): | ||||||
|  |         _siftup_max(x, i) | ||||||
|  |  | ||||||
|  | def nlargest(n, iterable): | ||||||
|  |     """Find the n largest elements in a dataset. | ||||||
|  |  | ||||||
|  |     Equivalent to:  sorted(iterable, reverse=True)[:n] | ||||||
|  |     """ | ||||||
|  |     if n < 0: | ||||||
|  |         return [] | ||||||
|  |     it = iter(iterable) | ||||||
|  |     result = list(islice(it, n)) | ||||||
|  |     if not result: | ||||||
|  |         return result | ||||||
|  |     heapify(result) | ||||||
|  |     _heappushpop = heappushpop | ||||||
|  |     for elem in it: | ||||||
|  |         _heappushpop(result, elem) | ||||||
|  |     result.sort(reverse=True) | ||||||
|  |     return result | ||||||
|  |  | ||||||
|  | def nsmallest(n, iterable): | ||||||
|  |     """Find the n smallest elements in a dataset. | ||||||
|  |  | ||||||
|  |     Equivalent to:  sorted(iterable)[:n] | ||||||
|  |     """ | ||||||
|  |     if n < 0: | ||||||
|  |         return [] | ||||||
|  |     it = iter(iterable) | ||||||
|  |     result = list(islice(it, n)) | ||||||
|  |     if not result: | ||||||
|  |         return result | ||||||
|  |     _heapify_max(result) | ||||||
|  |     _heappushpop = _heappushpop_max | ||||||
|  |     for elem in it: | ||||||
|  |         _heappushpop(result, elem) | ||||||
|  |     result.sort() | ||||||
|  |     return result | ||||||
|  |  | ||||||
|  | # 'heap' is a heap at all indices >= startpos, except possibly for pos.  pos | ||||||
|  | # is the index of a leaf with a possibly out-of-order value.  Restore the | ||||||
|  | # heap invariant. | ||||||
|  | def _siftdown(heap, startpos, pos): | ||||||
|  |     newitem = heap[pos] | ||||||
|  |     # Follow the path to the root, moving parents down until finding a place | ||||||
|  |     # newitem fits. | ||||||
|  |     while pos > startpos: | ||||||
|  |         parentpos = (pos - 1) >> 1 | ||||||
|  |         parent = heap[parentpos] | ||||||
|  |         if newitem < parent: | ||||||
|  |             heap[pos] = parent | ||||||
|  |             pos = parentpos | ||||||
|  |             continue | ||||||
|  |         break | ||||||
|  |     heap[pos] = newitem | ||||||
|  |  | ||||||
|  | # The child indices of heap index pos are already heaps, and we want to make | ||||||
|  | # a heap at index pos too.  We do this by bubbling the smaller child of | ||||||
|  | # pos up (and so on with that child's children, etc) until hitting a leaf, | ||||||
|  | # then using _siftdown to move the oddball originally at index pos into place. | ||||||
|  | # | ||||||
|  | # We *could* break out of the loop as soon as we find a pos where newitem <= | ||||||
|  | # both its children, but turns out that's not a good idea, and despite that | ||||||
|  | # many books write the algorithm that way.  During a heap pop, the last array | ||||||
|  | # element is sifted in, and that tends to be large, so that comparing it | ||||||
|  | # against values starting from the root usually doesn't pay (= usually doesn't | ||||||
|  | # get us out of the loop early).  See Knuth, Volume 3, where this is | ||||||
|  | # explained and quantified in an exercise. | ||||||
|  | # | ||||||
|  | # Cutting the # of comparisons is important, since these routines have no | ||||||
|  | # way to extract "the priority" from an array element, so that intelligence | ||||||
|  | # is likely to be hiding in custom comparison methods, or in array elements | ||||||
|  | # storing (priority, record) tuples.  Comparisons are thus potentially | ||||||
|  | # expensive. | ||||||
|  | # | ||||||
|  | # On random arrays of length 1000, making this change cut the number of | ||||||
|  | # comparisons made by heapify() a little, and those made by exhaustive | ||||||
|  | # heappop() a lot, in accord with theory.  Here are typical results from 3 | ||||||
|  | # runs (3 just to demonstrate how small the variance is): | ||||||
|  | # | ||||||
|  | # Compares needed by heapify     Compares needed by 1000 heappops | ||||||
|  | # --------------------------     -------------------------------- | ||||||
|  | # 1837 cut to 1663               14996 cut to 8680 | ||||||
|  | # 1855 cut to 1659               14966 cut to 8678 | ||||||
|  | # 1847 cut to 1660               15024 cut to 8703 | ||||||
|  | # | ||||||
|  | # Building the heap by using heappush() 1000 times instead required | ||||||
|  | # 2198, 2148, and 2219 compares:  heapify() is more efficient, when | ||||||
|  | # you can use it. | ||||||
|  | # | ||||||
|  | # The total compares needed by list.sort() on the same lists were 8627, | ||||||
|  | # 8627, and 8632 (this should be compared to the sum of heapify() and | ||||||
|  | # heappop() compares):  list.sort() is (unsurprisingly!) more efficient | ||||||
|  | # for sorting. | ||||||
|  |  | ||||||
|  | def _siftup(heap, pos): | ||||||
|  |     endpos = len(heap) | ||||||
|  |     startpos = pos | ||||||
|  |     newitem = heap[pos] | ||||||
|  |     # Bubble up the smaller child until hitting a leaf. | ||||||
|  |     childpos = 2*pos + 1    # leftmost child position | ||||||
|  |     while childpos < endpos: | ||||||
|  |         # Set childpos to index of smaller child. | ||||||
|  |         rightpos = childpos + 1 | ||||||
|  |         if rightpos < endpos and not heap[childpos] < heap[rightpos]: | ||||||
|  |             childpos = rightpos | ||||||
|  |         # Move the smaller child up. | ||||||
|  |         heap[pos] = heap[childpos] | ||||||
|  |         pos = childpos | ||||||
|  |         childpos = 2*pos + 1 | ||||||
|  |     # The leaf at pos is empty now.  Put newitem there, and bubble it up | ||||||
|  |     # to its final resting place (by sifting its parents down). | ||||||
|  |     heap[pos] = newitem | ||||||
|  |     _siftdown(heap, startpos, pos) | ||||||
|  |  | ||||||
|  | def _siftdown_max(heap, startpos, pos): | ||||||
|  |     'Maxheap variant of _siftdown' | ||||||
|  |     newitem = heap[pos] | ||||||
|  |     # Follow the path to the root, moving parents down until finding a place | ||||||
|  |     # newitem fits. | ||||||
|  |     while pos > startpos: | ||||||
|  |         parentpos = (pos - 1) >> 1 | ||||||
|  |         parent = heap[parentpos] | ||||||
|  |         if parent < newitem: | ||||||
|  |             heap[pos] = parent | ||||||
|  |             pos = parentpos | ||||||
|  |             continue | ||||||
|  |         break | ||||||
|  |     heap[pos] = newitem | ||||||
|  |  | ||||||
|  | def _siftup_max(heap, pos): | ||||||
|  |     'Maxheap variant of _siftup' | ||||||
|  |     endpos = len(heap) | ||||||
|  |     startpos = pos | ||||||
|  |     newitem = heap[pos] | ||||||
|  |     # Bubble up the larger child until hitting a leaf. | ||||||
|  |     childpos = 2*pos + 1    # leftmost child position | ||||||
|  |     while childpos < endpos: | ||||||
|  |         # Set childpos to index of larger child. | ||||||
|  |         rightpos = childpos + 1 | ||||||
|  |         if rightpos < endpos and not heap[rightpos] < heap[childpos]: | ||||||
|  |             childpos = rightpos | ||||||
|  |         # Move the larger child up. | ||||||
|  |         heap[pos] = heap[childpos] | ||||||
|  |         pos = childpos | ||||||
|  |         childpos = 2*pos + 1 | ||||||
|  |     # The leaf at pos is empty now.  Put newitem there, and bubble it up | ||||||
|  |     # to its final resting place (by sifting its parents down). | ||||||
|  |     heap[pos] = newitem | ||||||
|  |     _siftdown_max(heap, startpos, pos) | ||||||
|  |  | ||||||
|  | # If available, use C implementation | ||||||
|  | try: | ||||||
|  |     from _heapq import * | ||||||
|  | except ImportError: | ||||||
|  |     pass | ||||||
|  |  | ||||||
|  | def merge(*iterables): | ||||||
|  |     '''Merge multiple sorted inputs into a single sorted output. | ||||||
|  |  | ||||||
|  |     Similar to sorted(itertools.chain(*iterables)) but returns a generator, | ||||||
|  |     does not pull the data into memory all at once, and assumes that each of | ||||||
|  |     the input streams is already sorted (smallest to largest). | ||||||
|  |  | ||||||
|  |     >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25])) | ||||||
|  |     [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25] | ||||||
|  |  | ||||||
|  |     ''' | ||||||
|  |     _heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration | ||||||
|  |     _len = len | ||||||
|  |  | ||||||
|  |     h = [] | ||||||
|  |     h_append = h.append | ||||||
|  |     for itnum, it in enumerate(map(iter, iterables)): | ||||||
|  |         try: | ||||||
|  |             next = it.__next__ | ||||||
|  |             h_append([next(), itnum, next]) | ||||||
|  |         except _StopIteration: | ||||||
|  |             pass | ||||||
|  |     heapify(h) | ||||||
|  |  | ||||||
|  |     while _len(h) > 1: | ||||||
|  |         try: | ||||||
|  |             while True: | ||||||
|  |                 v, itnum, next = s = h[0] | ||||||
|  |                 yield v | ||||||
|  |                 s[0] = next()               # raises StopIteration when exhausted | ||||||
|  |                 _heapreplace(h, s)          # restore heap condition | ||||||
|  |         except _StopIteration: | ||||||
|  |             _heappop(h)                     # remove empty iterator | ||||||
|  |     if h: | ||||||
|  |         # fast case when only a single iterator remains | ||||||
|  |         v, itnum, next = h[0] | ||||||
|  |         yield v | ||||||
|  |         yield from next.__self__ | ||||||
|  |  | ||||||
|  | # Extend the implementations of nsmallest and nlargest to use a key= argument | ||||||
|  | _nsmallest = nsmallest | ||||||
|  | def nsmallest(n, iterable, key=None): | ||||||
|  |     """Find the n smallest elements in a dataset. | ||||||
|  |  | ||||||
|  |     Equivalent to:  sorted(iterable, key=key)[:n] | ||||||
|  |     """ | ||||||
|  |     # Short-cut for n==1 is to use min() when len(iterable)>0 | ||||||
|  |     if n == 1: | ||||||
|  |         it = iter(iterable) | ||||||
|  |         head = list(islice(it, 1)) | ||||||
|  |         if not head: | ||||||
|  |             return [] | ||||||
|  |         if key is None: | ||||||
|  |             return [min(chain(head, it))] | ||||||
|  |         return [min(chain(head, it), key=key)] | ||||||
|  |  | ||||||
|  |     # When n>=size, it's faster to use sorted() | ||||||
|  |     try: | ||||||
|  |         size = len(iterable) | ||||||
|  |     except (TypeError, AttributeError): | ||||||
|  |         pass | ||||||
|  |     else: | ||||||
|  |         if n >= size: | ||||||
|  |             return sorted(iterable, key=key)[:n] | ||||||
|  |  | ||||||
|  |     # When key is none, use simpler decoration | ||||||
|  |     if key is None: | ||||||
|  |         it = zip(iterable, count())                         # decorate | ||||||
|  |         result = _nsmallest(n, it) | ||||||
|  |         return [r[0] for r in result]                       # undecorate | ||||||
|  |  | ||||||
|  |     # General case, slowest method | ||||||
|  |     in1, in2 = tee(iterable) | ||||||
|  |     it = zip(map(key, in1), count(), in2)                   # decorate | ||||||
|  |     result = _nsmallest(n, it) | ||||||
|  |     return [r[2] for r in result]                           # undecorate | ||||||
|  |  | ||||||
|  | _nlargest = nlargest | ||||||
|  | def nlargest(n, iterable, key=None): | ||||||
|  |     """Find the n largest elements in a dataset. | ||||||
|  |  | ||||||
|  |     Equivalent to:  sorted(iterable, key=key, reverse=True)[:n] | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     # Short-cut for n==1 is to use max() when len(iterable)>0 | ||||||
|  |     if n == 1: | ||||||
|  |         it = iter(iterable) | ||||||
|  |         head = list(islice(it, 1)) | ||||||
|  |         if not head: | ||||||
|  |             return [] | ||||||
|  |         if key is None: | ||||||
|  |             return [max(chain(head, it))] | ||||||
|  |         return [max(chain(head, it), key=key)] | ||||||
|  |  | ||||||
|  |     # When n>=size, it's faster to use sorted() | ||||||
|  |     try: | ||||||
|  |         size = len(iterable) | ||||||
|  |     except (TypeError, AttributeError): | ||||||
|  |         pass | ||||||
|  |     else: | ||||||
|  |         if n >= size: | ||||||
|  |             return sorted(iterable, key=key, reverse=True)[:n] | ||||||
|  |  | ||||||
|  |     # When key is none, use simpler decoration | ||||||
|  |     if key is None: | ||||||
|  |         it = zip(iterable, count(0,-1))                     # decorate | ||||||
|  |         result = _nlargest(n, it) | ||||||
|  |         return [r[0] for r in result]                       # undecorate | ||||||
|  |  | ||||||
|  |     # General case, slowest method | ||||||
|  |     in1, in2 = tee(iterable) | ||||||
|  |     it = zip(map(key, in1), count(0,-1), in2)               # decorate | ||||||
|  |     result = _nlargest(n, it) | ||||||
|  |     return [r[2] for r in result]                           # undecorate | ||||||
|  |  | ||||||
|  | if __name__ == "__main__": | ||||||
|  |     # Simple sanity test | ||||||
|  |     heap = [] | ||||||
|  |     data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0] | ||||||
|  |     for item in data: | ||||||
|  |         heappush(heap, item) | ||||||
|  |     sort = [] | ||||||
|  |     while heap: | ||||||
|  |         sort.append(heappop(heap)) | ||||||
|  |     print(sort) | ||||||
|  |  | ||||||
|  |     import doctest | ||||||
|  |     doctest.testmod() | ||||||
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/hmac.py |  | ||||||
							
								
								
									
										144
									
								
								v1/flask/lib/python3.4/hmac.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										144
									
								
								v1/flask/lib/python3.4/hmac.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,144 @@ | |||||||
|  | """HMAC (Keyed-Hashing for Message Authentication) Python module. | ||||||
|  |  | ||||||
|  | Implements the HMAC algorithm as described by RFC 2104. | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | import warnings as _warnings | ||||||
|  | from _operator import _compare_digest as compare_digest | ||||||
|  | import hashlib as _hashlib | ||||||
|  |  | ||||||
# Precomputed translation tables XOR-ing every byte with the RFC 2104
# opad (0x5C) and ipad (0x36) constants; used to key the outer/inner hashes.
trans_5C = bytes(x ^ 0x5C for x in range(256))
trans_36 = bytes(x ^ 0x36 for x in range(256))

# The size of the digests returned by HMAC depends on the underlying
# hashing module used.  Use digest_size from the instance of HMAC instead.
digest_size = None
|  |  | ||||||
|  |  | ||||||
|  |  | ||||||
class HMAC:
    """RFC 2104 HMAC class.  Also complies with RFC 4231.

    This supports the API for Cryptographic Hash Functions (PEP 247).
    """
    blocksize = 64  # 512-bit HMAC; can be changed in subclasses.

    def __init__(self, key, msg = None, digestmod = None):
        """Create a new HMAC object.

        key:       key for the keyed hash object.
        msg:       Initial input for the hash, if provided.
        digestmod: A module supporting PEP 247.  *OR*
                   A hashlib constructor returning a new hash object. *OR*
                   A hash name suitable for hashlib.new().
                   Defaults to hashlib.md5.
                   Implicit default to hashlib.md5 is deprecated and will be
                   removed in Python 3.6.

        Note: key and msg must be a bytes or bytearray objects.
        """

        if not isinstance(key, (bytes, bytearray)):
            raise TypeError("key: expected bytes or bytearray, but got %r" % type(key).__name__)

        if digestmod is None:
            _warnings.warn("HMAC() without an explicit digestmod argument "
                           "is deprecated.", PendingDeprecationWarning, 2)
            digestmod = _hashlib.md5

        # Normalize the three accepted digestmod forms into a single
        # zero-or-one-argument constructor callable.
        if callable(digestmod):
            self.digest_cons = digestmod
        elif isinstance(digestmod, str):
            self.digest_cons = lambda d=b'': _hashlib.new(digestmod, d)
        else:
            self.digest_cons = lambda d=b'': digestmod.new(d)

        # RFC 2104: HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)).
        # ``inner`` absorbs the message; ``outer`` performs the final wrap.
        self.outer = self.digest_cons()
        self.inner = self.digest_cons()
        self.digest_size = self.inner.digest_size

        if hasattr(self.inner, 'block_size'):
            blocksize = self.inner.block_size
            if blocksize < 16:
                _warnings.warn('block_size of %d seems too small; using our '
                               'default of %d.' % (blocksize, self.blocksize),
                               RuntimeWarning, 2)
                blocksize = self.blocksize
        else:
            _warnings.warn('No block_size attribute on given digest object; '
                           'Assuming %d.' % (self.blocksize),
                           RuntimeWarning, 2)
            blocksize = self.blocksize

        # self.blocksize is the default blocksize. self.block_size is
        # effective block size as well as the public API attribute.
        self.block_size = blocksize

        # RFC 2104: keys longer than one block are first hashed down.
        if len(key) > blocksize:
            key = self.digest_cons(key).digest()

        # Zero-pad the key to exactly one block, then feed the opad/ipad
        # XORed variants into the outer/inner hashes via the translation
        # tables precomputed at module level.
        key = key + bytes(blocksize - len(key))
        self.outer.update(key.translate(trans_5C))
        self.inner.update(key.translate(trans_36))
        if msg is not None:
            self.update(msg)

    @property
    def name(self):
        # e.g. "hmac-sha256"; mirrors hashlib's ``name`` convention.
        return "hmac-" + self.inner.name

    def update(self, msg):
        """Update this hashing object with the string msg.
        """
        # Only the inner hash sees the message stream.
        self.inner.update(msg)

    def copy(self):
        """Return a separate copy of this hashing object.

        An update to this copy won't affect the original object.
        """
        # Call __new__ directly to avoid the expensive __init__.
        other = self.__class__.__new__(self.__class__)
        other.digest_cons = self.digest_cons
        other.digest_size = self.digest_size
        other.inner = self.inner.copy()
        other.outer = self.outer.copy()
        return other

    def _current(self):
        """Return a hash object for the current state.

        To be used only internally with digest() and hexdigest().
        """
        # Work on a copy of the outer hash so finalizing is non-destructive.
        h = self.outer.copy()
        h.update(self.inner.digest())
        return h

    def digest(self):
        """Return the hash value of this hashing object.

        This returns a string containing 8-bit data.  The object is
        not altered in any way by this function; you can continue
        updating the object after calling this function.
        """
        h = self._current()
        return h.digest()

    def hexdigest(self):
        """Like digest(), but returns a string of hexadecimal digits instead.
        """
        h = self._current()
        return h.hexdigest()
|  |  | ||||||
def new(key, msg = None, digestmod = None):
    """Return a fresh HMAC hashing object.

    key:       starting key for the keyed hash.
    msg:       optional initial data, hashed into the object immediately.
    digestmod: hash name, constructor, or PEP 247 module (see HMAC).

    The returned object accepts further data through update() and exposes
    the current MAC via digest()/hexdigest() at any point.
    """
    hmac_obj = HMAC(key, msg, digestmod)
    return hmac_obj
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/imp.py |  | ||||||
							
								
								
									
										315
									
								
								v1/flask/lib/python3.4/imp.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										315
									
								
								v1/flask/lib/python3.4/imp.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,315 @@ | |||||||
|  | """This module provides the components needed to build your own __import__ | ||||||
|  | function.  Undocumented functions are obsolete. | ||||||
|  |  | ||||||
|  | In most cases it is preferred you consider using the importlib module's | ||||||
|  | functionality over this module. | ||||||
|  |  | ||||||
|  | """ | ||||||
|  | # (Probably) need to stay in _imp | ||||||
|  | from _imp import (lock_held, acquire_lock, release_lock, | ||||||
|  |                   get_frozen_object, is_frozen_package, | ||||||
|  |                   init_builtin, init_frozen, is_builtin, is_frozen, | ||||||
|  |                   _fix_co_filename) | ||||||
|  | try: | ||||||
|  |     from _imp import load_dynamic | ||||||
|  | except ImportError: | ||||||
|  |     # Platform doesn't support dynamic loading. | ||||||
|  |     load_dynamic = None | ||||||
|  |  | ||||||
|  | from importlib._bootstrap import SourcelessFileLoader, _ERR_MSG, _SpecMethods | ||||||
|  |  | ||||||
|  | from importlib import machinery | ||||||
|  | from importlib import util | ||||||
|  | import importlib | ||||||
|  | import os | ||||||
|  | import sys | ||||||
|  | import tokenize | ||||||
|  | import types | ||||||
|  | import warnings | ||||||
|  |  | ||||||
|  | warnings.warn("the imp module is deprecated in favour of importlib; " | ||||||
|  |               "see the module's documentation for alternative uses", | ||||||
|  |               PendingDeprecationWarning) | ||||||
|  |  | ||||||
# DEPRECATED
# Type codes returned as the third element of find_module()'s result tuple
# and dispatched on by load_module() below.
SEARCH_ERROR = 0      # historical; never produced by find_module() below
PY_SOURCE = 1         # .py source file
PY_COMPILED = 2       # .pyc/.pyo bytecode file
C_EXTENSION = 3       # dynamically loadable extension module
PY_RESOURCE = 4       # historical; not produced by find_module() below
PKG_DIRECTORY = 5     # package directory containing an __init__ file
C_BUILTIN = 6         # built-in module
PY_FROZEN = 7         # frozen module
PY_CODERESOURCE = 8   # historical; not produced by find_module() below
IMP_HOOK = 9          # historical; not produced by find_module() below
|  |  | ||||||
|  |  | ||||||
def new_module(name):
    """**DEPRECATED**

    Return a brand-new, empty module object called *name*.

    The result is NOT registered in sys.modules.
    """
    module = types.ModuleType(name)
    return module
|  |  | ||||||
|  |  | ||||||
def get_magic():
    """**DEPRECATED**

    Return the magic number used to tag .pyc/.pyo bytecode files.
    """
    # Delegates to importlib, the canonical owner of the magic number.
    magic = util.MAGIC_NUMBER
    return magic
|  |  | ||||||
|  |  | ||||||
def get_tag():
    """Return the interpreter's magic tag for .pyc or .pyo files."""
    impl = sys.implementation
    return impl.cache_tag
|  |  | ||||||
|  |  | ||||||
def cache_from_source(path, debug_override=None):
    """**DEPRECATED**

    Map a .py path to the path of its cached .pyc/.pyo file.

    The source file need not exist; the result is computed as if the .py
    file were imported.  The extension follows sys.flags.optimize unless
    debug_override (a bool) is supplied, which then takes its place.

    Raises NotImplementedError if sys.implementation.cache_tag is None.
    """
    # Thin shim over importlib.util, kept only for backwards compatibility.
    return util.cache_from_source(path, debug_override)
|  |  | ||||||
|  |  | ||||||
def source_from_cache(path):
    """**DEPRECATED**

    Map a .pyc/.pyo path back to the corresponding .py path.

    The bytecode file need not exist.  Raises ValueError when *path* does
    not follow PEP 3147 layout, and NotImplementedError when
    sys.implementation.cache_tag is None.
    """
    # Thin shim over importlib.util, kept only for backwards compatibility.
    return util.source_from_cache(path)
|  |  | ||||||
|  |  | ||||||
def get_suffixes():
    """**DEPRECATED**

    Return (suffix, mode, type) triples for every importable file kind,
    in the order: extension modules, source files, bytecode files.
    """
    suffix_groups = [
        [(s, 'rb', C_EXTENSION) for s in machinery.EXTENSION_SUFFIXES],
        [(s, 'r', PY_SOURCE) for s in machinery.SOURCE_SUFFIXES],
        [(s, 'rb', PY_COMPILED) for s in machinery.BYTECODE_SUFFIXES],
    ]
    return [entry for group in suffix_groups for entry in group]
|  |  | ||||||
|  |  | ||||||
class NullImporter:

    """**DEPRECATED**

    Importer that never finds anything.

    Instantiation fails for an empty pathname or an existing directory;
    for any other path the importer simply reports every module missing.
    """

    def __init__(self, path):
        if path == '':
            raise ImportError('empty pathname', path='')
        if os.path.isdir(path):
            raise ImportError('existing directory', path=path)

    def find_module(self, fullname):
        """Always returns None."""
        return None
|  |  | ||||||
|  |  | ||||||
class _HackedGetData:

    """Compatibility support for 'file' arguments of various load_*()
    functions."""

    def __init__(self, fullname, path, file=None):
        # Cooperative mixin: the real loader base class receives
        # (fullname, path); only the extra open-file handle is stored here.
        super().__init__(fullname, path)
        self.file = file

    def get_data(self, path):
        """Gross hack to contort loader to deal w/ load_*()'s bad API."""
        # NOTE(review): self.path is assumed to be set by the mixed-in
        # loader base class's __init__ — confirm against the loader used.
        if self.file and path == self.path:
            if not self.file.closed:
                file = self.file
            else:
                # The caller's handle was already consumed once; reopen from
                # the stored path in text mode.
                self.file = file = open(self.path, 'r')

            with file:
                # Technically should be returning bytes, but
                # SourceLoader.get_code() just passed what is returned to
                # compile() which can handle str. And converting to bytes would
                # require figuring out the encoding to decode to and
                # tokenize.detect_encoding() only accepts bytes.
                return file.read()
        else:
            # Any other path: defer to the normal loader implementation.
            return super().get_data(path)
|  |  | ||||||
|  |  | ||||||
class _LoadSourceCompatibility(_HackedGetData, machinery.SourceFileLoader):

    """Compatibility support for implementing load_source()."""


def load_source(name, pathname, file=None):
    """**DEPRECATED**

    Load a source file as module *name*, given its pathname and an
    optionally pre-opened file object.
    """
    loader = _LoadSourceCompatibility(name, pathname, file)
    spec = util.spec_from_file_location(name, pathname, loader=loader)
    methods = _SpecMethods(spec)
    # Re-execute into the already-imported module object when present, so
    # references held elsewhere stay valid.
    if name in sys.modules:
        module = methods.exec(sys.modules[name])
    else:
        module = methods.load()
    # To allow reloading to potentially work, use a non-hacked loader which
    # won't rely on a now-closed file object.
    module.__loader__ = machinery.SourceFileLoader(name, pathname)
    module.__spec__.loader = module.__loader__
    return module
|  |  | ||||||
|  |  | ||||||
class _LoadCompiledCompatibility(_HackedGetData, SourcelessFileLoader):

    """Compatibility support for implementing load_compiled()."""


def load_compiled(name, pathname, file=None):
    """**DEPRECATED**

    Load a compiled (bytecode-only) file as module *name*.
    """
    loader = _LoadCompiledCompatibility(name, pathname, file)
    spec = util.spec_from_file_location(name, pathname, loader=loader)
    methods = _SpecMethods(spec)
    # Re-execute into the already-imported module object when present.
    if name in sys.modules:
        module = methods.exec(sys.modules[name])
    else:
        module = methods.load()
    # To allow reloading to potentially work, use a non-hacked loader which
    # won't rely on a now-closed file object.
    module.__loader__ = SourcelessFileLoader(name, pathname)
    module.__spec__.loader = module.__loader__
    return module
|  |  | ||||||
|  |  | ||||||
def load_package(name, path):
    """**DEPRECATED**

    Load and return the package *name* rooted at *path*.

    If *path* is a directory, its __init__ file is located among the known
    source and bytecode suffixes; otherwise *path* is assumed to already
    point at the package's init file.  Raises ValueError when a directory
    contains no __init__ file.
    """
    if os.path.isdir(path):
        extensions = (machinery.SOURCE_SUFFIXES[:] +
                      machinery.BYTECODE_SUFFIXES[:])
        for extension in extensions:
            # Probe against the original directory on every iteration.  The
            # previous code rebound ``path`` inside the loop, so from the
            # second suffix onward it joined onto the first candidate file
            # instead of the directory, breaking the scan.
            init_path = os.path.join(path, '__init__' + extension)
            if os.path.exists(init_path):
                path = init_path
                break
        else:
            raise ValueError('{!r} is not a package'.format(path))
    spec = util.spec_from_file_location(name, path,
                                        submodule_search_locations=[])
    methods = _SpecMethods(spec)
    # Re-execute into the already-imported module object when present.
    if name in sys.modules:
        return methods.exec(sys.modules[name])
    else:
        return methods.load()
|  |  | ||||||
|  |  | ||||||
def load_module(name, file, filename, details):
    """**DEPRECATED**

    Load a module, given information returned by find_module().

    The module name must include the full package name, if any.

    """
    suffix, mode, type_ = details
    # Loaders only ever read; reject write/append/update modes outright.
    if mode and (not mode.startswith(('r', 'U')) or '+' in mode):
        raise ValueError('invalid file open mode {!r}'.format(mode))
    elif file is None and type_ in {PY_SOURCE, PY_COMPILED}:
        msg = 'file object required for import (type code {})'.format(type_)
        raise ValueError(msg)
    elif type_ == PY_SOURCE:
        return load_source(name, filename, file)
    elif type_ == PY_COMPILED:
        return load_compiled(name, filename, file)
    elif type_ == C_EXTENSION and load_dynamic is not None:
        # Extension modules need a binary file handle; open one if the
        # caller did not supply it.
        if file is None:
            with open(filename, 'rb') as opened_file:
                return load_dynamic(name, filename, opened_file)
        else:
            return load_dynamic(name, filename, file)
    elif type_ == PKG_DIRECTORY:
        return load_package(name, filename)
    elif type_ == C_BUILTIN:
        return init_builtin(name)
    elif type_ == PY_FROZEN:
        return init_frozen(name)
    else:
        msg =  "Don't know how to import {} (type code {})".format(name, type_)
        raise ImportError(msg, name=name)
|  |  | ||||||
|  |  | ||||||
def find_module(name, path=None):
    """**DEPRECATED**

    Search for a module.

    If path is omitted or None, search for a built-in, frozen or special
    module and continue search in sys.path. The module name cannot
    contain '.'; to search for a submodule of a package, pass the
    submodule name and the package's __path__.

    Returns a (file, file_path, (suffix, mode, type)) triple suitable for
    load_module(); file is None for packages, builtins and frozen modules.
    """
    if not isinstance(name, str):
        raise TypeError("'name' must be a str, not {}".format(type(name)))
    elif not isinstance(path, (type(None), list)):
        # Backwards-compatibility
        raise RuntimeError("'list' must be None or a list, "
                           "not {}".format(type(name)))

    if path is None:
        # Builtins and frozen modules take priority over the filesystem.
        if is_builtin(name):
            return None, None, ('', '', C_BUILTIN)
        elif is_frozen(name):
            return None, None, ('', '', PY_FROZEN)
        else:
            path = sys.path

    for entry in path:
        # A directory holding an __init__ source/bytecode file is a package;
        # report the directory itself with no open file.
        package_directory = os.path.join(entry, name)
        for suffix in ['.py', machinery.BYTECODE_SUFFIXES[0]]:
            package_file_name = '__init__' + suffix
            file_path = os.path.join(package_directory, package_file_name)
            if os.path.isfile(file_path):
                return None, package_directory, ('', '', PKG_DIRECTORY)
        # Otherwise try every known suffix (extension, source, bytecode).
        for suffix, mode, type_ in get_suffixes():
            file_name = name + suffix
            file_path = os.path.join(entry, file_name)
            if os.path.isfile(file_path):
                break
        else:
            continue
        break  # Break out of outer loop when breaking out of inner loop.
    else:
        raise ImportError(_ERR_MSG.format(name), name=name)

    # For text-mode results, sniff the declared source encoding (PEP 263)
    # so the returned handle decodes correctly.
    encoding = None
    if 'b' not in mode:
        with open(file_path, 'rb') as file:
            encoding = tokenize.detect_encoding(file.readline)[0]
    file = open(file_path, mode, encoding=encoding)
    return file, file_path, (suffix, mode, type_)
|  |  | ||||||
|  |  | ||||||
def reload(module):
    """**DEPRECATED**

    Re-execute an already-imported *module* in place and return it.
    """
    # Delegate to the canonical importlib implementation.
    return importlib.reload(module)
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/io.py |  | ||||||
							
								
								
									
										92
									
								
								v1/flask/lib/python3.4/io.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										92
									
								
								v1/flask/lib/python3.4/io.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,92 @@ | |||||||
|  | """The io module provides the Python interfaces to stream handling. The | ||||||
|  | builtin open function is defined in this module. | ||||||
|  |  | ||||||
|  | At the top of the I/O hierarchy is the abstract base class IOBase. It | ||||||
|  | defines the basic interface to a stream. Note, however, that there is no | ||||||
|  | separation between reading and writing to streams; implementations are | ||||||
|  | allowed to raise an OSError if they do not support a given operation. | ||||||
|  |  | ||||||
|  | Extending IOBase is RawIOBase which deals simply with the reading and | ||||||
|  | writing of raw bytes to a stream. FileIO subclasses RawIOBase to provide | ||||||
|  | an interface to OS files. | ||||||
|  |  | ||||||
|  | BufferedIOBase deals with buffering on a raw byte stream (RawIOBase). Its | ||||||
|  | subclasses, BufferedWriter, BufferedReader, and BufferedRWPair buffer | ||||||
|  | streams that are readable, writable, and both respectively. | ||||||
|  | BufferedRandom provides a buffered interface to random access | ||||||
|  | streams. BytesIO is a simple stream of in-memory bytes. | ||||||
|  |  | ||||||
|  | Another IOBase subclass, TextIOBase, deals with the encoding and decoding | ||||||
|  | of streams into text. TextIOWrapper, which extends it, is a buffered text | ||||||
|  | interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO | ||||||
|  | is an in-memory stream for text. | ||||||
|  |  | ||||||
|  | Argument names are not part of the specification, and only the arguments | ||||||
|  | of open() are intended to be used as keyword arguments. | ||||||
|  |  | ||||||
|  | data: | ||||||
|  |  | ||||||
|  | DEFAULT_BUFFER_SIZE | ||||||
|  |  | ||||||
|  |    An int containing the default buffer size used by the module's buffered | ||||||
|  |    I/O classes. open() uses the file's blksize (as obtained by os.stat) if | ||||||
|  |    possible. | ||||||
|  | """ | ||||||
|  | # New I/O library conforming to PEP 3116. | ||||||
|  |  | ||||||
|  | __author__ = ("Guido van Rossum <guido@python.org>, " | ||||||
|  |               "Mike Verdone <mike.verdone@gmail.com>, " | ||||||
|  |               "Mark Russell <mark.russell@zen.co.uk>, " | ||||||
|  |               "Antoine Pitrou <solipsis@pitrou.net>, " | ||||||
|  |               "Amaury Forgeot d'Arc <amauryfa@gmail.com>, " | ||||||
|  |               "Benjamin Peterson <benjamin@python.org>") | ||||||
|  |  | ||||||
|  | __all__ = ["BlockingIOError", "open", "IOBase", "RawIOBase", "FileIO", | ||||||
|  |            "BytesIO", "StringIO", "BufferedIOBase", | ||||||
|  |            "BufferedReader", "BufferedWriter", "BufferedRWPair", | ||||||
|  |            "BufferedRandom", "TextIOBase", "TextIOWrapper", | ||||||
|  |            "UnsupportedOperation", "SEEK_SET", "SEEK_CUR", "SEEK_END"] | ||||||
|  |  | ||||||
|  |  | ||||||
|  | import _io | ||||||
|  | import abc | ||||||
|  |  | ||||||
|  | from _io import (DEFAULT_BUFFER_SIZE, BlockingIOError, UnsupportedOperation, | ||||||
|  |                  open, FileIO, BytesIO, StringIO, BufferedReader, | ||||||
|  |                  BufferedWriter, BufferedRWPair, BufferedRandom, | ||||||
|  |                  IncrementalNewlineDecoder, TextIOWrapper) | ||||||
|  |  | ||||||
|  | OpenWrapper = _io.open # for compatibility with _pyio | ||||||
|  |  | ||||||
|  | # Pretend this exception was created here. | ||||||
|  | UnsupportedOperation.__module__ = "io" | ||||||
|  |  | ||||||
|  | # for seek() | ||||||
|  | SEEK_SET = 0 | ||||||
|  | SEEK_CUR = 1 | ||||||
|  | SEEK_END = 2 | ||||||
|  |  | ||||||
|  | # Declaring ABCs in C is tricky so we do it here. | ||||||
|  | # Method descriptions and default implementations are inherited from the C | ||||||
|  | # version however. | ||||||
|  | class IOBase(_io._IOBase, metaclass=abc.ABCMeta): | ||||||
|  |     __doc__ = _io._IOBase.__doc__ | ||||||
|  |  | ||||||
|  | class RawIOBase(_io._RawIOBase, IOBase): | ||||||
|  |     __doc__ = _io._RawIOBase.__doc__ | ||||||
|  |  | ||||||
|  | class BufferedIOBase(_io._BufferedIOBase, IOBase): | ||||||
|  |     __doc__ = _io._BufferedIOBase.__doc__ | ||||||
|  |  | ||||||
|  | class TextIOBase(_io._TextIOBase, IOBase): | ||||||
|  |     __doc__ = _io._TextIOBase.__doc__ | ||||||
|  |  | ||||||
|  | RawIOBase.register(FileIO) | ||||||
|  |  | ||||||
|  | for klass in (BytesIO, BufferedReader, BufferedWriter, BufferedRandom, | ||||||
|  |               BufferedRWPair): | ||||||
|  |     BufferedIOBase.register(klass) | ||||||
|  |  | ||||||
|  | for klass in (StringIO, TextIOWrapper): | ||||||
|  |     TextIOBase.register(klass) | ||||||
|  | del klass | ||||||
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/keyword.py |  | ||||||
							
								
								
									
										94
									
								
								v1/flask/lib/python3.4/keyword.py
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										94
									
								
								v1/flask/lib/python3.4/keyword.py
									
									
									
									
									
										Executable file
									
								
							| @@ -0,0 +1,94 @@ | |||||||
|  | #! /usr/bin/env python3 | ||||||
|  |  | ||||||
|  | """Keywords (from "graminit.c") | ||||||
|  |  | ||||||
|  | This file is automatically generated; please don't muck it up! | ||||||
|  |  | ||||||
|  | To update the symbols in this file, 'cd' to the top directory of | ||||||
|  | the python source tree after building the interpreter and run: | ||||||
|  |  | ||||||
|  |     ./python Lib/keyword.py | ||||||
|  | """ | ||||||
|  |  | ||||||
__all__ = ["iskeyword", "kwlist"]

# The list between the start/end markers is machine-generated by main();
# the marker comments are load-bearing — main() searches for them verbatim
# when splicing in a regenerated list.  Do not edit the entries by hand.
kwlist = [
#--start keywords--
        'False',
        'None',
        'True',
        'and',
        'as',
        'assert',
        'break',
        'class',
        'continue',
        'def',
        'del',
        'elif',
        'else',
        'except',
        'finally',
        'for',
        'from',
        'global',
        'if',
        'import',
        'in',
        'is',
        'lambda',
        'nonlocal',
        'not',
        'or',
        'pass',
        'raise',
        'return',
        'try',
        'while',
        'with',
        'yield',
#--end keywords--
        ]

# Bound __contains__ of a frozenset: an O(1) keyword membership predicate.
iskeyword = frozenset(kwlist).__contains__
|  |  | ||||||
def main():
    """Regenerate the keyword list in this file from Python's graminit.c."""
    import sys, re

    args = sys.argv[1:]
    iptfile = args and args[0] or "Python/graminit.c"
    if len(args) > 1: optfile = args[1]
    else: optfile = "Lib/keyword.py"

    # load the output skeleton from the target, taking care to preserve its
    # newline convention.
    with open(optfile, newline='') as fp:
        format = fp.readlines()
    nl = format[0][len(format[0].strip()):] if format else '\n'

    # scan the source file for keywords
    with open(iptfile) as fp:
        strprog = re.compile('"([^"]+)"')
        lines = []
        for line in fp:
            # graminit.c marks keyword entries with '{1, "<keyword>"'.
            if '{1, "' in line:
                match = strprog.search(line)
                if match:
                    lines.append("        '" + match.group(1) + "'," + nl)
    lines.sort()

    # insert the lines of keywords into the skeleton
    try:
        start = format.index("#--start keywords--" + nl) + 1
        end = format.index("#--end keywords--" + nl)
        format[start:end] = lines
    except ValueError:
        sys.stderr.write("target does not contain format markers\n")
        sys.exit(1)

    # write the output file
    with open(optfile, 'w', newline='') as fp:
        fp.writelines(format)

if __name__ == "__main__":
    main()
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/linecache.py |  | ||||||
							
								
								
									
										138
									
								
								v1/flask/lib/python3.4/linecache.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										138
									
								
								v1/flask/lib/python3.4/linecache.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,138 @@ | |||||||
|  | """Cache lines from Python source files. | ||||||
|  |  | ||||||
|  | This is intended to read lines from modules imported -- hence if a filename | ||||||
|  | is not found, it will look down the module search path for a file by | ||||||
|  | that name. | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | import sys | ||||||
|  | import os | ||||||
|  | import tokenize | ||||||
|  |  | ||||||
|  | __all__ = ["getline", "clearcache", "checkcache"] | ||||||
|  |  | ||||||
|  | def getline(filename, lineno, module_globals=None): | ||||||
|  |     lines = getlines(filename, module_globals) | ||||||
|  |     if 1 <= lineno <= len(lines): | ||||||
|  |         return lines[lineno-1] | ||||||
|  |     else: | ||||||
|  |         return '' | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # The cache | ||||||
|  |  | ||||||
|  | cache = {} # The cache | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def clearcache(): | ||||||
|  |     """Clear the cache entirely.""" | ||||||
|  |  | ||||||
|  |     global cache | ||||||
|  |     cache = {} | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def getlines(filename, module_globals=None): | ||||||
|  |     """Get the lines for a Python source file from the cache. | ||||||
|  |     Update the cache if it doesn't contain an entry for this file already.""" | ||||||
|  |  | ||||||
|  |     if filename in cache: | ||||||
|  |         return cache[filename][2] | ||||||
|  |  | ||||||
|  |     try: | ||||||
|  |         return updatecache(filename, module_globals) | ||||||
|  |     except MemoryError: | ||||||
|  |         clearcache() | ||||||
|  |         return [] | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def checkcache(filename=None): | ||||||
|  |     """Discard cache entries that are out of date. | ||||||
|  |     (This is not checked upon each call!)""" | ||||||
|  |  | ||||||
|  |     if filename is None: | ||||||
|  |         filenames = list(cache.keys()) | ||||||
|  |     else: | ||||||
|  |         if filename in cache: | ||||||
|  |             filenames = [filename] | ||||||
|  |         else: | ||||||
|  |             return | ||||||
|  |  | ||||||
|  |     for filename in filenames: | ||||||
|  |         size, mtime, lines, fullname = cache[filename] | ||||||
|  |         if mtime is None: | ||||||
|  |             continue   # no-op for files loaded via a __loader__ | ||||||
|  |         try: | ||||||
|  |             stat = os.stat(fullname) | ||||||
|  |         except OSError: | ||||||
|  |             del cache[filename] | ||||||
|  |             continue | ||||||
|  |         if size != stat.st_size or mtime != stat.st_mtime: | ||||||
|  |             del cache[filename] | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def updatecache(filename, module_globals=None): | ||||||
|  |     """Update a cache entry and return its list of lines. | ||||||
|  |     If something's wrong, print a message, discard the cache entry, | ||||||
|  |     and return an empty list.""" | ||||||
|  |  | ||||||
|  |     if filename in cache: | ||||||
|  |         del cache[filename] | ||||||
|  |     if not filename or (filename.startswith('<') and filename.endswith('>')): | ||||||
|  |         return [] | ||||||
|  |  | ||||||
|  |     fullname = filename | ||||||
|  |     try: | ||||||
|  |         stat = os.stat(fullname) | ||||||
|  |     except OSError: | ||||||
|  |         basename = filename | ||||||
|  |  | ||||||
|  |         # Try for a __loader__, if available | ||||||
|  |         if module_globals and '__loader__' in module_globals: | ||||||
|  |             name = module_globals.get('__name__') | ||||||
|  |             loader = module_globals['__loader__'] | ||||||
|  |             get_source = getattr(loader, 'get_source', None) | ||||||
|  |  | ||||||
|  |             if name and get_source: | ||||||
|  |                 try: | ||||||
|  |                     data = get_source(name) | ||||||
|  |                 except (ImportError, OSError): | ||||||
|  |                     pass | ||||||
|  |                 else: | ||||||
|  |                     if data is None: | ||||||
|  |                         # No luck, the PEP302 loader cannot find the source | ||||||
|  |                         # for this module. | ||||||
|  |                         return [] | ||||||
|  |                     cache[filename] = ( | ||||||
|  |                         len(data), None, | ||||||
|  |                         [line+'\n' for line in data.splitlines()], fullname | ||||||
|  |                     ) | ||||||
|  |                     return cache[filename][2] | ||||||
|  |  | ||||||
|  |         # Try looking through the module search path, which is only useful | ||||||
|  |         # when handling a relative filename. | ||||||
|  |         if os.path.isabs(filename): | ||||||
|  |             return [] | ||||||
|  |  | ||||||
|  |         for dirname in sys.path: | ||||||
|  |             try: | ||||||
|  |                 fullname = os.path.join(dirname, basename) | ||||||
|  |             except (TypeError, AttributeError): | ||||||
|  |                 # Not sufficiently string-like to do anything useful with. | ||||||
|  |                 continue | ||||||
|  |             try: | ||||||
|  |                 stat = os.stat(fullname) | ||||||
|  |                 break | ||||||
|  |             except OSError: | ||||||
|  |                 pass | ||||||
|  |         else: | ||||||
|  |             return [] | ||||||
|  |     try: | ||||||
|  |         with tokenize.open(fullname) as fp: | ||||||
|  |             lines = fp.readlines() | ||||||
|  |     except OSError: | ||||||
|  |         return [] | ||||||
|  |     if lines and not lines[-1].endswith('\n'): | ||||||
|  |         lines[-1] += '\n' | ||||||
|  |     size, mtime = stat.st_size, stat.st_mtime | ||||||
|  |     cache[filename] = size, mtime, lines, fullname | ||||||
|  |     return lines | ||||||
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/locale.py |  | ||||||
							
								
								
									
										1676
									
								
								v1/flask/lib/python3.4/locale.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										1676
									
								
								v1/flask/lib/python3.4/locale.py
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/ntpath.py |  | ||||||
							
								
								
									
										625
									
								
								v1/flask/lib/python3.4/ntpath.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										625
									
								
								v1/flask/lib/python3.4/ntpath.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,625 @@ | |||||||
|  | # Module 'ntpath' -- common operations on WinNT/Win95 pathnames | ||||||
|  | """Common pathname manipulations, WindowsNT/95 version. | ||||||
|  |  | ||||||
|  | Instead of importing this module directly, import os and refer to this | ||||||
|  | module as os.path. | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | import os | ||||||
|  | import sys | ||||||
|  | import stat | ||||||
|  | import genericpath | ||||||
|  | from genericpath import * | ||||||
|  |  | ||||||
|  | __all__ = ["normcase","isabs","join","splitdrive","split","splitext", | ||||||
|  |            "basename","dirname","commonprefix","getsize","getmtime", | ||||||
|  |            "getatime","getctime", "islink","exists","lexists","isdir","isfile", | ||||||
|  |            "ismount", "expanduser","expandvars","normpath","abspath", | ||||||
|  |            "splitunc","curdir","pardir","sep","pathsep","defpath","altsep", | ||||||
|  |            "extsep","devnull","realpath","supports_unicode_filenames","relpath", | ||||||
|  |            "samefile", "sameopenfile", "samestat",] | ||||||
|  |  | ||||||
|  | # strings representing various path-related bits and pieces | ||||||
|  | # These are primarily for export; internally, they are hardcoded. | ||||||
|  | curdir = '.' | ||||||
|  | pardir = '..' | ||||||
|  | extsep = '.' | ||||||
|  | sep = '\\' | ||||||
|  | pathsep = ';' | ||||||
|  | altsep = '/' | ||||||
|  | defpath = '.;C:\\bin' | ||||||
|  | if 'ce' in sys.builtin_module_names: | ||||||
|  |     defpath = '\\Windows' | ||||||
|  | devnull = 'nul' | ||||||
|  |  | ||||||
|  | def _get_empty(path): | ||||||
|  |     if isinstance(path, bytes): | ||||||
|  |         return b'' | ||||||
|  |     else: | ||||||
|  |         return '' | ||||||
|  |  | ||||||
|  | def _get_sep(path): | ||||||
|  |     if isinstance(path, bytes): | ||||||
|  |         return b'\\' | ||||||
|  |     else: | ||||||
|  |         return '\\' | ||||||
|  |  | ||||||
|  | def _get_altsep(path): | ||||||
|  |     if isinstance(path, bytes): | ||||||
|  |         return b'/' | ||||||
|  |     else: | ||||||
|  |         return '/' | ||||||
|  |  | ||||||
|  | def _get_bothseps(path): | ||||||
|  |     if isinstance(path, bytes): | ||||||
|  |         return b'\\/' | ||||||
|  |     else: | ||||||
|  |         return '\\/' | ||||||
|  |  | ||||||
|  | def _get_dot(path): | ||||||
|  |     if isinstance(path, bytes): | ||||||
|  |         return b'.' | ||||||
|  |     else: | ||||||
|  |         return '.' | ||||||
|  |  | ||||||
|  | def _get_colon(path): | ||||||
|  |     if isinstance(path, bytes): | ||||||
|  |         return b':' | ||||||
|  |     else: | ||||||
|  |         return ':' | ||||||
|  |  | ||||||
|  | def _get_special(path): | ||||||
|  |     if isinstance(path, bytes): | ||||||
|  |         return (b'\\\\.\\', b'\\\\?\\') | ||||||
|  |     else: | ||||||
|  |         return ('\\\\.\\', '\\\\?\\') | ||||||
|  |  | ||||||
|  | # Normalize the case of a pathname and map slashes to backslashes. | ||||||
|  | # Other normalizations (such as optimizing '../' away) are not done | ||||||
|  | # (this is done by normpath). | ||||||
|  |  | ||||||
|  | def normcase(s): | ||||||
|  |     """Normalize case of pathname. | ||||||
|  |  | ||||||
|  |     Makes all characters lowercase and all slashes into backslashes.""" | ||||||
|  |     if not isinstance(s, (bytes, str)): | ||||||
|  |         raise TypeError("normcase() argument must be str or bytes, " | ||||||
|  |                         "not '{}'".format(s.__class__.__name__)) | ||||||
|  |     return s.replace(_get_altsep(s), _get_sep(s)).lower() | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Return whether a path is absolute. | ||||||
|  | # Trivial in Posix, harder on Windows. | ||||||
|  | # For Windows it is absolute if it starts with a slash or backslash (current | ||||||
|  | # volume), or if a pathname after the volume-letter-and-colon or UNC-resource | ||||||
|  | # starts with a slash or backslash. | ||||||
|  |  | ||||||
|  | def isabs(s): | ||||||
|  |     """Test whether a path is absolute""" | ||||||
|  |     s = splitdrive(s)[1] | ||||||
|  |     return len(s) > 0 and s[:1] in _get_bothseps(s) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Join two (or more) paths. | ||||||
|  | def join(path, *paths): | ||||||
|  |     sep = _get_sep(path) | ||||||
|  |     seps = _get_bothseps(path) | ||||||
|  |     colon = _get_colon(path) | ||||||
|  |     result_drive, result_path = splitdrive(path) | ||||||
|  |     for p in paths: | ||||||
|  |         p_drive, p_path = splitdrive(p) | ||||||
|  |         if p_path and p_path[0] in seps: | ||||||
|  |             # Second path is absolute | ||||||
|  |             if p_drive or not result_drive: | ||||||
|  |                 result_drive = p_drive | ||||||
|  |             result_path = p_path | ||||||
|  |             continue | ||||||
|  |         elif p_drive and p_drive != result_drive: | ||||||
|  |             if p_drive.lower() != result_drive.lower(): | ||||||
|  |                 # Different drives => ignore the first path entirely | ||||||
|  |                 result_drive = p_drive | ||||||
|  |                 result_path = p_path | ||||||
|  |                 continue | ||||||
|  |             # Same drive in different case | ||||||
|  |             result_drive = p_drive | ||||||
|  |         # Second path is relative to the first | ||||||
|  |         if result_path and result_path[-1] not in seps: | ||||||
|  |             result_path = result_path + sep | ||||||
|  |         result_path = result_path + p_path | ||||||
|  |     ## add separator between UNC and non-absolute path | ||||||
|  |     if (result_path and result_path[0] not in seps and | ||||||
|  |         result_drive and result_drive[-1:] != colon): | ||||||
|  |         return result_drive + sep + result_path | ||||||
|  |     return result_drive + result_path | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Split a path in a drive specification (a drive letter followed by a | ||||||
|  | # colon) and the path specification. | ||||||
|  | # It is always true that drivespec + pathspec == p | ||||||
|  | def splitdrive(p): | ||||||
|  |     """Split a pathname into drive/UNC sharepoint and relative path specifiers. | ||||||
|  |     Returns a 2-tuple (drive_or_unc, path); either part may be empty. | ||||||
|  |  | ||||||
|  |     If you assign | ||||||
|  |         result = splitdrive(p) | ||||||
|  |     It is always true that: | ||||||
|  |         result[0] + result[1] == p | ||||||
|  |  | ||||||
|  |     If the path contained a drive letter, drive_or_unc will contain everything | ||||||
|  |     up to and including the colon.  e.g. splitdrive("c:/dir") returns ("c:", "/dir") | ||||||
|  |  | ||||||
|  |     If the path contained a UNC path, the drive_or_unc will contain the host name | ||||||
|  |     and share up to but not including the fourth directory separator character. | ||||||
|  |     e.g. splitdrive("//host/computer/dir") returns ("//host/computer", "/dir") | ||||||
|  |  | ||||||
|  |     Paths cannot contain both a drive letter and a UNC path. | ||||||
|  |  | ||||||
|  |     """ | ||||||
|  |     empty = _get_empty(p) | ||||||
|  |     if len(p) > 1: | ||||||
|  |         sep = _get_sep(p) | ||||||
|  |         normp = p.replace(_get_altsep(p), sep) | ||||||
|  |         if (normp[0:2] == sep*2) and (normp[2:3] != sep): | ||||||
|  |             # is a UNC path: | ||||||
|  |             # vvvvvvvvvvvvvvvvvvvv drive letter or UNC path | ||||||
|  |             # \\machine\mountpoint\directory\etc\... | ||||||
|  |             #           directory ^^^^^^^^^^^^^^^ | ||||||
|  |             index = normp.find(sep, 2) | ||||||
|  |             if index == -1: | ||||||
|  |                 return empty, p | ||||||
|  |             index2 = normp.find(sep, index + 1) | ||||||
|  |             # a UNC path can't have two slashes in a row | ||||||
|  |             # (after the initial two) | ||||||
|  |             if index2 == index + 1: | ||||||
|  |                 return empty, p | ||||||
|  |             if index2 == -1: | ||||||
|  |                 index2 = len(p) | ||||||
|  |             return p[:index2], p[index2:] | ||||||
|  |         if normp[1:2] == _get_colon(p): | ||||||
|  |             return p[:2], p[2:] | ||||||
|  |     return empty, p | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Parse UNC paths | ||||||
|  | def splitunc(p): | ||||||
|  |     """Deprecated since Python 3.1.  Please use splitdrive() instead; | ||||||
|  |     it now handles UNC paths. | ||||||
|  |  | ||||||
|  |     Split a pathname into UNC mount point and relative path specifiers. | ||||||
|  |  | ||||||
|  |     Return a 2-tuple (unc, rest); either part may be empty. | ||||||
|  |     If unc is not empty, it has the form '//host/mount' (or similar | ||||||
|  |     using backslashes).  unc+rest is always the input path. | ||||||
|  |     Paths containing drive letters never have an UNC part. | ||||||
|  |     """ | ||||||
|  |     import warnings | ||||||
|  |     warnings.warn("ntpath.splitunc is deprecated, use ntpath.splitdrive instead", | ||||||
|  |                   DeprecationWarning, 2) | ||||||
|  |     drive, path = splitdrive(p) | ||||||
|  |     if len(drive) == 2: | ||||||
|  |          # Drive letter present | ||||||
|  |         return p[:0], p | ||||||
|  |     return drive, path | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Split a path in head (everything up to the last '/') and tail (the | ||||||
|  | # rest).  After the trailing '/' is stripped, the invariant | ||||||
|  | # join(head, tail) == p holds. | ||||||
|  | # The resulting head won't end in '/' unless it is the root. | ||||||
|  |  | ||||||
|  | def split(p): | ||||||
|  |     """Split a pathname. | ||||||
|  |  | ||||||
|  |     Return tuple (head, tail) where tail is everything after the final slash. | ||||||
|  |     Either part may be empty.""" | ||||||
|  |  | ||||||
|  |     seps = _get_bothseps(p) | ||||||
|  |     d, p = splitdrive(p) | ||||||
|  |     # set i to index beyond p's last slash | ||||||
|  |     i = len(p) | ||||||
|  |     while i and p[i-1] not in seps: | ||||||
|  |         i -= 1 | ||||||
|  |     head, tail = p[:i], p[i:]  # now tail has no slashes | ||||||
|  |     # remove trailing slashes from head, unless it's all slashes | ||||||
|  |     head2 = head | ||||||
|  |     while head2 and head2[-1:] in seps: | ||||||
|  |         head2 = head2[:-1] | ||||||
|  |     head = head2 or head | ||||||
|  |     return d + head, tail | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Split a path in root and extension. | ||||||
|  | # The extension is everything starting at the last dot in the last | ||||||
|  | # pathname component; the root is everything before that. | ||||||
|  | # It is always true that root + ext == p. | ||||||
|  |  | ||||||
|  | def splitext(p): | ||||||
|  |     return genericpath._splitext(p, _get_sep(p), _get_altsep(p), | ||||||
|  |                                  _get_dot(p)) | ||||||
|  | splitext.__doc__ = genericpath._splitext.__doc__ | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Return the tail (basename) part of a path. | ||||||
|  |  | ||||||
|  | def basename(p): | ||||||
|  |     """Returns the final component of a pathname""" | ||||||
|  |     return split(p)[1] | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Return the head (dirname) part of a path. | ||||||
|  |  | ||||||
|  | def dirname(p): | ||||||
|  |     """Returns the directory component of a pathname""" | ||||||
|  |     return split(p)[0] | ||||||
|  |  | ||||||
|  | # Is a path a symbolic link? | ||||||
|  | # This will always return false on systems where os.lstat doesn't exist. | ||||||
|  |  | ||||||
|  | def islink(path): | ||||||
|  |     """Test whether a path is a symbolic link. | ||||||
|  |     This will always return false for Windows prior to 6.0. | ||||||
|  |     """ | ||||||
|  |     try: | ||||||
|  |         st = os.lstat(path) | ||||||
|  |     except (OSError, AttributeError): | ||||||
|  |         return False | ||||||
|  |     return stat.S_ISLNK(st.st_mode) | ||||||
|  |  | ||||||
|  | # Being true for dangling symbolic links is also useful. | ||||||
|  |  | ||||||
|  | def lexists(path): | ||||||
|  |     """Test whether a path exists.  Returns True for broken symbolic links""" | ||||||
|  |     try: | ||||||
|  |         st = os.lstat(path) | ||||||
|  |     except OSError: | ||||||
|  |         return False | ||||||
|  |     return True | ||||||
|  |  | ||||||
|  | # Is a path a mount point? | ||||||
|  | # Any drive letter root (eg c:\) | ||||||
|  | # Any share UNC (eg \\server\share) | ||||||
|  | # Any volume mounted on a filesystem folder | ||||||
|  | # | ||||||
|  | # No one method detects all three situations. Historically we've lexically | ||||||
|  | # detected drive letter roots and share UNCs. The canonical approach to | ||||||
|  | # detecting mounted volumes (querying the reparse tag) fails for the most | ||||||
|  | # common case: drive letter roots. The alternative which uses GetVolumePathName | ||||||
|  | # fails if the drive letter is the result of a SUBST. | ||||||
|  | try: | ||||||
|  |     from nt import _getvolumepathname | ||||||
|  | except ImportError: | ||||||
|  |     _getvolumepathname = None | ||||||
|  | def ismount(path): | ||||||
|  |     """Test whether a path is a mount point (a drive root, the root of a | ||||||
|  |     share, or a mounted volume)""" | ||||||
|  |     seps = _get_bothseps(path) | ||||||
|  |     path = abspath(path) | ||||||
|  |     root, rest = splitdrive(path) | ||||||
|  |     if root and root[0] in seps: | ||||||
|  |         return (not rest) or (rest in seps) | ||||||
|  |     if rest in seps: | ||||||
|  |         return True | ||||||
|  |  | ||||||
|  |     if _getvolumepathname: | ||||||
|  |         return path.rstrip(seps) == _getvolumepathname(path).rstrip(seps) | ||||||
|  |     else: | ||||||
|  |         return False | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Expand paths beginning with '~' or '~user'. | ||||||
|  | # '~' means $HOME; '~user' means that user's home directory. | ||||||
|  | # If the path doesn't begin with '~', or if the user or $HOME is unknown, | ||||||
|  | # the path is returned unchanged (leaving error reporting to whatever | ||||||
|  | # function is called with the expanded path as argument). | ||||||
|  | # See also module 'glob' for expansion of *, ? and [...] in pathnames. | ||||||
|  | # (A function should also be defined to do full *sh-style environment | ||||||
|  | # variable expansion.) | ||||||
|  |  | ||||||
|  | def expanduser(path): | ||||||
|  |     """Expand ~ and ~user constructs. | ||||||
|  |  | ||||||
|  |     If user or $HOME is unknown, do nothing.""" | ||||||
|  |     if isinstance(path, bytes): | ||||||
|  |         tilde = b'~' | ||||||
|  |     else: | ||||||
|  |         tilde = '~' | ||||||
|  |     if not path.startswith(tilde): | ||||||
|  |         return path | ||||||
|  |     i, n = 1, len(path) | ||||||
|  |     while i < n and path[i] not in _get_bothseps(path): | ||||||
|  |         i += 1 | ||||||
|  |  | ||||||
|  |     if 'HOME' in os.environ: | ||||||
|  |         userhome = os.environ['HOME'] | ||||||
|  |     elif 'USERPROFILE' in os.environ: | ||||||
|  |         userhome = os.environ['USERPROFILE'] | ||||||
|  |     elif not 'HOMEPATH' in os.environ: | ||||||
|  |         return path | ||||||
|  |     else: | ||||||
|  |         try: | ||||||
|  |             drive = os.environ['HOMEDRIVE'] | ||||||
|  |         except KeyError: | ||||||
|  |             drive = '' | ||||||
|  |         userhome = join(drive, os.environ['HOMEPATH']) | ||||||
|  |  | ||||||
|  |     if isinstance(path, bytes): | ||||||
|  |         userhome = userhome.encode(sys.getfilesystemencoding()) | ||||||
|  |  | ||||||
|  |     if i != 1: #~user | ||||||
|  |         userhome = join(dirname(userhome), path[1:i]) | ||||||
|  |  | ||||||
|  |     return userhome + path[i:] | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Expand paths containing shell variable substitutions. | ||||||
|  | # The following rules apply: | ||||||
|  | #       - no expansion within single quotes | ||||||
|  | #       - '$$' is translated into '$' | ||||||
|  | #       - '%%' is translated into '%' if '%%' are not seen in %var1%%var2% | ||||||
|  | #       - ${varname} is accepted. | ||||||
|  | #       - $varname is accepted. | ||||||
|  | #       - %varname% is accepted. | ||||||
|  | #       - varnames can be made out of letters, digits and the characters '_-' | ||||||
|  | #         (though is not verified in the ${varname} and %varname% cases) | ||||||
|  | # XXX With COMMAND.COM you can use any characters in a variable name, | ||||||
|  | # XXX except '^|<>='. | ||||||
|  |  | ||||||
|  | def expandvars(path): | ||||||
|  |     """Expand shell variables of the forms $var, ${var} and %var%. | ||||||
|  |  | ||||||
|  |     Unknown variables are left unchanged.""" | ||||||
|  |     if isinstance(path, bytes): | ||||||
|  |         if ord('$') not in path and ord('%') not in path: | ||||||
|  |             return path | ||||||
|  |         import string | ||||||
|  |         varchars = bytes(string.ascii_letters + string.digits + '_-', 'ascii') | ||||||
|  |         quote = b'\'' | ||||||
|  |         percent = b'%' | ||||||
|  |         brace = b'{' | ||||||
|  |         dollar = b'$' | ||||||
|  |         environ = getattr(os, 'environb', None) | ||||||
|  |     else: | ||||||
|  |         if '$' not in path and '%' not in path: | ||||||
|  |             return path | ||||||
|  |         import string | ||||||
|  |         varchars = string.ascii_letters + string.digits + '_-' | ||||||
|  |         quote = '\'' | ||||||
|  |         percent = '%' | ||||||
|  |         brace = '{' | ||||||
|  |         dollar = '$' | ||||||
|  |         environ = os.environ | ||||||
|  |     res = path[:0] | ||||||
|  |     index = 0 | ||||||
|  |     pathlen = len(path) | ||||||
|  |     while index < pathlen: | ||||||
|  |         c = path[index:index+1] | ||||||
|  |         if c == quote:   # no expansion within single quotes | ||||||
|  |             path = path[index + 1:] | ||||||
|  |             pathlen = len(path) | ||||||
|  |             try: | ||||||
|  |                 index = path.index(c) | ||||||
|  |                 res += c + path[:index + 1] | ||||||
|  |             except ValueError: | ||||||
|  |                 res += c + path | ||||||
|  |                 index = pathlen - 1 | ||||||
|  |         elif c == percent:  # variable or '%' | ||||||
|  |             if path[index + 1:index + 2] == percent: | ||||||
|  |                 res += c | ||||||
|  |                 index += 1 | ||||||
|  |             else: | ||||||
|  |                 path = path[index+1:] | ||||||
|  |                 pathlen = len(path) | ||||||
|  |                 try: | ||||||
|  |                     index = path.index(percent) | ||||||
|  |                 except ValueError: | ||||||
|  |                     res += percent + path | ||||||
|  |                     index = pathlen - 1 | ||||||
|  |                 else: | ||||||
|  |                     var = path[:index] | ||||||
|  |                     try: | ||||||
|  |                         if environ is None: | ||||||
|  |                             value = os.fsencode(os.environ[os.fsdecode(var)]) | ||||||
|  |                         else: | ||||||
|  |                             value = environ[var] | ||||||
|  |                     except KeyError: | ||||||
|  |                         value = percent + var + percent | ||||||
|  |                     res += value | ||||||
|  |         elif c == dollar:  # variable or '$$' | ||||||
|  |             if path[index + 1:index + 2] == dollar: | ||||||
|  |                 res += c | ||||||
|  |                 index += 1 | ||||||
|  |             elif path[index + 1:index + 2] == brace: | ||||||
|  |                 path = path[index+2:] | ||||||
|  |                 pathlen = len(path) | ||||||
|  |                 try: | ||||||
|  |                     if isinstance(path, bytes): | ||||||
|  |                         index = path.index(b'}') | ||||||
|  |                     else: | ||||||
|  |                         index = path.index('}') | ||||||
|  |                 except ValueError: | ||||||
|  |                     if isinstance(path, bytes): | ||||||
|  |                         res += b'${' + path | ||||||
|  |                     else: | ||||||
|  |                         res += '${' + path | ||||||
|  |                     index = pathlen - 1 | ||||||
|  |                 else: | ||||||
|  |                     var = path[:index] | ||||||
|  |                     try: | ||||||
|  |                         if environ is None: | ||||||
|  |                             value = os.fsencode(os.environ[os.fsdecode(var)]) | ||||||
|  |                         else: | ||||||
|  |                             value = environ[var] | ||||||
|  |                     except KeyError: | ||||||
|  |                         if isinstance(path, bytes): | ||||||
|  |                             value = b'${' + var + b'}' | ||||||
|  |                         else: | ||||||
|  |                             value = '${' + var + '}' | ||||||
|  |                     res += value | ||||||
|  |             else: | ||||||
|  |                 var = path[:0] | ||||||
|  |                 index += 1 | ||||||
|  |                 c = path[index:index + 1] | ||||||
|  |                 while c and c in varchars: | ||||||
|  |                     var += c | ||||||
|  |                     index += 1 | ||||||
|  |                     c = path[index:index + 1] | ||||||
|  |                 try: | ||||||
|  |                     if environ is None: | ||||||
|  |                         value = os.fsencode(os.environ[os.fsdecode(var)]) | ||||||
|  |                     else: | ||||||
|  |                         value = environ[var] | ||||||
|  |                 except KeyError: | ||||||
|  |                     value = dollar + var | ||||||
|  |                 res += value | ||||||
|  |                 if c: | ||||||
|  |                     index -= 1 | ||||||
|  |         else: | ||||||
|  |             res += c | ||||||
|  |         index += 1 | ||||||
|  |     return res | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B. | ||||||
|  | # Previously, this function also truncated pathnames to 8+3 format, | ||||||
|  | # but as this module is called "ntpath", that's obviously wrong! | ||||||
|  |  | ||||||
|  | def normpath(path): | ||||||
|  |     """Normalize path, eliminating double slashes, etc.""" | ||||||
|  |     sep = _get_sep(path) | ||||||
|  |     dotdot = _get_dot(path) * 2 | ||||||
|  |     special_prefixes = _get_special(path) | ||||||
|  |     if path.startswith(special_prefixes): | ||||||
|  |         # in the case of paths with these prefixes: | ||||||
|  |         # \\.\ -> device names | ||||||
|  |         # \\?\ -> literal paths | ||||||
|  |         # do not do any normalization, but return the path unchanged | ||||||
|  |         return path | ||||||
|  |     path = path.replace(_get_altsep(path), sep) | ||||||
|  |     prefix, path = splitdrive(path) | ||||||
|  |  | ||||||
|  |     # collapse initial backslashes | ||||||
|  |     if path.startswith(sep): | ||||||
|  |         prefix += sep | ||||||
|  |         path = path.lstrip(sep) | ||||||
|  |  | ||||||
|  |     comps = path.split(sep) | ||||||
|  |     i = 0 | ||||||
|  |     while i < len(comps): | ||||||
|  |         if not comps[i] or comps[i] == _get_dot(path): | ||||||
|  |             del comps[i] | ||||||
|  |         elif comps[i] == dotdot: | ||||||
|  |             if i > 0 and comps[i-1] != dotdot: | ||||||
|  |                 del comps[i-1:i+1] | ||||||
|  |                 i -= 1 | ||||||
|  |             elif i == 0 and prefix.endswith(_get_sep(path)): | ||||||
|  |                 del comps[i] | ||||||
|  |             else: | ||||||
|  |                 i += 1 | ||||||
|  |         else: | ||||||
|  |             i += 1 | ||||||
|  |     # If the path is now empty, substitute '.' | ||||||
|  |     if not prefix and not comps: | ||||||
|  |         comps.append(_get_dot(path)) | ||||||
|  |     return prefix + sep.join(comps) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Return an absolute path. | ||||||
|  | try: | ||||||
|  |     from nt import _getfullpathname | ||||||
|  |  | ||||||
|  | except ImportError: # not running on Windows - mock up something sensible | ||||||
|  |     def abspath(path): | ||||||
|  |         """Return the absolute version of a path.""" | ||||||
|  |         if not isabs(path): | ||||||
|  |             if isinstance(path, bytes): | ||||||
|  |                 cwd = os.getcwdb() | ||||||
|  |             else: | ||||||
|  |                 cwd = os.getcwd() | ||||||
|  |             path = join(cwd, path) | ||||||
|  |         return normpath(path) | ||||||
|  |  | ||||||
|  | else:  # use native Windows method on Windows | ||||||
|  |     def abspath(path): | ||||||
|  |         """Return the absolute version of a path.""" | ||||||
|  |  | ||||||
|  |         if path: # Empty path must return current working directory. | ||||||
|  |             try: | ||||||
|  |                 path = _getfullpathname(path) | ||||||
|  |             except OSError: | ||||||
|  |                 pass # Bad path - return unchanged. | ||||||
|  |         elif isinstance(path, bytes): | ||||||
|  |             path = os.getcwdb() | ||||||
|  |         else: | ||||||
|  |             path = os.getcwd() | ||||||
|  |         return normpath(path) | ||||||
|  |  | ||||||
|  | # realpath is a no-op on systems without islink support | ||||||
|  | realpath = abspath | ||||||
|  | # Win9x family and earlier have no Unicode filename support. | ||||||
|  | supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and | ||||||
|  |                               sys.getwindowsversion()[3] >= 2) | ||||||
|  |  | ||||||
|  | def relpath(path, start=curdir): | ||||||
|  |     """Return a relative version of a path""" | ||||||
|  |     sep = _get_sep(path) | ||||||
|  |  | ||||||
|  |     if start is curdir: | ||||||
|  |         start = _get_dot(path) | ||||||
|  |  | ||||||
|  |     if not path: | ||||||
|  |         raise ValueError("no path specified") | ||||||
|  |  | ||||||
|  |     start_abs = abspath(normpath(start)) | ||||||
|  |     path_abs = abspath(normpath(path)) | ||||||
|  |     start_drive, start_rest = splitdrive(start_abs) | ||||||
|  |     path_drive, path_rest = splitdrive(path_abs) | ||||||
|  |     if normcase(start_drive) != normcase(path_drive): | ||||||
|  |         error = "path is on mount '{0}', start on mount '{1}'".format( | ||||||
|  |             path_drive, start_drive) | ||||||
|  |         raise ValueError(error) | ||||||
|  |  | ||||||
|  |     start_list = [x for x in start_rest.split(sep) if x] | ||||||
|  |     path_list = [x for x in path_rest.split(sep) if x] | ||||||
|  |     # Work out how much of the filepath is shared by start and path. | ||||||
|  |     i = 0 | ||||||
|  |     for e1, e2 in zip(start_list, path_list): | ||||||
|  |         if normcase(e1) != normcase(e2): | ||||||
|  |             break | ||||||
|  |         i += 1 | ||||||
|  |  | ||||||
|  |     if isinstance(path, bytes): | ||||||
|  |         pardir = b'..' | ||||||
|  |     else: | ||||||
|  |         pardir = '..' | ||||||
|  |     rel_list = [pardir] * (len(start_list)-i) + path_list[i:] | ||||||
|  |     if not rel_list: | ||||||
|  |         return _get_dot(path) | ||||||
|  |     return join(*rel_list) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # determine if two files are in fact the same file | ||||||
|  | try: | ||||||
|  |     # GetFinalPathNameByHandle is available starting with Windows 6.0. | ||||||
|  |     # Windows XP and non-Windows OS'es will mock _getfinalpathname. | ||||||
|  |     if sys.getwindowsversion()[:2] >= (6, 0): | ||||||
|  |         from nt import _getfinalpathname | ||||||
|  |     else: | ||||||
|  |         raise ImportError | ||||||
|  | except (AttributeError, ImportError): | ||||||
|  |     # On Windows XP and earlier, two files are the same if their absolute | ||||||
|  |     # pathnames are the same. | ||||||
|  |     # Non-Windows operating systems fake this method with an XP | ||||||
|  |     # approximation. | ||||||
|  |     def _getfinalpathname(f): | ||||||
|  |         return normcase(abspath(f)) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | try: | ||||||
|  |     # The genericpath.isdir implementation uses os.stat and checks the mode | ||||||
|  |     # attribute to tell whether or not the path is a directory. | ||||||
|  |     # This is overkill on Windows - just pass the path to GetFileAttributes | ||||||
|  |     # and check the attribute from there. | ||||||
|  |     from nt import _isdir as isdir | ||||||
|  | except ImportError: | ||||||
|  |     # Use genericpath.isdir as imported above. | ||||||
|  |     pass | ||||||
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/operator.py |  | ||||||
							
								
								
									
										411
									
								
								v1/flask/lib/python3.4/operator.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										411
									
								
								v1/flask/lib/python3.4/operator.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,411 @@ | |||||||
|  | """ | ||||||
|  | Operator Interface | ||||||
|  |  | ||||||
|  | This module exports a set of functions corresponding to the intrinsic | ||||||
|  | operators of Python.  For example, operator.add(x, y) is equivalent | ||||||
|  | to the expression x+y.  The function names are those used for special | ||||||
|  | methods; variants without leading and trailing '__' are also provided | ||||||
|  | for convenience. | ||||||
|  |  | ||||||
|  | This is the pure Python implementation of the module. | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | __all__ = ['abs', 'add', 'and_', 'attrgetter', 'concat', 'contains', 'countOf', | ||||||
|  |            'delitem', 'eq', 'floordiv', 'ge', 'getitem', 'gt', 'iadd', 'iand', | ||||||
|  |            'iconcat', 'ifloordiv', 'ilshift', 'imod', 'imul', 'index', | ||||||
|  |            'indexOf', 'inv', 'invert', 'ior', 'ipow', 'irshift', 'is_', | ||||||
|  |            'is_not', 'isub', 'itemgetter', 'itruediv', 'ixor', 'le', | ||||||
|  |            'length_hint', 'lshift', 'lt', 'methodcaller', 'mod', 'mul', 'ne', | ||||||
|  |            'neg', 'not_', 'or_', 'pos', 'pow', 'rshift', 'setitem', 'sub', | ||||||
|  |            'truediv', 'truth', 'xor'] | ||||||
|  |  | ||||||
|  | from builtins import abs as _abs | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Comparison Operations *******************************************************# | ||||||
|  |  | ||||||
|  | def lt(a, b): | ||||||
|  |     "Same as a < b." | ||||||
|  |     return a < b | ||||||
|  |  | ||||||
|  | def le(a, b): | ||||||
|  |     "Same as a <= b." | ||||||
|  |     return a <= b | ||||||
|  |  | ||||||
|  | def eq(a, b): | ||||||
|  |     "Same as a == b." | ||||||
|  |     return a == b | ||||||
|  |  | ||||||
|  | def ne(a, b): | ||||||
|  |     "Same as a != b." | ||||||
|  |     return a != b | ||||||
|  |  | ||||||
|  | def ge(a, b): | ||||||
|  |     "Same as a >= b." | ||||||
|  |     return a >= b | ||||||
|  |  | ||||||
|  | def gt(a, b): | ||||||
|  |     "Same as a > b." | ||||||
|  |     return a > b | ||||||
|  |  | ||||||
|  | # Logical Operations **********************************************************# | ||||||
|  |  | ||||||
|  | def not_(a): | ||||||
|  |     "Same as not a." | ||||||
|  |     return not a | ||||||
|  |  | ||||||
|  | def truth(a): | ||||||
|  |     "Return True if a is true, False otherwise." | ||||||
|  |     return True if a else False | ||||||
|  |  | ||||||
|  | def is_(a, b): | ||||||
|  |     "Same as a is b." | ||||||
|  |     return a is b | ||||||
|  |  | ||||||
|  | def is_not(a, b): | ||||||
|  |     "Same as a is not b." | ||||||
|  |     return a is not b | ||||||
|  |  | ||||||
|  | # Mathematical/Bitwise Operations *********************************************# | ||||||
|  |  | ||||||
|  | def abs(a): | ||||||
|  |     "Same as abs(a)." | ||||||
|  |     return _abs(a) | ||||||
|  |  | ||||||
|  | def add(a, b): | ||||||
|  |     "Same as a + b." | ||||||
|  |     return a + b | ||||||
|  |  | ||||||
|  | def and_(a, b): | ||||||
|  |     "Same as a & b." | ||||||
|  |     return a & b | ||||||
|  |  | ||||||
|  | def floordiv(a, b): | ||||||
|  |     "Same as a // b." | ||||||
|  |     return a // b | ||||||
|  |  | ||||||
|  | def index(a): | ||||||
|  |     "Same as a.__index__()." | ||||||
|  |     return a.__index__() | ||||||
|  |  | ||||||
|  | def inv(a): | ||||||
|  |     "Same as ~a." | ||||||
|  |     return ~a | ||||||
|  | invert = inv | ||||||
|  |  | ||||||
|  | def lshift(a, b): | ||||||
|  |     "Same as a << b." | ||||||
|  |     return a << b | ||||||
|  |  | ||||||
|  | def mod(a, b): | ||||||
|  |     "Same as a % b." | ||||||
|  |     return a % b | ||||||
|  |  | ||||||
|  | def mul(a, b): | ||||||
|  |     "Same as a * b." | ||||||
|  |     return a * b | ||||||
|  |  | ||||||
|  | def neg(a): | ||||||
|  |     "Same as -a." | ||||||
|  |     return -a | ||||||
|  |  | ||||||
|  | def or_(a, b): | ||||||
|  |     "Same as a | b." | ||||||
|  |     return a | b | ||||||
|  |  | ||||||
|  | def pos(a): | ||||||
|  |     "Same as +a." | ||||||
|  |     return +a | ||||||
|  |  | ||||||
|  | def pow(a, b): | ||||||
|  |     "Same as a ** b." | ||||||
|  |     return a ** b | ||||||
|  |  | ||||||
|  | def rshift(a, b): | ||||||
|  |     "Same as a >> b." | ||||||
|  |     return a >> b | ||||||
|  |  | ||||||
|  | def sub(a, b): | ||||||
|  |     "Same as a - b." | ||||||
|  |     return a - b | ||||||
|  |  | ||||||
|  | def truediv(a, b): | ||||||
|  |     "Same as a / b." | ||||||
|  |     return a / b | ||||||
|  |  | ||||||
|  | def xor(a, b): | ||||||
|  |     "Same as a ^ b." | ||||||
|  |     return a ^ b | ||||||
|  |  | ||||||
|  | # Sequence Operations *********************************************************# | ||||||
|  |  | ||||||
|  | def concat(a, b): | ||||||
|  |     "Same as a + b, for a and b sequences." | ||||||
|  |     if not hasattr(a, '__getitem__'): | ||||||
|  |         msg = "'%s' object can't be concatenated" % type(a).__name__ | ||||||
|  |         raise TypeError(msg) | ||||||
|  |     return a + b | ||||||
|  |  | ||||||
|  | def contains(a, b): | ||||||
|  |     "Same as b in a (note reversed operands)." | ||||||
|  |     return b in a | ||||||
|  |  | ||||||
|  | def countOf(a, b): | ||||||
|  |     "Return the number of times b occurs in a." | ||||||
|  |     count = 0 | ||||||
|  |     for i in a: | ||||||
|  |         if i == b: | ||||||
|  |             count += 1 | ||||||
|  |     return count | ||||||
|  |  | ||||||
|  | def delitem(a, b): | ||||||
|  |     "Same as del a[b]." | ||||||
|  |     del a[b] | ||||||
|  |  | ||||||
|  | def getitem(a, b): | ||||||
|  |     "Same as a[b]." | ||||||
|  |     return a[b] | ||||||
|  |  | ||||||
|  | def indexOf(a, b): | ||||||
|  |     "Return the first index of b in a." | ||||||
|  |     for i, j in enumerate(a): | ||||||
|  |         if j == b: | ||||||
|  |             return i | ||||||
|  |     else: | ||||||
|  |         raise ValueError('sequence.index(x): x not in sequence') | ||||||
|  |  | ||||||
|  | def setitem(a, b, c): | ||||||
|  |     "Same as a[b] = c." | ||||||
|  |     a[b] = c | ||||||
|  |  | ||||||
|  | def length_hint(obj, default=0): | ||||||
|  |     """ | ||||||
|  |     Return an estimate of the number of items in obj. | ||||||
|  |     This is useful for presizing containers when building from an iterable. | ||||||
|  |  | ||||||
|  |     If the object supports len(), the result will be exact. Otherwise, it may | ||||||
|  |     over- or under-estimate by an arbitrary amount. The result will be an | ||||||
|  |     integer >= 0. | ||||||
|  |     """ | ||||||
|  |     if not isinstance(default, int): | ||||||
|  |         msg = ("'%s' object cannot be interpreted as an integer" % | ||||||
|  |                type(default).__name__) | ||||||
|  |         raise TypeError(msg) | ||||||
|  |  | ||||||
|  |     try: | ||||||
|  |         return len(obj) | ||||||
|  |     except TypeError: | ||||||
|  |         pass | ||||||
|  |  | ||||||
|  |     try: | ||||||
|  |         hint = type(obj).__length_hint__ | ||||||
|  |     except AttributeError: | ||||||
|  |         return default | ||||||
|  |  | ||||||
|  |     try: | ||||||
|  |         val = hint(obj) | ||||||
|  |     except TypeError: | ||||||
|  |         return default | ||||||
|  |     if val is NotImplemented: | ||||||
|  |         return default | ||||||
|  |     if not isinstance(val, int): | ||||||
|  |         msg = ('__length_hint__ must be integer, not %s' % | ||||||
|  |                type(val).__name__) | ||||||
|  |         raise TypeError(msg) | ||||||
|  |     if val < 0: | ||||||
|  |         msg = '__length_hint__() should return >= 0' | ||||||
|  |         raise ValueError(msg) | ||||||
|  |     return val | ||||||
|  |  | ||||||
|  | # Generalized Lookup Objects **************************************************# | ||||||
|  |  | ||||||
|  | class attrgetter: | ||||||
|  |     """ | ||||||
|  |     Return a callable object that fetches the given attribute(s) from its operand. | ||||||
|  |     After f = attrgetter('name'), the call f(r) returns r.name. | ||||||
|  |     After g = attrgetter('name', 'date'), the call g(r) returns (r.name, r.date). | ||||||
|  |     After h = attrgetter('name.first', 'name.last'), the call h(r) returns | ||||||
|  |     (r.name.first, r.name.last). | ||||||
|  |     """ | ||||||
|  |     def __init__(self, attr, *attrs): | ||||||
|  |         if not attrs: | ||||||
|  |             if not isinstance(attr, str): | ||||||
|  |                 raise TypeError('attribute name must be a string') | ||||||
|  |             names = attr.split('.') | ||||||
|  |             def func(obj): | ||||||
|  |                 for name in names: | ||||||
|  |                     obj = getattr(obj, name) | ||||||
|  |                 return obj | ||||||
|  |             self._call = func | ||||||
|  |         else: | ||||||
|  |             getters = tuple(map(attrgetter, (attr,) + attrs)) | ||||||
|  |             def func(obj): | ||||||
|  |                 return tuple(getter(obj) for getter in getters) | ||||||
|  |             self._call = func | ||||||
|  |  | ||||||
|  |     def __call__(self, obj): | ||||||
|  |         return self._call(obj) | ||||||
|  |  | ||||||
|  | class itemgetter: | ||||||
|  |     """ | ||||||
|  |     Return a callable object that fetches the given item(s) from its operand. | ||||||
|  |     After f = itemgetter(2), the call f(r) returns r[2]. | ||||||
|  |     After g = itemgetter(2, 5, 3), the call g(r) returns (r[2], r[5], r[3]) | ||||||
|  |     """ | ||||||
|  |     def __init__(self, item, *items): | ||||||
|  |         if not items: | ||||||
|  |             def func(obj): | ||||||
|  |                 return obj[item] | ||||||
|  |             self._call = func | ||||||
|  |         else: | ||||||
|  |             items = (item,) + items | ||||||
|  |             def func(obj): | ||||||
|  |                 return tuple(obj[i] for i in items) | ||||||
|  |             self._call = func | ||||||
|  |  | ||||||
|  |     def __call__(self, obj): | ||||||
|  |         return self._call(obj) | ||||||
|  |  | ||||||
|  | class methodcaller: | ||||||
|  |     """ | ||||||
|  |     Return a callable object that calls the given method on its operand. | ||||||
|  |     After f = methodcaller('name'), the call f(r) returns r.name(). | ||||||
|  |     After g = methodcaller('name', 'date', foo=1), the call g(r) returns | ||||||
|  |     r.name('date', foo=1). | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__(*args, **kwargs): | ||||||
|  |         if len(args) < 2: | ||||||
|  |             msg = "methodcaller needs at least one argument, the method name" | ||||||
|  |             raise TypeError(msg) | ||||||
|  |         self = args[0] | ||||||
|  |         self._name = args[1] | ||||||
|  |         self._args = args[2:] | ||||||
|  |         self._kwargs = kwargs | ||||||
|  |  | ||||||
|  |     def __call__(self, obj): | ||||||
|  |         return getattr(obj, self._name)(*self._args, **self._kwargs) | ||||||
|  |  | ||||||
|  | # In-place Operations *********************************************************# | ||||||
|  |  | ||||||
|  | def iadd(a, b): | ||||||
|  |     "Same as a += b." | ||||||
|  |     a += b | ||||||
|  |     return a | ||||||
|  |  | ||||||
|  | def iand(a, b): | ||||||
|  |     "Same as a &= b." | ||||||
|  |     a &= b | ||||||
|  |     return a | ||||||
|  |  | ||||||
|  | def iconcat(a, b): | ||||||
|  |     "Same as a += b, for a and b sequences." | ||||||
|  |     if not hasattr(a, '__getitem__'): | ||||||
|  |         msg = "'%s' object can't be concatenated" % type(a).__name__ | ||||||
|  |         raise TypeError(msg) | ||||||
|  |     a += b | ||||||
|  |     return a | ||||||
|  |  | ||||||
|  | def ifloordiv(a, b): | ||||||
|  |     "Same as a //= b." | ||||||
|  |     a //= b | ||||||
|  |     return a | ||||||
|  |  | ||||||
|  | def ilshift(a, b): | ||||||
|  |     "Same as a <<= b." | ||||||
|  |     a <<= b | ||||||
|  |     return a | ||||||
|  |  | ||||||
|  | def imod(a, b): | ||||||
|  |     "Same as a %= b." | ||||||
|  |     a %= b | ||||||
|  |     return a | ||||||
|  |  | ||||||
|  | def imul(a, b): | ||||||
|  |     "Same as a *= b." | ||||||
|  |     a *= b | ||||||
|  |     return a | ||||||
|  |  | ||||||
|  | def ior(a, b): | ||||||
|  |     "Same as a |= b." | ||||||
|  |     a |= b | ||||||
|  |     return a | ||||||
|  |  | ||||||
|  | def ipow(a, b): | ||||||
|  |     "Same as a **= b." | ||||||
|  |     a **=b | ||||||
|  |     return a | ||||||
|  |  | ||||||
|  | def irshift(a, b): | ||||||
|  |     "Same as a >>= b." | ||||||
|  |     a >>= b | ||||||
|  |     return a | ||||||
|  |  | ||||||
|  | def isub(a, b): | ||||||
|  |     "Same as a -= b." | ||||||
|  |     a -= b | ||||||
|  |     return a | ||||||
|  |  | ||||||
|  | def itruediv(a, b): | ||||||
|  |     "Same as a /= b." | ||||||
|  |     a /= b | ||||||
|  |     return a | ||||||
|  |  | ||||||
|  | def ixor(a, b): | ||||||
|  |     "Same as a ^= b." | ||||||
|  |     a ^= b | ||||||
|  |     return a | ||||||
|  |  | ||||||
|  |  | ||||||
|  | try: | ||||||
|  |     from _operator import * | ||||||
|  | except ImportError: | ||||||
|  |     pass | ||||||
|  | else: | ||||||
|  |     from _operator import __doc__ | ||||||
|  |  | ||||||
|  | # All of these "__func__ = func" assignments have to happen after importing | ||||||
|  | # from _operator to make sure they're set to the right function | ||||||
|  | __lt__ = lt | ||||||
|  | __le__ = le | ||||||
|  | __eq__ = eq | ||||||
|  | __ne__ = ne | ||||||
|  | __ge__ = ge | ||||||
|  | __gt__ = gt | ||||||
|  | __not__ = not_ | ||||||
|  | __abs__ = abs | ||||||
|  | __add__ = add | ||||||
|  | __and__ = and_ | ||||||
|  | __floordiv__ = floordiv | ||||||
|  | __index__ = index | ||||||
|  | __inv__ = inv | ||||||
|  | __invert__ = invert | ||||||
|  | __lshift__ = lshift | ||||||
|  | __mod__ = mod | ||||||
|  | __mul__ = mul | ||||||
|  | __neg__ = neg | ||||||
|  | __or__ = or_ | ||||||
|  | __pos__ = pos | ||||||
|  | __pow__ = pow | ||||||
|  | __rshift__ = rshift | ||||||
|  | __sub__ = sub | ||||||
|  | __truediv__ = truediv | ||||||
|  | __xor__ = xor | ||||||
|  | __concat__ = concat | ||||||
|  | __contains__ = contains | ||||||
|  | __delitem__ = delitem | ||||||
|  | __getitem__ = getitem | ||||||
|  | __setitem__ = setitem | ||||||
|  | __iadd__ = iadd | ||||||
|  | __iand__ = iand | ||||||
|  | __iconcat__ = iconcat | ||||||
|  | __ifloordiv__ = ifloordiv | ||||||
|  | __ilshift__ = ilshift | ||||||
|  | __imod__ = imod | ||||||
|  | __imul__ = imul | ||||||
|  | __ior__ = ior | ||||||
|  | __ipow__ = ipow | ||||||
|  | __irshift__ = irshift | ||||||
|  | __isub__ = isub | ||||||
|  | __itruediv__ = itruediv | ||||||
|  | __ixor__ = ixor | ||||||
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/os.py |  | ||||||
							
								
								
									
										982
									
								
								v1/flask/lib/python3.4/os.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										982
									
								
								v1/flask/lib/python3.4/os.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,982 @@ | |||||||
|  | r"""OS routines for NT or Posix depending on what system we're on. | ||||||
|  |  | ||||||
|  | This exports: | ||||||
|  |   - all functions from posix, nt or ce, e.g. unlink, stat, etc. | ||||||
|  |   - os.path is either posixpath or ntpath | ||||||
|  |   - os.name is either 'posix', 'nt' or 'ce'. | ||||||
|  |   - os.curdir is a string representing the current directory ('.' or ':') | ||||||
|  |   - os.pardir is a string representing the parent directory ('..' or '::') | ||||||
|  |   - os.sep is the (or a most common) pathname separator ('/' or ':' or '\\') | ||||||
|  |   - os.extsep is the extension separator (always '.') | ||||||
|  |   - os.altsep is the alternate pathname separator (None or '/') | ||||||
|  |   - os.pathsep is the component separator used in $PATH etc | ||||||
|  |   - os.linesep is the line separator in text files ('\r' or '\n' or '\r\n') | ||||||
|  |   - os.defpath is the default search path for executables | ||||||
|  |   - os.devnull is the file path of the null device ('/dev/null', etc.) | ||||||
|  |  | ||||||
|  | Programs that import and use 'os' stand a better chance of being | ||||||
|  | portable between different platforms.  Of course, they must then | ||||||
|  | only use functions that are defined by all platforms (e.g., unlink | ||||||
|  | and opendir), and leave all pathname manipulation to os.path | ||||||
|  | (e.g., split and join). | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | #' | ||||||
|  |  | ||||||
|  | import sys, errno | ||||||
|  | import stat as st | ||||||
|  |  | ||||||
|  | _names = sys.builtin_module_names | ||||||
|  |  | ||||||
|  | # Note:  more names are added to __all__ later. | ||||||
|  | __all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep", | ||||||
|  |            "defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR", | ||||||
|  |            "SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen", | ||||||
|  |            "popen", "extsep"] | ||||||
|  |  | ||||||
|  | def _exists(name): | ||||||
|  |     return name in globals() | ||||||
|  |  | ||||||
|  | def _get_exports_list(module): | ||||||
|  |     try: | ||||||
|  |         return list(module.__all__) | ||||||
|  |     except AttributeError: | ||||||
|  |         return [n for n in dir(module) if n[0] != '_'] | ||||||
|  |  | ||||||
|  | # Any new dependencies of the os module and/or changes in path separator | ||||||
|  | # requires updating importlib as well. | ||||||
|  | if 'posix' in _names: | ||||||
|  |     name = 'posix' | ||||||
|  |     linesep = '\n' | ||||||
|  |     from posix import * | ||||||
|  |     try: | ||||||
|  |         from posix import _exit | ||||||
|  |         __all__.append('_exit') | ||||||
|  |     except ImportError: | ||||||
|  |         pass | ||||||
|  |     import posixpath as path | ||||||
|  |  | ||||||
|  |     try: | ||||||
|  |         from posix import _have_functions | ||||||
|  |     except ImportError: | ||||||
|  |         pass | ||||||
|  |  | ||||||
|  | elif 'nt' in _names: | ||||||
|  |     name = 'nt' | ||||||
|  |     linesep = '\r\n' | ||||||
|  |     from nt import * | ||||||
|  |     try: | ||||||
|  |         from nt import _exit | ||||||
|  |         __all__.append('_exit') | ||||||
|  |     except ImportError: | ||||||
|  |         pass | ||||||
|  |     import ntpath as path | ||||||
|  |  | ||||||
|  |     import nt | ||||||
|  |     __all__.extend(_get_exports_list(nt)) | ||||||
|  |     del nt | ||||||
|  |  | ||||||
|  |     try: | ||||||
|  |         from nt import _have_functions | ||||||
|  |     except ImportError: | ||||||
|  |         pass | ||||||
|  |  | ||||||
|  | elif 'ce' in _names: | ||||||
|  |     name = 'ce' | ||||||
|  |     linesep = '\r\n' | ||||||
|  |     from ce import * | ||||||
|  |     try: | ||||||
|  |         from ce import _exit | ||||||
|  |         __all__.append('_exit') | ||||||
|  |     except ImportError: | ||||||
|  |         pass | ||||||
|  |     # We can use the standard Windows path. | ||||||
|  |     import ntpath as path | ||||||
|  |  | ||||||
|  |     import ce | ||||||
|  |     __all__.extend(_get_exports_list(ce)) | ||||||
|  |     del ce | ||||||
|  |  | ||||||
|  |     try: | ||||||
|  |         from ce import _have_functions | ||||||
|  |     except ImportError: | ||||||
|  |         pass | ||||||
|  |  | ||||||
|  | else: | ||||||
|  |     raise ImportError('no os specific module found') | ||||||
|  |  | ||||||
|  | sys.modules['os.path'] = path | ||||||
|  | from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep, | ||||||
|  |     devnull) | ||||||
|  |  | ||||||
|  | del _names | ||||||
|  |  | ||||||
|  |  | ||||||
|  | if _exists("_have_functions"): | ||||||
|  |     _globals = globals() | ||||||
|  |     def _add(str, fn): | ||||||
|  |         if (fn in _globals) and (str in _have_functions): | ||||||
|  |             _set.add(_globals[fn]) | ||||||
|  |  | ||||||
|  |     _set = set() | ||||||
|  |     _add("HAVE_FACCESSAT",  "access") | ||||||
|  |     _add("HAVE_FCHMODAT",   "chmod") | ||||||
|  |     _add("HAVE_FCHOWNAT",   "chown") | ||||||
|  |     _add("HAVE_FSTATAT",    "stat") | ||||||
|  |     _add("HAVE_FUTIMESAT",  "utime") | ||||||
|  |     _add("HAVE_LINKAT",     "link") | ||||||
|  |     _add("HAVE_MKDIRAT",    "mkdir") | ||||||
|  |     _add("HAVE_MKFIFOAT",   "mkfifo") | ||||||
|  |     _add("HAVE_MKNODAT",    "mknod") | ||||||
|  |     _add("HAVE_OPENAT",     "open") | ||||||
|  |     _add("HAVE_READLINKAT", "readlink") | ||||||
|  |     _add("HAVE_RENAMEAT",   "rename") | ||||||
|  |     _add("HAVE_SYMLINKAT",  "symlink") | ||||||
|  |     _add("HAVE_UNLINKAT",   "unlink") | ||||||
|  |     _add("HAVE_UNLINKAT",   "rmdir") | ||||||
|  |     _add("HAVE_UTIMENSAT",  "utime") | ||||||
|  |     supports_dir_fd = _set | ||||||
|  |  | ||||||
|  |     _set = set() | ||||||
|  |     _add("HAVE_FACCESSAT",  "access") | ||||||
|  |     supports_effective_ids = _set | ||||||
|  |  | ||||||
|  |     _set = set() | ||||||
|  |     _add("HAVE_FCHDIR",     "chdir") | ||||||
|  |     _add("HAVE_FCHMOD",     "chmod") | ||||||
|  |     _add("HAVE_FCHOWN",     "chown") | ||||||
|  |     _add("HAVE_FDOPENDIR",  "listdir") | ||||||
|  |     _add("HAVE_FEXECVE",    "execve") | ||||||
|  |     _set.add(stat) # fstat always works | ||||||
|  |     _add("HAVE_FTRUNCATE",  "truncate") | ||||||
|  |     _add("HAVE_FUTIMENS",   "utime") | ||||||
|  |     _add("HAVE_FUTIMES",    "utime") | ||||||
|  |     _add("HAVE_FPATHCONF",  "pathconf") | ||||||
|  |     if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3 | ||||||
|  |         _add("HAVE_FSTATVFS", "statvfs") | ||||||
|  |     supports_fd = _set | ||||||
|  |  | ||||||
|  |     _set = set() | ||||||
|  |     _add("HAVE_FACCESSAT",  "access") | ||||||
|  |     # Some platforms don't support lchmod().  Often the function exists | ||||||
|  |     # anyway, as a stub that always returns ENOSUP or perhaps EOPNOTSUPP. | ||||||
|  |     # (No, I don't know why that's a good design.)  ./configure will detect | ||||||
|  |     # this and reject it--so HAVE_LCHMOD still won't be defined on such | ||||||
|  |     # platforms.  This is Very Helpful. | ||||||
|  |     # | ||||||
|  |     # However, sometimes platforms without a working lchmod() *do* have | ||||||
|  |     # fchmodat().  (Examples: Linux kernel 3.2 with glibc 2.15, | ||||||
|  |     # OpenIndiana 3.x.)  And fchmodat() has a flag that theoretically makes | ||||||
|  |     # it behave like lchmod().  So in theory it would be a suitable | ||||||
|  |     # replacement for lchmod().  But when lchmod() doesn't work, fchmodat()'s | ||||||
|  |     # flag doesn't work *either*.  Sadly ./configure isn't sophisticated | ||||||
|  |     # enough to detect this condition--it only determines whether or not | ||||||
|  |     # fchmodat() minimally works. | ||||||
|  |     # | ||||||
|  |     # Therefore we simply ignore fchmodat() when deciding whether or not | ||||||
|  |     # os.chmod supports follow_symlinks.  Just checking lchmod() is | ||||||
|  |     # sufficient.  After all--if you have a working fchmodat(), your | ||||||
|  |     # lchmod() almost certainly works too. | ||||||
|  |     # | ||||||
|  |     # _add("HAVE_FCHMODAT",   "chmod") | ||||||
|  |     _add("HAVE_FCHOWNAT",   "chown") | ||||||
|  |     _add("HAVE_FSTATAT",    "stat") | ||||||
|  |     _add("HAVE_LCHFLAGS",   "chflags") | ||||||
|  |     _add("HAVE_LCHMOD",     "chmod") | ||||||
|  |     if _exists("lchown"): # mac os x10.3 | ||||||
|  |         _add("HAVE_LCHOWN", "chown") | ||||||
|  |     _add("HAVE_LINKAT",     "link") | ||||||
|  |     _add("HAVE_LUTIMES",    "utime") | ||||||
|  |     _add("HAVE_LSTAT",      "stat") | ||||||
|  |     _add("HAVE_FSTATAT",    "stat") | ||||||
|  |     _add("HAVE_UTIMENSAT",  "utime") | ||||||
|  |     _add("MS_WINDOWS",      "stat") | ||||||
|  |     supports_follow_symlinks = _set | ||||||
|  |  | ||||||
|  |     del _set | ||||||
|  |     del _have_functions | ||||||
|  |     del _globals | ||||||
|  |     del _add | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Python uses fixed values for the SEEK_ constants; they are mapped | ||||||
|  | # to native constants if necessary in posixmodule.c | ||||||
|  | # Other possible SEEK values are directly imported from posixmodule.c | ||||||
|  | SEEK_SET = 0 | ||||||
|  | SEEK_CUR = 1 | ||||||
|  | SEEK_END = 2 | ||||||
|  |  | ||||||
|  | # Super directory utilities. | ||||||
|  | # (Inspired by Eric Raymond; the doc strings are mostly his) | ||||||
|  |  | ||||||
|  | def makedirs(name, mode=0o777, exist_ok=False): | ||||||
|  |     """makedirs(name [, mode=0o777][, exist_ok=False]) | ||||||
|  |  | ||||||
|  |     Super-mkdir; create a leaf directory and all intermediate ones.  Works like | ||||||
|  |     mkdir, except that any intermediate path segment (not just the rightmost) | ||||||
|  |     will be created if it does not exist. If the target directory already | ||||||
|  |     exists, raise an OSError if exist_ok is False. Otherwise no exception is | ||||||
|  |     raised.  This is recursive. | ||||||
|  |  | ||||||
|  |     """ | ||||||
|  |     head, tail = path.split(name) | ||||||
|  |     if not tail: | ||||||
|  |         head, tail = path.split(head) | ||||||
|  |     if head and tail and not path.exists(head): | ||||||
|  |         try: | ||||||
|  |             makedirs(head, mode, exist_ok) | ||||||
|  |         except FileExistsError: | ||||||
|  |             # Defeats race condition when another thread created the path | ||||||
|  |             pass | ||||||
|  |         cdir = curdir | ||||||
|  |         if isinstance(tail, bytes): | ||||||
|  |             cdir = bytes(curdir, 'ASCII') | ||||||
|  |         if tail == cdir:           # xxx/newdir/. exists if xxx/newdir exists | ||||||
|  |             return | ||||||
|  |     try: | ||||||
|  |         mkdir(name, mode) | ||||||
|  |     except OSError: | ||||||
|  |         # Cannot rely on checking for EEXIST, since the operating system | ||||||
|  |         # could give priority to other errors like EACCES or EROFS | ||||||
|  |         if not exist_ok or not path.isdir(name): | ||||||
|  |             raise | ||||||
|  |  | ||||||
def removedirs(name):
    """removedirs(name)

    Remove a leaf directory and then prune away every empty parent.

    Works like rmdir on the leaf; if that succeeds, each rightmost path
    segment in turn is removed until either the whole path has been
    consumed or a removal fails.  Failures during the pruning phase are
    ignored -- they generally just mean a directory was not empty.

    """
    rmdir(name)
    parent, leaf = path.split(name)
    if not leaf:
        # Path ended in a separator; split again to get the real leaf.
        parent, leaf = path.split(parent)
    while parent and leaf:
        try:
            rmdir(parent)
        except OSError:
            # Parent not removable (typically non-empty): stop pruning.
            break
        parent, leaf = path.split(parent)
|  |  | ||||||
def renames(old, new):
    """renames(old, new)

    Super-rename: rename *old* to *new*, creating any intermediate
    directories the new pathname needs first.  Afterwards, the rightmost
    path segments of *old* are pruned (via removedirs) until the whole
    path is consumed or a non-empty directory is hit.

    Note: this function can fail with the new directory structure made
    if you lack permissions needed to unlink the leaf directory or
    file.

    """
    dst_dir, dst_leaf = path.split(new)
    if dst_dir and dst_leaf and not path.exists(dst_dir):
        makedirs(dst_dir)
    rename(old, new)
    src_dir, src_leaf = path.split(old)
    if src_dir and src_leaf:
        try:
            removedirs(src_dir)
        except OSError:
            # Best effort only: leftover non-empty directories stay.
            pass
|  |  | ||||||
# Export the directory-tree helpers defined above.
__all__.extend(["makedirs", "removedirs", "renames"])
|  |  | ||||||
def walk(top, topdown=True, onerror=None, followlinks=False):
    """Directory tree generator.

    Yield a 3-tuple ``(dirpath, dirnames, filenames)`` for every directory
    in the tree rooted at *top* (including *top* itself, but excluding
    '.' and '..').  ``dirpath`` is the path to the directory; ``dirnames``
    lists the names of its subdirectories and ``filenames`` the names of
    its non-directory entries -- names only, no path components.  Use
    os.path.join(dirpath, name) to build a full path.

    If *topdown* is true (the default) a directory's triple is generated
    before those of its subdirectories, and the caller may prune or reorder
    the walk by editing ``dirnames`` in place (e.g. via del or slice
    assignment) -- only the names that remain are recursed into.  If
    *topdown* is false the triple comes after its subdirectories' triples
    and editing ``dirnames`` is ineffective.  Either way, the list of
    subdirectories is retrieved before the tuples are generated.

    Errors from the os.listdir() call are ignored by default.  If
    *onerror* is supplied it is called with the OSError instance (the
    filename is available as the exception's filename attribute); it may
    raise to abort the walk, or return to continue.

    Symbolic links to subdirectories are not followed unless *followlinks*
    is true.

    Caution: if *top* is a relative pathname, do not change the current
    working directory between resumptions of walk(); walk() never changes
    it and assumes the caller doesn't either.

    Example:

    import os
    from os.path import join, getsize
    for root, dirs, files in os.walk('python/Lib/email'):
        print(root, "consumes", end="")
        print(sum([getsize(join(root, name)) for name in files]), end="")
        print("bytes in", len(files), "non-directory files")
        if 'CVS' in dirs:
            dirs.remove('CVS')  # don't visit CVS directories
    """

    islink, join, isdir = path.islink, path.join, path.isdir

    # We may lack read permission for top; os.walk has always suppressed
    # that error here rather than aborting a larger traversal for it.
    try:
        # listdir is a module global here (os does import-* from posix/nt).
        entries = listdir(top)
    except OSError as err:
        if onerror is not None:
            onerror(err)
        return

    subdirs, regular = [], []
    for entry in entries:
        target = subdirs if isdir(join(top, entry)) else regular
        target.append(entry)

    if topdown:
        # Yield before recursing so the caller can prune ``subdirs``.
        yield top, subdirs, regular
    for entry in subdirs:
        child = join(top, entry)
        if followlinks or not islink(child):
            yield from walk(child, topdown, onerror, followlinks)
    if not topdown:
        yield top, subdirs, regular
|  |  | ||||||
# Export the generator defined above.
__all__.append("walk")
|  |  | ||||||
# fwalk() is only defined when the platform supports the dir_fd form of
# open()/stat() and the fd form of listdir()/stat(), per the capability
# sets checked here.
if {open, stat} <= supports_dir_fd and {listdir, stat} <= supports_fd:

    def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None):
        """Directory tree generator.

        This behaves exactly like walk(), except that it yields a 4-tuple

            dirpath, dirnames, filenames, dirfd

        `dirpath`, `dirnames` and `filenames` are identical to walk() output,
        and `dirfd` is a file descriptor referring to the directory `dirpath`.

        The advantage of fwalk() over walk() is that it's safe against symlink
        races (when follow_symlinks is False).

        If dir_fd is not None, it should be a file descriptor open to a directory,
          and top should be relative; top will then be relative to that directory.
          (dir_fd is always supported for fwalk.)

        Caution:
        Since fwalk() yields file descriptors, those are only valid until the
        next iteration step, so you should dup() them if you want to keep them
        for a longer period.

        Example:

        import os
        for root, dirs, files, rootfd in os.fwalk('python/Lib/email'):
            print(root, "consumes", end="")
            print(sum([os.stat(name, dir_fd=rootfd).st_size for name in files]),
                  end="")
            print("bytes in", len(files), "non-directory files")
            if 'CVS' in dirs:
                dirs.remove('CVS')  # don't visit CVS directories
        """
        # Note: To guard against symlink races, we use the standard
        # lstat()/open()/fstat() trick.
        orig_st = stat(top, follow_symlinks=False, dir_fd=dir_fd)
        topfd = open(top, O_RDONLY, dir_fd=dir_fd)
        try:
            # ``st`` is presumably the stat module (imported elsewhere in this
            # file); ``stat``/``open`` here are the os-level calls. Only recurse
            # when top is (still) the same directory we lstat'ed above.
            if (follow_symlinks or (st.S_ISDIR(orig_st.st_mode) and
                                    path.samestat(orig_st, stat(topfd)))):
                yield from _fwalk(topfd, top, topdown, onerror, follow_symlinks)
        finally:
            # The fd is closed even if the generator is closed early.
            close(topfd)

    def _fwalk(topfd, toppath, topdown, onerror, follow_symlinks):
        # Note: This uses O(depth of the directory tree) file descriptors: if
        # necessary, it can be adapted to only require O(1) FDs, see issue
        # #13734.

        names = listdir(topfd)
        dirs, nondirs = [], []
        for name in names:
            try:
                # Here, we don't use AT_SYMLINK_NOFOLLOW to be consistent with
                # walk() which reports symlinks to directories as directories.
                # We do however check for symlinks before recursing into
                # a subdirectory.
                if st.S_ISDIR(stat(name, dir_fd=topfd).st_mode):
                    dirs.append(name)
                else:
                    nondirs.append(name)
            except FileNotFoundError:
                try:
                    # Add dangling symlinks, ignore disappeared files
                    if st.S_ISLNK(stat(name, dir_fd=topfd, follow_symlinks=False)
                                .st_mode):
                        nondirs.append(name)
                except FileNotFoundError:
                    continue

        if topdown:
            yield toppath, dirs, nondirs, topfd

        for name in dirs:
            try:
                orig_st = stat(name, dir_fd=topfd, follow_symlinks=follow_symlinks)
                dirfd = open(name, O_RDONLY, dir_fd=topfd)
            except OSError as err:
                if onerror is not None:
                    onerror(err)
                # NOTE(review): this ``return`` abandons the remaining sibling
                # directories at this level, whereas walk() skips only the
                # failing directory — possibly should be ``continue``; confirm
                # against newer CPython behavior.
                return
            try:
                # Re-check identity after open() to defeat symlink races.
                if follow_symlinks or path.samestat(orig_st, stat(dirfd)):
                    dirpath = path.join(toppath, name)
                    yield from _fwalk(dirfd, dirpath, topdown, onerror, follow_symlinks)
            finally:
                close(dirfd)

        if not topdown:
            yield toppath, dirs, nondirs, topfd

    __all__.append("fwalk")
|  |  | ||||||
# Make sure os.environ exists, at least.  The platform module (posix/nt)
# normally supplies one via the import-* above; fall back to an empty
# mapping so the wrapping code below always has something to wrap.
try:
    environ
except NameError:
    environ = {}
|  |  | ||||||
def execl(file, *args):
    """execl(file, *args)

    Replace the current process by executing *file*, using the given
    positional arguments as the new process's argument list. """
    execv(file, args)
|  |  | ||||||
def execle(file, *args):
    """execle(file, *args, env)

    Replace the current process by executing *file*.  The final
    positional argument is the environment mapping; everything before it
    becomes the new process's argument list. """
    execve(file, args[:-1], args[-1])
|  |  | ||||||
def execlp(file, *args):
    """execlp(file, *args)

    Replace the current process by executing *file* (searched for along
    $PATH), using the given positional arguments as the new process's
    argument list. """
    execvp(file, args)
|  |  | ||||||
def execlpe(file, *args):
    """execlpe(file, *args, env)

    Replace the current process by executing *file* (searched for along
    $PATH).  The final positional argument is the environment mapping;
    everything before it becomes the new process's argument list. """
    execvpe(file, args[:-1], args[-1])
|  |  | ||||||
def execvp(file, args):
    """execvp(file, args)

    Replace the current process by executing *file* (searched for along
    $PATH) with argument list *args*, which may be a list or tuple of
    strings. """
    _execvpe(file, args)
|  |  | ||||||
def execvpe(file, args, env):
    """execvpe(file, args, env)

    Replace the current process by executing *file* (searched for along
    the PATH of *env*) with argument list *args* and environment *env*.
    *args* may be a list or tuple of strings. """
    _execvpe(file, args, env)
|  |  | ||||||
# Export the exec* convenience wrappers defined above.
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
|  |  | ||||||
def _execvpe(file, args, env=None):
    # Shared implementation behind execvp()/execvpe(): emulate the shell's
    # $PATH search, invoking the real exec primitive on each candidate.
    if env is not None:
        exec_func = execve
        argrest = (args, env)
    else:
        exec_func = execv
        argrest = (args,)
        env = environ

    # A name containing a directory component is executed as-is, no search.
    head, tail = path.split(file)
    if head:
        exec_func(file, *argrest)
        return
    last_exc = saved_exc = None
    saved_tb = None
    path_list = get_exec_path(env)
    if name != 'nt':
        file = fsencode(file)
        path_list = map(fsencode, path_list)
    for dir in path_list:
        fullname = path.join(dir, file)
        try:
            exec_func(fullname, *argrest)
        except OSError as e:
            last_exc = e
            tb = sys.exc_info()[2]
            # Remember the first "interesting" failure: anything other than
            # the file simply not existing in this particular directory.
            if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
                and saved_exc is None):
                saved_exc = e
                saved_tb = tb
    if saved_exc:
        raise saved_exc.with_traceback(saved_tb)
    # NOTE(review): if get_exec_path() returned an empty sequence, ``last_exc``
    # and ``tb`` are unbound here and this line raises NameError/AttributeError
    # instead of the intended OSError — confirm whether that case can occur.
    raise last_exc.with_traceback(tb)
|  |  | ||||||
|  |  | ||||||
def get_exec_path(env=None):
    """Returns the sequence of directories that will be searched for the
    named executable (similar to a shell) when launching a process.

    *env* must be an environment variable dict or None.  If *env* is None,
    os.environ will be used.
    """
    # Local import rather than module level: keeps os import cheap and
    # avoids a potential bootstrap issue.
    import warnings

    mapping = environ if env is None else env

    # Probing a bytes-keyed dict with 'PATH' (or vice versa) emits a
    # BytesWarning under ``python -b`` / ``-bb``; silence it for the probes.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", BytesWarning)

        try:
            found = mapping.get('PATH')
        except TypeError:
            found = None

        if supports_bytes_environ:
            try:
                found_bytes = mapping[b'PATH']
            except (KeyError, TypeError):
                pass
            else:
                if found is not None:
                    raise ValueError(
                        "env cannot contain 'PATH' and b'PATH' keys")
                found = found_bytes

            if found is not None and isinstance(found, bytes):
                found = fsdecode(found)

    if found is None:
        found = defpath
    return found.split(pathsep)
|  |  | ||||||
|  |  | ||||||
|  | # Change environ to automatically call putenv(), unsetenv if they exist. | ||||||
|  | from _collections_abc import MutableMapping | ||||||
|  |  | ||||||
|  | class _Environ(MutableMapping): | ||||||
|  |     def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue, putenv, unsetenv): | ||||||
|  |         self.encodekey = encodekey | ||||||
|  |         self.decodekey = decodekey | ||||||
|  |         self.encodevalue = encodevalue | ||||||
|  |         self.decodevalue = decodevalue | ||||||
|  |         self.putenv = putenv | ||||||
|  |         self.unsetenv = unsetenv | ||||||
|  |         self._data = data | ||||||
|  |  | ||||||
|  |     def __getitem__(self, key): | ||||||
|  |         try: | ||||||
|  |             value = self._data[self.encodekey(key)] | ||||||
|  |         except KeyError: | ||||||
|  |             # raise KeyError with the original key value | ||||||
|  |             raise KeyError(key) from None | ||||||
|  |         return self.decodevalue(value) | ||||||
|  |  | ||||||
|  |     def __setitem__(self, key, value): | ||||||
|  |         key = self.encodekey(key) | ||||||
|  |         value = self.encodevalue(value) | ||||||
|  |         self.putenv(key, value) | ||||||
|  |         self._data[key] = value | ||||||
|  |  | ||||||
|  |     def __delitem__(self, key): | ||||||
|  |         encodedkey = self.encodekey(key) | ||||||
|  |         self.unsetenv(encodedkey) | ||||||
|  |         try: | ||||||
|  |             del self._data[encodedkey] | ||||||
|  |         except KeyError: | ||||||
|  |             # raise KeyError with the original key value | ||||||
|  |             raise KeyError(key) from None | ||||||
|  |  | ||||||
|  |     def __iter__(self): | ||||||
|  |         for key in self._data: | ||||||
|  |             yield self.decodekey(key) | ||||||
|  |  | ||||||
|  |     def __len__(self): | ||||||
|  |         return len(self._data) | ||||||
|  |  | ||||||
|  |     def __repr__(self): | ||||||
|  |         return 'environ({{{}}})'.format(', '.join( | ||||||
|  |             ('{!r}: {!r}'.format(self.decodekey(key), self.decodevalue(value)) | ||||||
|  |             for key, value in self._data.items()))) | ||||||
|  |  | ||||||
|  |     def copy(self): | ||||||
|  |         return dict(self) | ||||||
|  |  | ||||||
|  |     def setdefault(self, key, value): | ||||||
|  |         if key not in self: | ||||||
|  |             self[key] = value | ||||||
|  |         return self[key] | ||||||
|  |  | ||||||
# putenv()/unsetenv() are optional platform primitives; capture them if
# present (and export them), otherwise substitute harmless fallbacks so
# the _Environ wrappers below always have callables to invoke.
try:
    _putenv = putenv
except NameError:
    _putenv = lambda key, value: None
else:
    if "putenv" not in __all__:
        __all__.append("putenv")

try:
    _unsetenv = unsetenv
except NameError:
    # No native unsetenv: approximate by setting the value to "".
    _unsetenv = lambda key: _putenv(key, "")
else:
    if "unsetenv" not in __all__:
        __all__.append("unsetenv")
|  |  | ||||||
def _createenviron():
    """Build the str-based _Environ wrapper around the platform environ."""
    if name == 'nt':
        # Windows: names are case-insensitive; canonical form is UPPERCASE
        # str, and values pass through unchanged.
        def check_str(value):
            if not isinstance(value, str):
                raise TypeError("str expected, not %s" % type(value).__name__)
            return value
        encode = check_str
        decode = str
        def encodekey(key):
            return encode(key).upper()
        data = {encodekey(key): value for key, value in environ.items()}
    else:
        # POSIX: the native environ is bytes under the filesystem encoding.
        encoding = sys.getfilesystemencoding()
        def encode(value):
            if not isinstance(value, str):
                raise TypeError("str expected, not %s" % type(value).__name__)
            return value.encode(encoding, 'surrogateescape')
        def decode(value):
            return value.decode(encoding, 'surrogateescape')
        encodekey = encode
        data = environ
    return _Environ(data,
                    encodekey, decode,
                    encode, decode,
                    _putenv, _unsetenv)
|  |  | ||||||
# unicode environ: replace the raw platform mapping with the str-based
# wrapper.  The factory is needed only once, so drop it afterwards.
environ = _createenviron()
del _createenviron
|  |  | ||||||
|  |  | ||||||
def getenv(key, default=None):
    """Get an environment variable, return None if it doesn't exist.
    The optional second argument can specify an alternate default.
    key, default and the result are str."""
    try:
        return environ[key]
    except KeyError:
        return default
|  |  | ||||||
# A bytes-level environ is available everywhere except Windows.
supports_bytes_environ = (name != 'nt')
__all__.extend(("getenv", "supports_bytes_environ"))
|  |  | ||||||
if supports_bytes_environ:
    # Identity codec with a type check: environb passes bytes straight
    # through but rejects anything else.
    def _check_bytes(value):
        if not isinstance(value, bytes):
            raise TypeError("bytes expected, not %s" % type(value).__name__)
        return value

    # bytes environ: shares the same underlying dict as the str environ,
    # so mutations through either view are seen by both.
    environb = _Environ(environ._data,
        _check_bytes, bytes,
        _check_bytes, bytes,
        _putenv, _unsetenv)
    del _check_bytes

    def getenvb(key, default=None):
        """Get an environment variable, return None if it doesn't exist.
        The optional second argument can specify an alternate default.
        key, default and the result are bytes."""
        return environb.get(key, default)

    __all__.extend(("environb", "getenvb"))
|  |  | ||||||
|  | def _fscodec(): | ||||||
|  |     encoding = sys.getfilesystemencoding() | ||||||
|  |     if encoding == 'mbcs': | ||||||
|  |         errors = 'strict' | ||||||
|  |     else: | ||||||
|  |         errors = 'surrogateescape' | ||||||
|  |  | ||||||
|  |     def fsencode(filename): | ||||||
|  |         """ | ||||||
|  |         Encode filename to the filesystem encoding with 'surrogateescape' error | ||||||
|  |         handler, return bytes unchanged. On Windows, use 'strict' error handler if | ||||||
|  |         the file system encoding is 'mbcs' (which is the default encoding). | ||||||
|  |         """ | ||||||
|  |         if isinstance(filename, bytes): | ||||||
|  |             return filename | ||||||
|  |         elif isinstance(filename, str): | ||||||
|  |             return filename.encode(encoding, errors) | ||||||
|  |         else: | ||||||
|  |             raise TypeError("expect bytes or str, not %s" % type(filename).__name__) | ||||||
|  |  | ||||||
|  |     def fsdecode(filename): | ||||||
|  |         """ | ||||||
|  |         Decode filename from the filesystem encoding with 'surrogateescape' error | ||||||
|  |         handler, return str unchanged. On Windows, use 'strict' error handler if | ||||||
|  |         the file system encoding is 'mbcs' (which is the default encoding). | ||||||
|  |         """ | ||||||
|  |         if isinstance(filename, str): | ||||||
|  |             return filename | ||||||
|  |         elif isinstance(filename, bytes): | ||||||
|  |             return filename.decode(encoding, errors) | ||||||
|  |         else: | ||||||
|  |             raise TypeError("expect bytes or str, not %s" % type(filename).__name__) | ||||||
|  |  | ||||||
|  |     return fsencode, fsdecode | ||||||
|  |  | ||||||
# Materialize the codec pair once; the factory itself is not public API.
fsencode, fsdecode = _fscodec()
del _fscodec
|  |  | ||||||
# Supply spawn*() (probably only for Unix): emulated on top of fork()+exec*()
# when the platform has fork/execv but no native spawnv.
if _exists("fork") and not _exists("spawnv") and _exists("execv"):

    # Spawn modes: wait for completion, or return immediately with the pid.
    P_WAIT = 0
    P_NOWAIT = P_NOWAITO = 1

    __all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"])

    # XXX Should we support P_DETACH?  I suppose it could fork()**2
    # and close the std I/O streams.  Also, P_OVERLAY is the same
    # as execv*()?

    def _spawnvef(mode, file, args, env, func):
        # Internal helper; func is the exec*() function to use
        pid = fork()
        if not pid:
            # Child
            try:
                if env is None:
                    func(file, args)
                else:
                    func(file, args, env)
            except:
                # Deliberate bare except: the forked child must never escape
                # back into the caller's code; any failure exits with 127.
                _exit(127)
        else:
            # Parent
            if mode == P_NOWAIT:
                return pid # Caller is responsible for waiting!
            # P_WAIT: loop until the child actually terminates, ignoring
            # job-control stops, and translate the wait status.
            while 1:
                wpid, sts = waitpid(pid, 0)
                if WIFSTOPPED(sts):
                    continue
                elif WIFSIGNALED(sts):
                    return -WTERMSIG(sts)
                elif WIFEXITED(sts):
                    return WEXITSTATUS(sts)
                else:
                    raise OSError("Not stopped, signaled or exited???")

    def spawnv(mode, file, args):
        """spawnv(mode, file, args) -> integer

Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, None, execv)

    def spawnve(mode, file, args, env):
        """spawnve(mode, file, args, env) -> integer

Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, env, execve)

    # Note: spawnvp[e] is't currently supported on Windows

    def spawnvp(mode, file, args):
        """spawnvp(mode, file, args) -> integer

Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, None, execvp)

    def spawnvpe(mode, file, args, env):
        """spawnvpe(mode, file, args, env) -> integer

Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, env, execvpe)


    __all__.extend(["spawnv", "spawnve", "spawnvp", "spawnvpe"])
|  |  | ||||||
|  |  | ||||||
# Variadic spawnl*() wrappers over spawnv*(), defined whenever spawnv is
# available (native or emulated above).
if _exists("spawnv"):
    # These aren't supplied by the basic Windows code
    # but can be easily implemented in Python

    def spawnl(mode, file, *args):
        """spawnl(mode, file, *args) -> integer

Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
        return spawnv(mode, file, args)

    def spawnle(mode, file, *args):
        """spawnle(mode, file, *args, env) -> integer

Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
        # The trailing positional argument is the environment mapping.
        env = args[-1]
        return spawnve(mode, file, args[:-1], env)


    __all__.extend(["spawnl", "spawnle"])
|  |  | ||||||
|  |  | ||||||
# $PATH-searching variadic wrappers; only present where spawnvp exists.
if _exists("spawnvp"):
    # At the moment, Windows doesn't implement spawnvp[e],
    # so it won't have spawnlp[e] either.
    def spawnlp(mode, file, *args):
        """spawnlp(mode, file, *args) -> integer

Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
        return spawnvp(mode, file, args)

    def spawnlpe(mode, file, *args):
        """spawnlpe(mode, file, *args, env) -> integer

Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
        # The trailing positional argument is the environment mapping.
        env = args[-1]
        return spawnvpe(mode, file, args[:-1], env)


    __all__.extend(["spawnlp", "spawnlpe"])
|  |  | ||||||
|  |  | ||||||
|  | # Supply os.popen() | ||||||
|  | def popen(cmd, mode="r", buffering=-1): | ||||||
|  |     if not isinstance(cmd, str): | ||||||
|  |         raise TypeError("invalid cmd type (%s, expected string)" % type(cmd)) | ||||||
|  |     if mode not in ("r", "w"): | ||||||
|  |         raise ValueError("invalid mode %r" % mode) | ||||||
|  |     if buffering == 0 or buffering is None: | ||||||
|  |         raise ValueError("popen() does not support unbuffered streams") | ||||||
|  |     import subprocess, io | ||||||
|  |     if mode == "r": | ||||||
|  |         proc = subprocess.Popen(cmd, | ||||||
|  |                                 shell=True, | ||||||
|  |                                 stdout=subprocess.PIPE, | ||||||
|  |                                 bufsize=buffering) | ||||||
|  |         return _wrap_close(io.TextIOWrapper(proc.stdout), proc) | ||||||
|  |     else: | ||||||
|  |         proc = subprocess.Popen(cmd, | ||||||
|  |                                 shell=True, | ||||||
|  |                                 stdin=subprocess.PIPE, | ||||||
|  |                                 bufsize=buffering) | ||||||
|  |         return _wrap_close(io.TextIOWrapper(proc.stdin), proc) | ||||||
|  |  | ||||||
|  | # Helper for popen() -- a proxy for a file whose close waits for the process | ||||||
|  | class _wrap_close: | ||||||
|  |     def __init__(self, stream, proc): | ||||||
|  |         self._stream = stream | ||||||
|  |         self._proc = proc | ||||||
|  |     def close(self): | ||||||
|  |         self._stream.close() | ||||||
|  |         returncode = self._proc.wait() | ||||||
|  |         if returncode == 0: | ||||||
|  |             return None | ||||||
|  |         if name == 'nt': | ||||||
|  |             return returncode | ||||||
|  |         else: | ||||||
|  |             return returncode << 8  # Shift left to match old behavior | ||||||
|  |     def __enter__(self): | ||||||
|  |         return self | ||||||
|  |     def __exit__(self, *args): | ||||||
|  |         self.close() | ||||||
|  |     def __getattr__(self, name): | ||||||
|  |         return getattr(self._stream, name) | ||||||
|  |     def __iter__(self): | ||||||
|  |         return iter(self._stream) | ||||||
|  |  | ||||||
|  | # Supply os.fdopen() | ||||||
|  | def fdopen(fd, *args, **kwargs): | ||||||
|  |     if not isinstance(fd, int): | ||||||
|  |         raise TypeError("invalid fd type (%s, expected integer)" % type(fd)) | ||||||
|  |     import io | ||||||
|  |     return io.open(fd, *args, **kwargs) | ||||||
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/posixpath.py |  | ||||||
							
								
								
									
										457
									
								
								v1/flask/lib/python3.4/posixpath.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										457
									
								
								v1/flask/lib/python3.4/posixpath.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,457 @@ | |||||||
|  | """Common operations on Posix pathnames. | ||||||
|  |  | ||||||
|  | Instead of importing this module directly, import os and refer to | ||||||
|  | this module as os.path.  The "os.path" name is an alias for this | ||||||
|  | module on Posix systems; on other systems (e.g. Mac, Windows), | ||||||
|  | os.path provides the same operations in a manner specific to that | ||||||
|  | platform, and is an alias to another module (e.g. macpath, ntpath). | ||||||
|  |  | ||||||
|  | Some of this can actually be useful on non-Posix systems too, e.g. | ||||||
|  | for manipulation of the pathname component of URLs. | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | import os | ||||||
|  | import sys | ||||||
|  | import stat | ||||||
|  | import genericpath | ||||||
|  | from genericpath import * | ||||||
|  |  | ||||||
|  | __all__ = ["normcase","isabs","join","splitdrive","split","splitext", | ||||||
|  |            "basename","dirname","commonprefix","getsize","getmtime", | ||||||
|  |            "getatime","getctime","islink","exists","lexists","isdir","isfile", | ||||||
|  |            "ismount", "expanduser","expandvars","normpath","abspath", | ||||||
|  |            "samefile","sameopenfile","samestat", | ||||||
|  |            "curdir","pardir","sep","pathsep","defpath","altsep","extsep", | ||||||
|  |            "devnull","realpath","supports_unicode_filenames","relpath"] | ||||||
|  |  | ||||||
|  | # Strings representing various path-related bits and pieces. | ||||||
|  | # These are primarily for export; internally, they are hardcoded. | ||||||
|  | curdir = '.' | ||||||
|  | pardir = '..' | ||||||
|  | extsep = '.' | ||||||
|  | sep = '/' | ||||||
|  | pathsep = ':' | ||||||
|  | defpath = ':/bin:/usr/bin' | ||||||
|  | altsep = None | ||||||
|  | devnull = '/dev/null' | ||||||
|  |  | ||||||
|  | def _get_sep(path): | ||||||
|  |     if isinstance(path, bytes): | ||||||
|  |         return b'/' | ||||||
|  |     else: | ||||||
|  |         return '/' | ||||||
|  |  | ||||||
|  | # Normalize the case of a pathname.  Trivial in Posix, string.lower on Mac. | ||||||
|  | # On MS-DOS this may also turn slashes into backslashes; however, other | ||||||
|  | # normalizations (such as optimizing '../' away) are not allowed | ||||||
|  | # (another function should be defined to do that). | ||||||
|  |  | ||||||
|  | def normcase(s): | ||||||
|  |     """Normalize case of pathname.  Has no effect under Posix""" | ||||||
|  |     if not isinstance(s, (bytes, str)): | ||||||
|  |         raise TypeError("normcase() argument must be str or bytes, " | ||||||
|  |                         "not '{}'".format(s.__class__.__name__)) | ||||||
|  |     return s | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Return whether a path is absolute. | ||||||
|  | # Trivial in Posix, harder on the Mac or MS-DOS. | ||||||
|  |  | ||||||
|  | def isabs(s): | ||||||
|  |     """Test whether a path is absolute""" | ||||||
|  |     sep = _get_sep(s) | ||||||
|  |     return s.startswith(sep) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Join pathnames. | ||||||
|  | # Ignore the previous parts if a part is absolute. | ||||||
|  | # Insert a '/' unless the first part is empty or already ends in '/'. | ||||||
|  |  | ||||||
|  | def join(a, *p): | ||||||
|  |     """Join two or more pathname components, inserting '/' as needed. | ||||||
|  |     If any component is an absolute path, all previous path components | ||||||
|  |     will be discarded.  An empty last part will result in a path that | ||||||
|  |     ends with a separator.""" | ||||||
|  |     sep = _get_sep(a) | ||||||
|  |     path = a | ||||||
|  |     try: | ||||||
|  |         for b in p: | ||||||
|  |             if b.startswith(sep): | ||||||
|  |                 path = b | ||||||
|  |             elif not path or path.endswith(sep): | ||||||
|  |                 path += b | ||||||
|  |             else: | ||||||
|  |                 path += sep + b | ||||||
|  |     except TypeError: | ||||||
|  |         if all(isinstance(s, (str, bytes)) for s in (a,) + p): | ||||||
|  |             # Must have a mixture of text and binary data | ||||||
|  |             raise TypeError("Can't mix strings and bytes in path " | ||||||
|  |                             "components") from None | ||||||
|  |         raise | ||||||
|  |     return path | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Split a path in head (everything up to the last '/') and tail (the | ||||||
|  | # rest).  If the path ends in '/', tail will be empty.  If there is no | ||||||
|  | # '/' in the path, head  will be empty. | ||||||
|  | # Trailing '/'es are stripped from head unless it is the root. | ||||||
|  |  | ||||||
|  | def split(p): | ||||||
|  |     """Split a pathname.  Returns tuple "(head, tail)" where "tail" is | ||||||
|  |     everything after the final slash.  Either part may be empty.""" | ||||||
|  |     sep = _get_sep(p) | ||||||
|  |     i = p.rfind(sep) + 1 | ||||||
|  |     head, tail = p[:i], p[i:] | ||||||
|  |     if head and head != sep*len(head): | ||||||
|  |         head = head.rstrip(sep) | ||||||
|  |     return head, tail | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Split a path in root and extension. | ||||||
|  | # The extension is everything starting at the last dot in the last | ||||||
|  | # pathname component; the root is everything before that. | ||||||
|  | # It is always true that root + ext == p. | ||||||
|  |  | ||||||
|  | def splitext(p): | ||||||
|  |     if isinstance(p, bytes): | ||||||
|  |         sep = b'/' | ||||||
|  |         extsep = b'.' | ||||||
|  |     else: | ||||||
|  |         sep = '/' | ||||||
|  |         extsep = '.' | ||||||
|  |     return genericpath._splitext(p, sep, None, extsep) | ||||||
|  | splitext.__doc__ = genericpath._splitext.__doc__ | ||||||
|  |  | ||||||
|  | # Split a pathname into a drive specification and the rest of the | ||||||
|  | # path.  Useful on DOS/Windows/NT; on Unix, the drive is always empty. | ||||||
|  |  | ||||||
|  | def splitdrive(p): | ||||||
|  |     """Split a pathname into drive and path. On Posix, drive is always | ||||||
|  |     empty.""" | ||||||
|  |     return p[:0], p | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Return the tail (basename) part of a path, same as split(path)[1]. | ||||||
|  |  | ||||||
|  | def basename(p): | ||||||
|  |     """Returns the final component of a pathname""" | ||||||
|  |     sep = _get_sep(p) | ||||||
|  |     i = p.rfind(sep) + 1 | ||||||
|  |     return p[i:] | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Return the head (dirname) part of a path, same as split(path)[0]. | ||||||
|  |  | ||||||
|  | def dirname(p): | ||||||
|  |     """Returns the directory component of a pathname""" | ||||||
|  |     sep = _get_sep(p) | ||||||
|  |     i = p.rfind(sep) + 1 | ||||||
|  |     head = p[:i] | ||||||
|  |     if head and head != sep*len(head): | ||||||
|  |         head = head.rstrip(sep) | ||||||
|  |     return head | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Is a path a symbolic link? | ||||||
|  | # This will always return false on systems where os.lstat doesn't exist. | ||||||
|  |  | ||||||
|  | def islink(path): | ||||||
|  |     """Test whether a path is a symbolic link""" | ||||||
|  |     try: | ||||||
|  |         st = os.lstat(path) | ||||||
|  |     except (OSError, AttributeError): | ||||||
|  |         return False | ||||||
|  |     return stat.S_ISLNK(st.st_mode) | ||||||
|  |  | ||||||
|  | # Being true for dangling symbolic links is also useful. | ||||||
|  |  | ||||||
|  | def lexists(path): | ||||||
|  |     """Test whether a path exists.  Returns True for broken symbolic links""" | ||||||
|  |     try: | ||||||
|  |         os.lstat(path) | ||||||
|  |     except OSError: | ||||||
|  |         return False | ||||||
|  |     return True | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Is a path a mount point? | ||||||
|  | # (Does this work for all UNIXes?  Is it even guaranteed to work by Posix?) | ||||||
|  |  | ||||||
|  | def ismount(path): | ||||||
|  |     """Test whether a path is a mount point""" | ||||||
|  |     try: | ||||||
|  |         s1 = os.lstat(path) | ||||||
|  |     except OSError: | ||||||
|  |         # It doesn't exist -- so not a mount point. :-) | ||||||
|  |         return False | ||||||
|  |     else: | ||||||
|  |         # A symlink can never be a mount point | ||||||
|  |         if stat.S_ISLNK(s1.st_mode): | ||||||
|  |             return False | ||||||
|  |  | ||||||
|  |     if isinstance(path, bytes): | ||||||
|  |         parent = join(path, b'..') | ||||||
|  |     else: | ||||||
|  |         parent = join(path, '..') | ||||||
|  |     try: | ||||||
|  |         s2 = os.lstat(parent) | ||||||
|  |     except OSError: | ||||||
|  |         return False | ||||||
|  |  | ||||||
|  |     dev1 = s1.st_dev | ||||||
|  |     dev2 = s2.st_dev | ||||||
|  |     if dev1 != dev2: | ||||||
|  |         return True     # path/.. on a different device as path | ||||||
|  |     ino1 = s1.st_ino | ||||||
|  |     ino2 = s2.st_ino | ||||||
|  |     if ino1 == ino2: | ||||||
|  |         return True     # path/.. is the same i-node as path | ||||||
|  |     return False | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Expand paths beginning with '~' or '~user'. | ||||||
|  | # '~' means $HOME; '~user' means that user's home directory. | ||||||
|  | # If the path doesn't begin with '~', or if the user or $HOME is unknown, | ||||||
|  | # the path is returned unchanged (leaving error reporting to whatever | ||||||
|  | # function is called with the expanded path as argument). | ||||||
|  | # See also module 'glob' for expansion of *, ? and [...] in pathnames. | ||||||
|  | # (A function should also be defined to do full *sh-style environment | ||||||
|  | # variable expansion.) | ||||||
|  |  | ||||||
|  | def expanduser(path): | ||||||
|  |     """Expand ~ and ~user constructions.  If user or $HOME is unknown, | ||||||
|  |     do nothing.""" | ||||||
|  |     if isinstance(path, bytes): | ||||||
|  |         tilde = b'~' | ||||||
|  |     else: | ||||||
|  |         tilde = '~' | ||||||
|  |     if not path.startswith(tilde): | ||||||
|  |         return path | ||||||
|  |     sep = _get_sep(path) | ||||||
|  |     i = path.find(sep, 1) | ||||||
|  |     if i < 0: | ||||||
|  |         i = len(path) | ||||||
|  |     if i == 1: | ||||||
|  |         if 'HOME' not in os.environ: | ||||||
|  |             import pwd | ||||||
|  |             userhome = pwd.getpwuid(os.getuid()).pw_dir | ||||||
|  |         else: | ||||||
|  |             userhome = os.environ['HOME'] | ||||||
|  |     else: | ||||||
|  |         import pwd | ||||||
|  |         name = path[1:i] | ||||||
|  |         if isinstance(name, bytes): | ||||||
|  |             name = str(name, 'ASCII') | ||||||
|  |         try: | ||||||
|  |             pwent = pwd.getpwnam(name) | ||||||
|  |         except KeyError: | ||||||
|  |             return path | ||||||
|  |         userhome = pwent.pw_dir | ||||||
|  |     if isinstance(path, bytes): | ||||||
|  |         userhome = os.fsencode(userhome) | ||||||
|  |         root = b'/' | ||||||
|  |     else: | ||||||
|  |         root = '/' | ||||||
|  |     userhome = userhome.rstrip(root) | ||||||
|  |     return (userhome + path[i:]) or root | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Expand paths containing shell variable substitutions. | ||||||
|  | # This expands the forms $variable and ${variable} only. | ||||||
|  | # Non-existent variables are left unchanged. | ||||||
|  |  | ||||||
|  | _varprog = None | ||||||
|  | _varprogb = None | ||||||
|  |  | ||||||
|  | def expandvars(path): | ||||||
|  |     """Expand shell variables of form $var and ${var}.  Unknown variables | ||||||
|  |     are left unchanged.""" | ||||||
|  |     global _varprog, _varprogb | ||||||
|  |     if isinstance(path, bytes): | ||||||
|  |         if b'$' not in path: | ||||||
|  |             return path | ||||||
|  |         if not _varprogb: | ||||||
|  |             import re | ||||||
|  |             _varprogb = re.compile(br'\$(\w+|\{[^}]*\})', re.ASCII) | ||||||
|  |         search = _varprogb.search | ||||||
|  |         start = b'{' | ||||||
|  |         end = b'}' | ||||||
|  |         environ = getattr(os, 'environb', None) | ||||||
|  |     else: | ||||||
|  |         if '$' not in path: | ||||||
|  |             return path | ||||||
|  |         if not _varprog: | ||||||
|  |             import re | ||||||
|  |             _varprog = re.compile(r'\$(\w+|\{[^}]*\})', re.ASCII) | ||||||
|  |         search = _varprog.search | ||||||
|  |         start = '{' | ||||||
|  |         end = '}' | ||||||
|  |         environ = os.environ | ||||||
|  |     i = 0 | ||||||
|  |     while True: | ||||||
|  |         m = search(path, i) | ||||||
|  |         if not m: | ||||||
|  |             break | ||||||
|  |         i, j = m.span(0) | ||||||
|  |         name = m.group(1) | ||||||
|  |         if name.startswith(start) and name.endswith(end): | ||||||
|  |             name = name[1:-1] | ||||||
|  |         try: | ||||||
|  |             if environ is None: | ||||||
|  |                 value = os.fsencode(os.environ[os.fsdecode(name)]) | ||||||
|  |             else: | ||||||
|  |                 value = environ[name] | ||||||
|  |         except KeyError: | ||||||
|  |             i = j | ||||||
|  |         else: | ||||||
|  |             tail = path[j:] | ||||||
|  |             path = path[:i] + value | ||||||
|  |             i = len(path) | ||||||
|  |             path += tail | ||||||
|  |     return path | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B. | ||||||
|  | # It should be understood that this may change the meaning of the path | ||||||
|  | # if it contains symbolic links! | ||||||
|  |  | ||||||
|  | def normpath(path): | ||||||
|  |     """Normalize path, eliminating double slashes, etc.""" | ||||||
|  |     if isinstance(path, bytes): | ||||||
|  |         sep = b'/' | ||||||
|  |         empty = b'' | ||||||
|  |         dot = b'.' | ||||||
|  |         dotdot = b'..' | ||||||
|  |     else: | ||||||
|  |         sep = '/' | ||||||
|  |         empty = '' | ||||||
|  |         dot = '.' | ||||||
|  |         dotdot = '..' | ||||||
|  |     if path == empty: | ||||||
|  |         return dot | ||||||
|  |     initial_slashes = path.startswith(sep) | ||||||
|  |     # POSIX allows one or two initial slashes, but treats three or more | ||||||
|  |     # as single slash. | ||||||
|  |     if (initial_slashes and | ||||||
|  |         path.startswith(sep*2) and not path.startswith(sep*3)): | ||||||
|  |         initial_slashes = 2 | ||||||
|  |     comps = path.split(sep) | ||||||
|  |     new_comps = [] | ||||||
|  |     for comp in comps: | ||||||
|  |         if comp in (empty, dot): | ||||||
|  |             continue | ||||||
|  |         if (comp != dotdot or (not initial_slashes and not new_comps) or | ||||||
|  |              (new_comps and new_comps[-1] == dotdot)): | ||||||
|  |             new_comps.append(comp) | ||||||
|  |         elif new_comps: | ||||||
|  |             new_comps.pop() | ||||||
|  |     comps = new_comps | ||||||
|  |     path = sep.join(comps) | ||||||
|  |     if initial_slashes: | ||||||
|  |         path = sep*initial_slashes + path | ||||||
|  |     return path or dot | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def abspath(path): | ||||||
|  |     """Return an absolute path.""" | ||||||
|  |     if not isabs(path): | ||||||
|  |         if isinstance(path, bytes): | ||||||
|  |             cwd = os.getcwdb() | ||||||
|  |         else: | ||||||
|  |             cwd = os.getcwd() | ||||||
|  |         path = join(cwd, path) | ||||||
|  |     return normpath(path) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Return a canonical path (i.e. the absolute location of a file on the | ||||||
|  | # filesystem). | ||||||
|  |  | ||||||
|  | def realpath(filename): | ||||||
|  |     """Return the canonical path of the specified filename, eliminating any | ||||||
|  | symbolic links encountered in the path.""" | ||||||
|  |     path, ok = _joinrealpath(filename[:0], filename, {}) | ||||||
|  |     return abspath(path) | ||||||
|  |  | ||||||
|  | # Join two paths, normalizing ang eliminating any symbolic links | ||||||
|  | # encountered in the second path. | ||||||
|  | def _joinrealpath(path, rest, seen): | ||||||
|  |     if isinstance(path, bytes): | ||||||
|  |         sep = b'/' | ||||||
|  |         curdir = b'.' | ||||||
|  |         pardir = b'..' | ||||||
|  |     else: | ||||||
|  |         sep = '/' | ||||||
|  |         curdir = '.' | ||||||
|  |         pardir = '..' | ||||||
|  |  | ||||||
|  |     if isabs(rest): | ||||||
|  |         rest = rest[1:] | ||||||
|  |         path = sep | ||||||
|  |  | ||||||
|  |     while rest: | ||||||
|  |         name, _, rest = rest.partition(sep) | ||||||
|  |         if not name or name == curdir: | ||||||
|  |             # current dir | ||||||
|  |             continue | ||||||
|  |         if name == pardir: | ||||||
|  |             # parent dir | ||||||
|  |             if path: | ||||||
|  |                 path, name = split(path) | ||||||
|  |                 if name == pardir: | ||||||
|  |                     path = join(path, pardir, pardir) | ||||||
|  |             else: | ||||||
|  |                 path = pardir | ||||||
|  |             continue | ||||||
|  |         newpath = join(path, name) | ||||||
|  |         if not islink(newpath): | ||||||
|  |             path = newpath | ||||||
|  |             continue | ||||||
|  |         # Resolve the symbolic link | ||||||
|  |         if newpath in seen: | ||||||
|  |             # Already seen this path | ||||||
|  |             path = seen[newpath] | ||||||
|  |             if path is not None: | ||||||
|  |                 # use cached value | ||||||
|  |                 continue | ||||||
|  |             # The symlink is not resolved, so we must have a symlink loop. | ||||||
|  |             # Return already resolved part + rest of the path unchanged. | ||||||
|  |             return join(newpath, rest), False | ||||||
|  |         seen[newpath] = None # not resolved symlink | ||||||
|  |         path, ok = _joinrealpath(path, os.readlink(newpath), seen) | ||||||
|  |         if not ok: | ||||||
|  |             return join(path, rest), False | ||||||
|  |         seen[newpath] = path # resolved symlink | ||||||
|  |  | ||||||
|  |     return path, True | ||||||
|  |  | ||||||
|  |  | ||||||
|  | supports_unicode_filenames = (sys.platform == 'darwin') | ||||||
|  |  | ||||||
|  | def relpath(path, start=None): | ||||||
|  |     """Return a relative version of a path""" | ||||||
|  |  | ||||||
|  |     if not path: | ||||||
|  |         raise ValueError("no path specified") | ||||||
|  |  | ||||||
|  |     if isinstance(path, bytes): | ||||||
|  |         curdir = b'.' | ||||||
|  |         sep = b'/' | ||||||
|  |         pardir = b'..' | ||||||
|  |     else: | ||||||
|  |         curdir = '.' | ||||||
|  |         sep = '/' | ||||||
|  |         pardir = '..' | ||||||
|  |  | ||||||
|  |     if start is None: | ||||||
|  |         start = curdir | ||||||
|  |  | ||||||
|  |     start_list = [x for x in abspath(start).split(sep) if x] | ||||||
|  |     path_list = [x for x in abspath(path).split(sep) if x] | ||||||
|  |  | ||||||
|  |     # Work out how much of the filepath is shared by start and path. | ||||||
|  |     i = len(commonprefix([start_list, path_list])) | ||||||
|  |  | ||||||
|  |     rel_list = [pardir] * (len(start_list)-i) + path_list[i:] | ||||||
|  |     if not rel_list: | ||||||
|  |         return curdir | ||||||
|  |     return join(*rel_list) | ||||||
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/random.py |  | ||||||
							
								
								
									
										742
									
								
								v1/flask/lib/python3.4/random.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										742
									
								
								v1/flask/lib/python3.4/random.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,742 @@ | |||||||
|  | """Random variable generators. | ||||||
|  |  | ||||||
|  |     integers | ||||||
|  |     -------- | ||||||
|  |            uniform within range | ||||||
|  |  | ||||||
|  |     sequences | ||||||
|  |     --------- | ||||||
|  |            pick random element | ||||||
|  |            pick random sample | ||||||
|  |            generate random permutation | ||||||
|  |  | ||||||
|  |     distributions on the real line: | ||||||
|  |     ------------------------------ | ||||||
|  |            uniform | ||||||
|  |            triangular | ||||||
|  |            normal (Gaussian) | ||||||
|  |            lognormal | ||||||
|  |            negative exponential | ||||||
|  |            gamma | ||||||
|  |            beta | ||||||
|  |            pareto | ||||||
|  |            Weibull | ||||||
|  |  | ||||||
|  |     distributions on the circle (angles 0 to 2pi) | ||||||
|  |     --------------------------------------------- | ||||||
|  |            circular uniform | ||||||
|  |            von Mises | ||||||
|  |  | ||||||
|  | General notes on the underlying Mersenne Twister core generator: | ||||||
|  |  | ||||||
|  | * The period is 2**19937-1. | ||||||
|  | * It is one of the most extensively tested generators in existence. | ||||||
|  | * The random() method is implemented in C, executes in a single Python step, | ||||||
|  |   and is, therefore, threadsafe. | ||||||
|  |  | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from warnings import warn as _warn | ||||||
|  | from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType | ||||||
|  | from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil | ||||||
|  | from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin | ||||||
|  | from os import urandom as _urandom | ||||||
|  | from _collections_abc import Set as _Set, Sequence as _Sequence | ||||||
|  | from hashlib import sha512 as _sha512 | ||||||
|  |  | ||||||
|  | __all__ = ["Random","seed","random","uniform","randint","choice","sample", | ||||||
|  |            "randrange","shuffle","normalvariate","lognormvariate", | ||||||
|  |            "expovariate","vonmisesvariate","gammavariate","triangular", | ||||||
|  |            "gauss","betavariate","paretovariate","weibullvariate", | ||||||
|  |            "getstate","setstate", "getrandbits", | ||||||
|  |            "SystemRandom"] | ||||||
|  |  | ||||||
|  | NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0) | ||||||
|  | TWOPI = 2.0*_pi | ||||||
|  | LOG4 = _log(4.0) | ||||||
|  | SG_MAGICCONST = 1.0 + _log(4.5) | ||||||
|  | BPF = 53        # Number of bits in a float | ||||||
|  | RECIP_BPF = 2**-BPF | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Translated by Guido van Rossum from C source provided by | ||||||
|  | # Adrian Baddeley.  Adapted by Raymond Hettinger for use with | ||||||
|  | # the Mersenne Twister  and os.urandom() core generators. | ||||||
|  |  | ||||||
|  | import _random | ||||||
|  |  | ||||||
|  | class Random(_random.Random): | ||||||
|  |     """Random number generator base class used by bound module functions. | ||||||
|  |  | ||||||
|  |     Used to instantiate instances of Random to get generators that don't | ||||||
|  |     share state. | ||||||
|  |  | ||||||
|  |     Class Random can also be subclassed if you want to use a different basic | ||||||
|  |     generator of your own devising: in that case, override the following | ||||||
|  |     methods:  random(), seed(), getstate(), and setstate(). | ||||||
|  |     Optionally, implement a getrandbits() method so that randrange() | ||||||
|  |     can cover arbitrarily large ranges. | ||||||
|  |  | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     VERSION = 3     # used by getstate/setstate | ||||||
|  |  | ||||||
|  |     def __init__(self, x=None): | ||||||
|  |         """Initialize an instance. | ||||||
|  |  | ||||||
|  |         Optional argument x controls seeding, as for Random.seed(). | ||||||
|  |         """ | ||||||
|  |  | ||||||
|  |         self.seed(x) | ||||||
|  |         self.gauss_next = None | ||||||
|  |  | ||||||
|  |     def seed(self, a=None, version=2): | ||||||
|  |         """Initialize internal state from hashable object. | ||||||
|  |  | ||||||
|  |         None or no argument seeds from current time or from an operating | ||||||
|  |         system specific randomness source if available. | ||||||
|  |  | ||||||
|  |         For version 2 (the default), all of the bits are used if *a* is a str, | ||||||
|  |         bytes, or bytearray.  For version 1, the hash() of *a* is used instead. | ||||||
|  |  | ||||||
|  |         If *a* is an int, all bits are used. | ||||||
|  |  | ||||||
|  |         """ | ||||||
|  |  | ||||||
|  |         if a is None: | ||||||
|  |             try: | ||||||
|  |                 # Seed with enough bytes to span the 19937 bit | ||||||
|  |                 # state space for the Mersenne Twister | ||||||
|  |                 a = int.from_bytes(_urandom(2500), 'big') | ||||||
|  |             except NotImplementedError: | ||||||
|  |                 import time | ||||||
|  |                 a = int(time.time() * 256) # use fractional seconds | ||||||
|  |  | ||||||
|  |         if version == 2: | ||||||
|  |             if isinstance(a, (str, bytes, bytearray)): | ||||||
|  |                 if isinstance(a, str): | ||||||
|  |                     a = a.encode() | ||||||
|  |                 a += _sha512(a).digest() | ||||||
|  |                 a = int.from_bytes(a, 'big') | ||||||
|  |  | ||||||
|  |         super().seed(a) | ||||||
|  |         self.gauss_next = None | ||||||
|  |  | ||||||
|  |     def getstate(self): | ||||||
|  |         """Return internal state; can be passed to setstate() later.""" | ||||||
|  |         return self.VERSION, super().getstate(), self.gauss_next | ||||||
|  |  | ||||||
|  |     def setstate(self, state): | ||||||
|  |         """Restore internal state from object returned by getstate().""" | ||||||
|  |         version = state[0] | ||||||
|  |         if version == 3: | ||||||
|  |             version, internalstate, self.gauss_next = state | ||||||
|  |             super().setstate(internalstate) | ||||||
|  |         elif version == 2: | ||||||
|  |             version, internalstate, self.gauss_next = state | ||||||
|  |             # In version 2, the state was saved as signed ints, which causes | ||||||
|  |             #   inconsistencies between 32/64-bit systems. The state is | ||||||
|  |             #   really unsigned 32-bit ints, so we convert negative ints from | ||||||
|  |             #   version 2 to positive longs for version 3. | ||||||
|  |             try: | ||||||
|  |                 internalstate = tuple(x % (2**32) for x in internalstate) | ||||||
|  |             except ValueError as e: | ||||||
|  |                 raise TypeError from e | ||||||
|  |             super().setstate(internalstate) | ||||||
|  |         else: | ||||||
|  |             raise ValueError("state with version %s passed to " | ||||||
|  |                              "Random.setstate() of version %s" % | ||||||
|  |                              (version, self.VERSION)) | ||||||
|  |  | ||||||
|  | ## ---- Methods below this point do not need to be overridden when | ||||||
|  | ## ---- subclassing for the purpose of using a different core generator. | ||||||
|  |  | ||||||
|  | ## -------------------- pickle support  ------------------- | ||||||
|  |  | ||||||
|  |     # Issue 17489: Since __reduce__ was defined to fix #759889 this is no | ||||||
|  |     # longer called; we leave it here because it has been here since random was | ||||||
|  |     # rewritten back in 2001 and why risk breaking something. | ||||||
    def __getstate__(self): # for pickle
        """Return the picklable state; identical to getstate()."""
        return self.getstate()
|  |  | ||||||
    def __setstate__(self, state):  # for pickle
        """Restore pickled state; identical to setstate(state)."""
        self.setstate(state)
|  |  | ||||||
    def __reduce__(self):
        """Pickle as (class, no ctor args, state); unpickling calls the
        class and then restores the state via __setstate__."""
        return self.__class__, (), self.getstate()
|  |  | ||||||
|  | ## -------------------- integer methods  ------------------- | ||||||
|  |  | ||||||
|  |     def randrange(self, start, stop=None, step=1, _int=int): | ||||||
|  |         """Choose a random item from range(start, stop[, step]). | ||||||
|  |  | ||||||
|  |         This fixes the problem with randint() which includes the | ||||||
|  |         endpoint; in Python this is usually not what you want. | ||||||
|  |  | ||||||
|  |         """ | ||||||
|  |  | ||||||
|  |         # This code is a bit messy to make it fast for the | ||||||
|  |         # common case while still doing adequate error checking. | ||||||
|  |         istart = _int(start) | ||||||
|  |         if istart != start: | ||||||
|  |             raise ValueError("non-integer arg 1 for randrange()") | ||||||
|  |         if stop is None: | ||||||
|  |             if istart > 0: | ||||||
|  |                 return self._randbelow(istart) | ||||||
|  |             raise ValueError("empty range for randrange()") | ||||||
|  |  | ||||||
|  |         # stop argument supplied. | ||||||
|  |         istop = _int(stop) | ||||||
|  |         if istop != stop: | ||||||
|  |             raise ValueError("non-integer stop for randrange()") | ||||||
|  |         width = istop - istart | ||||||
|  |         if step == 1 and width > 0: | ||||||
|  |             return istart + self._randbelow(width) | ||||||
|  |         if step == 1: | ||||||
|  |             raise ValueError("empty range for randrange() (%d,%d, %d)" % (istart, istop, width)) | ||||||
|  |  | ||||||
|  |         # Non-unit step argument supplied. | ||||||
|  |         istep = _int(step) | ||||||
|  |         if istep != step: | ||||||
|  |             raise ValueError("non-integer step for randrange()") | ||||||
|  |         if istep > 0: | ||||||
|  |             n = (width + istep - 1) // istep | ||||||
|  |         elif istep < 0: | ||||||
|  |             n = (width + istep + 1) // istep | ||||||
|  |         else: | ||||||
|  |             raise ValueError("zero step for randrange()") | ||||||
|  |  | ||||||
|  |         if n <= 0: | ||||||
|  |             raise ValueError("empty range for randrange()") | ||||||
|  |  | ||||||
|  |         return istart + istep*self._randbelow(n) | ||||||
|  |  | ||||||
|  |     def randint(self, a, b): | ||||||
|  |         """Return random integer in range [a, b], including both end points. | ||||||
|  |         """ | ||||||
|  |  | ||||||
|  |         return self.randrange(a, b+1) | ||||||
|  |  | ||||||
    def _randbelow(self, n, int=int, maxsize=1<<BPF, type=type,
                   Method=_MethodType, BuiltinMethod=_BuiltinMethodType):
        "Return a random int in the range [0,n).  Raises ValueError if n==0."

        random = self.random
        getrandbits = self.getrandbits
        # Only call self.getrandbits if the original random() builtin method
        # has not been overridden or if a new getrandbits() was supplied.
        if type(random) is BuiltinMethod or type(getrandbits) is Method:
            # Rejection sampling over k-bit integers keeps the result
            # exactly uniform on [0, n).
            k = n.bit_length()  # don't use (n-1) here because n can be 1
            r = getrandbits(k)          # 0 <= r < 2**k
            while r >= n:
                r = getrandbits(k)
            return r
        # There's an overriden random() method but no new getrandbits() method,
        # so we can only use random() from here.
        if n >= maxsize:
            # random() only yields BPF bits; larger ranges lose uniformity.
            _warn("Underlying random() generator does not supply \n"
                "enough bits to choose from a population range this large.\n"
                "To remove the range limitation, add a getrandbits() method.")
            return int(random() * n)
        # Reject the uppermost sliver of [0, 1) so that the scaled value
        # maps onto a whole number of length-n buckets.
        rem = maxsize % n
        limit = (maxsize - rem) / maxsize   # int(limit * maxsize) % n == 0
        r = random()
        while r >= limit:
            r = random()
        return int(r*maxsize) % n
|  |  | ||||||
|  | ## -------------------- sequence methods  ------------------- | ||||||
|  |  | ||||||
|  |     def choice(self, seq): | ||||||
|  |         """Choose a random element from a non-empty sequence.""" | ||||||
|  |         try: | ||||||
|  |             i = self._randbelow(len(seq)) | ||||||
|  |         except ValueError: | ||||||
|  |             raise IndexError('Cannot choose from an empty sequence') | ||||||
|  |         return seq[i] | ||||||
|  |  | ||||||
|  |     def shuffle(self, x, random=None): | ||||||
|  |         """Shuffle list x in place, and return None. | ||||||
|  |  | ||||||
|  |         Optional argument random is a 0-argument function returning a | ||||||
|  |         random float in [0.0, 1.0); if it is the default None, the | ||||||
|  |         standard random.random will be used. | ||||||
|  |  | ||||||
|  |         """ | ||||||
|  |  | ||||||
|  |         if random is None: | ||||||
|  |             randbelow = self._randbelow | ||||||
|  |             for i in reversed(range(1, len(x))): | ||||||
|  |                 # pick an element in x[:i+1] with which to exchange x[i] | ||||||
|  |                 j = randbelow(i+1) | ||||||
|  |                 x[i], x[j] = x[j], x[i] | ||||||
|  |         else: | ||||||
|  |             _int = int | ||||||
|  |             for i in reversed(range(1, len(x))): | ||||||
|  |                 # pick an element in x[:i+1] with which to exchange x[i] | ||||||
|  |                 j = _int(random() * (i+1)) | ||||||
|  |                 x[i], x[j] = x[j], x[i] | ||||||
|  |  | ||||||
    def sample(self, population, k):
        """Chooses k unique random elements from a population sequence or set.

        Returns a new list containing elements from the population while
        leaving the original population unchanged.  The resulting list is
        in selection order so that all sub-slices will also be valid random
        samples.  This allows raffle winners (the sample) to be partitioned
        into grand prize and second place winners (the subslices).

        Members of the population need not be hashable or unique.  If the
        population contains repeats, then each occurrence is a possible
        selection in the sample.

        To choose a sample in a range of integers, use range as an argument.
        This is especially fast and space efficient for sampling from a
        large population:   sample(range(10000000), 60)
        """

        # Sampling without replacement entails tracking either potential
        # selections (the pool) in a list or previous selections in a set.

        # When the number of selections is small compared to the
        # population, then tracking selections is efficient, requiring
        # only a small set and an occasional reselection.  For
        # a larger number of selections, the pool tracking method is
        # preferred since the list takes less space than the
        # set and it doesn't suffer from frequent reselections.

        if isinstance(population, _Set):
            population = tuple(population)
        if not isinstance(population, _Sequence):
            raise TypeError("Population must be a sequence or set.  For dicts, use list(d).")
        randbelow = self._randbelow
        n = len(population)
        if not 0 <= k <= n:
            raise ValueError("Sample larger than population")
        result = [None] * k
        setsize = 21        # size of a small set minus size of an empty list
        if k > 5:
            setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
        if n <= setsize:
            # An n-length list is smaller than a k-length set:
            # pick-and-move strategy, no reselection ever needed.
            pool = list(population)
            for i in range(k):         # invariant:  non-selected at [0,n-i)
                j = randbelow(n-i)
                result[i] = pool[j]
                pool[j] = pool[n-i-1]   # move non-selected item into vacancy
        else:
            # Track previous selections in a set and redraw on collision.
            selected = set()
            selected_add = selected.add
            for i in range(k):
                j = randbelow(n)
                while j in selected:
                    j = randbelow(n)
                selected_add(j)
                result[i] = population[j]
        return result
|  |  | ||||||
|  | ## -------------------- real-valued distributions  ------------------- | ||||||
|  |  | ||||||
|  | ## -------------------- uniform distribution ------------------- | ||||||
|  |  | ||||||
|  |     def uniform(self, a, b): | ||||||
|  |         "Get a random number in the range [a, b) or [a, b] depending on rounding." | ||||||
|  |         return a + (b-a) * self.random() | ||||||
|  |  | ||||||
|  | ## -------------------- triangular -------------------- | ||||||
|  |  | ||||||
|  |     def triangular(self, low=0.0, high=1.0, mode=None): | ||||||
|  |         """Triangular distribution. | ||||||
|  |  | ||||||
|  |         Continuous distribution bounded by given lower and upper limits, | ||||||
|  |         and having a given mode value in-between. | ||||||
|  |  | ||||||
|  |         http://en.wikipedia.org/wiki/Triangular_distribution | ||||||
|  |  | ||||||
|  |         """ | ||||||
|  |         u = self.random() | ||||||
|  |         try: | ||||||
|  |             c = 0.5 if mode is None else (mode - low) / (high - low) | ||||||
|  |         except ZeroDivisionError: | ||||||
|  |             return low | ||||||
|  |         if u > c: | ||||||
|  |             u = 1.0 - u | ||||||
|  |             c = 1.0 - c | ||||||
|  |             low, high = high, low | ||||||
|  |         return low + (high - low) * (u * c) ** 0.5 | ||||||
|  |  | ||||||
|  | ## -------------------- normal distribution -------------------- | ||||||
|  |  | ||||||
|  |     def normalvariate(self, mu, sigma): | ||||||
|  |         """Normal distribution. | ||||||
|  |  | ||||||
|  |         mu is the mean, and sigma is the standard deviation. | ||||||
|  |  | ||||||
|  |         """ | ||||||
|  |         # mu = mean, sigma = standard deviation | ||||||
|  |  | ||||||
|  |         # Uses Kinderman and Monahan method. Reference: Kinderman, | ||||||
|  |         # A.J. and Monahan, J.F., "Computer generation of random | ||||||
|  |         # variables using the ratio of uniform deviates", ACM Trans | ||||||
|  |         # Math Software, 3, (1977), pp257-260. | ||||||
|  |  | ||||||
|  |         random = self.random | ||||||
|  |         while 1: | ||||||
|  |             u1 = random() | ||||||
|  |             u2 = 1.0 - random() | ||||||
|  |             z = NV_MAGICCONST*(u1-0.5)/u2 | ||||||
|  |             zz = z*z/4.0 | ||||||
|  |             if zz <= -_log(u2): | ||||||
|  |                 break | ||||||
|  |         return mu + z*sigma | ||||||
|  |  | ||||||
|  | ## -------------------- lognormal distribution -------------------- | ||||||
|  |  | ||||||
|  |     def lognormvariate(self, mu, sigma): | ||||||
|  |         """Log normal distribution. | ||||||
|  |  | ||||||
|  |         If you take the natural logarithm of this distribution, you'll get a | ||||||
|  |         normal distribution with mean mu and standard deviation sigma. | ||||||
|  |         mu can have any value, and sigma must be greater than zero. | ||||||
|  |  | ||||||
|  |         """ | ||||||
|  |         return _exp(self.normalvariate(mu, sigma)) | ||||||
|  |  | ||||||
|  | ## -------------------- exponential distribution -------------------- | ||||||
|  |  | ||||||
|  |     def expovariate(self, lambd): | ||||||
|  |         """Exponential distribution. | ||||||
|  |  | ||||||
|  |         lambd is 1.0 divided by the desired mean.  It should be | ||||||
|  |         nonzero.  (The parameter would be called "lambda", but that is | ||||||
|  |         a reserved word in Python.)  Returned values range from 0 to | ||||||
|  |         positive infinity if lambd is positive, and from negative | ||||||
|  |         infinity to 0 if lambd is negative. | ||||||
|  |  | ||||||
|  |         """ | ||||||
|  |         # lambd: rate lambd = 1/mean | ||||||
|  |         # ('lambda' is a Python reserved word) | ||||||
|  |  | ||||||
|  |         # we use 1-random() instead of random() to preclude the | ||||||
|  |         # possibility of taking the log of zero. | ||||||
|  |         return -_log(1.0 - self.random())/lambd | ||||||
|  |  | ||||||
|  | ## -------------------- von Mises distribution -------------------- | ||||||
|  |  | ||||||
    def vonmisesvariate(self, mu, kappa):
        """Circular data distribution.

        mu is the mean angle, expressed in radians between 0 and 2*pi, and
        kappa is the concentration parameter, which must be greater than or
        equal to zero.  If kappa is equal to zero, this distribution reduces
        to a uniform random angle over the range 0 to 2*pi.

        """
        # mu:    mean angle (in radians between 0 and 2*pi)
        # kappa: concentration parameter kappa (>= 0)
        # if kappa = 0 generate uniform random angle

        # Based upon an algorithm published in: Fisher, N.I.,
        # "Statistical Analysis of Circular Data", Cambridge
        # University Press, 1993.

        # Thanks to Magnus Kessler for a correction to the
        # implementation of step 4.

        random = self.random
        # Tiny kappa degenerates to a uniform angle on [0, 2*pi).
        if kappa <= 1e-6:
            return TWOPI * random()

        s = 0.5 / kappa
        r = s + _sqrt(1.0 + s * s)

        # Accept/reject loop producing the cosine-like deviate z.
        while 1:
            u1 = random()
            z = _cos(_pi * u1)

            d = z / (r + z)
            u2 = random()
            if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d):
                break

        q = 1.0 / r
        f = (q + z) / (1.0 + q * z)
        # u3 decides the sign of the angular offset from mu.
        u3 = random()
        if u3 > 0.5:
            theta = (mu + _acos(f)) % TWOPI
        else:
            theta = (mu - _acos(f)) % TWOPI

        return theta
|  |  | ||||||
|  | ## -------------------- gamma distribution -------------------- | ||||||
|  |  | ||||||
    def gammavariate(self, alpha, beta):
        """Gamma distribution.  Not the gamma function!

        Conditions on the parameters are alpha > 0 and beta > 0.

        The probability distribution function is:

                    x ** (alpha - 1) * math.exp(-x / beta)
          pdf(x) =  --------------------------------------
                      math.gamma(alpha) * beta ** alpha

        """

        # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2

        # Warning: a few older sources define the gamma distribution in terms
        # of alpha > -1.0
        if alpha <= 0.0 or beta <= 0.0:
            raise ValueError('gammavariate: alpha and beta must be > 0.0')

        random = self.random
        if alpha > 1.0:

            # Uses R.C.H. Cheng, "The generation of Gamma
            # variables with non-integral shape parameters",
            # Applied Statistics, (1977), 26, No. 1, p71-74

            ainv = _sqrt(2.0 * alpha - 1.0)
            bbb = alpha - LOG4
            ccc = alpha + ainv

            # Rejection loop: draw a candidate x, accept when the cheap
            # squeeze test or the full log test passes.
            while 1:
                u1 = random()
                if not 1e-7 < u1 < .9999999:
                    continue
                u2 = 1.0 - random()
                v = _log(u1/(1.0-u1))/ainv
                x = alpha*_exp(v)
                z = u1*u1*u2
                r = bbb+ccc*v-x
                if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
                    return x * beta

        elif alpha == 1.0:
            # expovariate(1): the gamma(1) distribution is exponential.
            u = random()
            while u <= 1e-7:
                u = random()
            return -_log(u) * beta

        else:   # alpha is between 0 and 1 (exclusive)

            # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle

            while 1:
                u = random()
                b = (_e + alpha)/_e
                p = b*u
                if p <= 1.0:
                    x = p ** (1.0/alpha)
                else:
                    x = -_log((b-p)/alpha)
                u1 = random()
                # Branch-specific acceptance tests for the two candidates.
                if p > 1.0:
                    if u1 <= x ** (alpha - 1.0):
                        break
                elif u1 <= _exp(-x):
                    break
            return x * beta
|  |  | ||||||
|  | ## -------------------- Gauss (faster alternative) -------------------- | ||||||
|  |  | ||||||
    def gauss(self, mu, sigma):
        """Gaussian distribution.

        mu is the mean, and sigma is the standard deviation.  This is
        slightly faster than the normalvariate() function.

        Not thread-safe without a lock around calls.

        """

        # When x and y are two variables from [0, 1), uniformly
        # distributed, then
        #
        #    cos(2*pi*x)*sqrt(-2*log(1-y))
        #    sin(2*pi*x)*sqrt(-2*log(1-y))
        #
        # are two *independent* variables with normal distribution
        # (mu = 0, sigma = 1).
        # (Lambert Meertens)
        # (corrected version; bug discovered by Mike Miller, fixed by LM)

        # Multithreading note: When two threads call this function
        # simultaneously, it is possible that they will receive the
        # same return value.  The window is very small though.  To
        # avoid this, you have to use a lock around all calls.  (I
        # didn't want to slow this down in the serial case by using a
        # lock here.)

        random = self.random
        # Use the cached second deviate from the previous call, if any;
        # otherwise generate a fresh pair and cache the second one.
        z = self.gauss_next
        self.gauss_next = None
        if z is None:
            x2pi = random() * TWOPI
            g2rad = _sqrt(-2.0 * _log(1.0 - random()))
            z = _cos(x2pi) * g2rad
            self.gauss_next = _sin(x2pi) * g2rad

        return mu + z*sigma
|  |  | ||||||
|  | ## -------------------- beta -------------------- | ||||||
|  | ## See | ||||||
|  | ## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html | ||||||
|  | ## for Ivan Frohne's insightful analysis of why the original implementation: | ||||||
|  | ## | ||||||
|  | ##    def betavariate(self, alpha, beta): | ||||||
|  | ##        # Discrete Event Simulation in C, pp 87-88. | ||||||
|  | ## | ||||||
|  | ##        y = self.expovariate(alpha) | ||||||
|  | ##        z = self.expovariate(1.0/beta) | ||||||
|  | ##        return z/(y+z) | ||||||
|  | ## | ||||||
|  | ## was dead wrong, and how it probably got that way. | ||||||
|  |  | ||||||
|  |     def betavariate(self, alpha, beta): | ||||||
|  |         """Beta distribution. | ||||||
|  |  | ||||||
|  |         Conditions on the parameters are alpha > 0 and beta > 0. | ||||||
|  |         Returned values range between 0 and 1. | ||||||
|  |  | ||||||
|  |         """ | ||||||
|  |  | ||||||
|  |         # This version due to Janne Sinkkonen, and matches all the std | ||||||
|  |         # texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution"). | ||||||
|  |         y = self.gammavariate(alpha, 1.) | ||||||
|  |         if y == 0: | ||||||
|  |             return 0.0 | ||||||
|  |         else: | ||||||
|  |             return y / (y + self.gammavariate(beta, 1.)) | ||||||
|  |  | ||||||
|  | ## -------------------- Pareto -------------------- | ||||||
|  |  | ||||||
|  |     def paretovariate(self, alpha): | ||||||
|  |         """Pareto distribution.  alpha is the shape parameter.""" | ||||||
|  |         # Jain, pg. 495 | ||||||
|  |  | ||||||
|  |         u = 1.0 - self.random() | ||||||
|  |         return 1.0 / u ** (1.0/alpha) | ||||||
|  |  | ||||||
|  | ## -------------------- Weibull -------------------- | ||||||
|  |  | ||||||
|  |     def weibullvariate(self, alpha, beta): | ||||||
|  |         """Weibull distribution. | ||||||
|  |  | ||||||
|  |         alpha is the scale parameter and beta is the shape parameter. | ||||||
|  |  | ||||||
|  |         """ | ||||||
|  |         # Jain, pg. 499; bug fix courtesy Bill Arms | ||||||
|  |  | ||||||
|  |         u = 1.0 - self.random() | ||||||
|  |         return alpha * (-_log(u)) ** (1.0/beta) | ||||||
|  |  | ||||||
|  | ## --------------- Operating System Random Source  ------------------ | ||||||
|  |  | ||||||
class SystemRandom(Random):
    """Alternate random number generator using sources provided
    by the operating system (such as /dev/urandom on Unix or
    CryptGenRandom on Windows).

     Not available on all systems (see os.urandom() for details).
    """

    def random(self):
        """Get the next random number in the range [0.0, 1.0)."""
        # Pull 56 bits of OS entropy and keep the top 53 (a float mantissa).
        raw = int.from_bytes(_urandom(7), 'big')
        return (raw >> 3) * RECIP_BPF

    def getrandbits(self, k):
        """getrandbits(k) -> x.  Generates an int with k random bits."""
        if k <= 0:
            raise ValueError('number of bits must be greater than zero')
        if k != int(k):
            raise TypeError('number of bits should be an integer')
        nbytes = (k + 7) // 8                   # round bits up to whole bytes
        value = int.from_bytes(_urandom(nbytes), 'big')
        return value >> (nbytes * 8 - k)        # discard the surplus bits

    def seed(self, *args, **kwds):
        "Stub method.  Not used for a system random number generator."
        return None

    def _notimplemented(self, *args, **kwds):
        "Method should not be called for a system random number generator."
        raise NotImplementedError('System entropy source does not have state.')

    # The OS entropy source has no observable state to save or restore.
    getstate = setstate = _notimplemented
|  |  | ||||||
|  | ## -------------------- test program -------------------- | ||||||
|  |  | ||||||
|  | def _test_generator(n, func, args): | ||||||
|  |     import time | ||||||
|  |     print(n, 'times', func.__name__) | ||||||
|  |     total = 0.0 | ||||||
|  |     sqsum = 0.0 | ||||||
|  |     smallest = 1e10 | ||||||
|  |     largest = -1e10 | ||||||
|  |     t0 = time.time() | ||||||
|  |     for i in range(n): | ||||||
|  |         x = func(*args) | ||||||
|  |         total += x | ||||||
|  |         sqsum = sqsum + x*x | ||||||
|  |         smallest = min(x, smallest) | ||||||
|  |         largest = max(x, largest) | ||||||
|  |     t1 = time.time() | ||||||
|  |     print(round(t1-t0, 3), 'sec,', end=' ') | ||||||
|  |     avg = total/n | ||||||
|  |     stddev = _sqrt(sqsum/n - avg*avg) | ||||||
|  |     print('avg %g, stddev %g, min %g, max %g' % \ | ||||||
|  |               (avg, stddev, smallest, largest)) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def _test(N=2000): | ||||||
|  |     _test_generator(N, random, ()) | ||||||
|  |     _test_generator(N, normalvariate, (0.0, 1.0)) | ||||||
|  |     _test_generator(N, lognormvariate, (0.0, 1.0)) | ||||||
|  |     _test_generator(N, vonmisesvariate, (0.0, 1.0)) | ||||||
|  |     _test_generator(N, gammavariate, (0.01, 1.0)) | ||||||
|  |     _test_generator(N, gammavariate, (0.1, 1.0)) | ||||||
|  |     _test_generator(N, gammavariate, (0.1, 2.0)) | ||||||
|  |     _test_generator(N, gammavariate, (0.5, 1.0)) | ||||||
|  |     _test_generator(N, gammavariate, (0.9, 1.0)) | ||||||
|  |     _test_generator(N, gammavariate, (1.0, 1.0)) | ||||||
|  |     _test_generator(N, gammavariate, (2.0, 1.0)) | ||||||
|  |     _test_generator(N, gammavariate, (20.0, 1.0)) | ||||||
|  |     _test_generator(N, gammavariate, (200.0, 1.0)) | ||||||
|  |     _test_generator(N, gauss, (0.0, 1.0)) | ||||||
|  |     _test_generator(N, betavariate, (3.0, 3.0)) | ||||||
|  |     _test_generator(N, triangular, (0.0, 1.0, 1.0/3.0)) | ||||||
|  |  | ||||||
|  | # Create one instance, seeded from current time, and export its methods | ||||||
|  | # as module-level functions.  The functions share state across all uses | ||||||
|  | #(both in the user's code and in the Python libraries), but that's fine | ||||||
|  | # for most programs and is easier for the casual user than making them | ||||||
|  | # instantiate their own Random() instance. | ||||||
|  |  | ||||||
# A single shared hidden instance backs the module-level API: each public
# function below is a bound method of this instance.
_inst = Random()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
triangular = _inst.triangular
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
sample = _inst.sample
shuffle = _inst.shuffle
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
getrandbits = _inst.getrandbits

# Run the self-test when executed as a script.
if __name__ == '__main__':
    _test()
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/re.py |  | ||||||
							
								
								
									
										380
									
								
								v1/flask/lib/python3.4/re.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										380
									
								
								v1/flask/lib/python3.4/re.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,380 @@ | |||||||
|  | # | ||||||
|  | # Secret Labs' Regular Expression Engine | ||||||
|  | # | ||||||
|  | # re-compatible interface for the sre matching engine | ||||||
|  | # | ||||||
|  | # Copyright (c) 1998-2001 by Secret Labs AB.  All rights reserved. | ||||||
|  | # | ||||||
|  | # This version of the SRE library can be redistributed under CNRI's | ||||||
|  | # Python 1.6 license.  For any other use, please contact Secret Labs | ||||||
|  | # AB (info@pythonware.com). | ||||||
|  | # | ||||||
|  | # Portions of this engine have been developed in cooperation with | ||||||
|  | # CNRI.  Hewlett-Packard provided funding for 1.6 integration and | ||||||
|  | # other compatibility work. | ||||||
|  | # | ||||||
|  |  | ||||||
|  | r"""Support for regular expressions (RE). | ||||||
|  |  | ||||||
|  | This module provides regular expression matching operations similar to | ||||||
|  | those found in Perl.  It supports both 8-bit and Unicode strings; both | ||||||
|  | the pattern and the strings being processed can contain null bytes and | ||||||
|  | characters outside the US ASCII range. | ||||||
|  |  | ||||||
|  | Regular expressions can contain both special and ordinary characters. | ||||||
|  | Most ordinary characters, like "A", "a", or "0", are the simplest | ||||||
|  | regular expressions; they simply match themselves.  You can | ||||||
|  | concatenate ordinary characters, so last matches the string 'last'. | ||||||
|  |  | ||||||
|  | The special characters are: | ||||||
|  |     "."      Matches any character except a newline. | ||||||
|  |     "^"      Matches the start of the string. | ||||||
|  |     "$"      Matches the end of the string or just before the newline at | ||||||
|  |              the end of the string. | ||||||
|  |     "*"      Matches 0 or more (greedy) repetitions of the preceding RE. | ||||||
|  |              Greedy means that it will match as many repetitions as possible. | ||||||
|  |     "+"      Matches 1 or more (greedy) repetitions of the preceding RE. | ||||||
|  |     "?"      Matches 0 or 1 (greedy) of the preceding RE. | ||||||
|  |     *?,+?,?? Non-greedy versions of the previous three special characters. | ||||||
|  |     {m,n}    Matches from m to n repetitions of the preceding RE. | ||||||
|  |     {m,n}?   Non-greedy version of the above. | ||||||
|  |     "\\"     Either escapes special characters or signals a special sequence. | ||||||
|  |     []       Indicates a set of characters. | ||||||
|  |              A "^" as the first character indicates a complementing set. | ||||||
|  |     "|"      A|B, creates an RE that will match either A or B. | ||||||
|  |     (...)    Matches the RE inside the parentheses. | ||||||
|  |              The contents can be retrieved or matched later in the string. | ||||||
|  |     (?aiLmsux) Set the A, I, L, M, S, U, or X flag for the RE (see below). | ||||||
|  |     (?:...)  Non-grouping version of regular parentheses. | ||||||
|  |     (?P<name>...) The substring matched by the group is accessible by name. | ||||||
|  |     (?P=name)     Matches the text matched earlier by the group named name. | ||||||
|  |     (?#...)  A comment; ignored. | ||||||
|  |     (?=...)  Matches if ... matches next, but doesn't consume the string. | ||||||
|  |     (?!...)  Matches if ... doesn't match next. | ||||||
|  |     (?<=...) Matches if preceded by ... (must be fixed length). | ||||||
|  |     (?<!...) Matches if not preceded by ... (must be fixed length). | ||||||
|  |     (?(id/name)yes|no) Matches yes pattern if the group with id/name matched, | ||||||
|  |                        the (optional) no pattern otherwise. | ||||||
|  |  | ||||||
|  | The special sequences consist of "\\" and a character from the list | ||||||
|  | below.  If the ordinary character is not on the list, then the | ||||||
|  | resulting RE will match the second character. | ||||||
|  |     \number  Matches the contents of the group of the same number. | ||||||
|  |     \A       Matches only at the start of the string. | ||||||
|  |     \Z       Matches only at the end of the string. | ||||||
|  |     \b       Matches the empty string, but only at the start or end of a word. | ||||||
|  |     \B       Matches the empty string, but not at the start or end of a word. | ||||||
|  |     \d       Matches any decimal digit; equivalent to the set [0-9] in | ||||||
|  |              bytes patterns or string patterns with the ASCII flag. | ||||||
|  |              In string patterns without the ASCII flag, it will match the whole | ||||||
|  |              range of Unicode digits. | ||||||
|  |     \D       Matches any non-digit character; equivalent to [^\d]. | ||||||
|  |     \s       Matches any whitespace character; equivalent to [ \t\n\r\f\v] in | ||||||
|  |              bytes patterns or string patterns with the ASCII flag. | ||||||
|  |              In string patterns without the ASCII flag, it will match the whole | ||||||
|  |              range of Unicode whitespace characters. | ||||||
|  |     \S       Matches any non-whitespace character; equivalent to [^\s]. | ||||||
|  |     \w       Matches any alphanumeric character; equivalent to [a-zA-Z0-9_] | ||||||
|  |              in bytes patterns or string patterns with the ASCII flag. | ||||||
|  |              In string patterns without the ASCII flag, it will match the | ||||||
|  |              range of Unicode alphanumeric characters (letters plus digits | ||||||
|  |              plus underscore). | ||||||
|  |              With LOCALE, it will match the set [0-9_] plus characters defined | ||||||
|  |              as letters for the current locale. | ||||||
|  |     \W       Matches the complement of \w. | ||||||
|  |     \\       Matches a literal backslash. | ||||||
|  |  | ||||||
|  | This module exports the following functions: | ||||||
|  |     match     Match a regular expression pattern to the beginning of a string. | ||||||
|  |     fullmatch Match a regular expression pattern to all of a string. | ||||||
|  |     search    Search a string for the presence of a pattern. | ||||||
|  |     sub       Substitute occurrences of a pattern found in a string. | ||||||
|  |     subn      Same as sub, but also return the number of substitutions made. | ||||||
|  |     split     Split a string by the occurrences of a pattern. | ||||||
|  |     findall   Find all occurrences of a pattern in a string. | ||||||
|  |     finditer  Return an iterator yielding a match object for each match. | ||||||
|  |     compile   Compile a pattern into a RegexObject. | ||||||
|  |     purge     Clear the regular expression cache. | ||||||
|  |     escape    Backslash all non-alphanumerics in a string. | ||||||
|  |  | ||||||
|  | Some of the functions in this module takes flags as optional parameters: | ||||||
|  |     A  ASCII       For string patterns, make \w, \W, \b, \B, \d, \D | ||||||
|  |                    match the corresponding ASCII character categories | ||||||
|  |                    (rather than the whole Unicode categories, which is the | ||||||
|  |                    default). | ||||||
|  |                    For bytes patterns, this flag is the only available | ||||||
|  |                    behaviour and needn't be specified. | ||||||
|  |     I  IGNORECASE  Perform case-insensitive matching. | ||||||
|  |     L  LOCALE      Make \w, \W, \b, \B, dependent on the current locale. | ||||||
|  |     M  MULTILINE   "^" matches the beginning of lines (after a newline) | ||||||
|  |                    as well as the string. | ||||||
|  |                    "$" matches the end of lines (before a newline) as well | ||||||
|  |                    as the end of the string. | ||||||
|  |     S  DOTALL      "." matches any character at all, including the newline. | ||||||
|  |     X  VERBOSE     Ignore whitespace and comments for nicer looking RE's. | ||||||
|  |     U  UNICODE     For compatibility only. Ignored for string patterns (it | ||||||
|  |                    is the default), and forbidden for bytes patterns. | ||||||
|  |  | ||||||
|  | This module also defines an exception 'error'. | ||||||
|  |  | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | import sys | ||||||
|  | import sre_compile | ||||||
|  | import sre_parse | ||||||
|  | try: | ||||||
|  |     import _locale | ||||||
|  | except ImportError: | ||||||
|  |     _locale = None | ||||||
|  |  | ||||||
# public symbols
__all__ = [ "match", "fullmatch", "search", "sub", "subn", "split", "findall",
    "compile", "purge", "template", "escape", "A", "I", "L", "M", "S", "X",
    "U", "ASCII", "IGNORECASE", "LOCALE", "MULTILINE", "DOTALL", "VERBOSE",
    "UNICODE", "error" ]

__version__ = "2.2.1"

# flags -- each has a one-letter alias and a long name bound to the same
# sre_compile bit value, so they can be OR-ed together freely.
A = ASCII = sre_compile.SRE_FLAG_ASCII # assume ascii "locale"
I = IGNORECASE = sre_compile.SRE_FLAG_IGNORECASE # ignore case
L = LOCALE = sre_compile.SRE_FLAG_LOCALE # assume current 8-bit locale
U = UNICODE = sre_compile.SRE_FLAG_UNICODE # assume unicode "locale"
M = MULTILINE = sre_compile.SRE_FLAG_MULTILINE # make anchors look for newline
S = DOTALL = sre_compile.SRE_FLAG_DOTALL # make dot match newline
X = VERBOSE = sre_compile.SRE_FLAG_VERBOSE # ignore whitespace and comments

# sre extensions (experimental, don't rely on these)
T = TEMPLATE = sre_compile.SRE_FLAG_TEMPLATE # disable backtracking
DEBUG = sre_compile.SRE_FLAG_DEBUG # dump pattern after compilation

# sre exception -- re-exported so callers can catch re.error
error = sre_compile.error
|  |  | ||||||
|  | # -------------------------------------------------------------------- | ||||||
|  | # public interface | ||||||
|  |  | ||||||
def match(pattern, string, flags=0):
    """Apply *pattern* at the start of *string*.

    Returns a match object, or None when the beginning of the string
    does not match.
    """
    compiled = _compile(pattern, flags)
    return compiled.match(string)
|  |  | ||||||
def fullmatch(pattern, string, flags=0):
    """Apply *pattern* to the entirety of *string*.

    Returns a match object only when the whole string matches;
    otherwise None.
    """
    compiled = _compile(pattern, flags)
    return compiled.fullmatch(string)
|  |  | ||||||
def search(pattern, string, flags=0):
    """Scan *string* for the first occurrence of *pattern*.

    Returns a match object for the leftmost match, or None when the
    pattern occurs nowhere in the string.
    """
    compiled = _compile(pattern, flags)
    return compiled.search(string)
|  |  | ||||||
def sub(pattern, repl, string, count=0, flags=0):
    """Replace the leftmost non-overlapping occurrences of *pattern*.

    *repl* may be a string, whose backslash escapes are processed, or a
    callable that receives the match object and returns the replacement
    text.  When *count* is nonzero, at most that many replacements are
    made.
    """
    compiled = _compile(pattern, flags)
    return compiled.sub(repl, string, count)
|  |  | ||||||
def subn(pattern, repl, string, count=0, flags=0):
    """Return a (new_string, number_of_substitutions) pair.

    Behaves exactly like sub(), but also reports how many replacements
    were performed.  *repl* may be a string (backslash escapes are
    processed) or a callable receiving the match object.
    """
    compiled = _compile(pattern, flags)
    return compiled.subn(repl, string, count)
|  |  | ||||||
def split(pattern, string, maxsplit=0, flags=0):
    """Split *string* on occurrences of *pattern*.

    Text captured by groups in the pattern is interleaved into the
    result.  A nonzero *maxsplit* bounds the number of splits and the
    remainder of the string becomes the final list element.
    """
    compiled = _compile(pattern, flags)
    return compiled.split(string, maxsplit)
|  |  | ||||||
def findall(pattern, string, flags=0):
    """Return every non-overlapping match of *pattern* in *string*.

    With no groups the list holds the matched text; with one capturing
    group it holds that group's text; with several groups it holds
    tuples.  Empty matches are included.
    """
    compiled = _compile(pattern, flags)
    return compiled.findall(string)
|  |  | ||||||
# Guard kept from the Python 2.2 era: hexversion is always >= 0x02020000 on
# any modern interpreter, so finditer is in practice always defined and
# exported.
if sys.hexversion >= 0x02020000:
    __all__.append("finditer")
    def finditer(pattern, string, flags=0):
        """Return an iterator over all non-overlapping matches in the
        string.  For each match, the iterator returns a match object.

        Empty matches are included in the result."""
        return _compile(pattern, flags).finditer(string)
|  |  | ||||||
def compile(pattern, flags=0):
    """Compile a regular expression *pattern* into a reusable pattern object."""
    compiled = _compile(pattern, flags)
    return compiled
|  |  | ||||||
def purge():
    """Empty both internal caches (compiled patterns and replacement templates)."""
    for cache in (_cache, _cache_repl):
        cache.clear()
|  |  | ||||||
def template(pattern, flags=0):
    """Compile *pattern* with the (experimental) TEMPLATE flag enabled."""
    return _compile(pattern, T | flags)
|  |  | ||||||
# Characters escape() leaves untouched: ASCII letters, digits and underscore.
# (The literal ends with a duplicate "0"; harmless inside a frozenset.)
_alphanum_str = frozenset(
    "_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890")
# Same set for bytes patterns; iterating a bytes literal yields ints.
_alphanum_bytes = frozenset(
    b"_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890")
|  |  | ||||||
def escape(pattern):
    """Escape all characters in *pattern* except ASCII letters, digits and '_'.

    Accepts either str or bytes and returns the same type.  NUL is
    rendered as the octal escape \\000 because "\\" + NUL is not a valid
    pattern fragment.
    """
    safe = "_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890"
    if isinstance(pattern, str):
        chars = []
        for ch in pattern:
            if ch in safe:
                chars.append(ch)
            elif ch == "\000":
                chars.append("\\000")
            else:
                chars.append("\\" + ch)
        return "".join(chars)
    # bytes-like path: iteration yields integers
    safe_bytes = frozenset(safe.encode("ascii"))
    out = bytearray()
    for byte in pattern:
        if byte in safe_bytes:
            out.append(byte)
        elif byte == 0:
            out.extend(b"\\000")
        else:
            out.append(ord("\\"))
            out.append(byte)
    return bytes(out)
|  |  | ||||||
|  | # -------------------------------------------------------------------- | ||||||
|  | # internals | ||||||
|  |  | ||||||
# Cache of compiled patterns: (type, pattern, flags) -> (pattern object, locale).
_cache = {}
# Cache of parsed replacement templates: (repl, pattern) -> parsed template.
_cache_repl = {}

# Type of a compiled pattern object (sre_compile does not expose it directly).
_pattern_type = type(sre_compile.compile("", 0))

# Both caches are emptied wholesale once they reach this many entries.
_MAXCACHE = 512
def _compile(pattern, flags):
    # internal: compile pattern
    # DEBUG patterns bypass the cache so the debug dump prints every time.
    bypass_cache = flags & DEBUG
    if not bypass_cache:
        try:
            p, loc = _cache[type(pattern), pattern, flags]
            # A cached LOCALE-dependent pattern (loc is not None) is only
            # valid while the locale it was compiled under is still active.
            if loc is None or loc == _locale.setlocale(_locale.LC_CTYPE):
                return p
        except KeyError:
            pass
    if isinstance(pattern, _pattern_type):
        # Already compiled: pass through, but refuse extra flags.
        if flags:
            raise ValueError(
                "Cannot process flags argument with a compiled pattern")
        return pattern
    if not sre_compile.isstring(pattern):
        raise TypeError("first argument must be string or compiled pattern")
    p = sre_compile.compile(pattern, flags)
    if not bypass_cache:
        if len(_cache) >= _MAXCACHE:
            # Simple eviction policy: drop everything rather than track LRU.
            _cache.clear()
        if p.flags & LOCALE:
            if not _locale:
                # No _locale module: cannot validate later, so don't cache.
                return p
            loc = _locale.setlocale(_locale.LC_CTYPE)
        else:
            loc = None
        _cache[type(pattern), pattern, flags] = p, loc
    return p
|  |  | ||||||
def _compile_repl(repl, pattern):
    # internal: compile (and memoise) a replacement template
    key = (repl, pattern)
    cached = _cache_repl.get(key)
    if cached is not None:
        return cached
    parsed = sre_parse.parse_template(repl, pattern)
    if len(_cache_repl) >= _MAXCACHE:
        # wholesale eviction, mirroring the pattern cache policy
        _cache_repl.clear()
    _cache_repl[key] = parsed
    return parsed
|  |  | ||||||
def _expand(pattern, match, template):
    # internal: implementation hook for match.expand()
    parsed = sre_parse.parse_template(template, pattern)
    return sre_parse.expand_template(parsed, match)
|  |  | ||||||
def _subx(pattern, template):
    # internal: helper for pattern.sub/subn -- returns either a literal
    # replacement string or a callable that expands the template per match
    groups, literals = _compile_repl(template, pattern)
    if not groups and len(literals) == 1:
        # no group references: the replacement is a plain literal
        return literals[0]
    def filter(match, template=(groups, literals)):
        return sre_parse.expand_template(template, match)
    return filter
|  |  | ||||||
# register myself for pickling

import copyreg

def _pickle(p):
    # Reduce a compiled pattern to (_compile, (source, flags)) so that
    # unpickling recompiles it rather than serialising internal state.
    return _compile, (p.pattern, p.flags)

copyreg.pickle(_pattern_type, _pickle, _compile)
|  |  | ||||||
|  | # -------------------------------------------------------------------- | ||||||
|  | # experimental stuff (see python-dev discussions for details) | ||||||
|  |  | ||||||
class Scanner:
    """Experimental lexical scanner.

    *lexicon* is a sequence of (regex phrase, action) pairs; the phrases
    are combined into a single alternation, and scan() runs the action
    of whichever phrase matched at each step.
    """
    def __init__(self, lexicon, flags=0):
        from sre_constants import BRANCH, SUBPATTERN
        self.lexicon = lexicon
        # combine phrases into a compound pattern
        p = []
        s = sre_parse.Pattern()
        s.flags = flags
        for phrase, action in lexicon:
            # Each phrase becomes its own numbered group so that
            # m.lastindex identifies which lexicon entry matched.
            p.append(sre_parse.SubPattern(s, [
                (SUBPATTERN, (len(p)+1, sre_parse.parse(phrase, flags))),
                ]))
        s.groups = len(p)+1
        p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
        self.scanner = sre_compile.compile(p)
    def scan(self, string):
        """Tokenize *string*.

        Returns (results, remainder): the list of non-None action
        results for consecutive matches, and the unmatched tail of the
        string.  Callable actions receive (self, matched_text).
        """
        result = []
        append = result.append
        match = self.scanner.scanner(string).match
        i = 0
        while 1:
            m = match()
            if not m:
                break
            j = m.end()
            if i == j:
                # a zero-width match would loop forever; stop scanning
                break
            action = self.lexicon[m.lastindex-1][1]
            if callable(action):
                # expose the match object to the action via self.match
                self.match = m
                action = action(self, m.group())
            if action is not None:
                append(action)
            i = j
        return result, string[i:]
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/reprlib.py |  | ||||||
							
								
								
									
										157
									
								
								v1/flask/lib/python3.4/reprlib.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										157
									
								
								v1/flask/lib/python3.4/reprlib.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,157 @@ | |||||||
|  | """Redo the builtin repr() (representation) but with limits on most sizes.""" | ||||||
|  |  | ||||||
|  | __all__ = ["Repr", "repr", "recursive_repr"] | ||||||
|  |  | ||||||
|  | import builtins | ||||||
|  | from itertools import islice | ||||||
|  | try: | ||||||
|  |     from _thread import get_ident | ||||||
|  | except ImportError: | ||||||
|  |     from _dummy_thread import get_ident | ||||||
|  |  | ||||||
def recursive_repr(fillvalue='...'):
    """Decorator factory for __repr__ methods.

    The decorated repr returns *fillvalue* instead of recursing when it
    is re-entered for the same object on the same thread.
    """

    def decorating_function(user_function):
        # (object id, thread id) pairs for repr calls currently running
        active = set()

        def wrapper(self):
            marker = (id(self), get_ident())
            if marker in active:
                return fillvalue
            active.add(marker)
            try:
                return user_function(self)
            finally:
                active.discard(marker)

        # Can't use functools.wraps() here because of bootstrap issues
        wrapper.__module__ = getattr(user_function, '__module__')
        wrapper.__doc__ = getattr(user_function, '__doc__')
        wrapper.__name__ = getattr(user_function, '__name__')
        wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
        return wrapper

    return decorating_function
|  |  | ||||||
class Repr:
    """Produce size-limited object representations.

    Each max* attribute bounds how much of the corresponding type is
    shown before output is elided with '...'.
    """

    def __init__(self):
        # maximum nesting depth before a non-empty container collapses to '...'
        self.maxlevel = 6
        # per-type caps on the number of elements shown
        self.maxtuple = 6
        self.maxlist = 6
        self.maxarray = 5
        self.maxdict = 4
        self.maxset = 6
        self.maxfrozenset = 6
        self.maxdeque = 6
        # caps on the length of string / int / fallback representations
        self.maxstring = 30
        self.maxlong = 40
        self.maxother = 30

    def repr(self, x):
        """Return the size-limited repr of x."""
        return self.repr1(x, self.maxlevel)

    def repr1(self, x, level):
        """Dispatch to repr_<typename>(x, level) if defined, else repr_instance."""
        typename = type(x).__name__
        # Multi-word type names (e.g. "method descriptor") become
        # underscore-joined so they form a valid method-name suffix.
        if ' ' in typename:
            parts = typename.split()
            typename = '_'.join(parts)
        if hasattr(self, 'repr_' + typename):
            return getattr(self, 'repr_' + typename)(x, level)
        else:
            return self.repr_instance(x, level)

    def _repr_iterable(self, x, level, left, right, maxiter, trail=''):
        """Render a sized iterable between the *left*/*right* delimiters.

        At most *maxiter* elements are shown; *trail* is inserted before
        the closing delimiter for one-element sequences (the tuple comma).
        """
        n = len(x)
        if level <= 0 and n:
            s = '...'
        else:
            newlevel = level - 1
            repr1 = self.repr1
            pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)]
            if n > maxiter:  pieces.append('...')
            s = ', '.join(pieces)
            if n == 1 and trail:  right = trail + right
        return '%s%s%s' % (left, s, right)

    def repr_tuple(self, x, level):
        """Limited repr of a tuple; keeps the single-element trailing comma."""
        return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',')

    def repr_list(self, x, level):
        """Limited repr of a list."""
        return self._repr_iterable(x, level, '[', ']', self.maxlist)

    def repr_array(self, x, level):
        """Limited repr of an array.array, including its typecode."""
        header = "array('%s', [" % x.typecode
        return self._repr_iterable(x, level, header, '])', self.maxarray)

    def repr_set(self, x, level):
        """Limited repr of a set, sorted when the elements allow it."""
        x = _possibly_sorted(x)
        return self._repr_iterable(x, level, 'set([', '])', self.maxset)

    def repr_frozenset(self, x, level):
        """Limited repr of a frozenset, sorted when the elements allow it."""
        x = _possibly_sorted(x)
        return self._repr_iterable(x, level, 'frozenset([', '])',
                                   self.maxfrozenset)

    def repr_deque(self, x, level):
        """Limited repr of a collections.deque."""
        return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque)

    def repr_dict(self, x, level):
        """Limited repr of a dict: at most maxdict key/value pairs."""
        n = len(x)
        if n == 0: return '{}'
        if level <= 0: return '{...}'
        newlevel = level - 1
        repr1 = self.repr1
        pieces = []
        for key in islice(_possibly_sorted(x), self.maxdict):
            keyrepr = repr1(key, newlevel)
            valrepr = repr1(x[key], newlevel)
            pieces.append('%s: %s' % (keyrepr, valrepr))
        if n > self.maxdict: pieces.append('...')
        s = ', '.join(pieces)
        return '{%s}' % (s,)

    def repr_str(self, x, level):
        """Repr of a str, truncated in the middle to at most maxstring chars."""
        s = builtins.repr(x[:self.maxstring])
        if len(s) > self.maxstring:
            # keep a prefix and a suffix around a literal '...'
            i = max(0, (self.maxstring-3)//2)
            j = max(0, self.maxstring-3-i)
            s = builtins.repr(x[:i] + x[len(x)-j:])
            s = s[:i] + '...' + s[len(s)-j:]
        return s

    def repr_int(self, x, level):
        """Repr of an int, middle-truncated to at most maxlong characters."""
        s = builtins.repr(x) # XXX Hope this isn't too slow...
        if len(s) > self.maxlong:
            i = max(0, (self.maxlong-3)//2)
            j = max(0, self.maxlong-3-i)
            s = s[:i] + '...' + s[len(s)-j:]
        return s

    def repr_instance(self, x, level):
        """Fallback: builtin repr, middle-truncated, robust against bad __repr__."""
        try:
            s = builtins.repr(x)
            # Bugs in x.__repr__() can cause arbitrary
            # exceptions -- then make up something
        except Exception:
            return '<%s instance at %x>' % (x.__class__.__name__, id(x))
        if len(s) > self.maxother:
            i = max(0, (self.maxother-3)//2)
            j = max(0, self.maxother-3-i)
            s = s[:i] + '...' + s[len(s)-j:]
        return s
|  |  | ||||||
|  |  | ||||||
def _possibly_sorted(x):
    # Sorting can fail for unorderable or heterogeneous items, and custom
    # comparison methods may raise anything -- fall back to the original
    # (unsorted) contents in that case.
    try:
        return sorted(x)
    except Exception:
        pass
    return list(x)
|  |  | ||||||
# Shared default instance; the module-level repr() delegates to it so its
# max* attributes can be adjusted globally.
aRepr = Repr()
repr = aRepr.repr
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/rlcompleter.py |  | ||||||
							
								
								
									
										177
									
								
								v1/flask/lib/python3.4/rlcompleter.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										177
									
								
								v1/flask/lib/python3.4/rlcompleter.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,177 @@ | |||||||
|  | """Word completion for GNU readline. | ||||||
|  |  | ||||||
|  | The completer completes keywords, built-ins and globals in a selectable | ||||||
|  | namespace (which defaults to __main__); when completing NAME.NAME..., it | ||||||
|  | evaluates (!) the expression up to the last dot and completes its attributes. | ||||||
|  |  | ||||||
|  | It's very cool to do "import sys" type "sys.", hit the completion key (twice), | ||||||
|  | and see the list of names defined by the sys module! | ||||||
|  |  | ||||||
|  | Tip: to use the tab key as the completion key, call | ||||||
|  |  | ||||||
|  |     readline.parse_and_bind("tab: complete") | ||||||
|  |  | ||||||
|  | Notes: | ||||||
|  |  | ||||||
|  | - Exceptions raised by the completer function are *ignored* (and generally cause | ||||||
|  |   the completion to fail).  This is a feature -- since readline sets the tty | ||||||
|  |   device in raw (or cbreak) mode, printing a traceback wouldn't work well | ||||||
|  |   without some complicated hoopla to save, reset and restore the tty state. | ||||||
|  |  | ||||||
|  | - The evaluation of the NAME.NAME... form may cause arbitrary application | ||||||
|  |   defined code to be executed if an object with a __getattr__ hook is found. | ||||||
|  |   Since it is the responsibility of the application (or the user) to enable this | ||||||
|  |   feature, I consider this an acceptable risk.  More complicated expressions | ||||||
|  |   (e.g. function calls or indexing operations) are *not* evaluated. | ||||||
|  |  | ||||||
|  | - When the original stdin is not a tty device, GNU readline is never | ||||||
|  |   used, and this module (and the readline module) are silently inactive. | ||||||
|  |  | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | import atexit | ||||||
|  | import builtins | ||||||
|  | import __main__ | ||||||
|  |  | ||||||
|  | __all__ = ["Completer"] | ||||||
|  |  | ||||||
|  | class Completer: | ||||||
    def __init__(self, namespace = None):
        """Create a new completer for the command line.

        Completer([namespace]) -> completer instance.

        If unspecified, the default namespace where completions are performed
        is __main__ (technically, __main__.__dict__). Namespaces should be
        given as dictionaries.

        Completer instances should be used as the completion mechanism of
        readline via the set_completer() call:

        readline.set_completer(Completer(my_namespace).complete)
        """

        # NOTE(review): a falsy namespace such as {} skips this type check
        # (truthiness test) but is still bound as the namespace below.
        if namespace and not isinstance(namespace, dict):
            raise TypeError('namespace must be a dictionary')

        # Don't bind to namespace quite yet, but flag whether the user wants a
        # specific namespace or to use __main__.__dict__. This will allow us
        # to bind to __main__.__dict__ at completion time, not now.
        if namespace is None:
            self.use_main_ns = 1
        else:
            self.use_main_ns = 0
            self.namespace = namespace
|  |  | ||||||
|  |     def complete(self, text, state): | ||||||
|  |         """Return the next possible completion for 'text'. | ||||||
|  |  | ||||||
|  |         This is called successively with state == 0, 1, 2, ... until it | ||||||
|  |         returns None.  The completion should begin with 'text'. | ||||||
|  |  | ||||||
|  |         """ | ||||||
|  |         if self.use_main_ns: | ||||||
|  |             self.namespace = __main__.__dict__ | ||||||
|  |  | ||||||
|  |         if not text.strip(): | ||||||
|  |             if state == 0: | ||||||
|  |                 return '\t' | ||||||
|  |             else: | ||||||
|  |                 return None | ||||||
|  |  | ||||||
|  |         if state == 0: | ||||||
|  |             if "." in text: | ||||||
|  |                 self.matches = self.attr_matches(text) | ||||||
|  |             else: | ||||||
|  |                 self.matches = self.global_matches(text) | ||||||
|  |         try: | ||||||
|  |             return self.matches[state] | ||||||
|  |         except IndexError: | ||||||
|  |             return None | ||||||
|  |  | ||||||
|  |     def _callable_postfix(self, val, word): | ||||||
|  |         if callable(val): | ||||||
|  |             word = word + "(" | ||||||
|  |         return word | ||||||
|  |  | ||||||
|  |     def global_matches(self, text): | ||||||
|  |         """Compute matches when text is a simple name. | ||||||
|  |  | ||||||
|  |         Return a list of all keywords, built-in functions and names currently | ||||||
|  |         defined in self.namespace that match. | ||||||
|  |  | ||||||
|  |         """ | ||||||
|  |         import keyword | ||||||
|  |         matches = [] | ||||||
|  |         seen = {"__builtins__"} | ||||||
|  |         n = len(text) | ||||||
|  |         for word in keyword.kwlist: | ||||||
|  |             if word[:n] == text: | ||||||
|  |                 seen.add(word) | ||||||
|  |                 matches.append(word) | ||||||
|  |         for nspace in [self.namespace, builtins.__dict__]: | ||||||
|  |             for word, val in nspace.items(): | ||||||
|  |                 if word[:n] == text and word not in seen: | ||||||
|  |                     seen.add(word) | ||||||
|  |                     matches.append(self._callable_postfix(val, word)) | ||||||
|  |         return matches | ||||||
|  |  | ||||||
    def attr_matches(self, text):
        """Compute matches when text contains a dot.

        Assuming the text is of the form NAME.NAME....[NAME], and is
        evaluable in self.namespace, it will be evaluated and its attributes
        (as revealed by dir()) are used as possible completions.  (For class
        instances, class members are also considered.)

        WARNING: this can still invoke arbitrary C code, if an object
        with a __getattr__ hook is evaluated.

        """
        import re
        # Split "a.b.c.par" into expr="a.b.c" and the partial attr "par"
        # (which may be empty, as in "a.b.").
        m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
        if not m:
            return []
        expr, attr = m.group(1, 3)
        try:
            # NOTE(review): eval of user-typed text — tolerable only because
            # this runs inside the user's own interactive session.
            thisobject = eval(expr, self.namespace)
        except Exception:
            # Anything un-evaluable simply yields no completions.
            return []

        # get the content of the object, except __builtins__
        words = set(dir(thisobject))
        words.discard("__builtins__")

        if hasattr(thisobject, '__class__'):
            words.add('__class__')
            # Include inherited members from the whole MRO (via bases).
            words.update(get_class_members(thisobject.__class__))
        matches = []
        n = len(attr)
        for word in words:
            if word[:n] == attr:
                try:
                    val = getattr(thisobject, word)
                except Exception:
                    continue  # Exclude properties that are not set
                word = self._callable_postfix(val, "%s.%s" % (expr, word))
                matches.append(word)
        matches.sort()
        return matches
|  |  | ||||||
def get_class_members(klass):
    # Return dir() of *klass* plus, recursively, dir() of every base
    # class.  Duplicates are kept; the caller filters through a set.
    members = list(dir(klass))
    for base in getattr(klass, '__bases__', ()):
        members += get_class_members(base)
    return members
|  |  | ||||||
# Install the completer on import; readline is optional (absent e.g. on
# Windows), in which case tab completion is simply not set up.
try:
    import readline
except ImportError:
    pass
else:
    readline.set_completer(Completer().complete)
    # Release references early at shutdown (the readline module's
    # contents are quasi-immortal, and the completer function holds a
    # reference to globals).
    atexit.register(lambda: readline.set_completer(None))
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/shutil.py |  | ||||||
							
								
								
									
										1141
									
								
								v1/flask/lib/python3.4/shutil.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										1141
									
								
								v1/flask/lib/python3.4/shutil.py
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/sre_compile.py |  | ||||||
							
								
								
									
										592
									
								
								v1/flask/lib/python3.4/sre_compile.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										592
									
								
								v1/flask/lib/python3.4/sre_compile.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,592 @@ | |||||||
|  | # | ||||||
|  | # Secret Labs' Regular Expression Engine | ||||||
|  | # | ||||||
|  | # convert template to internal format | ||||||
|  | # | ||||||
|  | # Copyright (c) 1997-2001 by Secret Labs AB.  All rights reserved. | ||||||
|  | # | ||||||
|  | # See the sre.py file for information on usage and redistribution. | ||||||
|  | # | ||||||
|  |  | ||||||
|  | """Internal support module for sre""" | ||||||
|  |  | ||||||
|  | import _sre | ||||||
|  | import sre_parse | ||||||
|  | from sre_constants import * | ||||||
|  | from _sre import MAXREPEAT | ||||||
|  |  | ||||||
|  | assert _sre.MAGIC == MAGIC, "SRE module mismatch" | ||||||
|  |  | ||||||
# Largest value that fits in one code word; depends on whether the engine
# was built with 2-byte or 4-byte codes.
if _sre.CODESIZE == 2:
    MAXCODE = 65535
else:
    MAXCODE = 0xFFFFFFFF
|  |  | ||||||
# Opcode groupings used by _compile's dispatch chain.
_LITERAL_CODES = set([LITERAL, NOT_LITERAL])
_REPEATING_CODES = set([REPEAT, MIN_REPEAT, MAX_REPEAT])
_SUCCESS_CODES = set([SUCCESS, FAILURE])
_ASSERT_CODES = set([ASSERT, ASSERT_NOT])
|  |  | ||||||
|  | # Sets of lowercase characters which have the same uppercase. | ||||||
|  | _equivalences = ( | ||||||
|  |     # LATIN SMALL LETTER I, LATIN SMALL LETTER DOTLESS I | ||||||
|  |     (0x69, 0x131), # iı | ||||||
|  |     # LATIN SMALL LETTER S, LATIN SMALL LETTER LONG S | ||||||
|  |     (0x73, 0x17f), # sſ | ||||||
|  |     # MICRO SIGN, GREEK SMALL LETTER MU | ||||||
|  |     (0xb5, 0x3bc), # µμ | ||||||
|  |     # COMBINING GREEK YPOGEGRAMMENI, GREEK SMALL LETTER IOTA, GREEK PROSGEGRAMMENI | ||||||
|  |     (0x345, 0x3b9, 0x1fbe), # \u0345ιι | ||||||
|  |     # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS, GREEK SMALL LETTER IOTA WITH DIALYTIKA AND OXIA | ||||||
|  |     (0x390, 0x1fd3), # ΐΐ | ||||||
|  |     # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS, GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND OXIA | ||||||
|  |     (0x3b0, 0x1fe3), # ΰΰ | ||||||
|  |     # GREEK SMALL LETTER BETA, GREEK BETA SYMBOL | ||||||
|  |     (0x3b2, 0x3d0), # βϐ | ||||||
|  |     # GREEK SMALL LETTER EPSILON, GREEK LUNATE EPSILON SYMBOL | ||||||
|  |     (0x3b5, 0x3f5), # εϵ | ||||||
|  |     # GREEK SMALL LETTER THETA, GREEK THETA SYMBOL | ||||||
|  |     (0x3b8, 0x3d1), # θϑ | ||||||
|  |     # GREEK SMALL LETTER KAPPA, GREEK KAPPA SYMBOL | ||||||
|  |     (0x3ba, 0x3f0), # κϰ | ||||||
|  |     # GREEK SMALL LETTER PI, GREEK PI SYMBOL | ||||||
|  |     (0x3c0, 0x3d6), # πϖ | ||||||
|  |     # GREEK SMALL LETTER RHO, GREEK RHO SYMBOL | ||||||
|  |     (0x3c1, 0x3f1), # ρϱ | ||||||
|  |     # GREEK SMALL LETTER FINAL SIGMA, GREEK SMALL LETTER SIGMA | ||||||
|  |     (0x3c2, 0x3c3), # ςσ | ||||||
|  |     # GREEK SMALL LETTER PHI, GREEK PHI SYMBOL | ||||||
|  |     (0x3c6, 0x3d5), # φϕ | ||||||
|  |     # LATIN SMALL LETTER S WITH DOT ABOVE, LATIN SMALL LETTER LONG S WITH DOT ABOVE | ||||||
|  |     (0x1e61, 0x1e9b), # ṡẛ | ||||||
|  |     # LATIN SMALL LIGATURE LONG S T, LATIN SMALL LIGATURE ST | ||||||
|  |     (0xfb05, 0xfb06), # ſtst | ||||||
|  | ) | ||||||
|  |  | ||||||
# Maps the lowercase code to lowercase codes which have the same uppercase.
# Used to widen IGNORECASE literals so that e.g. 's' also matches 'ſ'.
_ignorecase_fixes = {i: tuple(j for j in t if i != j)
                     for t in _equivalences for i in t}
|  |  | ||||||
def _compile(code, pattern, flags):
    # internal: compile a (sub)pattern
    """Recursively append opcodes for *pattern* to the *code* list.

    *pattern* is the parsed (op, av) sequence produced by sre_parse;
    *flags* are the SRE_FLAG_* bits.  Raises error for unencodable
    constructs and ValueError for unknown opcodes.
    """
    emit = code.append
    _len = len
    # Bind module-level lookup tables to locals for speed in the hot loop.
    LITERAL_CODES = _LITERAL_CODES
    REPEATING_CODES = _REPEATING_CODES
    SUCCESS_CODES = _SUCCESS_CODES
    ASSERT_CODES = _ASSERT_CODES
    # Full Unicode case equivalences only apply to non-locale Unicode
    # case-insensitive matching.
    if (flags & SRE_FLAG_IGNORECASE and
            not (flags & SRE_FLAG_LOCALE) and
            flags & SRE_FLAG_UNICODE):
        fixes = _ignorecase_fixes
    else:
        fixes = None
    for op, av in pattern:
        if op in LITERAL_CODES:
            if flags & SRE_FLAG_IGNORECASE:
                lo = _sre.getlower(av, flags)
                if fixes and lo in fixes:
                    # The literal has case-equivalent code points; emit a
                    # small IN_IGNORE set matching all of them.
                    emit(OPCODES[IN_IGNORE])
                    skip = _len(code); emit(0)
                    if op is NOT_LITERAL:
                        emit(OPCODES[NEGATE])
                    for k in (lo,) + fixes[lo]:
                        emit(OPCODES[LITERAL])
                        emit(k)
                    emit(OPCODES[FAILURE])
                    # Back-patch the skip word now that the set length is known.
                    code[skip] = _len(code) - skip
                else:
                    emit(OPCODES[OP_IGNORE[op]])
                    emit(lo)
            else:
                emit(OPCODES[op])
                emit(av)
        elif op is IN:
            if flags & SRE_FLAG_IGNORECASE:
                emit(OPCODES[OP_IGNORE[op]])
                def fixup(literal, flags=flags):
                    return _sre.getlower(literal, flags)
            else:
                emit(OPCODES[op])
                fixup = None
            skip = _len(code); emit(0)
            _compile_charset(av, flags, code, fixup, fixes)
            code[skip] = _len(code) - skip
        elif op is ANY:
            if flags & SRE_FLAG_DOTALL:
                emit(OPCODES[ANY_ALL])
            else:
                emit(OPCODES[ANY])
        elif op in REPEATING_CODES:
            if flags & SRE_FLAG_TEMPLATE:
                raise error("internal: unsupported template operator")
            elif _simple(av) and op is not REPEAT:
                # Single-character body: use the cheaper *_ONE opcodes.
                if op is MAX_REPEAT:
                    emit(OPCODES[REPEAT_ONE])
                else:
                    emit(OPCODES[MIN_REPEAT_ONE])
                skip = _len(code); emit(0)
                emit(av[0])
                emit(av[1])
                _compile(code, av[2], flags)
                emit(OPCODES[SUCCESS])
                code[skip] = _len(code) - skip
            else:
                emit(OPCODES[REPEAT])
                skip = _len(code); emit(0)
                emit(av[0])
                emit(av[1])
                _compile(code, av[2], flags)
                code[skip] = _len(code) - skip
                if op is MAX_REPEAT:
                    emit(OPCODES[MAX_UNTIL])
                else:
                    emit(OPCODES[MIN_UNTIL])
        elif op is SUBPATTERN:
            # av[0] is the group number (0 for non-capturing groups).
            if av[0]:
                emit(OPCODES[MARK])
                emit((av[0]-1)*2)
            # _compile_info(code, av[1], flags)
            _compile(code, av[1], flags)
            if av[0]:
                emit(OPCODES[MARK])
                emit((av[0]-1)*2+1)
        elif op in SUCCESS_CODES:
            emit(OPCODES[op])
        elif op in ASSERT_CODES:
            emit(OPCODES[op])
            skip = _len(code); emit(0)
            if av[0] >= 0:
                emit(0) # look ahead
            else:
                lo, hi = av[1].getwidth()
                if lo != hi:
                    raise error("look-behind requires fixed-width pattern")
                emit(lo) # look behind
            _compile(code, av[1], flags)
            emit(OPCODES[SUCCESS])
            code[skip] = _len(code) - skip
        elif op is CALL:
            emit(OPCODES[op])
            skip = _len(code); emit(0)
            _compile(code, av, flags)
            emit(OPCODES[SUCCESS])
            code[skip] = _len(code) - skip
        elif op is AT:
            emit(OPCODES[op])
            if flags & SRE_FLAG_MULTILINE:
                av = AT_MULTILINE.get(av, av)
            if flags & SRE_FLAG_LOCALE:
                av = AT_LOCALE.get(av, av)
            elif flags & SRE_FLAG_UNICODE:
                av = AT_UNICODE.get(av, av)
            emit(ATCODES[av])
        elif op is BRANCH:
            emit(OPCODES[op])
            tail = []
            tailappend = tail.append
            # Each alternative ends in a JUMP whose target is patched after
            # all branches are emitted.
            for av in av[1]:
                skip = _len(code); emit(0)
                # _compile_info(code, av, flags)
                _compile(code, av, flags)
                emit(OPCODES[JUMP])
                tailappend(_len(code)); emit(0)
                code[skip] = _len(code) - skip
            emit(0) # end of branch
            for tail in tail:
                code[tail] = _len(code) - tail
        elif op is CATEGORY:
            emit(OPCODES[op])
            if flags & SRE_FLAG_LOCALE:
                av = CH_LOCALE[av]
            elif flags & SRE_FLAG_UNICODE:
                av = CH_UNICODE[av]
            emit(CHCODES[av])
        elif op is GROUPREF:
            if flags & SRE_FLAG_IGNORECASE:
                emit(OPCODES[OP_IGNORE[op]])
            else:
                emit(OPCODES[op])
            emit(av-1)
        elif op is GROUPREF_EXISTS:
            emit(OPCODES[op])
            emit(av[0]-1)
            skipyes = _len(code); emit(0)
            _compile(code, av[1], flags)
            if av[2]:
                # Conditional has a "no" branch; patch both skip words.
                emit(OPCODES[JUMP])
                skipno = _len(code); emit(0)
                code[skipyes] = _len(code) - skipyes + 1
                _compile(code, av[2], flags)
                code[skipno] = _len(code) - skipno
            else:
                code[skipyes] = _len(code) - skipyes + 1
        else:
            raise ValueError("unsupported operand type", op)
|  |  | ||||||
def _compile_charset(charset, flags, code, fixup=None, fixes=None):
    # compile charset subprogram
    """Emit the opcode stream for one character set.

    The set is first compacted by _optimize_charset; *fixup*/*fixes*
    implement case folding for IGNORECASE patterns.  The emitted set is
    terminated by a FAILURE sentinel.
    """
    emit = code.append
    for op, av in _optimize_charset(charset, fixup, fixes,
                                    flags & SRE_FLAG_UNICODE):
        emit(OPCODES[op])
        if op is NEGATE:
            pass
        elif op is LITERAL:
            emit(av)
        elif op is RANGE:
            emit(av[0])
            emit(av[1])
        elif op is CHARSET:
            code.extend(av)
        elif op is BIGCHARSET:
            code.extend(av)
        elif op is CATEGORY:
            # Category opcodes depend on the locale/unicode flag at
            # compile time.
            if flags & SRE_FLAG_LOCALE:
                emit(CHCODES[CH_LOCALE[av]])
            elif flags & SRE_FLAG_UNICODE:
                emit(CHCODES[CH_UNICODE[av]])
            else:
                emit(CHCODES[av])
        else:
            raise error("internal: unsupported set operator")
    emit(OPCODES[FAILURE])
|  |  | ||||||
def _optimize_charset(charset, fixup, fixes, isunicode):
    # internal: optimize character set
    """Rewrite a parsed character set into its most compact encodable form.

    Returns a list of (op, av) pairs using, in order of preference: plain
    literals/ranges (at most two runs), a 256-bit CHARSET bitmap, or a
    compressed BIGCHARSET for sets reaching beyond Latin-1.  *fixup*
    lowercases members for IGNORECASE; *fixes* adds case-equivalent code
    points.
    """
    out = []
    tail = []
    charmap = bytearray(256)
    for op, av in charset:
        while True:
            try:
                if op is LITERAL:
                    if fixup:
                        i = fixup(av)
                        charmap[i] = 1
                        if fixes and i in fixes:
                            for k in fixes[i]:
                                charmap[k] = 1
                    else:
                        charmap[av] = 1
                elif op is RANGE:
                    r = range(av[0], av[1]+1)
                    if fixup:
                        r = map(fixup, r)
                    if fixup and fixes:
                        for i in r:
                            charmap[i] = 1
                            if i in fixes:
                                for k in fixes[i]:
                                    charmap[k] = 1
                    else:
                        for i in r:
                            charmap[i] = 1
                elif op is NEGATE:
                    out.append((op, av))
                else:
                    tail.append((op, av))
            except IndexError:
                # Writing past the end of charmap means the set contains
                # code points above the current map size: widen once to
                # the BMP, then fall back to keeping items symbolically.
                if len(charmap) == 256:
                    # character set contains non-UCS1 character codes
                    charmap += b'\0' * 0xff00
                    continue
                # character set contains non-BMP character codes
                if fixup and isunicode and op is RANGE:
                    lo, hi = av
                    ranges = [av]
                    # There are only two ranges of cased astral characters:
                    # 10400-1044F (Deseret) and 118A0-118DF (Warang Citi).
                    _fixup_range(max(0x10000, lo), min(0x11fff, hi),
                                 ranges, fixup)
                    for lo, hi in ranges:
                        if lo == hi:
                            tail.append((LITERAL, hi))
                        else:
                            tail.append((RANGE, (lo, hi)))
                else:
                    tail.append((op, av))
            break

    # compress character map
    runs = []
    q = 0
    while True:
        p = charmap.find(1, q)
        if p < 0:
            break
        if len(runs) >= 2:
            # More than two runs: a bitmap encoding will be smaller.
            runs = None
            break
        q = charmap.find(0, p)
        if q < 0:
            runs.append((p, len(charmap)))
            break
        runs.append((p, q))
    if runs is not None:
        # use literal/range
        for p, q in runs:
            if q - p == 1:
                out.append((LITERAL, p))
            else:
                out.append((RANGE, (p, q - 1)))
        out += tail
        # if the case was changed or new representation is more compact
        if fixup or len(out) < len(charset):
            return out
        # else original character set is good enough
        return charset

    # use bitmap
    if len(charmap) == 256:
        data = _mk_bitmap(charmap)
        out.append((CHARSET, data))
        out += tail
        return out

    # To represent a big charset, first a bitmap of all characters in the
    # set is constructed. Then, this bitmap is sliced into chunks of 256
    # characters, duplicate chunks are eliminated, and each chunk is
    # given a number. In the compiled expression, the charset is
    # represented by a 32-bit word sequence, consisting of one word for
    # the number of different chunks, a sequence of 256 bytes (64 words)
    # of chunk numbers indexed by their original chunk position, and a
    # sequence of 256-bit chunks (8 words each).

    # Compression is normally good: in a typical charset, large ranges of
    # Unicode will be either completely excluded (e.g. if only cyrillic
    # letters are to be matched), or completely included (e.g. if large
    # subranges of Kanji match). These ranges will be represented by
    # chunks of all one-bits or all zero-bits.

    # Matching can be also done efficiently: the more significant byte of
    # the Unicode character is an index into the chunk number, and the
    # less significant byte is a bit index in the chunk (just like the
    # CHARSET matching).

    charmap = bytes(charmap) # should be hashable
    comps = {}
    mapping = bytearray(256)
    block = 0
    data = bytearray()
    for i in range(0, 65536, 256):
        chunk = charmap[i: i + 256]
        if chunk in comps:
            mapping[i // 256] = comps[chunk]
        else:
            mapping[i // 256] = comps[chunk] = block
            block += 1
            data += chunk
    data = _mk_bitmap(data)
    data[0:0] = [block] + _bytes_to_codes(mapping)
    out.append((BIGCHARSET, data))
    out += tail
    return out
|  |  | ||||||
|  | def _fixup_range(lo, hi, ranges, fixup): | ||||||
|  |     for i in map(fixup, range(lo, hi+1)): | ||||||
|  |         for k, (lo, hi) in enumerate(ranges): | ||||||
|  |             if i < lo: | ||||||
|  |                 if l == lo - 1: | ||||||
|  |                     ranges[k] = (i, hi) | ||||||
|  |                 else: | ||||||
|  |                     ranges.insert(k, (i, i)) | ||||||
|  |                 break | ||||||
|  |             elif i > hi: | ||||||
|  |                 if i == hi + 1: | ||||||
|  |                     ranges[k] = (lo, i) | ||||||
|  |                     break | ||||||
|  |             else: | ||||||
|  |                 break | ||||||
|  |         else: | ||||||
|  |             ranges.append((i, i)) | ||||||
|  |  | ||||||
|  | _CODEBITS = _sre.CODESIZE * 8 | ||||||
|  | _BITS_TRANS = b'0' + b'1' * 255 | ||||||
|  | def _mk_bitmap(bits, _CODEBITS=_CODEBITS, _int=int): | ||||||
|  |     s = bits.translate(_BITS_TRANS)[::-1] | ||||||
|  |     return [_int(s[i - _CODEBITS: i], 2) | ||||||
|  |             for i in range(len(s), 0, -_CODEBITS)] | ||||||
|  |  | ||||||
|  | def _bytes_to_codes(b): | ||||||
|  |     # Convert block indices to word array | ||||||
|  |     a = memoryview(b).cast('I') | ||||||
|  |     assert a.itemsize == _sre.CODESIZE | ||||||
|  |     assert len(a) * a.itemsize == len(b) | ||||||
|  |     return a.tolist() | ||||||
|  |  | ||||||
|  | def _simple(av): | ||||||
|  |     # check if av is a "simple" operator | ||||||
|  |     lo, hi = av[2].getwidth() | ||||||
|  |     return lo == hi == 1 and av[2][0][0] != SUBPATTERN | ||||||
|  |  | ||||||
|  | def _generate_overlap_table(prefix): | ||||||
|  |     """ | ||||||
|  |     Generate an overlap table for the following prefix. | ||||||
|  |     An overlap table is a table of the same size as the prefix which | ||||||
|  |     informs about the potential self-overlap for each index in the prefix: | ||||||
|  |     - if overlap[i] == 0, prefix[i:] can't overlap prefix[0:...] | ||||||
|  |     - if overlap[i] == k with 0 < k <= i, prefix[i-k+1:i+1] overlaps with | ||||||
|  |       prefix[0:k] | ||||||
|  |     """ | ||||||
|  |     table = [0] * len(prefix) | ||||||
|  |     for i in range(1, len(prefix)): | ||||||
|  |         idx = table[i - 1] | ||||||
|  |         while prefix[i] != prefix[idx]: | ||||||
|  |             if idx == 0: | ||||||
|  |                 table[i] = 0 | ||||||
|  |                 break | ||||||
|  |             idx = table[idx - 1] | ||||||
|  |         else: | ||||||
|  |             table[i] = idx + 1 | ||||||
|  |     return table | ||||||
|  |  | ||||||
def _compile_info(code, pattern, flags):
    # internal: compile an info block.  in the current version,
    # this contains min/max pattern width, and an optional literal
    # prefix or a character map
    """Emit the leading INFO block used by the matcher to skip ahead fast.

    Does nothing for patterns that can match the empty string.  Prefix
    extraction is only attempted for case-sensitive patterns.
    """
    lo, hi = pattern.getwidth()
    if lo == 0:
        return # not worth it
    # look for a literal prefix
    prefix = []
    prefixappend = prefix.append
    # prefix_skip counts how many leading literals are outside any group.
    prefix_skip = 0
    charset = [] # not used
    charsetappend = charset.append
    if not (flags & SRE_FLAG_IGNORECASE):
        # look for literal prefix
        for op, av in pattern.data:
            if op is LITERAL:
                if len(prefix) == prefix_skip:
                    prefix_skip = prefix_skip + 1
                prefixappend(av)
            elif op is SUBPATTERN and len(av[1]) == 1:
                op, av = av[1][0]
                if op is LITERAL:
                    prefixappend(av)
                else:
                    break
            else:
                break
        # if no prefix, look for charset prefix
        if not prefix and pattern.data:
            op, av = pattern.data[0]
            if op is SUBPATTERN and av[1]:
                op, av = av[1][0]
                if op is LITERAL:
                    charsetappend((op, av))
                elif op is BRANCH:
                    # All alternatives must start with a literal for the
                    # charset hint to apply.
                    c = []
                    cappend = c.append
                    for p in av[1]:
                        if not p:
                            break
                        op, av = p[0]
                        if op is LITERAL:
                            cappend((op, av))
                        else:
                            break
                    else:
                        charset = c
            elif op is BRANCH:
                c = []
                cappend = c.append
                for p in av[1]:
                    if not p:
                        break
                    op, av = p[0]
                    if op is LITERAL:
                        cappend((op, av))
                    else:
                        break
                else:
                    charset = c
            elif op is IN:
                charset = av
##     if prefix:
##         print "*** PREFIX", prefix, prefix_skip
##     if charset:
##         print "*** CHARSET", charset
    # add an info block
    emit = code.append
    emit(OPCODES[INFO])
    skip = len(code); emit(0)
    # literal flag
    mask = 0
    if prefix:
        mask = SRE_INFO_PREFIX
        if len(prefix) == prefix_skip == len(pattern.data):
            # The whole pattern is one bare literal run.
            mask = mask + SRE_INFO_LITERAL
    elif charset:
        mask = mask + SRE_INFO_CHARSET
    emit(mask)
    # pattern length
    if lo < MAXCODE:
        emit(lo)
    else:
        emit(MAXCODE)
        prefix = prefix[:MAXCODE]
    if hi < MAXCODE:
        emit(hi)
    else:
        # 0 here means "maximum width unknown/unbounded" to the engine.
        emit(0)
    # add literal prefix
    if prefix:
        emit(len(prefix)) # length
        emit(prefix_skip) # skip
        code.extend(prefix)
        # generate overlap table
        code.extend(_generate_overlap_table(prefix))
    elif charset:
        _compile_charset(charset, flags, code)
    code[skip] = len(code) - skip
|  |  | ||||||
def isstring(obj):
    # True for both text and binary "string" patterns.
    return isinstance(obj, str) or isinstance(obj, bytes)
|  |  | ||||||
def _code(p, flags):
    """Return the complete opcode list for parsed pattern *p*.

    Combines the pattern's inline flags with *flags*, prepends the INFO
    block and appends the final SUCCESS opcode.
    """
    flags = p.pattern.flags | flags
    code = []

    # compile info block
    _compile_info(code, p, flags)

    # compile the pattern
    _compile(code, p.data, flags)

    code.append(OPCODES[SUCCESS])

    return code
|  |  | ||||||
def compile(p, flags=0):
    # internal: convert pattern list to internal format
    """Compile *p* (a pattern string or an sre_parse result) to an _sre
    pattern object, ready for matching.
    """
    if isstring(p):
        pattern = p
        p = sre_parse.parse(p, flags)
    else:
        # Pre-parsed pattern: the original source string is unavailable.
        pattern = None

    code = _code(p, flags)

    # print code

    # XXX: <fl> get rid of this limitation!
    if p.pattern.groups > 100:
        raise AssertionError(
            "sorry, but this version only supports 100 named groups"
            )

    # map in either direction
    groupindex = p.pattern.groupdict
    indexgroup = [None] * p.pattern.groups
    for k, i in groupindex.items():
        indexgroup[i] = k

    return _sre.compile(
        pattern, flags | p.pattern.flags, code,
        p.pattern.groups-1,
        groupindex, indexgroup
        )
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/sre_constants.py |  | ||||||
							
								
								
									
										261
									
								
								v1/flask/lib/python3.4/sre_constants.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										261
									
								
								v1/flask/lib/python3.4/sre_constants.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,261 @@ | |||||||
|  | # | ||||||
|  | # Secret Labs' Regular Expression Engine | ||||||
|  | # | ||||||
|  | # various symbols used by the regular expression engine. | ||||||
|  | # run this script to update the _sre include files! | ||||||
|  | # | ||||||
|  | # Copyright (c) 1998-2001 by Secret Labs AB.  All rights reserved. | ||||||
|  | # | ||||||
|  | # See the sre.py file for information on usage and redistribution. | ||||||
|  | # | ||||||
|  |  | ||||||
|  | """Internal support module for sre""" | ||||||
|  |  | ||||||
|  | # update when constants are added or removed | ||||||
|  |  | ||||||
|  | MAGIC = 20031017 | ||||||
|  |  | ||||||
|  | from _sre import MAXREPEAT | ||||||
|  |  | ||||||
|  | # SRE standard exception (access as sre.error) | ||||||
|  | # should this really be here? | ||||||
|  |  | ||||||
|  | class error(Exception): | ||||||
|  |     pass | ||||||
|  |  | ||||||
|  | # operators | ||||||
|  |  | ||||||
|  | FAILURE = "failure" | ||||||
|  | SUCCESS = "success" | ||||||
|  |  | ||||||
|  | ANY = "any" | ||||||
|  | ANY_ALL = "any_all" | ||||||
|  | ASSERT = "assert" | ||||||
|  | ASSERT_NOT = "assert_not" | ||||||
|  | AT = "at" | ||||||
|  | BIGCHARSET = "bigcharset" | ||||||
|  | BRANCH = "branch" | ||||||
|  | CALL = "call" | ||||||
|  | CATEGORY = "category" | ||||||
|  | CHARSET = "charset" | ||||||
|  | GROUPREF = "groupref" | ||||||
|  | GROUPREF_IGNORE = "groupref_ignore" | ||||||
|  | GROUPREF_EXISTS = "groupref_exists" | ||||||
|  | IN = "in" | ||||||
|  | IN_IGNORE = "in_ignore" | ||||||
|  | INFO = "info" | ||||||
|  | JUMP = "jump" | ||||||
|  | LITERAL = "literal" | ||||||
|  | LITERAL_IGNORE = "literal_ignore" | ||||||
|  | MARK = "mark" | ||||||
|  | MAX_REPEAT = "max_repeat" | ||||||
|  | MAX_UNTIL = "max_until" | ||||||
|  | MIN_REPEAT = "min_repeat" | ||||||
|  | MIN_UNTIL = "min_until" | ||||||
|  | NEGATE = "negate" | ||||||
|  | NOT_LITERAL = "not_literal" | ||||||
|  | NOT_LITERAL_IGNORE = "not_literal_ignore" | ||||||
|  | RANGE = "range" | ||||||
|  | REPEAT = "repeat" | ||||||
|  | REPEAT_ONE = "repeat_one" | ||||||
|  | SUBPATTERN = "subpattern" | ||||||
|  | MIN_REPEAT_ONE = "min_repeat_one" | ||||||
|  |  | ||||||
|  | # positions | ||||||
|  | AT_BEGINNING = "at_beginning" | ||||||
|  | AT_BEGINNING_LINE = "at_beginning_line" | ||||||
|  | AT_BEGINNING_STRING = "at_beginning_string" | ||||||
|  | AT_BOUNDARY = "at_boundary" | ||||||
|  | AT_NON_BOUNDARY = "at_non_boundary" | ||||||
|  | AT_END = "at_end" | ||||||
|  | AT_END_LINE = "at_end_line" | ||||||
|  | AT_END_STRING = "at_end_string" | ||||||
|  | AT_LOC_BOUNDARY = "at_loc_boundary" | ||||||
|  | AT_LOC_NON_BOUNDARY = "at_loc_non_boundary" | ||||||
|  | AT_UNI_BOUNDARY = "at_uni_boundary" | ||||||
|  | AT_UNI_NON_BOUNDARY = "at_uni_non_boundary" | ||||||
|  |  | ||||||
|  | # categories | ||||||
|  | CATEGORY_DIGIT = "category_digit" | ||||||
|  | CATEGORY_NOT_DIGIT = "category_not_digit" | ||||||
|  | CATEGORY_SPACE = "category_space" | ||||||
|  | CATEGORY_NOT_SPACE = "category_not_space" | ||||||
|  | CATEGORY_WORD = "category_word" | ||||||
|  | CATEGORY_NOT_WORD = "category_not_word" | ||||||
|  | CATEGORY_LINEBREAK = "category_linebreak" | ||||||
|  | CATEGORY_NOT_LINEBREAK = "category_not_linebreak" | ||||||
|  | CATEGORY_LOC_WORD = "category_loc_word" | ||||||
|  | CATEGORY_LOC_NOT_WORD = "category_loc_not_word" | ||||||
|  | CATEGORY_UNI_DIGIT = "category_uni_digit" | ||||||
|  | CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit" | ||||||
|  | CATEGORY_UNI_SPACE = "category_uni_space" | ||||||
|  | CATEGORY_UNI_NOT_SPACE = "category_uni_not_space" | ||||||
|  | CATEGORY_UNI_WORD = "category_uni_word" | ||||||
|  | CATEGORY_UNI_NOT_WORD = "category_uni_not_word" | ||||||
|  | CATEGORY_UNI_LINEBREAK = "category_uni_linebreak" | ||||||
|  | CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak" | ||||||
|  |  | ||||||
|  | OPCODES = [ | ||||||
|  |  | ||||||
|  |     # failure=0 success=1 (just because it looks better that way :-) | ||||||
|  |     FAILURE, SUCCESS, | ||||||
|  |  | ||||||
|  |     ANY, ANY_ALL, | ||||||
|  |     ASSERT, ASSERT_NOT, | ||||||
|  |     AT, | ||||||
|  |     BRANCH, | ||||||
|  |     CALL, | ||||||
|  |     CATEGORY, | ||||||
|  |     CHARSET, BIGCHARSET, | ||||||
|  |     GROUPREF, GROUPREF_EXISTS, GROUPREF_IGNORE, | ||||||
|  |     IN, IN_IGNORE, | ||||||
|  |     INFO, | ||||||
|  |     JUMP, | ||||||
|  |     LITERAL, LITERAL_IGNORE, | ||||||
|  |     MARK, | ||||||
|  |     MAX_UNTIL, | ||||||
|  |     MIN_UNTIL, | ||||||
|  |     NOT_LITERAL, NOT_LITERAL_IGNORE, | ||||||
|  |     NEGATE, | ||||||
|  |     RANGE, | ||||||
|  |     REPEAT, | ||||||
|  |     REPEAT_ONE, | ||||||
|  |     SUBPATTERN, | ||||||
|  |     MIN_REPEAT_ONE | ||||||
|  |  | ||||||
|  | ] | ||||||
|  |  | ||||||
|  | ATCODES = [ | ||||||
|  |     AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY, | ||||||
|  |     AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING, | ||||||
|  |     AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY, | ||||||
|  |     AT_UNI_NON_BOUNDARY | ||||||
|  | ] | ||||||
|  |  | ||||||
|  | CHCODES = [ | ||||||
|  |     CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE, | ||||||
|  |     CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD, | ||||||
|  |     CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD, | ||||||
|  |     CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT, | ||||||
|  |     CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD, | ||||||
|  |     CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK, | ||||||
|  |     CATEGORY_UNI_NOT_LINEBREAK | ||||||
|  | ] | ||||||
|  |  | ||||||
|  | def makedict(list): | ||||||
|  |     d = {} | ||||||
|  |     i = 0 | ||||||
|  |     for item in list: | ||||||
|  |         d[item] = i | ||||||
|  |         i = i + 1 | ||||||
|  |     return d | ||||||
|  |  | ||||||
|  | OPCODES = makedict(OPCODES) | ||||||
|  | ATCODES = makedict(ATCODES) | ||||||
|  | CHCODES = makedict(CHCODES) | ||||||
|  |  | ||||||
|  | # replacement operations for "ignore case" mode | ||||||
|  | OP_IGNORE = { | ||||||
|  |     GROUPREF: GROUPREF_IGNORE, | ||||||
|  |     IN: IN_IGNORE, | ||||||
|  |     LITERAL: LITERAL_IGNORE, | ||||||
|  |     NOT_LITERAL: NOT_LITERAL_IGNORE | ||||||
|  | } | ||||||
|  |  | ||||||
|  | AT_MULTILINE = { | ||||||
|  |     AT_BEGINNING: AT_BEGINNING_LINE, | ||||||
|  |     AT_END: AT_END_LINE | ||||||
|  | } | ||||||
|  |  | ||||||
|  | AT_LOCALE = { | ||||||
|  |     AT_BOUNDARY: AT_LOC_BOUNDARY, | ||||||
|  |     AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY | ||||||
|  | } | ||||||
|  |  | ||||||
|  | AT_UNICODE = { | ||||||
|  |     AT_BOUNDARY: AT_UNI_BOUNDARY, | ||||||
|  |     AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY | ||||||
|  | } | ||||||
|  |  | ||||||
|  | CH_LOCALE = { | ||||||
|  |     CATEGORY_DIGIT: CATEGORY_DIGIT, | ||||||
|  |     CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT, | ||||||
|  |     CATEGORY_SPACE: CATEGORY_SPACE, | ||||||
|  |     CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE, | ||||||
|  |     CATEGORY_WORD: CATEGORY_LOC_WORD, | ||||||
|  |     CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD, | ||||||
|  |     CATEGORY_LINEBREAK: CATEGORY_LINEBREAK, | ||||||
|  |     CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK | ||||||
|  | } | ||||||
|  |  | ||||||
|  | CH_UNICODE = { | ||||||
|  |     CATEGORY_DIGIT: CATEGORY_UNI_DIGIT, | ||||||
|  |     CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT, | ||||||
|  |     CATEGORY_SPACE: CATEGORY_UNI_SPACE, | ||||||
|  |     CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE, | ||||||
|  |     CATEGORY_WORD: CATEGORY_UNI_WORD, | ||||||
|  |     CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD, | ||||||
|  |     CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK, | ||||||
|  |     CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK | ||||||
|  | } | ||||||
|  |  | ||||||
|  | # flags | ||||||
|  | SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking) | ||||||
|  | SRE_FLAG_IGNORECASE = 2 # case insensitive | ||||||
|  | SRE_FLAG_LOCALE = 4 # honour system locale | ||||||
|  | SRE_FLAG_MULTILINE = 8 # treat target as multiline string | ||||||
|  | SRE_FLAG_DOTALL = 16 # treat target as a single string | ||||||
|  | SRE_FLAG_UNICODE = 32 # use unicode "locale" | ||||||
|  | SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments | ||||||
|  | SRE_FLAG_DEBUG = 128 # debugging | ||||||
|  | SRE_FLAG_ASCII = 256 # use ascii "locale" | ||||||
|  |  | ||||||
|  | # flags for INFO primitive | ||||||
|  | SRE_INFO_PREFIX = 1 # has prefix | ||||||
|  | SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix) | ||||||
|  | SRE_INFO_CHARSET = 4 # pattern starts with character from given set | ||||||
|  |  | ||||||
|  | if __name__ == "__main__": | ||||||
|  |     def dump(f, d, prefix): | ||||||
|  |         items = sorted(d.items(), key=lambda a: a[1]) | ||||||
|  |         for k, v in items: | ||||||
|  |             f.write("#define %s_%s %s\n" % (prefix, k.upper(), v)) | ||||||
|  |     f = open("sre_constants.h", "w") | ||||||
|  |     f.write("""\ | ||||||
|  | /* | ||||||
|  |  * Secret Labs' Regular Expression Engine | ||||||
|  |  * | ||||||
|  |  * regular expression matching engine | ||||||
|  |  * | ||||||
|  |  * NOTE: This file is generated by sre_constants.py.  If you need | ||||||
|  |  * to change anything in here, edit sre_constants.py and run it. | ||||||
|  |  * | ||||||
|  |  * Copyright (c) 1997-2001 by Secret Labs AB.  All rights reserved. | ||||||
|  |  * | ||||||
|  |  * See the _sre.c file for information on usage and redistribution. | ||||||
|  |  */ | ||||||
|  |  | ||||||
|  | """) | ||||||
|  |  | ||||||
|  |     f.write("#define SRE_MAGIC %d\n" % MAGIC) | ||||||
|  |  | ||||||
|  |     dump(f, OPCODES, "SRE_OP") | ||||||
|  |     dump(f, ATCODES, "SRE") | ||||||
|  |     dump(f, CHCODES, "SRE") | ||||||
|  |  | ||||||
|  |     f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE) | ||||||
|  |     f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE) | ||||||
|  |     f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE) | ||||||
|  |     f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE) | ||||||
|  |     f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL) | ||||||
|  |     f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE) | ||||||
|  |     f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE) | ||||||
|  |     f.write("#define SRE_FLAG_DEBUG %d\n" % SRE_FLAG_DEBUG) | ||||||
|  |     f.write("#define SRE_FLAG_ASCII %d\n" % SRE_FLAG_ASCII) | ||||||
|  |  | ||||||
|  |     f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX) | ||||||
|  |     f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL) | ||||||
|  |     f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET) | ||||||
|  |  | ||||||
|  |     f.close() | ||||||
|  |     print("done") | ||||||
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/sre_parse.py |  | ||||||
							
								
								
									
										891
									
								
								v1/flask/lib/python3.4/sre_parse.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										891
									
								
								v1/flask/lib/python3.4/sre_parse.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,891 @@ | |||||||
|  | # | ||||||
|  | # Secret Labs' Regular Expression Engine | ||||||
|  | # | ||||||
|  | # convert re-style regular expression to sre pattern | ||||||
|  | # | ||||||
|  | # Copyright (c) 1998-2001 by Secret Labs AB.  All rights reserved. | ||||||
|  | # | ||||||
|  | # See the sre.py file for information on usage and redistribution. | ||||||
|  | # | ||||||
|  |  | ||||||
|  | """Internal support module for sre""" | ||||||
|  |  | ||||||
|  | # XXX: show string offset and offending character for all errors | ||||||
|  |  | ||||||
|  | from sre_constants import * | ||||||
|  | from _sre import MAXREPEAT | ||||||
|  |  | ||||||
|  | SPECIAL_CHARS = ".\\[{()*+?^$|" | ||||||
|  | REPEAT_CHARS = "*+?{" | ||||||
|  |  | ||||||
|  | DIGITS = set("0123456789") | ||||||
|  |  | ||||||
|  | OCTDIGITS = set("01234567") | ||||||
|  | HEXDIGITS = set("0123456789abcdefABCDEF") | ||||||
|  |  | ||||||
|  | WHITESPACE = set(" \t\n\r\v\f") | ||||||
|  |  | ||||||
|  | ESCAPES = { | ||||||
|  |     r"\a": (LITERAL, ord("\a")), | ||||||
|  |     r"\b": (LITERAL, ord("\b")), | ||||||
|  |     r"\f": (LITERAL, ord("\f")), | ||||||
|  |     r"\n": (LITERAL, ord("\n")), | ||||||
|  |     r"\r": (LITERAL, ord("\r")), | ||||||
|  |     r"\t": (LITERAL, ord("\t")), | ||||||
|  |     r"\v": (LITERAL, ord("\v")), | ||||||
|  |     r"\\": (LITERAL, ord("\\")) | ||||||
|  | } | ||||||
|  |  | ||||||
|  | CATEGORIES = { | ||||||
|  |     r"\A": (AT, AT_BEGINNING_STRING), # start of string | ||||||
|  |     r"\b": (AT, AT_BOUNDARY), | ||||||
|  |     r"\B": (AT, AT_NON_BOUNDARY), | ||||||
|  |     r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]), | ||||||
|  |     r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]), | ||||||
|  |     r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]), | ||||||
|  |     r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]), | ||||||
|  |     r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]), | ||||||
|  |     r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]), | ||||||
|  |     r"\Z": (AT, AT_END_STRING), # end of string | ||||||
|  | } | ||||||
|  |  | ||||||
|  | FLAGS = { | ||||||
|  |     # standard flags | ||||||
|  |     "i": SRE_FLAG_IGNORECASE, | ||||||
|  |     "L": SRE_FLAG_LOCALE, | ||||||
|  |     "m": SRE_FLAG_MULTILINE, | ||||||
|  |     "s": SRE_FLAG_DOTALL, | ||||||
|  |     "x": SRE_FLAG_VERBOSE, | ||||||
|  |     # extensions | ||||||
|  |     "a": SRE_FLAG_ASCII, | ||||||
|  |     "t": SRE_FLAG_TEMPLATE, | ||||||
|  |     "u": SRE_FLAG_UNICODE, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | class Pattern: | ||||||
|  |     # master pattern object.  keeps track of global attributes | ||||||
|  |     def __init__(self): | ||||||
|  |         self.flags = 0 | ||||||
|  |         self.open = [] | ||||||
|  |         self.groups = 1 | ||||||
|  |         self.groupdict = {} | ||||||
|  |         self.lookbehind = 0 | ||||||
|  |  | ||||||
|  |     def opengroup(self, name=None): | ||||||
|  |         gid = self.groups | ||||||
|  |         self.groups = gid + 1 | ||||||
|  |         if name is not None: | ||||||
|  |             ogid = self.groupdict.get(name, None) | ||||||
|  |             if ogid is not None: | ||||||
|  |                 raise error("redefinition of group name %s as group %d; " | ||||||
|  |                             "was group %d" % (repr(name), gid,  ogid)) | ||||||
|  |             self.groupdict[name] = gid | ||||||
|  |         self.open.append(gid) | ||||||
|  |         return gid | ||||||
|  |     def closegroup(self, gid): | ||||||
|  |         self.open.remove(gid) | ||||||
|  |     def checkgroup(self, gid): | ||||||
|  |         return gid < self.groups and gid not in self.open | ||||||
|  |  | ||||||
|  | class SubPattern: | ||||||
|  |     # a subpattern, in intermediate form | ||||||
|  |     def __init__(self, pattern, data=None): | ||||||
|  |         self.pattern = pattern | ||||||
|  |         if data is None: | ||||||
|  |             data = [] | ||||||
|  |         self.data = data | ||||||
|  |         self.width = None | ||||||
|  |     def dump(self, level=0): | ||||||
|  |         nl = True | ||||||
|  |         seqtypes = (tuple, list) | ||||||
|  |         for op, av in self.data: | ||||||
|  |             print(level*"  " + op, end='') | ||||||
|  |             if op == IN: | ||||||
|  |                 # member sublanguage | ||||||
|  |                 print() | ||||||
|  |                 for op, a in av: | ||||||
|  |                     print((level+1)*"  " + op, a) | ||||||
|  |             elif op == BRANCH: | ||||||
|  |                 print() | ||||||
|  |                 for i, a in enumerate(av[1]): | ||||||
|  |                     if i: | ||||||
|  |                         print(level*"  " + "or") | ||||||
|  |                     a.dump(level+1) | ||||||
|  |             elif op == GROUPREF_EXISTS: | ||||||
|  |                 condgroup, item_yes, item_no = av | ||||||
|  |                 print('', condgroup) | ||||||
|  |                 item_yes.dump(level+1) | ||||||
|  |                 if item_no: | ||||||
|  |                     print(level*"  " + "else") | ||||||
|  |                     item_no.dump(level+1) | ||||||
|  |             elif isinstance(av, seqtypes): | ||||||
|  |                 nl = False | ||||||
|  |                 for a in av: | ||||||
|  |                     if isinstance(a, SubPattern): | ||||||
|  |                         if not nl: | ||||||
|  |                             print() | ||||||
|  |                         a.dump(level+1) | ||||||
|  |                         nl = True | ||||||
|  |                     else: | ||||||
|  |                         if not nl: | ||||||
|  |                             print(' ', end='') | ||||||
|  |                         print(a, end='') | ||||||
|  |                         nl = False | ||||||
|  |                 if not nl: | ||||||
|  |                     print() | ||||||
|  |             else: | ||||||
|  |                 print('', av) | ||||||
|  |     def __repr__(self): | ||||||
|  |         return repr(self.data) | ||||||
|  |     def __len__(self): | ||||||
|  |         return len(self.data) | ||||||
|  |     def __delitem__(self, index): | ||||||
|  |         del self.data[index] | ||||||
|  |     def __getitem__(self, index): | ||||||
|  |         if isinstance(index, slice): | ||||||
|  |             return SubPattern(self.pattern, self.data[index]) | ||||||
|  |         return self.data[index] | ||||||
|  |     def __setitem__(self, index, code): | ||||||
|  |         self.data[index] = code | ||||||
|  |     def insert(self, index, code): | ||||||
|  |         self.data.insert(index, code) | ||||||
|  |     def append(self, code): | ||||||
|  |         self.data.append(code) | ||||||
|  |     def getwidth(self): | ||||||
|  |         # determine the width (min, max) for this subpattern | ||||||
|  |         if self.width: | ||||||
|  |             return self.width | ||||||
|  |         lo = hi = 0 | ||||||
|  |         UNITCODES = (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY) | ||||||
|  |         REPEATCODES = (MIN_REPEAT, MAX_REPEAT) | ||||||
|  |         for op, av in self.data: | ||||||
|  |             if op is BRANCH: | ||||||
|  |                 i = MAXREPEAT - 1 | ||||||
|  |                 j = 0 | ||||||
|  |                 for av in av[1]: | ||||||
|  |                     l, h = av.getwidth() | ||||||
|  |                     i = min(i, l) | ||||||
|  |                     j = max(j, h) | ||||||
|  |                 lo = lo + i | ||||||
|  |                 hi = hi + j | ||||||
|  |             elif op is CALL: | ||||||
|  |                 i, j = av.getwidth() | ||||||
|  |                 lo = lo + i | ||||||
|  |                 hi = hi + j | ||||||
|  |             elif op is SUBPATTERN: | ||||||
|  |                 i, j = av[1].getwidth() | ||||||
|  |                 lo = lo + i | ||||||
|  |                 hi = hi + j | ||||||
|  |             elif op in REPEATCODES: | ||||||
|  |                 i, j = av[2].getwidth() | ||||||
|  |                 lo = lo + i * av[0] | ||||||
|  |                 hi = hi + j * av[1] | ||||||
|  |             elif op in UNITCODES: | ||||||
|  |                 lo = lo + 1 | ||||||
|  |                 hi = hi + 1 | ||||||
|  |             elif op == SUCCESS: | ||||||
|  |                 break | ||||||
|  |         self.width = min(lo, MAXREPEAT - 1), min(hi, MAXREPEAT) | ||||||
|  |         return self.width | ||||||
|  |  | ||||||
|  | class Tokenizer: | ||||||
|  |     def __init__(self, string): | ||||||
|  |         self.istext = isinstance(string, str) | ||||||
|  |         self.string = string | ||||||
|  |         self.index = 0 | ||||||
|  |         self.__next() | ||||||
|  |     def __next(self): | ||||||
|  |         if self.index >= len(self.string): | ||||||
|  |             self.next = None | ||||||
|  |             return | ||||||
|  |         char = self.string[self.index:self.index+1] | ||||||
|  |         # Special case for the str8, since indexing returns a integer | ||||||
|  |         # XXX This is only needed for test_bug_926075 in test_re.py | ||||||
|  |         if char and not self.istext: | ||||||
|  |             char = chr(char[0]) | ||||||
|  |         if char == "\\": | ||||||
|  |             try: | ||||||
|  |                 c = self.string[self.index + 1] | ||||||
|  |             except IndexError: | ||||||
|  |                 raise error("bogus escape (end of line)") | ||||||
|  |             if not self.istext: | ||||||
|  |                 c = chr(c) | ||||||
|  |             char = char + c | ||||||
|  |         self.index = self.index + len(char) | ||||||
|  |         self.next = char | ||||||
|  |     def match(self, char, skip=1): | ||||||
|  |         if char == self.next: | ||||||
|  |             if skip: | ||||||
|  |                 self.__next() | ||||||
|  |             return 1 | ||||||
|  |         return 0 | ||||||
|  |     def get(self): | ||||||
|  |         this = self.next | ||||||
|  |         self.__next() | ||||||
|  |         return this | ||||||
|  |     def getwhile(self, n, charset): | ||||||
|  |         result = '' | ||||||
|  |         for _ in range(n): | ||||||
|  |             c = self.next | ||||||
|  |             if c not in charset: | ||||||
|  |                 break | ||||||
|  |             result += c | ||||||
|  |             self.__next() | ||||||
|  |         return result | ||||||
|  |     def tell(self): | ||||||
|  |         return self.index, self.next | ||||||
|  |     def seek(self, index): | ||||||
|  |         self.index, self.next = index | ||||||
|  |  | ||||||
|  | # The following three functions are not used in this module anymore, but we keep | ||||||
|  | # them here (with DeprecationWarnings) for backwards compatibility. | ||||||
|  |  | ||||||
|  | def isident(char): | ||||||
|  |     import warnings | ||||||
|  |     warnings.warn('sre_parse.isident() will be removed in 3.5', | ||||||
|  |                   DeprecationWarning, stacklevel=2) | ||||||
|  |     return "a" <= char <= "z" or "A" <= char <= "Z" or char == "_" | ||||||
|  |  | ||||||
|  | def isdigit(char): | ||||||
|  |     import warnings | ||||||
|  |     warnings.warn('sre_parse.isdigit() will be removed in 3.5', | ||||||
|  |                   DeprecationWarning, stacklevel=2) | ||||||
|  |     return "0" <= char <= "9" | ||||||
|  |  | ||||||
|  | def isname(name): | ||||||
|  |     import warnings | ||||||
|  |     warnings.warn('sre_parse.isname() will be removed in 3.5', | ||||||
|  |                   DeprecationWarning, stacklevel=2) | ||||||
|  |     # check that group name is a valid string | ||||||
|  |     if not isident(name[0]): | ||||||
|  |         return False | ||||||
|  |     for char in name[1:]: | ||||||
|  |         if not isident(char) and not isdigit(char): | ||||||
|  |             return False | ||||||
|  |     return True | ||||||
|  |  | ||||||
|  | def _class_escape(source, escape): | ||||||
|  |     # handle escape code inside character class | ||||||
|  |     code = ESCAPES.get(escape) | ||||||
|  |     if code: | ||||||
|  |         return code | ||||||
|  |     code = CATEGORIES.get(escape) | ||||||
|  |     if code and code[0] == IN: | ||||||
|  |         return code | ||||||
|  |     try: | ||||||
|  |         c = escape[1:2] | ||||||
|  |         if c == "x": | ||||||
|  |             # hexadecimal escape (exactly two digits) | ||||||
|  |             escape += source.getwhile(2, HEXDIGITS) | ||||||
|  |             if len(escape) != 4: | ||||||
|  |                 raise ValueError | ||||||
|  |             return LITERAL, int(escape[2:], 16) & 0xff | ||||||
|  |         elif c == "u" and source.istext: | ||||||
|  |             # unicode escape (exactly four digits) | ||||||
|  |             escape += source.getwhile(4, HEXDIGITS) | ||||||
|  |             if len(escape) != 6: | ||||||
|  |                 raise ValueError | ||||||
|  |             return LITERAL, int(escape[2:], 16) | ||||||
|  |         elif c == "U" and source.istext: | ||||||
|  |             # unicode escape (exactly eight digits) | ||||||
|  |             escape += source.getwhile(8, HEXDIGITS) | ||||||
|  |             if len(escape) != 10: | ||||||
|  |                 raise ValueError | ||||||
|  |             c = int(escape[2:], 16) | ||||||
|  |             chr(c) # raise ValueError for invalid code | ||||||
|  |             return LITERAL, c | ||||||
|  |         elif c in OCTDIGITS: | ||||||
|  |             # octal escape (up to three digits) | ||||||
|  |             escape += source.getwhile(2, OCTDIGITS) | ||||||
|  |             return LITERAL, int(escape[1:], 8) & 0xff | ||||||
|  |         elif c in DIGITS: | ||||||
|  |             raise ValueError | ||||||
|  |         if len(escape) == 2: | ||||||
|  |             return LITERAL, ord(escape[1]) | ||||||
|  |     except ValueError: | ||||||
|  |         pass | ||||||
|  |     raise error("bogus escape: %s" % repr(escape)) | ||||||
|  |  | ||||||
|  | def _escape(source, escape, state): | ||||||
|  |     # handle escape code in expression | ||||||
|  |     code = CATEGORIES.get(escape) | ||||||
|  |     if code: | ||||||
|  |         return code | ||||||
|  |     code = ESCAPES.get(escape) | ||||||
|  |     if code: | ||||||
|  |         return code | ||||||
|  |     try: | ||||||
|  |         c = escape[1:2] | ||||||
|  |         if c == "x": | ||||||
|  |             # hexadecimal escape | ||||||
|  |             escape += source.getwhile(2, HEXDIGITS) | ||||||
|  |             if len(escape) != 4: | ||||||
|  |                 raise ValueError | ||||||
|  |             return LITERAL, int(escape[2:], 16) & 0xff | ||||||
|  |         elif c == "u" and source.istext: | ||||||
|  |             # unicode escape (exactly four digits) | ||||||
|  |             escape += source.getwhile(4, HEXDIGITS) | ||||||
|  |             if len(escape) != 6: | ||||||
|  |                 raise ValueError | ||||||
|  |             return LITERAL, int(escape[2:], 16) | ||||||
|  |         elif c == "U" and source.istext: | ||||||
|  |             # unicode escape (exactly eight digits) | ||||||
|  |             escape += source.getwhile(8, HEXDIGITS) | ||||||
|  |             if len(escape) != 10: | ||||||
|  |                 raise ValueError | ||||||
|  |             c = int(escape[2:], 16) | ||||||
|  |             chr(c) # raise ValueError for invalid code | ||||||
|  |             return LITERAL, c | ||||||
|  |         elif c == "0": | ||||||
|  |             # octal escape | ||||||
|  |             escape += source.getwhile(2, OCTDIGITS) | ||||||
|  |             return LITERAL, int(escape[1:], 8) & 0xff | ||||||
|  |         elif c in DIGITS: | ||||||
|  |             # octal escape *or* decimal group reference (sigh) | ||||||
|  |             if source.next in DIGITS: | ||||||
|  |                 escape = escape + source.get() | ||||||
|  |                 if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and | ||||||
|  |                     source.next in OCTDIGITS): | ||||||
|  |                     # got three octal digits; this is an octal escape | ||||||
|  |                     escape = escape + source.get() | ||||||
|  |                     return LITERAL, int(escape[1:], 8) & 0xff | ||||||
|  |             # not an octal escape, so this is a group reference | ||||||
|  |             group = int(escape[1:]) | ||||||
|  |             if group < state.groups: | ||||||
|  |                 if not state.checkgroup(group): | ||||||
|  |                     raise error("cannot refer to open group") | ||||||
|  |                 if state.lookbehind: | ||||||
|  |                     import warnings | ||||||
|  |                     warnings.warn('group references in lookbehind ' | ||||||
|  |                                   'assertions are not supported', | ||||||
|  |                                   RuntimeWarning) | ||||||
|  |                 return GROUPREF, group | ||||||
|  |             raise ValueError | ||||||
|  |         if len(escape) == 2: | ||||||
|  |             return LITERAL, ord(escape[1]) | ||||||
|  |     except ValueError: | ||||||
|  |         pass | ||||||
|  |     raise error("bogus escape: %s" % repr(escape)) | ||||||
|  |  | ||||||
def _parse_sub(source, state, nested=1):
    """Parse an alternation (a|b|c) from *source* into a SubPattern.

    When *nested* is true, the alternation must be terminated by ")" or
    end-of-input; otherwise the caller handles termination.  Applies two
    local optimizations: hoisting a common prefix out of the branches and
    collapsing an all-literal alternation into a character set.
    """
    # parse an alternation: a|b|c

    items = []
    # pre-bind bound methods: these are called once per branch/token
    itemsappend = items.append
    sourcematch = source.match
    while 1:
        itemsappend(_parse(source, state))
        if sourcematch("|"):
            continue
        if not nested:
            break
        if not source.next or sourcematch(")", 0):
            break
        else:
            raise error("pattern not properly closed")

    # single branch: no alternation node needed
    if len(items) == 1:
        return items[0]

    subpattern = SubPattern(state)
    subpatternappend = subpattern.append

    # check if all items share a common prefix
    while 1:
        prefix = None
        for item in items:
            if not item:
                break
            if prefix is None:
                prefix = item[0]
            elif item[0] != prefix:
                break
        else:
            # all subitems start with a common "prefix".
            # move it out of the branch
            for item in items:
                del item[0]
            subpatternappend(prefix)
            continue # check next one
        break

    # check if the branch can be replaced by a character set
    for item in items:
        if len(item) != 1 or item[0][0] != LITERAL:
            break
    else:
        # we can store this as a character set instead of a
        # branch (the compiler may optimize this even more)
        set = []
        setappend = set.append
        for item in items:
            setappend(item[0])
        subpatternappend((IN, set))
        return subpattern

    subpattern.append((BRANCH, (None, items)))
    return subpattern
|  |  | ||||||
def _parse_sub_cond(source, state, condgroup):
    """Parse the body of a conditional group ``(?(id)yes|no)``.

    *condgroup* is the group number whose match status selects the
    branch.  Returns a SubPattern holding a single GROUPREF_EXISTS node.
    """
    yes_branch = _parse(source, state)
    no_branch = None
    if source.match("|"):
        no_branch = _parse(source, state)
        # at most two branches are allowed in a conditional group
        if source.match("|"):
            raise error("conditional backref with more than two branches")
    if source.next and not source.match(")", 0):
        raise error("pattern not properly closed")
    result = SubPattern(state)
    result.append((GROUPREF_EXISTS, (condgroup, yes_branch, no_branch)))
    return result
|  |  | ||||||
# Characters that terminate the current subpattern inside _parse().
_PATTERNENDERS = set("|)")
# Characters that may follow "(?" to introduce an assertion group.
_ASSERTCHARS = set("=!<")
# Assertion kinds valid after "(?<" (lookbehind supports only = and !).
_LOOKBEHINDASSERTCHARS = set("=!")
# Repeat opcodes; used to reject a double repeat such as "a**".
_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT])
|  |  | ||||||
def _parse(source, state):
    """Parse a single alternation branch from *source* into a SubPattern.

    Consumes tokens until "|", ")" or end of pattern; the caller
    (_parse_sub / _parse_sub_cond) handles alternation and group closing.
    """
    # parse a simple pattern
    subpattern = SubPattern(state)

    # precompute constants into local variables
    subpatternappend = subpattern.append
    sourceget = source.get
    sourcematch = source.match
    _len = len
    PATTERNENDERS = _PATTERNENDERS
    ASSERTCHARS = _ASSERTCHARS
    LOOKBEHINDASSERTCHARS = _LOOKBEHINDASSERTCHARS
    REPEATCODES = _REPEATCODES

    while 1:

        if source.next in PATTERNENDERS:
            break # end of subpattern
        this = sourceget()
        if this is None:
            break # end of pattern

        if state.flags & SRE_FLAG_VERBOSE:
            # skip whitespace and comments
            if this in WHITESPACE:
                continue
            if this == "#":
                # "#" starts a comment that runs to end of line
                while 1:
                    this = sourceget()
                    if this in (None, "\n"):
                        break
                continue

        if this and this[0] not in SPECIAL_CHARS:
            # ordinary character: emit a literal
            subpatternappend((LITERAL, ord(this)))

        elif this == "[":
            # character set
            set = []
            setappend = set.append
##          if sourcematch(":"):
##              pass # handle character classes
            if sourcematch("^"):
                setappend((NEGATE, None))
            # check remaining characters
            # remember the start so a "]" as the first member is a literal
            start = set[:]
            while 1:
                this = sourceget()
                if this == "]" and set != start:
                    break
                elif this and this[0] == "\\":
                    code1 = _class_escape(source, this)
                elif this:
                    code1 = LITERAL, ord(this)
                else:
                    raise error("unexpected end of regular expression")
                if sourcematch("-"):
                    # potential range
                    this = sourceget()
                    if this == "]":
                        # trailing "-" before "]" is a literal dash
                        if code1[0] is IN:
                            code1 = code1[1][0]
                        setappend(code1)
                        setappend((LITERAL, ord("-")))
                        break
                    elif this:
                        if this[0] == "\\":
                            code2 = _class_escape(source, this)
                        else:
                            code2 = LITERAL, ord(this)
                        if code1[0] != LITERAL or code2[0] != LITERAL:
                            raise error("bad character range")
                        lo = code1[1]
                        hi = code2[1]
                        if hi < lo:
                            raise error("bad character range")
                        setappend((RANGE, (lo, hi)))
                    else:
                        raise error("unexpected end of regular expression")
                else:
                    if code1[0] is IN:
                        code1 = code1[1][0]
                    setappend(code1)

            # XXX: <fl> should move set optimization to compiler!
            if _len(set)==1 and set[0][0] is LITERAL:
                subpatternappend(set[0]) # optimization
            elif _len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
                subpatternappend((NOT_LITERAL, set[1][1])) # optimization
            else:
                # XXX: <fl> should add charmap optimization here
                subpatternappend((IN, set))

        elif this and this[0] in REPEAT_CHARS:
            # repeat previous item
            if this == "?":
                min, max = 0, 1
            elif this == "*":
                min, max = 0, MAXREPEAT

            elif this == "+":
                min, max = 1, MAXREPEAT
            elif this == "{":
                # "{}" is not a repeat; treat "{" as a literal
                if source.next == "}":
                    subpatternappend((LITERAL, ord(this)))
                    continue
                # remember position so a malformed {...} can be re-read literally
                here = source.tell()
                min, max = 0, MAXREPEAT
                lo = hi = ""
                while source.next in DIGITS:
                    lo = lo + source.get()
                if sourcematch(","):
                    while source.next in DIGITS:
                        hi = hi + sourceget()
                else:
                    # {n} means exactly n: lower and upper bound coincide
                    hi = lo
                if not sourcematch("}"):
                    subpatternappend((LITERAL, ord(this)))
                    source.seek(here)
                    continue
                if lo:
                    min = int(lo)
                    if min >= MAXREPEAT:
                        raise OverflowError("the repetition number is too large")
                if hi:
                    max = int(hi)
                    if max >= MAXREPEAT:
                        raise OverflowError("the repetition number is too large")
                    if max < min:
                        raise error("bad repeat interval")
            else:
                raise error("not supported")
            # figure out which item to repeat
            if subpattern:
                item = subpattern[-1:]
            else:
                item = None
            if not item or (_len(item) == 1 and item[0][0] == AT):
                raise error("nothing to repeat")
            if item[0][0] in REPEATCODES:
                raise error("multiple repeat")
            # trailing "?" makes the repeat non-greedy
            if sourcematch("?"):
                subpattern[-1] = (MIN_REPEAT, (min, max, item))
            else:
                subpattern[-1] = (MAX_REPEAT, (min, max, item))

        elif this == ".":
            subpatternappend((ANY, None))

        elif this == "(":
            # group: 1 = capturing, 2 = anonymous, 0 = flags/extension only
            group = 1
            name = None
            condgroup = None
            if sourcematch("?"):
                group = 0
                # options
                if sourcematch("P"):
                    # python extensions
                    if sourcematch("<"):
                        # named group: skip forward to end of name
                        name = ""
                        while 1:
                            char = sourceget()
                            if char is None:
                                raise error("unterminated name")
                            if char == ">":
                                break
                            name = name + char
                        group = 1
                        if not name:
                            raise error("missing group name")
                        if not name.isidentifier():
                            raise error("bad character in group name %r" % name)
                    elif sourcematch("="):
                        # named backreference
                        name = ""
                        while 1:
                            char = sourceget()
                            if char is None:
                                raise error("unterminated name")
                            if char == ")":
                                break
                            name = name + char
                        if not name:
                            raise error("missing group name")
                        if not name.isidentifier():
                            raise error("bad character in backref group name "
                                        "%r" % name)
                        gid = state.groupdict.get(name)
                        if gid is None:
                            msg = "unknown group name: {0!r}".format(name)
                            raise error(msg)
                        if state.lookbehind:
                            import warnings
                            warnings.warn('group references in lookbehind '
                                          'assertions are not supported',
                                          RuntimeWarning)
                        subpatternappend((GROUPREF, gid))
                        continue
                    else:
                        char = sourceget()
                        if char is None:
                            raise error("unexpected end of pattern")
                        raise error("unknown specifier: ?P%s" % char)
                elif sourcematch(":"):
                    # non-capturing group
                    group = 2
                elif sourcematch("#"):
                    # comment
                    while 1:
                        if source.next is None or source.next == ")":
                            break
                        sourceget()
                    if not sourcematch(")"):
                        raise error("unbalanced parenthesis")
                    continue
                elif source.next in ASSERTCHARS:
                    # lookahead assertions
                    char = sourceget()
                    dir = 1
                    if char == "<":
                        if source.next not in LOOKBEHINDASSERTCHARS:
                            raise error("syntax error")
                        dir = -1 # lookbehind
                        char = sourceget()
                        state.lookbehind += 1
                    p = _parse_sub(source, state)
                    if dir < 0:
                        state.lookbehind -= 1
                    if not sourcematch(")"):
                        raise error("unbalanced parenthesis")
                    if char == "=":
                        subpatternappend((ASSERT, (dir, p)))
                    else:
                        subpatternappend((ASSERT_NOT, (dir, p)))
                    continue
                elif sourcematch("("):
                    # conditional backreference group
                    condname = ""
                    while 1:
                        char = sourceget()
                        if char is None:
                            raise error("unterminated name")
                        if char == ")":
                            break
                        condname = condname + char
                    group = 2
                    if not condname:
                        raise error("missing group name")
                    if condname.isidentifier():
                        condgroup = state.groupdict.get(condname)
                        if condgroup is None:
                            msg = "unknown group name: {0!r}".format(condname)
                            raise error(msg)
                    else:
                        try:
                            condgroup = int(condname)
                        except ValueError:
                            raise error("bad character in group name")
                    if state.lookbehind:
                        import warnings
                        warnings.warn('group references in lookbehind '
                                      'assertions are not supported',
                                      RuntimeWarning)
                else:
                    # flags
                    if not source.next in FLAGS:
                        raise error("unexpected end of pattern")
                    while source.next in FLAGS:
                        state.flags = state.flags | FLAGS[sourceget()]
            if group:
                # parse group contents
                if group == 2:
                    # anonymous group
                    group = None
                else:
                    group = state.opengroup(name)
                if condgroup:
                    p = _parse_sub_cond(source, state, condgroup)
                else:
                    p = _parse_sub(source, state)
                if not sourcematch(")"):
                    raise error("unbalanced parenthesis")
                if group is not None:
                    state.closegroup(group)
                subpatternappend((SUBPATTERN, (group, p)))
            else:
                # flags-only group like "(?i)": expect an immediate ")"
                while 1:
                    char = sourceget()
                    if char is None:
                        raise error("unexpected end of pattern")
                    if char == ")":
                        break
                    raise error("unknown extension")

        elif this == "^":
            subpatternappend((AT, AT_BEGINNING))

        elif this == "$":
            subpattern.append((AT, AT_END))

        elif this and this[0] == "\\":
            code = _escape(source, this, state)
            subpatternappend(code)

        else:
            raise error("parser error")

    return subpattern
|  |  | ||||||
def fix_flags(src, flags):
    """Validate *flags* against the pattern type and return the result.

    For str patterns, UNICODE is implied unless ASCII is given;
    combining ASCII and UNICODE is an error.  For bytes patterns,
    UNICODE is rejected outright.
    """
    # Check and fix flags according to the type of pattern (str or bytes)
    if isinstance(src, str):
        if flags & SRE_FLAG_ASCII:
            if flags & SRE_FLAG_UNICODE:
                raise ValueError("ASCII and UNICODE flags are incompatible")
        else:
            flags |= SRE_FLAG_UNICODE
    elif flags & SRE_FLAG_UNICODE:
        raise ValueError("can't use UNICODE flag with a bytes pattern")
    return flags
|  |  | ||||||
def parse(str, flags=0, pattern=None):
    """Parse a regular expression *str* into a SubPattern tree.

    *flags* are SRE_FLAG_* bits; *pattern* is an optional pre-built
    Pattern state object (a fresh one is created when omitted).
    Raises ``error`` on malformed patterns.
    """
    # parse 're' pattern into list of (opcode, argument) tuples

    source = Tokenizer(str)

    if pattern is None:
        pattern = Pattern()
    pattern.flags = flags
    pattern.str = str

    p = _parse_sub(source, pattern, 0)
    p.pattern.flags = fix_flags(str, p.pattern.flags)

    # anything left over means the pattern was not fully consumed
    tail = source.get()
    if tail == ")":
        raise error("unbalanced parenthesis")
    elif tail:
        raise error("bogus characters at end of regular expression")

    if flags & SRE_FLAG_DEBUG:
        p.dump()

    if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE:
        # the VERBOSE flag was switched on inside the pattern.  to be
        # on the safe side, we'll parse the whole thing again...
        return parse(str, p.pattern.flags)

    return p
|  |  | ||||||
def parse_template(source, pattern):
    """Parse an 're' replacement template into (groups, literals).

    *groups* is a list of (literal-slot index, group number) pairs;
    *literals* is a list of literal chunks with ``None`` placeholders
    at the slots to be filled from the match (see expand_template).
    """
    # parse 're' replacement string into list of literals and
    # group references
    s = Tokenizer(source)
    sget = s.get
    groups = []
    literals = []
    literal = []
    lappend = literal.append
    def addgroup(index):
        # flush pending literal text and reserve a slot for group *index*
        if literal:
            literals.append(''.join(literal))
            del literal[:]
        groups.append((len(literals), index))
        literals.append(None)
    while True:
        this = sget()
        if this is None:
            break # end of replacement string
        if this[0] == "\\":
            # group
            c = this[1]
            if c == "g":
                # \g<name> or \g<number> reference
                name = ""
                if s.match("<"):
                    while True:
                        char = sget()
                        if char is None:
                            raise error("unterminated group name")
                        if char == ">":
                            break
                        name += char
                if not name:
                    raise error("missing group name")
                try:
                    index = int(name)
                    if index < 0:
                        raise error("negative group number")
                except ValueError:
                    if not name.isidentifier():
                        raise error("bad character in group name")
                    try:
                        index = pattern.groupindex[name]
                    except KeyError:
                        msg = "unknown group name: {0!r}".format(name)
                        raise IndexError(msg)
                addgroup(index)
            elif c == "0":
                # \0 always starts an octal escape (up to two more digits)
                if s.next in OCTDIGITS:
                    this += sget()
                    if s.next in OCTDIGITS:
                        this += sget()
                lappend(chr(int(this[1:], 8) & 0xff))
            elif c in DIGITS:
                # \N is a group reference, unless three octal digits follow
                isoctal = False
                if s.next in DIGITS:
                    this += sget()
                    if (c in OCTDIGITS and this[2] in OCTDIGITS and
                        s.next in OCTDIGITS):
                        this += sget()
                        isoctal = True
                        lappend(chr(int(this[1:], 8) & 0xff))
                if not isoctal:
                    addgroup(int(this[1:]))
            else:
                # standard character escape (\n, \t, ...), else kept as-is
                try:
                    this = chr(ESCAPES[this][1])
                except KeyError:
                    pass
                lappend(this)
        else:
            lappend(this)
    if literal:
        literals.append(''.join(literal))
    if not isinstance(source, str):
        # The tokenizer implicitly decodes bytes objects as latin-1, we must
        # therefore re-encode the final representation.
        literals = [None if s is None else s.encode('latin-1') for s in literals]
    return groups, literals
|  |  | ||||||
def expand_template(template, match):
    """Expand a parsed replacement *template* using the groups of *match*.

    *template* is the (groups, literals) pair produced by
    parse_template.  Raises ``error`` for unmatched or invalid group
    references.
    """
    group_refs, pieces = template
    pieces = list(pieces)
    # empty str or bytes, matching the subject's type, to join with
    empty = match.string[:0]
    getgroup = match.group
    try:
        for slot, gid in group_refs:
            value = getgroup(gid)
            if value is None:
                raise error("unmatched group")
            pieces[slot] = value
    except IndexError:
        raise error("invalid group reference")
    return empty.join(pieces)
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/stat.py |  | ||||||
							
								
								
									
										155
									
								
								v1/flask/lib/python3.4/stat.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										155
									
								
								v1/flask/lib/python3.4/stat.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,155 @@ | |||||||
|  | """Constants/functions for interpreting results of os.stat() and os.lstat(). | ||||||
|  |  | ||||||
|  | Suggested usage: from stat import * | ||||||
|  | """ | ||||||
|  |  | ||||||
# Indices for stat struct members in the tuple returned by os.stat()

ST_MODE  = 0   # protection bits and file type
ST_INO   = 1   # inode number
ST_DEV   = 2   # device
ST_NLINK = 3   # number of hard links
ST_UID   = 4   # user id of owner
ST_GID   = 5   # group id of owner
ST_SIZE  = 6   # size in bytes
ST_ATIME = 7   # time of most recent access
ST_MTIME = 8   # time of most recent content modification
ST_CTIME = 9   # metadata change time (creation time on some systems)
|  |  | ||||||
|  | # Extract bits from the mode | ||||||
|  |  | ||||||
def S_IMODE(mode):
    """Return the portion of the file's mode that can be set by
    os.chmod().
    """
    # setuid/setgid/sticky plus rwx bits for user, group and other
    permission_mask = 0o7777
    return mode & permission_mask
|  |  | ||||||
def S_IFMT(mode):
    """Return the portion of the file's mode that describes the
    file type.
    """
    # the high mode bits select the file type (S_IFDIR, S_IFREG, ...)
    filetype_mask = 0o170000
    return mode & filetype_mask
|  |  | ||||||
# Constants used as S_IFMT() for various file types
# (not all are implemented on all systems)
# These are the POSIX st_mode file-type values, in octal.

S_IFDIR  = 0o040000  # directory
S_IFCHR  = 0o020000  # character device
S_IFBLK  = 0o060000  # block device
S_IFREG  = 0o100000  # regular file
S_IFIFO  = 0o010000  # fifo (named pipe)
S_IFLNK  = 0o120000  # symbolic link
S_IFSOCK = 0o140000  # socket file
|  |  | ||||||
|  | # Functions to test for each file type | ||||||
|  |  | ||||||
def S_ISDIR(mode):
    """Return True if mode is from a directory."""
    filetype = S_IFMT(mode)
    return filetype == S_IFDIR
|  |  | ||||||
def S_ISCHR(mode):
    """Return True if mode is from a character special device file."""
    return S_IFCHR == S_IFMT(mode)
|  |  | ||||||
def S_ISBLK(mode):
    """Return True if mode is from a block special device file."""
    filetype = S_IFMT(mode)
    return filetype == S_IFBLK
|  |  | ||||||
def S_ISREG(mode):
    """Return True if mode is from a regular file."""
    return S_IFREG == S_IFMT(mode)
|  |  | ||||||
def S_ISFIFO(mode):
    """Return True if mode is from a FIFO (named pipe)."""
    filetype = S_IFMT(mode)
    return filetype == S_IFIFO
|  |  | ||||||
def S_ISLNK(mode):
    """Return True if mode is from a symbolic link."""
    return S_IFLNK == S_IFMT(mode)
|  |  | ||||||
def S_ISSOCK(mode):
    """Return True if mode is from a socket."""
    filetype = S_IFMT(mode)
    return filetype == S_IFSOCK
|  |  | ||||||
# Names for permission bits
# (chmod-style octal permission values; combine with bitwise OR)

S_ISUID = 0o4000  # set UID bit
S_ISGID = 0o2000  # set GID bit
S_ENFMT = S_ISGID # file locking enforcement
S_ISVTX = 0o1000  # sticky bit
S_IREAD = 0o0400  # Unix V7 synonym for S_IRUSR
S_IWRITE = 0o0200 # Unix V7 synonym for S_IWUSR
S_IEXEC = 0o0100  # Unix V7 synonym for S_IXUSR
S_IRWXU = 0o0700  # mask for owner permissions
S_IRUSR = 0o0400  # read by owner
S_IWUSR = 0o0200  # write by owner
S_IXUSR = 0o0100  # execute by owner
S_IRWXG = 0o0070  # mask for group permissions
S_IRGRP = 0o0040  # read by group
S_IWGRP = 0o0020  # write by group
S_IXGRP = 0o0010  # execute by group
S_IRWXO = 0o0007  # mask for others (not in group) permissions
S_IROTH = 0o0004  # read by others
S_IWOTH = 0o0002  # write by others
S_IXOTH = 0o0001  # execute by others
|  |  | ||||||
# Names for file flags
# (BSD/macOS chflags-style flags; UF_* are user-settable, SF_* superuser-only)

UF_NODUMP    = 0x00000001  # do not dump file
UF_IMMUTABLE = 0x00000002  # file may not be changed
UF_APPEND    = 0x00000004  # file may only be appended to
UF_OPAQUE    = 0x00000008  # directory is opaque when viewed through a union stack
UF_NOUNLINK  = 0x00000010  # file may not be renamed or deleted
UF_COMPRESSED = 0x00000020 # OS X: file is hfs-compressed
UF_HIDDEN    = 0x00008000  # OS X: file should not be displayed
SF_ARCHIVED  = 0x00010000  # file may be archived
SF_IMMUTABLE = 0x00020000  # file may not be changed
SF_APPEND    = 0x00040000  # file may only be appended to
SF_NOUNLINK  = 0x00100000  # file may not be renamed or deleted
SF_SNAPSHOT  = 0x00200000  # file is a snapshot file
|  |  | ||||||
|  |  | ||||||
# Table driving filemode(): one inner tuple per output character.
# Within an inner tuple the first (bit-mask, char) pair that fully
# matches the mode wins; if none match, filemode() emits "-".
_filemode_table = (
    ((S_IFLNK,         "l"),
     (S_IFREG,         "-"),
     (S_IFBLK,         "b"),
     (S_IFDIR,         "d"),
     (S_IFCHR,         "c"),
     (S_IFIFO,         "p")),

    ((S_IRUSR,         "r"),),
    ((S_IWUSR,         "w"),),
    ((S_IXUSR|S_ISUID, "s"),
     (S_ISUID,         "S"),
     (S_IXUSR,         "x")),

    ((S_IRGRP,         "r"),),
    ((S_IWGRP,         "w"),),
    ((S_IXGRP|S_ISGID, "s"),
     (S_ISGID,         "S"),
     (S_IXGRP,         "x")),

    ((S_IROTH,         "r"),),
    ((S_IWOTH,         "w"),),
    ((S_IXOTH|S_ISVTX, "t"),
     (S_ISVTX,         "T"),
     (S_IXOTH,         "x"))
)
|  |  | ||||||
def filemode(mode):
    """Convert a file's mode to a string of the form '-rwxrwxrwx'."""
    chars = []
    for alternatives in _filemode_table:
        # default to "-" unless one of the alternatives fully matches
        ch = "-"
        for bit, candidate in alternatives:
            if mode & bit == bit:
                ch = candidate
                break
        chars.append(ch)
    return "".join(chars)
|  |  | ||||||
|  | # If available, use C implementation | ||||||
|  | try: | ||||||
|  |     from _stat import * | ||||||
|  | except ImportError: | ||||||
|  |     pass | ||||||
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/struct.py |  | ||||||
							
								
								
									
										15
									
								
								v1/flask/lib/python3.4/struct.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										15
									
								
								v1/flask/lib/python3.4/struct.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,15 @@ | |||||||
|  | __all__ = [ | ||||||
|  |     # Functions | ||||||
|  |     'calcsize', 'pack', 'pack_into', 'unpack', 'unpack_from', | ||||||
|  |     'iter_unpack', | ||||||
|  |  | ||||||
|  |     # Classes | ||||||
|  |     'Struct', | ||||||
|  |  | ||||||
|  |     # Exceptions | ||||||
|  |     'error' | ||||||
|  |     ] | ||||||
|  |  | ||||||
|  | from _struct import * | ||||||
|  | from _struct import _clearcache | ||||||
|  | from _struct import __doc__ | ||||||
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/tarfile.py |  | ||||||
							
								
								
									
										2529
									
								
								v1/flask/lib/python3.4/tarfile.py
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										2529
									
								
								v1/flask/lib/python3.4/tarfile.py
									
									
									
									
									
										Executable file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/tempfile.py |  | ||||||
							
								
								
									
										713
									
								
								v1/flask/lib/python3.4/tempfile.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										713
									
								
								v1/flask/lib/python3.4/tempfile.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,713 @@ | |||||||
|  | """Temporary files. | ||||||
|  |  | ||||||
|  | This module provides generic, low- and high-level interfaces for | ||||||
|  | creating temporary files and directories.  All of the interfaces | ||||||
|  | provided by this module can be used without fear of race conditions | ||||||
|  | except for 'mktemp'.  'mktemp' is subject to race conditions and | ||||||
|  | should not be used; it is provided for backward compatibility only. | ||||||
|  |  | ||||||
|  | This module also provides some data items to the user: | ||||||
|  |  | ||||||
|  |   TMP_MAX  - maximum number of names that will be tried before | ||||||
|  |              giving up. | ||||||
|  |   tempdir  - If this is set to a string before the first use of | ||||||
|  |              any routine from this module, it will be considered as | ||||||
|  |              another candidate location to store temporary files. | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | __all__ = [ | ||||||
|  |     "NamedTemporaryFile", "TemporaryFile", # high level safe interfaces | ||||||
|  |     "SpooledTemporaryFile", "TemporaryDirectory", | ||||||
|  |     "mkstemp", "mkdtemp",                  # low level safe interfaces | ||||||
|  |     "mktemp",                              # deprecated unsafe interface | ||||||
|  |     "TMP_MAX", "gettempprefix",            # constants | ||||||
|  |     "tempdir", "gettempdir" | ||||||
|  |    ] | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Imports. | ||||||
|  |  | ||||||
|  | import functools as _functools | ||||||
|  | import warnings as _warnings | ||||||
|  | import io as _io | ||||||
|  | import os as _os | ||||||
|  | import shutil as _shutil | ||||||
|  | import errno as _errno | ||||||
|  | from random import Random as _Random | ||||||
|  | import weakref as _weakref | ||||||
|  |  | ||||||
|  | try: | ||||||
|  |     import _thread | ||||||
|  | except ImportError: | ||||||
|  |     import _dummy_thread as _thread | ||||||
|  | _allocate_lock = _thread.allocate_lock | ||||||
|  |  | ||||||
|  | _text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL | ||||||
|  | if hasattr(_os, 'O_NOFOLLOW'): | ||||||
|  |     _text_openflags |= _os.O_NOFOLLOW | ||||||
|  |  | ||||||
|  | _bin_openflags = _text_openflags | ||||||
|  | if hasattr(_os, 'O_BINARY'): | ||||||
|  |     _bin_openflags |= _os.O_BINARY | ||||||
|  |  | ||||||
|  | if hasattr(_os, 'TMP_MAX'): | ||||||
|  |     TMP_MAX = _os.TMP_MAX | ||||||
|  | else: | ||||||
|  |     TMP_MAX = 10000 | ||||||
|  |  | ||||||
|  | # Although it does not have an underscore for historical reasons, this | ||||||
|  | # variable is an internal implementation detail (see issue 10354). | ||||||
|  | template = "tmp" | ||||||
|  |  | ||||||
|  | # Internal routines. | ||||||
|  |  | ||||||
|  | _once_lock = _allocate_lock() | ||||||
|  |  | ||||||
# Pick the cheapest existence probe available: prefer lstat (does not
# follow symlinks), then stat, then an open/close fallback.
if hasattr(_os, "lstat"):
    _stat = _os.lstat
elif hasattr(_os, "stat"):
    _stat = _os.stat
else:
    # Fallback.  All we need is something that raises OSError if the
    # file doesn't exist.
    def _stat(fn):
        fd = _os.open(fn, _os.O_RDONLY)
        _os.close(fd)

def _exists(fn):
    """Return True if *fn* can be stat'ed, False otherwise.

    NOTE(review): any OSError (e.g. PermissionError from an unreadable
    parent directory) is reported as "does not exist".
    """
    try:
        _stat(fn)
    except OSError:
        return False
    else:
        return True
|  |  | ||||||
class _RandomNameSequence:
    """An instance of _RandomNameSequence generates an endless
    sequence of unpredictable strings which can safely be incorporated
    into file names.  Each string is eight characters long.  Multiple
    threads can safely use the same instance at the same time.

    _RandomNameSequence is an iterator."""

    # Alphabet used for the random names: lowercase ASCII, digits, '_'.
    characters = "abcdefghijklmnopqrstuvwxyz0123456789_"

    @property
    def rng(self):
        # Re-seed with a fresh Random after fork: the pid check detects
        # that we are in a child process and avoids parent and child
        # producing the same "random" name stream.
        cur_pid = _os.getpid()
        if cur_pid != getattr(self, '_rng_pid', None):
            self._rng = _Random()
            self._rng_pid = cur_pid
        return self._rng

    def __iter__(self):
        return self

    def __next__(self):
        # Draw 8 characters independently from the alphabet.
        c = self.characters
        choose = self.rng.choice
        letters = [choose(c) for dummy in range(8)]
        return ''.join(letters)
|  |  | ||||||
def _candidate_tempdir_list():
    """Generate a list of candidate temporary directories which
    _get_default_tempdir will try.

    Order of preference: $TMPDIR / $TEMP / $TMP, then OS-specific
    defaults, then the current working directory as a last resort.
    """

    dirlist = []

    # First, try the environment.
    for envname in 'TMPDIR', 'TEMP', 'TMP':
        dirname = _os.getenv(envname)
        if dirname: dirlist.append(dirname)

    # Failing that, try OS-specific locations.
    if _os.name == 'nt':
        dirlist.extend([ r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ])
    else:
        dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ])

    # As a last resort, the current directory.
    try:
        dirlist.append(_os.getcwd())
    except (AttributeError, OSError):
        dirlist.append(_os.curdir)

    return dirlist
|  |  | ||||||
def _get_default_tempdir():
    """Calculate the default directory to use for temporary files.
    This routine should be called exactly once.

    We determine whether or not a candidate temp dir is usable by
    trying to create and write to a file in that directory.  If this
    is successful, the test file is deleted.  To prevent denial of
    service, the name of the test file must be randomized."""

    namer = _RandomNameSequence()
    dirlist = _candidate_tempdir_list()

    for dir in dirlist:
        if dir != _os.curdir:
            dir = _os.path.abspath(dir)
        # Try only a few names per directory.
        for seq in range(100):
            name = next(namer)
            filename = _os.path.join(dir, name)
            try:
                # O_CREAT|O_EXCL (in _bin_openflags) + mode 0o600: the
                # probe file is private and creation fails rather than
                # reusing a file planted by another user.
                fd = _os.open(filename, _bin_openflags, 0o600)
                try:
                    try:
                        with _io.open(fd, 'wb', closefd=False) as fp:
                            fp.write(b'blat')
                    finally:
                        _os.close(fd)
                finally:
                    _os.unlink(filename)
                return dir
            except FileExistsError:
                pass
            except PermissionError:
                # This exception is thrown when a directory with the chosen name
                # already exists on windows.
                if (_os.name == 'nt' and _os.path.isdir(dir) and
                    _os.access(dir, _os.W_OK)):
                    continue
                break   # no point trying more names in this directory
            except OSError:
                break   # no point trying more names in this directory
    raise FileNotFoundError(_errno.ENOENT,
                            "No usable temporary directory found in %s" %
                            dirlist)
|  |  | ||||||
# Shared, lazily-created name generator; guarded by _once_lock below.
_name_sequence = None

def _get_candidate_names():
    """Common setup sequence for all user-callable interfaces."""

    global _name_sequence
    if _name_sequence is None:
        # Double-checked locking: re-test under the lock so the shared
        # generator is built exactly once even with concurrent callers.
        _once_lock.acquire()
        try:
            if _name_sequence is None:
                _name_sequence = _RandomNameSequence()
        finally:
            _once_lock.release()
    return _name_sequence
|  |  | ||||||
|  |  | ||||||
def _mkstemp_inner(dir, pre, suf, flags):
    """Code common to mkstemp, TemporaryFile, and NamedTemporaryFile.

    Returns (fd, absolute path).  Retries with fresh random names up to
    TMP_MAX times on name collisions.
    """

    names = _get_candidate_names()

    for seq in range(TMP_MAX):
        name = next(names)
        file = _os.path.join(dir, pre + name + suf)
        try:
            # O_CREAT|O_EXCL in *flags* makes creation atomic: an
            # existing file raises instead of being silently reused.
            fd = _os.open(file, flags, 0o600)
            return (fd, _os.path.abspath(file))
        except FileExistsError:
            continue    # try again
        except PermissionError:
            # This exception is thrown when a directory with the chosen name
            # already exists on windows.
            if (_os.name == 'nt' and _os.path.isdir(dir) and
                _os.access(dir, _os.W_OK)):
                continue
            else:
                raise

    raise FileExistsError(_errno.EEXIST,
                          "No usable temporary file name found")
|  |  | ||||||
|  |  | ||||||
|  | # User visible interfaces. | ||||||
|  |  | ||||||
def gettempprefix():
    """Accessor for tempfile.template."""
    return template
|  |  | ||||||
# Cached default temp directory; computed lazily by gettempdir().
tempdir = None

def gettempdir():
    """Accessor for tempfile.tempdir."""
    global tempdir
    if tempdir is None:
        # Double-checked locking: only the first caller pays for the
        # directory probe; the re-check under the lock avoids races.
        _once_lock.acquire()
        try:
            if tempdir is None:
                tempdir = _get_default_tempdir()
        finally:
            _once_lock.release()
    return tempdir
|  |  | ||||||
def mkstemp(suffix="", prefix=template, dir=None, text=False):
    """User-callable function to create and return a unique temporary
    file.  The return value is a pair (fd, name) where fd is the
    file descriptor returned by os.open, and name is the filename.

    If 'suffix' is specified, the file name will end with that suffix,
    otherwise there will be no suffix.

    If 'prefix' is specified, the file name will begin with that prefix,
    otherwise a default prefix is used.

    If 'dir' is specified, the file will be created in that directory,
    otherwise a default directory is used.

    If 'text' is specified and true, the file is opened in text
    mode.  Else (the default) the file is opened in binary mode.  On
    some operating systems, this makes no difference.

    The file is readable and writable only by the creating user ID.
    If the operating system uses permission bits to indicate whether a
    file is executable, the file is executable by no one. The file
    descriptor is not inherited by children of this process.

    Caller is responsible for deleting the file when done with it.
    """

    if dir is None:
        dir = gettempdir()

    # Both flag sets include O_CREAT|O_EXCL; text mode merely omits
    # O_BINARY (Windows-only).
    if text:
        flags = _text_openflags
    else:
        flags = _bin_openflags

    return _mkstemp_inner(dir, prefix, suffix, flags)
|  |  | ||||||
|  |  | ||||||
def mkdtemp(suffix="", prefix=template, dir=None):
    """User-callable function to create and return a unique temporary
    directory.  The return value is the pathname of the directory.

    Arguments are as for mkstemp, except that the 'text' argument is
    not accepted.

    The directory is readable, writable, and searchable only by the
    creating user.

    Caller is responsible for deleting the directory when done with it.
    """

    if dir is None:
        dir = gettempdir()

    names = _get_candidate_names()

    for seq in range(TMP_MAX):
        name = next(names)
        file = _os.path.join(dir, prefix + name + suffix)
        try:
            # mkdir is atomic: an existing directory raises rather than
            # being reused.  0o700 keeps it private to the creator.
            _os.mkdir(file, 0o700)
            return file
        except FileExistsError:
            continue    # try again
        except PermissionError:
            # This exception is thrown when a directory with the chosen name
            # already exists on windows.
            if (_os.name == 'nt' and _os.path.isdir(dir) and
                _os.access(dir, _os.W_OK)):
                continue
            else:
                raise

    raise FileExistsError(_errno.EEXIST,
                          "No usable temporary directory name found")
|  |  | ||||||
def mktemp(suffix="", prefix=template, dir=None):
    """User-callable function to return a unique temporary file name.  The
    file is not created.

    Arguments are as for mkstemp, except that the 'text' argument is
    not accepted.

    This function is unsafe and should not be used.  The file name
    refers to a file that did not exist at some point, but by the time
    you get around to creating it, someone else may have beaten you to
    the punch.
    """

##    from warnings import warn as _warn
##    _warn("mktemp is a potential security risk to your program",
##          RuntimeWarning, stacklevel=2)

    if dir is None:
        dir = gettempdir()

    names = _get_candidate_names()
    for seq in range(TMP_MAX):
        name = next(names)
        file = _os.path.join(dir, prefix + name + suffix)
        # Existence check only -- inherently racy; see docstring.
        if not _exists(file):
            return file

    raise FileExistsError(_errno.EEXIST,
                          "No usable temporary filename found")
|  |  | ||||||
|  |  | ||||||
class _TemporaryFileCloser:
    """A separate object allowing proper closing of a temporary file's
    underlying file object, without adding a __del__ method to the
    temporary file."""

    file = None  # Set here since __del__ checks it
    close_called = False  # guards against double close/unlink

    def __init__(self, file, name, delete=True):
        self.file = file    # underlying file object
        self.name = name    # path to unlink when delete is true
        self.delete = delete

    # NT provides delete-on-close as a primitive, so we don't need
    # the wrapper to do anything special.  We still use it so that
    # file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.
    if _os.name != 'nt':
        # Cache the unlinker so we don't get spurious errors at
        # shutdown when the module-level "os" is None'd out.  Note
        # that this must be referenced as self.unlink, because the
        # name TemporaryFileWrapper may also get None'd out before
        # __del__ is called.

        def close(self, unlink=_os.unlink):
            # The default-arg binding of unlink keeps it reachable at
            # interpreter shutdown (see comment above).
            if not self.close_called and self.file is not None:
                self.close_called = True
                try:
                    self.file.close()
                finally:
                    if self.delete:
                        unlink(self.name)

        # Need to ensure the file is deleted on __del__
        def __del__(self):
            self.close()

    else:
        def close(self):
            # On NT the OS deletes the file on close (O_TEMPORARY),
            # so no unlink is needed here.
            if not self.close_called:
                self.close_called = True
                self.file.close()
|  |  | ||||||
|  |  | ||||||
class _TemporaryFileWrapper:
    """Temporary file wrapper

    This class provides a wrapper around files opened for
    temporary use.  In particular, it seeks to automatically
    remove the file when it is no longer needed.
    """

    def __init__(self, file, name, delete=True):
        self.file = file
        self.name = name
        self.delete = delete
        # The closer owns close/unlink responsibility (see
        # _TemporaryFileCloser); it may outlive this wrapper.
        self._closer = _TemporaryFileCloser(file, name, delete)

    def __getattr__(self, name):
        # Attribute lookups are delegated to the underlying file
        # and cached for non-numeric results
        # (i.e. methods are cached, closed and friends are not)
        file = self.__dict__['file']
        a = getattr(file, name)
        if hasattr(a, '__call__'):
            func = a
            @_functools.wraps(func)
            def func_wrapper(*args, **kwargs):
                return func(*args, **kwargs)
            # Avoid closing the file as long as the wrapper is alive,
            # see issue #18879.
            func_wrapper._closer = self._closer
            a = func_wrapper
        if not isinstance(a, int):
            setattr(self, name, a)
        return a

    # The underlying __enter__ method returns the wrong object
    # (self.file) so override it to return the wrapper
    def __enter__(self):
        self.file.__enter__()
        return self

    # Need to trap __exit__ as well to ensure the file gets
    # deleted when used in a with statement
    def __exit__(self, exc, value, tb):
        result = self.file.__exit__(exc, value, tb)
        self.close()
        return result

    def close(self):
        """
        Close the temporary file, possibly deleting it.
        """
        self._closer.close()

    # iter() doesn't use __getattr__ to find the __iter__ method
    def __iter__(self):
        # Don't return iter(self.file), but yield from it to avoid closing
        # file as long as it's being used as iterator (see issue #23700).  We
        # can't use 'yield from' here because iter(file) returns the file
        # object itself, which has a close method, and thus the file would get
        # closed when the generator is finalized, due to PEP380 semantics.
        for line in self.file:
            yield line
|  |  | ||||||
|  |  | ||||||
def NamedTemporaryFile(mode='w+b', buffering=-1, encoding=None,
                       newline=None, suffix="", prefix=template,
                       dir=None, delete=True):
    """Create and return a temporary file.
    Arguments:
    'prefix', 'suffix', 'dir' -- as for mkstemp.
    'mode' -- the mode argument to io.open (default "w+b").
    'buffering' -- the buffer size argument to io.open (default -1).
    'encoding' -- the encoding argument to io.open (default None)
    'newline' -- the newline argument to io.open (default None)
    'delete' -- whether the file is deleted on close (default True).
    The file is created as mkstemp() would do it.

    Returns an object with a file-like interface; the name of the file
    is accessible as file.name.  The file will be automatically deleted
    when it is closed unless the 'delete' argument is set to False.
    """

    if dir is None:
        dir = gettempdir()

    flags = _bin_openflags

    # Setting O_TEMPORARY in the flags causes the OS to delete
    # the file when it is closed.  This is only supported by Windows.
    if _os.name == 'nt' and delete:
        flags |= _os.O_TEMPORARY

    (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
    try:
        file = _io.open(fd, mode, buffering=buffering,
                        newline=newline, encoding=encoding)

        return _TemporaryFileWrapper(file, name, delete)
    except Exception:
        # io.open failed: close the raw fd ourselves so it doesn't leak,
        # then re-raise.
        _os.close(fd)
        raise
|  |  | ||||||
if _os.name != 'posix' or _os.sys.platform == 'cygwin':
    # On non-POSIX and Cygwin systems, assume that we cannot unlink a file
    # while it is open.
    TemporaryFile = NamedTemporaryFile

else:
    def TemporaryFile(mode='w+b', buffering=-1, encoding=None,
                      newline=None, suffix="", prefix=template,
                      dir=None):
        """Create and return a temporary file.
        Arguments:
        'prefix', 'suffix', 'dir' -- as for mkstemp.
        'mode' -- the mode argument to io.open (default "w+b").
        'buffering' -- the buffer size argument to io.open (default -1).
        'encoding' -- the encoding argument to io.open (default None)
        'newline' -- the newline argument to io.open (default None)
        The file is created as mkstemp() would do it.

        Returns an object with a file-like interface.  The file has no
        name, and will cease to exist when it is closed.
        """

        if dir is None:
            dir = gettempdir()

        flags = _bin_openflags

        (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
        try:
            # Unlink immediately: on POSIX the open fd keeps the data
            # alive, and the file vanishes when the fd is closed.
            _os.unlink(name)
            return _io.open(fd, mode, buffering=buffering,
                            newline=newline, encoding=encoding)
        except:
            _os.close(fd)
            raise
|  |  | ||||||
class SpooledTemporaryFile:
    """Temporary file wrapper, specialized to switch from BytesIO
    or StringIO to a real file when it exceeds a certain size or
    when a fileno is needed.
    """
    _rolled = False  # class-level default; also set per-instance in __init__

    def __init__(self, max_size=0, mode='w+b', buffering=-1,
                 encoding=None, newline=None,
                 suffix="", prefix=template, dir=None):
        if 'b' in mode:
            self._file = _io.BytesIO()
        else:
            # Setting newline="\n" avoids newline translation;
            # this is important because otherwise on Windows we'd
            # get double newline translation upon rollover().
            self._file = _io.StringIO(newline="\n")
        self._max_size = max_size
        self._rolled = False
        # Saved so rollover() can build an equivalent TemporaryFile.
        self._TemporaryFileArgs = {'mode': mode, 'buffering': buffering,
                                   'suffix': suffix, 'prefix': prefix,
                                   'encoding': encoding, 'newline': newline,
                                   'dir': dir}

    def _check(self, file):
        # Roll over to a real file once the in-memory buffer exceeds
        # max_size (0 means "never roll over on size").
        if self._rolled: return
        max_size = self._max_size
        if max_size and file.tell() > max_size:
            self.rollover()

    def rollover(self):
        # Copy the in-memory contents into a real TemporaryFile,
        # preserving the current position.  Idempotent.
        if self._rolled: return
        file = self._file
        newfile = self._file = TemporaryFile(**self._TemporaryFileArgs)
        del self._TemporaryFileArgs

        newfile.write(file.getvalue())
        newfile.seek(file.tell(), 0)

        self._rolled = True

    # The method caching trick from NamedTemporaryFile
    # won't work here, because _file may change from a
    # BytesIO/StringIO instance to a real file. So we list
    # all the methods directly.

    # Context management protocol
    def __enter__(self):
        if self._file.closed:
            raise ValueError("Cannot enter context with closed file")
        return self

    def __exit__(self, exc, value, tb):
        self._file.close()

    # file protocol
    def __iter__(self):
        return self._file.__iter__()

    def close(self):
        self._file.close()

    @property
    def closed(self):
        return self._file.closed

    @property
    def encoding(self):
        try:
            return self._file.encoding
        except AttributeError:
            # Not yet rolled over: report the encoding a rollover
            # would use (text mode only).
            if 'b' in self._TemporaryFileArgs['mode']:
                raise
            return self._TemporaryFileArgs['encoding']

    def fileno(self):
        # A real OS-level fd requires a real file, so force rollover.
        self.rollover()
        return self._file.fileno()

    def flush(self):
        self._file.flush()

    def isatty(self):
        return self._file.isatty()

    @property
    def mode(self):
        try:
            return self._file.mode
        except AttributeError:
            return self._TemporaryFileArgs['mode']

    @property
    def name(self):
        # None until rollover gives the data an actual file name.
        try:
            return self._file.name
        except AttributeError:
            return None

    @property
    def newlines(self):
        try:
            return self._file.newlines
        except AttributeError:
            if 'b' in self._TemporaryFileArgs['mode']:
                raise
            return self._TemporaryFileArgs['newline']

    def read(self, *args):
        return self._file.read(*args)

    def readline(self, *args):
        return self._file.readline(*args)

    def readlines(self, *args):
        return self._file.readlines(*args)

    def seek(self, *args):
        # Note: returns None, unlike io.IOBase.seek.
        self._file.seek(*args)

    @property
    def softspace(self):
        # NOTE(review): 'softspace' is a Python 2 file attribute; io
        # objects here do not define it, so this presumably raises
        # AttributeError -- looks vestigial, confirm before relying on it.
        return self._file.softspace

    def tell(self):
        return self._file.tell()

    def truncate(self, size=None):
        # Note: returns None, unlike io.IOBase.truncate.
        if size is None:
            self._file.truncate()
        else:
            # Truncating beyond max_size must land in a real file.
            if size > self._max_size:
                self.rollover()
            self._file.truncate(size)

    def write(self, s):
        file = self._file
        rv = file.write(s)
        self._check(file)
        return rv

    def writelines(self, iterable):
        file = self._file
        rv = file.writelines(iterable)
        self._check(file)
        return rv
|  |  | ||||||
|  |  | ||||||
|  | class TemporaryDirectory(object): | ||||||
|  |     """Create and return a temporary directory.  This has the same | ||||||
|  |     behavior as mkdtemp but can be used as a context manager.  For | ||||||
|  |     example: | ||||||
|  |  | ||||||
|  |         with TemporaryDirectory() as tmpdir: | ||||||
|  |             ... | ||||||
|  |  | ||||||
|  |     Upon exiting the context, the directory and everything contained | ||||||
|  |     in it are removed. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__(self, suffix="", prefix=template, dir=None): | ||||||
|  |         self.name = mkdtemp(suffix, prefix, dir) | ||||||
|  |         self._finalizer = _weakref.finalize( | ||||||
|  |             self, self._cleanup, self.name, | ||||||
|  |             warn_message="Implicitly cleaning up {!r}".format(self)) | ||||||
|  |  | ||||||
|  |     @classmethod | ||||||
|  |     def _cleanup(cls, name, warn_message): | ||||||
|  |         _shutil.rmtree(name) | ||||||
|  |         _warnings.warn(warn_message, ResourceWarning) | ||||||
|  |  | ||||||
|  |  | ||||||
|  |     def __repr__(self): | ||||||
|  |         return "<{} {!r}>".format(self.__class__.__name__, self.name) | ||||||
|  |  | ||||||
|  |     def __enter__(self): | ||||||
|  |         return self.name | ||||||
|  |  | ||||||
|  |     def __exit__(self, exc, value, tb): | ||||||
|  |         self.cleanup() | ||||||
|  |  | ||||||
|  |     def cleanup(self): | ||||||
|  |         if self._finalizer.detach(): | ||||||
|  |             _shutil.rmtree(self.name) | ||||||
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/token.py |  | ||||||
							
								
								
									
										140
									
								
								v1/flask/lib/python3.4/token.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										140
									
								
								v1/flask/lib/python3.4/token.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,140 @@ | |||||||
|  | """Token constants (from "token.h").""" | ||||||
|  |  | ||||||
|  | __all__ = ['tok_name', 'ISTERMINAL', 'ISNONTERMINAL', 'ISEOF'] | ||||||
|  |  | ||||||
|  | #  This file is automatically generated; please don't muck it up! | ||||||
|  | # | ||||||
|  | #  To update the symbols in this file, 'cd' to the top directory of | ||||||
|  | #  the python source tree after building the interpreter and run: | ||||||
|  | # | ||||||
|  | #    ./python Lib/token.py | ||||||
|  |  | ||||||
|  | #--start constants-- | ||||||
|  | ENDMARKER = 0 | ||||||
|  | NAME = 1 | ||||||
|  | NUMBER = 2 | ||||||
|  | STRING = 3 | ||||||
|  | NEWLINE = 4 | ||||||
|  | INDENT = 5 | ||||||
|  | DEDENT = 6 | ||||||
|  | LPAR = 7 | ||||||
|  | RPAR = 8 | ||||||
|  | LSQB = 9 | ||||||
|  | RSQB = 10 | ||||||
|  | COLON = 11 | ||||||
|  | COMMA = 12 | ||||||
|  | SEMI = 13 | ||||||
|  | PLUS = 14 | ||||||
|  | MINUS = 15 | ||||||
|  | STAR = 16 | ||||||
|  | SLASH = 17 | ||||||
|  | VBAR = 18 | ||||||
|  | AMPER = 19 | ||||||
|  | LESS = 20 | ||||||
|  | GREATER = 21 | ||||||
|  | EQUAL = 22 | ||||||
|  | DOT = 23 | ||||||
|  | PERCENT = 24 | ||||||
|  | LBRACE = 25 | ||||||
|  | RBRACE = 26 | ||||||
|  | EQEQUAL = 27 | ||||||
|  | NOTEQUAL = 28 | ||||||
|  | LESSEQUAL = 29 | ||||||
|  | GREATEREQUAL = 30 | ||||||
|  | TILDE = 31 | ||||||
|  | CIRCUMFLEX = 32 | ||||||
|  | LEFTSHIFT = 33 | ||||||
|  | RIGHTSHIFT = 34 | ||||||
|  | DOUBLESTAR = 35 | ||||||
|  | PLUSEQUAL = 36 | ||||||
|  | MINEQUAL = 37 | ||||||
|  | STAREQUAL = 38 | ||||||
|  | SLASHEQUAL = 39 | ||||||
|  | PERCENTEQUAL = 40 | ||||||
|  | AMPEREQUAL = 41 | ||||||
|  | VBAREQUAL = 42 | ||||||
|  | CIRCUMFLEXEQUAL = 43 | ||||||
|  | LEFTSHIFTEQUAL = 44 | ||||||
|  | RIGHTSHIFTEQUAL = 45 | ||||||
|  | DOUBLESTAREQUAL = 46 | ||||||
|  | DOUBLESLASH = 47 | ||||||
|  | DOUBLESLASHEQUAL = 48 | ||||||
|  | AT = 49 | ||||||
|  | RARROW = 50 | ||||||
|  | ELLIPSIS = 51 | ||||||
|  | OP = 52 | ||||||
|  | ERRORTOKEN = 53 | ||||||
|  | N_TOKENS = 54 | ||||||
|  | NT_OFFSET = 256 | ||||||
|  | #--end constants-- | ||||||
|  |  | ||||||
|  | tok_name = {value: name | ||||||
|  |             for name, value in globals().items() | ||||||
|  |             if isinstance(value, int) and not name.startswith('_')} | ||||||
|  | __all__.extend(tok_name.values()) | ||||||
|  |  | ||||||
|  | def ISTERMINAL(x): | ||||||
|  |     return x < NT_OFFSET | ||||||
|  |  | ||||||
|  | def ISNONTERMINAL(x): | ||||||
|  |     return x >= NT_OFFSET | ||||||
|  |  | ||||||
|  | def ISEOF(x): | ||||||
|  |     return x == ENDMARKER | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def _main(): | ||||||
|  |     import re | ||||||
|  |     import sys | ||||||
|  |     args = sys.argv[1:] | ||||||
|  |     inFileName = args and args[0] or "Include/token.h" | ||||||
|  |     outFileName = "Lib/token.py" | ||||||
|  |     if len(args) > 1: | ||||||
|  |         outFileName = args[1] | ||||||
|  |     try: | ||||||
|  |         fp = open(inFileName) | ||||||
|  |     except OSError as err: | ||||||
|  |         sys.stdout.write("I/O error: %s\n" % str(err)) | ||||||
|  |         sys.exit(1) | ||||||
|  |     lines = fp.read().split("\n") | ||||||
|  |     fp.close() | ||||||
|  |     prog = re.compile( | ||||||
|  |         "#define[ \t][ \t]*([A-Z0-9][A-Z0-9_]*)[ \t][ \t]*([0-9][0-9]*)", | ||||||
|  |         re.IGNORECASE) | ||||||
|  |     tokens = {} | ||||||
|  |     for line in lines: | ||||||
|  |         match = prog.match(line) | ||||||
|  |         if match: | ||||||
|  |             name, val = match.group(1, 2) | ||||||
|  |             val = int(val) | ||||||
|  |             tokens[val] = name          # reverse so we can sort them... | ||||||
|  |     keys = sorted(tokens.keys()) | ||||||
|  |     # load the output skeleton from the target: | ||||||
|  |     try: | ||||||
|  |         fp = open(outFileName) | ||||||
|  |     except OSError as err: | ||||||
|  |         sys.stderr.write("I/O error: %s\n" % str(err)) | ||||||
|  |         sys.exit(2) | ||||||
|  |     format = fp.read().split("\n") | ||||||
|  |     fp.close() | ||||||
|  |     try: | ||||||
|  |         start = format.index("#--start constants--") + 1 | ||||||
|  |         end = format.index("#--end constants--") | ||||||
|  |     except ValueError: | ||||||
|  |         sys.stderr.write("target does not contain format markers") | ||||||
|  |         sys.exit(3) | ||||||
|  |     lines = [] | ||||||
|  |     for val in keys: | ||||||
|  |         lines.append("%s = %d" % (tokens[val], val)) | ||||||
|  |     format[start:end] = lines | ||||||
|  |     try: | ||||||
|  |         fp = open(outFileName, 'w') | ||||||
|  |     except OSError as err: | ||||||
|  |         sys.stderr.write("I/O error: %s\n" % str(err)) | ||||||
|  |         sys.exit(4) | ||||||
|  |     fp.write("\n".join(format)) | ||||||
|  |     fp.close() | ||||||
|  |  | ||||||
|  |  | ||||||
|  | if __name__ == "__main__": | ||||||
|  |     _main() | ||||||
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/tokenize.py |  | ||||||
							
								
								
									
										712
									
								
								v1/flask/lib/python3.4/tokenize.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										712
									
								
								v1/flask/lib/python3.4/tokenize.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,712 @@ | |||||||
|  | """Tokenization help for Python programs. | ||||||
|  |  | ||||||
|  | tokenize(readline) is a generator that breaks a stream of bytes into | ||||||
|  | Python tokens.  It decodes the bytes according to PEP-0263 for | ||||||
|  | determining source file encoding. | ||||||
|  |  | ||||||
|  | It accepts a readline-like method which is called repeatedly to get the | ||||||
|  | next line of input (or b"" for EOF).  It generates 5-tuples with these | ||||||
|  | members: | ||||||
|  |  | ||||||
|  |     the token type (see token.py) | ||||||
|  |     the token (a string) | ||||||
|  |     the starting (row, column) indices of the token (a 2-tuple of ints) | ||||||
|  |     the ending (row, column) indices of the token (a 2-tuple of ints) | ||||||
|  |     the original line (string) | ||||||
|  |  | ||||||
|  | It is designed to match the working of the Python tokenizer exactly, except | ||||||
|  | that it produces COMMENT tokens for comments and gives type OP for all | ||||||
|  | operators.  Additionally, all token lists start with an ENCODING token | ||||||
|  | which tells you which encoding was used to decode the bytes stream. | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | __author__ = 'Ka-Ping Yee <ping@lfw.org>' | ||||||
|  | __credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, ' | ||||||
|  |                'Skip Montanaro, Raymond Hettinger, Trent Nelson, ' | ||||||
|  |                'Michael Foord') | ||||||
|  | from builtins import open as _builtin_open | ||||||
|  | from codecs import lookup, BOM_UTF8 | ||||||
|  | import collections | ||||||
|  | from io import TextIOWrapper | ||||||
|  | from itertools import chain | ||||||
|  | import re | ||||||
|  | import sys | ||||||
|  | from token import * | ||||||
|  |  | ||||||
|  | cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII) | ||||||
|  | blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII) | ||||||
|  |  | ||||||
|  | import token | ||||||
|  | __all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding", | ||||||
|  |                            "NL", "untokenize", "ENCODING", "TokenInfo"] | ||||||
|  | del token | ||||||
|  |  | ||||||
|  | COMMENT = N_TOKENS | ||||||
|  | tok_name[COMMENT] = 'COMMENT' | ||||||
|  | NL = N_TOKENS + 1 | ||||||
|  | tok_name[NL] = 'NL' | ||||||
|  | ENCODING = N_TOKENS + 2 | ||||||
|  | tok_name[ENCODING] = 'ENCODING' | ||||||
|  | N_TOKENS += 3 | ||||||
|  | EXACT_TOKEN_TYPES = { | ||||||
|  |     '(':   LPAR, | ||||||
|  |     ')':   RPAR, | ||||||
|  |     '[':   LSQB, | ||||||
|  |     ']':   RSQB, | ||||||
|  |     ':':   COLON, | ||||||
|  |     ',':   COMMA, | ||||||
|  |     ';':   SEMI, | ||||||
|  |     '+':   PLUS, | ||||||
|  |     '-':   MINUS, | ||||||
|  |     '*':   STAR, | ||||||
|  |     '/':   SLASH, | ||||||
|  |     '|':   VBAR, | ||||||
|  |     '&':   AMPER, | ||||||
|  |     '<':   LESS, | ||||||
|  |     '>':   GREATER, | ||||||
|  |     '=':   EQUAL, | ||||||
|  |     '.':   DOT, | ||||||
|  |     '%':   PERCENT, | ||||||
|  |     '{':   LBRACE, | ||||||
|  |     '}':   RBRACE, | ||||||
|  |     '==':  EQEQUAL, | ||||||
|  |     '!=':  NOTEQUAL, | ||||||
|  |     '<=':  LESSEQUAL, | ||||||
|  |     '>=':  GREATEREQUAL, | ||||||
|  |     '~':   TILDE, | ||||||
|  |     '^':   CIRCUMFLEX, | ||||||
|  |     '<<':  LEFTSHIFT, | ||||||
|  |     '>>':  RIGHTSHIFT, | ||||||
|  |     '**':  DOUBLESTAR, | ||||||
|  |     '+=':  PLUSEQUAL, | ||||||
|  |     '-=':  MINEQUAL, | ||||||
|  |     '*=':  STAREQUAL, | ||||||
|  |     '/=':  SLASHEQUAL, | ||||||
|  |     '%=':  PERCENTEQUAL, | ||||||
|  |     '&=':  AMPEREQUAL, | ||||||
|  |     '|=':  VBAREQUAL, | ||||||
|  |     '^=': CIRCUMFLEXEQUAL, | ||||||
|  |     '<<=': LEFTSHIFTEQUAL, | ||||||
|  |     '>>=': RIGHTSHIFTEQUAL, | ||||||
|  |     '**=': DOUBLESTAREQUAL, | ||||||
|  |     '//':  DOUBLESLASH, | ||||||
|  |     '//=': DOUBLESLASHEQUAL, | ||||||
|  |     '@':   AT | ||||||
|  | } | ||||||
|  |  | ||||||
|  | class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')): | ||||||
|  |     def __repr__(self): | ||||||
|  |         annotated_type = '%d (%s)' % (self.type, tok_name[self.type]) | ||||||
|  |         return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' % | ||||||
|  |                 self._replace(type=annotated_type)) | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def exact_type(self): | ||||||
|  |         if self.type == OP and self.string in EXACT_TOKEN_TYPES: | ||||||
|  |             return EXACT_TOKEN_TYPES[self.string] | ||||||
|  |         else: | ||||||
|  |             return self.type | ||||||
|  |  | ||||||
|  | def group(*choices): return '(' + '|'.join(choices) + ')' | ||||||
|  | def any(*choices): return group(*choices) + '*' | ||||||
|  | def maybe(*choices): return group(*choices) + '?' | ||||||
|  |  | ||||||
|  | # Note: we use unicode matching for names ("\w") but ascii matching for | ||||||
|  | # number literals. | ||||||
|  | Whitespace = r'[ \f\t]*' | ||||||
|  | Comment = r'#[^\r\n]*' | ||||||
|  | Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment) | ||||||
|  | Name = r'\w+' | ||||||
|  |  | ||||||
|  | Hexnumber = r'0[xX][0-9a-fA-F]+' | ||||||
|  | Binnumber = r'0[bB][01]+' | ||||||
|  | Octnumber = r'0[oO][0-7]+' | ||||||
|  | Decnumber = r'(?:0+|[1-9][0-9]*)' | ||||||
|  | Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber) | ||||||
|  | Exponent = r'[eE][-+]?[0-9]+' | ||||||
|  | Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent) | ||||||
|  | Expfloat = r'[0-9]+' + Exponent | ||||||
|  | Floatnumber = group(Pointfloat, Expfloat) | ||||||
|  | Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]') | ||||||
|  | Number = group(Imagnumber, Floatnumber, Intnumber) | ||||||
|  |  | ||||||
|  | StringPrefix = r'(?:[bB][rR]?|[rR][bB]?|[uU])?' | ||||||
|  |  | ||||||
|  | # Tail end of ' string. | ||||||
|  | Single = r"[^'\\]*(?:\\.[^'\\]*)*'" | ||||||
|  | # Tail end of " string. | ||||||
|  | Double = r'[^"\\]*(?:\\.[^"\\]*)*"' | ||||||
|  | # Tail end of ''' string. | ||||||
|  | Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" | ||||||
|  | # Tail end of """ string. | ||||||
|  | Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' | ||||||
|  | Triple = group(StringPrefix + "'''", StringPrefix + '"""') | ||||||
|  | # Single-line ' or " string. | ||||||
|  | String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'", | ||||||
|  |                StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"') | ||||||
|  |  | ||||||
|  | # Because of leftmost-then-longest match semantics, be sure to put the | ||||||
|  | # longest operators first (e.g., if = came before ==, == would get | ||||||
|  | # recognized as two instances of =). | ||||||
|  | Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=", | ||||||
|  |                  r"//=?", r"->", | ||||||
|  |                  r"[+\-*/%&|^=<>]=?", | ||||||
|  |                  r"~") | ||||||
|  |  | ||||||
|  | Bracket = '[][(){}]' | ||||||
|  | Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]') | ||||||
|  | Funny = group(Operator, Bracket, Special) | ||||||
|  |  | ||||||
|  | PlainToken = group(Number, Funny, String, Name) | ||||||
|  | Token = Ignore + PlainToken | ||||||
|  |  | ||||||
|  | # First (or only) line of ' or " string. | ||||||
|  | ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" + | ||||||
|  |                 group("'", r'\\\r?\n'), | ||||||
|  |                 StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' + | ||||||
|  |                 group('"', r'\\\r?\n')) | ||||||
|  | PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple) | ||||||
|  | PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) | ||||||
|  |  | ||||||
|  | def _compile(expr): | ||||||
|  |     return re.compile(expr, re.UNICODE) | ||||||
|  |  | ||||||
|  | endpats = {"'": Single, '"': Double, | ||||||
|  |            "'''": Single3, '"""': Double3, | ||||||
|  |            "r'''": Single3, 'r"""': Double3, | ||||||
|  |            "b'''": Single3, 'b"""': Double3, | ||||||
|  |            "R'''": Single3, 'R"""': Double3, | ||||||
|  |            "B'''": Single3, 'B"""': Double3, | ||||||
|  |            "br'''": Single3, 'br"""': Double3, | ||||||
|  |            "bR'''": Single3, 'bR"""': Double3, | ||||||
|  |            "Br'''": Single3, 'Br"""': Double3, | ||||||
|  |            "BR'''": Single3, 'BR"""': Double3, | ||||||
|  |            "rb'''": Single3, 'rb"""': Double3, | ||||||
|  |            "Rb'''": Single3, 'Rb"""': Double3, | ||||||
|  |            "rB'''": Single3, 'rB"""': Double3, | ||||||
|  |            "RB'''": Single3, 'RB"""': Double3, | ||||||
|  |            "u'''": Single3, 'u"""': Double3, | ||||||
|  |            "R'''": Single3, 'R"""': Double3, | ||||||
|  |            "U'''": Single3, 'U"""': Double3, | ||||||
|  |            'r': None, 'R': None, 'b': None, 'B': None, | ||||||
|  |            'u': None, 'U': None} | ||||||
|  |  | ||||||
|  | triple_quoted = {} | ||||||
|  | for t in ("'''", '"""', | ||||||
|  |           "r'''", 'r"""', "R'''", 'R"""', | ||||||
|  |           "b'''", 'b"""', "B'''", 'B"""', | ||||||
|  |           "br'''", 'br"""', "Br'''", 'Br"""', | ||||||
|  |           "bR'''", 'bR"""', "BR'''", 'BR"""', | ||||||
|  |           "rb'''", 'rb"""', "rB'''", 'rB"""', | ||||||
|  |           "Rb'''", 'Rb"""', "RB'''", 'RB"""', | ||||||
|  |           "u'''", 'u"""', "U'''", 'U"""', | ||||||
|  |           ): | ||||||
|  |     triple_quoted[t] = t | ||||||
|  | single_quoted = {} | ||||||
|  | for t in ("'", '"', | ||||||
|  |           "r'", 'r"', "R'", 'R"', | ||||||
|  |           "b'", 'b"', "B'", 'B"', | ||||||
|  |           "br'", 'br"', "Br'", 'Br"', | ||||||
|  |           "bR'", 'bR"', "BR'", 'BR"' , | ||||||
|  |           "rb'", 'rb"', "rB'", 'rB"', | ||||||
|  |           "Rb'", 'Rb"', "RB'", 'RB"' , | ||||||
|  |           "u'", 'u"', "U'", 'U"', | ||||||
|  |           ): | ||||||
|  |     single_quoted[t] = t | ||||||
|  |  | ||||||
|  | tabsize = 8 | ||||||
|  |  | ||||||
|  | class TokenError(Exception): pass | ||||||
|  |  | ||||||
|  | class StopTokenizing(Exception): pass | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class Untokenizer: | ||||||
|  |  | ||||||
|  |     def __init__(self): | ||||||
|  |         self.tokens = [] | ||||||
|  |         self.prev_row = 1 | ||||||
|  |         self.prev_col = 0 | ||||||
|  |         self.encoding = None | ||||||
|  |  | ||||||
|  |     def add_whitespace(self, start): | ||||||
|  |         row, col = start | ||||||
|  |         if row < self.prev_row or row == self.prev_row and col < self.prev_col: | ||||||
|  |             raise ValueError("start ({},{}) precedes previous end ({},{})" | ||||||
|  |                              .format(row, col, self.prev_row, self.prev_col)) | ||||||
|  |         row_offset = row - self.prev_row | ||||||
|  |         if row_offset: | ||||||
|  |             self.tokens.append("\\\n" * row_offset) | ||||||
|  |             self.prev_col = 0 | ||||||
|  |         col_offset = col - self.prev_col | ||||||
|  |         if col_offset: | ||||||
|  |             self.tokens.append(" " * col_offset) | ||||||
|  |  | ||||||
|  |     def untokenize(self, iterable): | ||||||
|  |         it = iter(iterable) | ||||||
|  |         indents = [] | ||||||
|  |         startline = False | ||||||
|  |         for t in it: | ||||||
|  |             if len(t) == 2: | ||||||
|  |                 self.compat(t, it) | ||||||
|  |                 break | ||||||
|  |             tok_type, token, start, end, line = t | ||||||
|  |             if tok_type == ENCODING: | ||||||
|  |                 self.encoding = token | ||||||
|  |                 continue | ||||||
|  |             if tok_type == ENDMARKER: | ||||||
|  |                 break | ||||||
|  |             if tok_type == INDENT: | ||||||
|  |                 indents.append(token) | ||||||
|  |                 continue | ||||||
|  |             elif tok_type == DEDENT: | ||||||
|  |                 indents.pop() | ||||||
|  |                 self.prev_row, self.prev_col = end | ||||||
|  |                 continue | ||||||
|  |             elif tok_type in (NEWLINE, NL): | ||||||
|  |                 startline = True | ||||||
|  |             elif startline and indents: | ||||||
|  |                 indent = indents[-1] | ||||||
|  |                 if start[1] >= len(indent): | ||||||
|  |                     self.tokens.append(indent) | ||||||
|  |                     self.prev_col = len(indent) | ||||||
|  |                 startline = False | ||||||
|  |             self.add_whitespace(start) | ||||||
|  |             self.tokens.append(token) | ||||||
|  |             self.prev_row, self.prev_col = end | ||||||
|  |             if tok_type in (NEWLINE, NL): | ||||||
|  |                 self.prev_row += 1 | ||||||
|  |                 self.prev_col = 0 | ||||||
|  |         return "".join(self.tokens) | ||||||
|  |  | ||||||
|  |     def compat(self, token, iterable): | ||||||
|  |         indents = [] | ||||||
|  |         toks_append = self.tokens.append | ||||||
|  |         startline = token[0] in (NEWLINE, NL) | ||||||
|  |         prevstring = False | ||||||
|  |  | ||||||
|  |         for tok in chain([token], iterable): | ||||||
|  |             toknum, tokval = tok[:2] | ||||||
|  |             if toknum == ENCODING: | ||||||
|  |                 self.encoding = tokval | ||||||
|  |                 continue | ||||||
|  |  | ||||||
|  |             if toknum in (NAME, NUMBER): | ||||||
|  |                 tokval += ' ' | ||||||
|  |  | ||||||
|  |             # Insert a space between two consecutive strings | ||||||
|  |             if toknum == STRING: | ||||||
|  |                 if prevstring: | ||||||
|  |                     tokval = ' ' + tokval | ||||||
|  |                 prevstring = True | ||||||
|  |             else: | ||||||
|  |                 prevstring = False | ||||||
|  |  | ||||||
|  |             if toknum == INDENT: | ||||||
|  |                 indents.append(tokval) | ||||||
|  |                 continue | ||||||
|  |             elif toknum == DEDENT: | ||||||
|  |                 indents.pop() | ||||||
|  |                 continue | ||||||
|  |             elif toknum in (NEWLINE, NL): | ||||||
|  |                 startline = True | ||||||
|  |             elif startline and indents: | ||||||
|  |                 toks_append(indents[-1]) | ||||||
|  |                 startline = False | ||||||
|  |             toks_append(tokval) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def untokenize(iterable): | ||||||
|  |     """Transform tokens back into Python source code. | ||||||
|  |     It returns a bytes object, encoded using the ENCODING | ||||||
|  |     token, which is the first token sequence output by tokenize. | ||||||
|  |  | ||||||
|  |     Each element returned by the iterable must be a token sequence | ||||||
|  |     with at least two elements, a token number and token value.  If | ||||||
|  |     only two tokens are passed, the resulting output is poor. | ||||||
|  |  | ||||||
|  |     Round-trip invariant for full input: | ||||||
|  |         Untokenized source will match input source exactly | ||||||
|  |  | ||||||
|  |     Round-trip invariant for limited intput: | ||||||
|  |         # Output bytes will tokenize the back to the input | ||||||
|  |         t1 = [tok[:2] for tok in tokenize(f.readline)] | ||||||
|  |         newcode = untokenize(t1) | ||||||
|  |         readline = BytesIO(newcode).readline | ||||||
|  |         t2 = [tok[:2] for tok in tokenize(readline)] | ||||||
|  |         assert t1 == t2 | ||||||
|  |     """ | ||||||
|  |     ut = Untokenizer() | ||||||
|  |     out = ut.untokenize(iterable) | ||||||
|  |     if ut.encoding is not None: | ||||||
|  |         out = out.encode(ut.encoding) | ||||||
|  |     return out | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def _get_normal_name(orig_enc): | ||||||
|  |     """Imitates get_normal_name in tokenizer.c.""" | ||||||
|  |     # Only care about the first 12 characters. | ||||||
|  |     enc = orig_enc[:12].lower().replace("_", "-") | ||||||
|  |     if enc == "utf-8" or enc.startswith("utf-8-"): | ||||||
|  |         return "utf-8" | ||||||
|  |     if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ | ||||||
|  |        enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): | ||||||
|  |         return "iso-8859-1" | ||||||
|  |     return orig_enc | ||||||
|  |  | ||||||
|  | def detect_encoding(readline): | ||||||
|  |     """ | ||||||
|  |     The detect_encoding() function is used to detect the encoding that should | ||||||
|  |     be used to decode a Python source file.  It requires one argument, readline, | ||||||
|  |     in the same way as the tokenize() generator. | ||||||
|  |  | ||||||
|  |     It will call readline a maximum of twice, and return the encoding used | ||||||
|  |     (as a string) and a list of any lines (left as bytes) it has read in. | ||||||
|  |  | ||||||
|  |     It detects the encoding from the presence of a utf-8 bom or an encoding | ||||||
|  |     cookie as specified in pep-0263.  If both a bom and a cookie are present, | ||||||
|  |     but disagree, a SyntaxError will be raised.  If the encoding cookie is an | ||||||
|  |     invalid charset, raise a SyntaxError.  Note that if a utf-8 bom is found, | ||||||
|  |     'utf-8-sig' is returned. | ||||||
|  |  | ||||||
|  |     If no encoding is specified, then the default of 'utf-8' will be returned. | ||||||
|  |     """ | ||||||
|  |     try: | ||||||
|  |         filename = readline.__self__.name | ||||||
|  |     except AttributeError: | ||||||
|  |         filename = None | ||||||
|  |     bom_found = False | ||||||
|  |     encoding = None | ||||||
|  |     default = 'utf-8' | ||||||
|  |     def read_or_stop(): | ||||||
|  |         try: | ||||||
|  |             return readline() | ||||||
|  |         except StopIteration: | ||||||
|  |             return b'' | ||||||
|  |  | ||||||
|  |     def find_cookie(line): | ||||||
|  |         try: | ||||||
|  |             # Decode as UTF-8. Either the line is an encoding declaration, | ||||||
|  |             # in which case it should be pure ASCII, or it must be UTF-8 | ||||||
|  |             # per default encoding. | ||||||
|  |             line_string = line.decode('utf-8') | ||||||
|  |         except UnicodeDecodeError: | ||||||
|  |             msg = "invalid or missing encoding declaration" | ||||||
|  |             if filename is not None: | ||||||
|  |                 msg = '{} for {!r}'.format(msg, filename) | ||||||
|  |             raise SyntaxError(msg) | ||||||
|  |  | ||||||
|  |         match = cookie_re.match(line_string) | ||||||
|  |         if not match: | ||||||
|  |             return None | ||||||
|  |         encoding = _get_normal_name(match.group(1)) | ||||||
|  |         try: | ||||||
|  |             codec = lookup(encoding) | ||||||
|  |         except LookupError: | ||||||
|  |             # This behaviour mimics the Python interpreter | ||||||
|  |             if filename is None: | ||||||
|  |                 msg = "unknown encoding: " + encoding | ||||||
|  |             else: | ||||||
|  |                 msg = "unknown encoding for {!r}: {}".format(filename, | ||||||
|  |                         encoding) | ||||||
|  |             raise SyntaxError(msg) | ||||||
|  |  | ||||||
|  |         if bom_found: | ||||||
|  |             if encoding != 'utf-8': | ||||||
|  |                 # This behaviour mimics the Python interpreter | ||||||
|  |                 if filename is None: | ||||||
|  |                     msg = 'encoding problem: utf-8' | ||||||
|  |                 else: | ||||||
|  |                     msg = 'encoding problem for {!r}: utf-8'.format(filename) | ||||||
|  |                 raise SyntaxError(msg) | ||||||
|  |             encoding += '-sig' | ||||||
|  |         return encoding | ||||||
|  |  | ||||||
|  |     first = read_or_stop() | ||||||
|  |     if first.startswith(BOM_UTF8): | ||||||
|  |         bom_found = True | ||||||
|  |         first = first[3:] | ||||||
|  |         default = 'utf-8-sig' | ||||||
|  |     if not first: | ||||||
|  |         return default, [] | ||||||
|  |  | ||||||
|  |     encoding = find_cookie(first) | ||||||
|  |     if encoding: | ||||||
|  |         return encoding, [first] | ||||||
|  |     if not blank_re.match(first): | ||||||
|  |         return default, [first] | ||||||
|  |  | ||||||
|  |     second = read_or_stop() | ||||||
|  |     if not second: | ||||||
|  |         return default, [first] | ||||||
|  |  | ||||||
|  |     encoding = find_cookie(second) | ||||||
|  |     if encoding: | ||||||
|  |         return encoding, [first, second] | ||||||
|  |  | ||||||
|  |     return default, [first, second] | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def open(filename): | ||||||
|  |     """Open a file in read only mode using the encoding detected by | ||||||
|  |     detect_encoding(). | ||||||
|  |     """ | ||||||
|  |     buffer = _builtin_open(filename, 'rb') | ||||||
|  |     try: | ||||||
|  |         encoding, lines = detect_encoding(buffer.readline) | ||||||
|  |         buffer.seek(0) | ||||||
|  |         text = TextIOWrapper(buffer, encoding, line_buffering=True) | ||||||
|  |         text.mode = 'r' | ||||||
|  |         return text | ||||||
|  |     except: | ||||||
|  |         buffer.close() | ||||||
|  |         raise | ||||||
|  |  | ||||||
|  |  | ||||||
def tokenize(readline):
    """
    Generate tokens from a bytes-producing *readline* callable.

    The tokenize() generator requires one argument, readline, which must
    be a callable object providing the same interface as the readline()
    method of built-in file objects: each call returns one line of input
    as bytes.  Alternately, readline can be a callable terminating with
    StopIteration, e.g. ``open(myfile, 'rb').__next__``.

    Produces 5-tuples with these members: the token type; the token
    string; a 2-tuple (srow, scol) of ints giving the row and column where
    the token begins; a 2-tuple (erow, ecol) giving where it ends; and the
    logical line the token was found on (continuation lines included).

    The first token sequence will always be an ENCODING token which tells
    you which encoding was used to decode the bytes stream.
    """
    # Imported lazily to avoid problems when the itertools module is not
    # built yet and tokenize is imported.
    from itertools import chain, repeat

    encoding, consumed_lines = detect_encoding(readline)
    # Replay the lines eaten by encoding detection, then the rest of the
    # input, then an endless supply of b"" so the tokenizer sees EOF.
    line_iter = chain(consumed_lines, iter(readline, b""), repeat(b""))
    return _tokenize(line_iter.__next__, encoding)
|  |  | ||||||
|  |  | ||||||
def _tokenize(readline, encoding):
    """Tokenizer core: yield TokenInfo 5-tuples for the lines of *readline*.

    *readline* returns one source line per call.  If *encoding* is not
    None, each line is bytes and is decoded with it, and an ENCODING token
    is emitted first.  An ENDMARKER token is always emitted last.
    """
    # lnum: 1-based line counter; parenlev: () [] {} nesting depth;
    # continued: set when the previous line ended with a backslash.
    lnum = parenlev = continued = 0
    numchars = '0123456789'
    # contstr accumulates the text of a string literal spanning lines;
    # needcont marks a single-quoted string that must continue via '\'.
    contstr, needcont = '', 0
    contline = None
    indents = [0]  # stack of active indentation widths

    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    while True:             # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = b''

        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                # Single-quoted string failed to continue with a backslash:
                # emit what we have as an error token.
                yield TokenInfo(ERRORTOKEN, contstr + line,
                           strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    # Form feed resets the column count.
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break

            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield TokenInfo(COMMENT, comment_token,
                           (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield TokenInfo(NL, line[nl_pos:],
                           (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield TokenInfo((NL, COMMENT)[line[pos] == '#'], line[pos:],
                           (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]
                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)

        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = _compile(PseudoToken).match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                if start == end:
                    continue
                token, initial = line[start:end], line[start]

                if (initial in numchars or                  # ordinary number
                    (initial == '.' and token != '.' and token != '...')):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    # Inside brackets a newline is non-logical (NL).
                    yield TokenInfo(NL if parenlev > 0 else NEWLINE,
                           token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    yield TokenInfo(COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = _compile(endpats[token])
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        endprog = _compile(endpats[initial] or
                                           endpats[token[1]] or
                                           endpats[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)
                elif initial.isidentifier():               # ordinary name
                    yield TokenInfo(NAME, token, spos, epos, line)
                elif initial == '\\':                      # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                yield TokenInfo(ERRORTOKEN, line[pos],
                           (lnum, pos), (lnum, pos+1), line)
                pos += 1

    for indent in indents[1:]:                 # pop remaining indent levels
        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
|  |  | ||||||
|  |  | ||||||
# An undocumented, backwards compatible, API for all the places in the standard
# library that expect to be able to use tokenize with strings
def generate_tokens(readline):
    """Tokenize a str-producing *readline*.

    Like tokenize(), but with no encoding detection and no leading
    ENCODING token.
    """
    return _tokenize(readline, None)
|  |  | ||||||
def main():
    """Command-line entry point: tokenize a file (or stdin) and print tokens."""
    import argparse

    # Helper error handling routines
    def perror(message):
        # Print a bare message to stderr.
        print(message, file=sys.stderr)

    def error(message, filename=None, location=None):
        # Report an error, optionally with file/row/column context, and
        # exit with status 1.
        if location:
            args = (filename,) + location + (message,)
            perror("%s:%d:%d: error: %s" % args)
        elif filename:
            perror("%s: error: %s" % (filename, message))
        else:
            perror("error: %s" % message)
        sys.exit(1)

    # Parse the arguments and options
    parser = argparse.ArgumentParser(prog='python -m tokenize')
    parser.add_argument(dest='filename', nargs='?',
                        metavar='filename.py',
                        help='the file to tokenize; defaults to stdin')
    parser.add_argument('-e', '--exact', dest='exact', action='store_true',
                        help='display token names using the exact type')
    args = parser.parse_args()

    try:
        # Tokenize the input
        if args.filename:
            filename = args.filename
            with _builtin_open(filename, 'rb') as f:
                tokens = list(tokenize(f.readline))
        else:
            # stdin is already text, so use the str-based tokenizer (no
            # encoding detection, no ENCODING token).
            filename = "<stdin>"
            tokens = _tokenize(sys.stdin.readline, None)

        # Output the tokenization
        for token in tokens:
            token_type = token.type
            if args.exact:
                token_type = token.exact_type
            token_range = "%d,%d-%d,%d:" % (token.start + token.end)
            print("%-20s%-15s%-15r" %
                  (token_range, tok_name[token_type], token.string))
    except IndentationError as err:
        line, column = err.args[1][1:3]
        error(err.args[0], filename, (line, column))
    except TokenError as err:
        line, column = err.args[1]
        error(err.args[0], filename, (line, column))
    except SyntaxError as err:
        error(err, filename)
    except OSError as err:
        error(err)
    except KeyboardInterrupt:
        print("interrupted\n")
    except Exception as err:
        perror("unexpected error: %s" % err)
        raise
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/types.py |  | ||||||
							
								
								
									
										161
									
								
								v1/flask/lib/python3.4/types.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										161
									
								
								v1/flask/lib/python3.4/types.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,161 @@ | |||||||
"""
Define names for built-in types that aren't directly accessible as a builtin.
"""
import sys

# Iterators in Python aren't a matter of type but of protocol.  A large
# and changing number of builtin types implement *some* flavor of
# iterator.  Don't check the type!  Use hasattr to check for both
# "__iter__" and "__next__" attributes instead.

# Each type below is obtained by sampling type() on a throwaway example.
def _f(): pass
FunctionType = type(_f)
LambdaType = type(lambda: None)         # Same as FunctionType
CodeType = type(_f.__code__)
MappingProxyType = type(type.__dict__)
SimpleNamespace = type(sys.implementation)

def _g():
    # Any generator function will do for sampling the generator type.
    yield 1
GeneratorType = type(_g())

class _C:
    def _m(self): pass
MethodType = type(_C()._m)              # bound-method type

BuiltinFunctionType = type(len)
BuiltinMethodType = type([].append)     # Same as BuiltinFunctionType

ModuleType = type(sys)

# Raise and catch an exception to obtain a live traceback object, from
# which the traceback and frame types can be sampled.
try:
    raise TypeError
except TypeError:
    tb = sys.exc_info()[2]
    TracebackType = type(tb)
    FrameType = type(tb.tb_frame)
    tb = None; del tb   # break the frame reference cycle

# For Jython, the following two types are identical
GetSetDescriptorType = type(FunctionType.__code__)
MemberDescriptorType = type(FunctionType.__globals__)

del sys, _f, _g, _C,                              # Not for export
|  |  | ||||||
# Provide a PEP 3115 compliant mechanism for class creation
def new_class(name, bases=(), kwds=None, exec_body=None):
    """Create a class object dynamically using the appropriate metaclass."""
    metaclass, namespace, clean_kwds = prepare_class(name, bases, kwds)
    if exec_body is not None:
        # Let the caller populate the (possibly metaclass-supplied) namespace.
        exec_body(namespace)
    return metaclass(name, bases, namespace, **clean_kwds)
|  |  | ||||||
def prepare_class(name, bases=(), kwds=None):
    """Call the __prepare__ method of the appropriate metaclass.

    Returns (metaclass, namespace, kwds) as a 3-tuple

    *metaclass* is the appropriate metaclass
    *namespace* is the prepared class namespace
    *kwds* is an updated copy of the passed in kwds argument with any
    'metaclass' entry removed. If no kwds argument is passed in, this will
    be an empty dict.
    """
    # Copy so we never alter the caller's mapping.
    kwds = {} if kwds is None else dict(kwds)
    if 'metaclass' in kwds:
        meta = kwds.pop('metaclass')
    else:
        meta = type(bases[0]) if bases else type
    if isinstance(meta, type):
        # When meta is a type, determine the most-derived metaclass rather
        # than invoking the initial candidate directly.
        meta = _calculate_meta(meta, bases)
    if hasattr(meta, '__prepare__'):
        namespace = meta.__prepare__(name, bases, **kwds)
    else:
        namespace = {}
    return meta, namespace, kwds
|  |  | ||||||
|  | def _calculate_meta(meta, bases): | ||||||
|  |     """Calculate the most derived metaclass.""" | ||||||
|  |     winner = meta | ||||||
|  |     for base in bases: | ||||||
|  |         base_meta = type(base) | ||||||
|  |         if issubclass(winner, base_meta): | ||||||
|  |             continue | ||||||
|  |         if issubclass(base_meta, winner): | ||||||
|  |             winner = base_meta | ||||||
|  |             continue | ||||||
|  |         # else: | ||||||
|  |         raise TypeError("metaclass conflict: " | ||||||
|  |                         "the metaclass of a derived class " | ||||||
|  |                         "must be a (non-strict) subclass " | ||||||
|  |                         "of the metaclasses of all its bases") | ||||||
|  |     return winner | ||||||
|  |  | ||||||
class DynamicClassAttribute:
    """Route attribute access on a class to __getattr__.

    A descriptor for attributes that behave differently depending on
    whether they are reached through an instance or through the class.
    Instance access works like a normal property, while class-level access
    raises AttributeError so the lookup is re-routed to the owning class's
    __getattr__ method.

    This allows one to have properties active on an instance, and have
    virtual attributes on the class with the same name (see Enum for an
    example).
    """

    def __init__(self, fget=None, fset=None, fdel=None, doc=None):
        self.fget = fget
        self.fset = fset
        self.fdel = fdel
        # Mirror property's docstring handling: an explicit doc wins,
        # otherwise inherit the getter's docstring.
        self.__doc__ = doc or fget.__doc__
        self.overwrite_doc = doc is None
        # Support for abstract methods.
        self.__isabstractmethod__ = bool(getattr(fget, '__isabstractmethod__', False))

    def __get__(self, instance, ownerclass=None):
        if instance is None:
            # Class-level access: abstract descriptors return themselves so
            # the ABC machinery can inspect them; otherwise fall through to
            # the class's __getattr__ via AttributeError.
            if self.__isabstractmethod__:
                return self
            raise AttributeError()
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        return self.fget(instance)

    def __set__(self, instance, value):
        if self.fset is None:
            raise AttributeError("can't set attribute")
        self.fset(instance, value)

    def __delete__(self, instance):
        if self.fdel is None:
            raise AttributeError("can't delete attribute")
        self.fdel(instance)

    def getter(self, fget):
        # Only replace the docstring if it was inherited, not explicit.
        if self.overwrite_doc:
            fdoc = fget.__doc__
        else:
            fdoc = None
        result = type(self)(fget, self.fset, self.fdel, fdoc or self.__doc__)
        result.overwrite_doc = self.overwrite_doc
        return result

    def setter(self, fset):
        result = type(self)(self.fget, fset, self.fdel, self.__doc__)
        result.overwrite_doc = self.overwrite_doc
        return result

    def deleter(self, fdel):
        result = type(self)(self.fget, self.fset, fdel, self.__doc__)
        result.overwrite_doc = self.overwrite_doc
        return result
|  |  | ||||||
|  |  | ||||||
|  | __all__ = [n for n in globals() if n[:1] != '_'] | ||||||
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/warnings.py |  | ||||||
							
								
								
									
										410
									
								
								v1/flask/lib/python3.4/warnings.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										410
									
								
								v1/flask/lib/python3.4/warnings.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,410 @@ | |||||||
|  | """Python part of the warnings subsystem.""" | ||||||
|  |  | ||||||
|  | import sys | ||||||
|  |  | ||||||
# Public API of the warnings module.
__all__ = ["warn", "warn_explicit", "showwarning",
           "formatwarning", "filterwarnings", "simplefilter",
           "resetwarnings", "catch_warnings"]
|  |  | ||||||
|  |  | ||||||
def showwarning(message, category, filename, lineno, file=None, line=None):
    """Hook to write a warning to a file; replace if you like."""
    target = sys.stderr if file is None else file
    if target is None:
        # sys.stderr is None when run with pythonw.exe - warnings get lost
        return
    text = formatwarning(message, category, filename, lineno, line)
    try:
        target.write(text)
    except OSError:
        pass  # the file (probably stderr) is invalid - this warning gets lost
|  |  | ||||||
def formatwarning(message, category, filename, lineno, line=None):
    """Function to format a warning the standard way."""
    import linecache
    if line is None:
        # No source line supplied: try to fetch it from the file.
        line = linecache.getline(filename, lineno)
    result = "%s:%s: %s: %s\n" % (filename, lineno, category.__name__, message)
    if line:
        result += "  %s\n" % line.strip()
    return result
|  |  | ||||||
def filterwarnings(action, message="", category=Warning, module="", lineno=0,
                   append=False):
    """Insert an entry into the list of warnings filters (at the front).

    'action' -- one of "error", "ignore", "always", "default", "module",
                or "once"
    'message' -- a regex that the warning message must match
    'category' -- a class that the warning must be a subclass of
    'module' -- a regex that the module name must match
    'lineno' -- an integer line number, 0 matches all warnings
    'append' -- if true, append to the list of filters
    """
    import re
    # Validate arguments up front (same checks and messages as CPython).
    assert action in ("error", "ignore", "always", "default", "module",
                      "once"), "invalid action: %r" % (action,)
    assert isinstance(message, str), "message must be a string"
    assert isinstance(category, type), "category must be a class"
    assert issubclass(category, Warning), "category must be a Warning subclass"
    assert isinstance(module, str), "module must be a string"
    assert isinstance(lineno, int) and lineno >= 0, \
           "lineno must be an int >= 0"
    new_filter = (action, re.compile(message, re.I), category,
                  re.compile(module), lineno)
    if append:
        filters.append(new_filter)
    else:
        filters.insert(0, new_filter)
    _filters_mutated()
|  |  | ||||||
def simplefilter(action, category=Warning, lineno=0, append=False):
    """Insert a simple entry into the list of warnings filters (at the front).

    A simple filter matches all modules and messages.
    'action' -- one of "error", "ignore", "always", "default", "module",
                or "once"
    'category' -- a class that the warning must be a subclass of
    'lineno' -- an integer line number, 0 matches all warnings
    'append' -- if true, append to the list of filters
    """
    assert action in ("error", "ignore", "always", "default", "module",
                      "once"), "invalid action: %r" % (action,)
    assert isinstance(lineno, int) and lineno >= 0, \
           "lineno must be an int >= 0"
    # None for the message/module slots means "match everything".
    new_filter = (action, None, category, None, lineno)
    if append:
        filters.append(new_filter)
    else:
        filters.insert(0, new_filter)
    _filters_mutated()
|  |  | ||||||
def resetwarnings():
    """Clear the list of warning filters, so that no filters are active."""
    del filters[:]          # clear in place so other references see it
    _filters_mutated()
|  |  | ||||||
class _OptionError(Exception):
    """Exception used by option processing helpers."""
    # Raised by _setoption/_getaction/_getcategory for malformed -W
    # options; caught in _processoptions and reported to stderr.
    pass
|  |  | ||||||
# Helper to process -W options passed via sys.warnoptions
def _processoptions(args):
    """Apply each -W command-line option, reporting bad ones to stderr."""
    for option in args:
        try:
            _setoption(option)
        except _OptionError as err:
            print("Invalid -W option ignored:", err, file=sys.stderr)
|  |  | ||||||
# Helper for _processoptions()
def _setoption(arg):
    """Parse one -W option string and install the matching warnings filter.

    The format is action:message:category:module:lineno, with every field
    optional.  Raises _OptionError for malformed input.
    """
    import re
    parts = arg.split(':')
    if len(parts) > 5:
        raise _OptionError("too many fields (max 5): %r" % (arg,))
    # Pad missing trailing fields with empty strings.
    parts.extend([''] * (5 - len(parts)))
    action, message, category, module, lineno = (field.strip()
                                                 for field in parts)
    action = _getaction(action)
    message = re.escape(message)     # fields are literal text, not regexes
    category = _getcategory(category)
    module = re.escape(module)
    if module:
        module += '$'                # module field must match completely
    if lineno:
        try:
            lineno = int(lineno)
            if lineno < 0:
                raise ValueError
        except (ValueError, OverflowError):
            raise _OptionError("invalid lineno %r" % (lineno,))
    else:
        lineno = 0
    filterwarnings(action, message, category, module, lineno)
|  |  | ||||||
|  | # Helper for _setoption() | ||||||
|  | def _getaction(action): | ||||||
|  |     if not action: | ||||||
|  |         return "default" | ||||||
|  |     if action == "all": return "always" # Alias | ||||||
|  |     for a in ('default', 'always', 'ignore', 'module', 'once', 'error'): | ||||||
|  |         if a.startswith(action): | ||||||
|  |             return a | ||||||
|  |     raise _OptionError("invalid action: %r" % (action,)) | ||||||
|  |  | ||||||
|  | # Helper for _setoption() | ||||||
|  | def _getcategory(category): | ||||||
|  |     import re | ||||||
|  |     if not category: | ||||||
|  |         return Warning | ||||||
|  |     if re.match("^[a-zA-Z0-9_]+$", category): | ||||||
|  |         try: | ||||||
|  |             cat = eval(category) | ||||||
|  |         except NameError: | ||||||
|  |             raise _OptionError("unknown warning category: %r" % (category,)) | ||||||
|  |     else: | ||||||
|  |         i = category.rfind(".") | ||||||
|  |         module = category[:i] | ||||||
|  |         klass = category[i+1:] | ||||||
|  |         try: | ||||||
|  |             m = __import__(module, None, None, [klass]) | ||||||
|  |         except ImportError: | ||||||
|  |             raise _OptionError("invalid module name: %r" % (module,)) | ||||||
|  |         try: | ||||||
|  |             cat = getattr(m, klass) | ||||||
|  |         except AttributeError: | ||||||
|  |             raise _OptionError("unknown warning category: %r" % (category,)) | ||||||
|  |     if not issubclass(cat, Warning): | ||||||
|  |         raise _OptionError("invalid warning category: %r" % (category,)) | ||||||
|  |     return cat | ||||||
|  |  | ||||||
|  |  | ||||||
# Code typically replaced by _warnings
def warn(message, category=None, stacklevel=1):
    """Issue a warning, or maybe ignore it or raise an exception.

    *message* may be a string or a Warning instance (in which case its
    class becomes the category).  *category* defaults to UserWarning.
    *stacklevel* selects how many frames up the stack the warning is
    attributed to.
    """
    # Check if message is already a Warning object
    if isinstance(message, Warning):
        category = message.__class__
    # Check category argument
    if category is None:
        category = UserWarning
    assert issubclass(category, Warning)
    # Get context information from the selected caller frame.
    try:
        caller = sys._getframe(stacklevel)
    except ValueError:
        # stacklevel walked past the top of the stack: fall back to the
        # sys module's globals and line 1.
        globals = sys.__dict__
        lineno = 1
    else:
        globals = caller.f_globals
        lineno = caller.f_lineno
    if '__name__' in globals:
        module = globals['__name__']
    else:
        module = "<string>"
    filename = globals.get('__file__')
    if filename:
        fnl = filename.lower()
        if fnl.endswith((".pyc", ".pyo")):
            # Point at the source file rather than the compiled one.
            filename = filename[:-1]
    else:
        if module == "__main__":
            try:
                filename = sys.argv[0]
            except AttributeError:
                # embedded interpreters don't have sys.argv, see bug #839151
                filename = '__main__'
        if not filename:
            filename = module
    # Per-module registry used to implement "once"/"module" actions.
    registry = globals.setdefault("__warningregistry__", {})
    warn_explicit(message, category, filename, lineno, module, registry,
                  globals)
|  |  | ||||||
def warn_explicit(message, category, filename, lineno,
                  module=None, registry=None, module_globals=None):
    """Low-level interface to the warnings machinery: issue one warning
    with explicitly supplied location information.

    Arguments:
    message -- warning text, or a Warning instance (in which case the
               category is taken from the instance's class)
    category -- Warning subclass used when matching filters
    filename, lineno -- location reported with the warning
    module -- module name used for filter matching; derived from
              filename when omitted
    registry -- the issuing module's __warningregistry__ dict, used to
                suppress duplicate warnings; a throwaway dict when None
    module_globals -- globals of the issuing module, handed to linecache
                      so source inside zipfiles can be located
    """
    lineno = int(lineno)
    if module is None:
        module = filename or "<unknown>"
        if module[-3:].lower() == ".py":
            module = module[:-3] # XXX What about leading pathname?
    if registry is None:
        registry = {}
    # A registry populated under an older filter configuration must be
    # discarded, otherwise warnings suppressed under the old filters
    # would stay suppressed under the new ones.
    if registry.get('version', 0) != _filters_version:
        registry.clear()
        registry['version'] = _filters_version
    if isinstance(message, Warning):
        text = str(message)
        category = message.__class__
    else:
        text = message
        message = category(message)
    key = (text, category, lineno)
    # Quick test for common case
    if registry.get(key):
        return
    # Search the filters
    for item in filters:
        action, msg, cat, mod, ln = item
        if ((msg is None or msg.match(text)) and
            issubclass(category, cat) and
            (mod is None or mod.match(module)) and
            (ln == 0 or lineno == ln)):
            break
    else:
        # No filter matched; fall back to the module-wide default.
        action = defaultaction
    # Early exit actions
    if action == "ignore":
        registry[key] = 1
        return

    # Prime the linecache for formatting, in case the
    # "file" is actually in a zipfile or something.
    import linecache
    linecache.getlines(filename, module_globals)

    if action == "error":
        raise message
    # Other actions
    if action == "once":
        # Suppress repeats globally, regardless of location.
        registry[key] = 1
        oncekey = (text, category)
        if onceregistry.get(oncekey):
            return
        onceregistry[oncekey] = 1
    elif action == "always":
        pass
    elif action == "module":
        # Suppress repeats per module: the altkey ignores the line number.
        registry[key] = 1
        altkey = (text, category, 0)
        if registry.get(altkey):
            return
        registry[altkey] = 1
    elif action == "default":
        registry[key] = 1
    else:
        # Unrecognized actions are errors
        raise RuntimeError(
              "Unrecognized action (%r) in warnings.filters:\n %s" %
              (action, item))
    if not callable(showwarning):
        raise TypeError("warnings.showwarning() must be set to a "
                        "function or method")
    # Print message and context
    showwarning(message, category, filename, lineno)
|  |  | ||||||
|  |  | ||||||
class WarningMessage(object):

    """Holds the result of a single showwarning() call."""

    _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
                        "line")

    def __init__(self, message, category, filename, lineno, file=None,
                    line=None):
        # Store each argument on the instance under its own name, in the
        # order declared by _WARNING_DETAILS.
        values = (message, category, filename, lineno, file, line)
        for attr, value in zip(self._WARNING_DETAILS, values):
            setattr(self, attr, value)
        self._category_name = category.__name__ if category else None

    def __str__(self):
        return ("{message : %r, category : %r, filename : %r, lineno : %s, "
                    "line : %r}" % (self.message, self._category_name,
                                    self.filename, self.lineno, self.line))
|  |  | ||||||
|  |  | ||||||
class catch_warnings(object):

    """A context manager that copies and restores the warnings filter upon
    exiting the context.

    The 'record' argument specifies whether warnings should be captured by a
    custom implementation of warnings.showwarning() and be appended to a list
    returned by the context manager. Otherwise None is returned by the context
    manager. The objects appended to the list are arguments whose attributes
    mirror the arguments to showwarning().

    The 'module' argument is to specify an alternative module to the module
    named 'warnings' and imported under that name. This argument is only useful
    when testing the warnings module itself.

    """

    def __init__(self, *, record=False, module=None):
        """Specify whether to record warnings and if an alternative module
        should be used other than sys.modules['warnings'].

        For compatibility with Python 3.0, please consider all arguments to be
        keyword-only.

        """
        self._record = record
        if module is None:
            self._module = sys.modules['warnings']
        else:
            self._module = module
        self._entered = False

    def __repr__(self):
        # Only mention arguments that differ from the defaults.
        parts = []
        if self._record:
            parts.append("record=True")
        if self._module is not sys.modules['warnings']:
            parts.append("module=%r" % self._module)
        return "%s(%s)" % (type(self).__name__, ", ".join(parts))

    def __enter__(self):
        if self._entered:
            raise RuntimeError("Cannot enter %r twice" % self)
        self._entered = True
        # Snapshot the filter list and showwarning hook, then give the
        # module a private copy of the filters to mutate.
        self._filters = self._module.filters
        self._module.filters = self._filters[:]
        self._module._filters_mutated()
        self._showwarning = self._module.showwarning
        if not self._record:
            return None
        log = []
        def showwarning(*args, **kwargs):
            log.append(WarningMessage(*args, **kwargs))
        self._module.showwarning = showwarning
        return log

    def __exit__(self, *exc_info):
        if not self._entered:
            raise RuntimeError("Cannot exit %r without entering first" % self)
        # Restore exactly what __enter__ saved.
        self._module.filters = self._filters
        self._module._filters_mutated()
        self._module.showwarning = self._showwarning
|  |  | ||||||
|  |  | ||||||
# filters contains a sequence of filter 5-tuples
# The components of the 5-tuple are:
# - an action: error, ignore, always, default, module, or once
# - a compiled regex that must match the warning message
# - a class representing the warning category
# - a compiled regex that must match the module that is being warned
# - a line number for the line being warning, or 0 to mean any line
# If either if the compiled regexs are None, match anything.
_warnings_defaults = False
try:
    # Prefer the C-accelerated implementation of the core machinery;
    # it also installs the default filters itself.
    from _warnings import (filters, _defaultaction, _onceregistry,
                           warn, warn_explicit, _filters_mutated)
    defaultaction = _defaultaction
    onceregistry = _onceregistry
    _warnings_defaults = True

except ImportError:
    # Pure-Python fallback state used by the definitions above.
    filters = []
    defaultaction = "default"
    onceregistry = {}

    _filters_version = 1

    def _filters_mutated():
        # Bump the version so stale per-module __warningregistry__
        # dicts are invalidated by warn_explicit().
        global _filters_version
        _filters_version += 1


# Module initialization
_processoptions(sys.warnoptions)
if not _warnings_defaults:
    # Pure-Python path only: install the same default filters the C
    # module would have provided.
    silence = [ImportWarning, PendingDeprecationWarning]
    silence.append(DeprecationWarning)
    for cls in silence:
        simplefilter("ignore", category=cls)
    # sys.flags.bytes_warning reflects the -b command-line option:
    # given twice -> error, once -> show, absent -> ignore.
    bytes_warning = sys.flags.bytes_warning
    if bytes_warning > 1:
        bytes_action = "error"
    elif bytes_warning:
        bytes_action = "default"
    else:
        bytes_action = "ignore"
    simplefilter(bytes_action, category=BytesWarning, append=1)
    # resource usage warnings are enabled by default in pydebug mode
    if hasattr(sys, 'gettotalrefcount'):
        resource_action = "always"
    else:
        resource_action = "ignore"
    simplefilter(resource_action, category=ResourceWarning, append=1)

del _warnings_defaults
| @@ -1 +0,0 @@ | |||||||
| /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/weakref.py |  | ||||||
							
								
								
									
										603
									
								
								v1/flask/lib/python3.4/weakref.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										603
									
								
								v1/flask/lib/python3.4/weakref.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,603 @@ | |||||||
|  | """Weak reference support for Python. | ||||||
|  |  | ||||||
|  | This module is an implementation of PEP 205: | ||||||
|  |  | ||||||
|  | http://www.python.org/dev/peps/pep-0205/ | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | # Naming convention: Variables named "wr" are weak reference objects; | ||||||
|  | # they are called this instead of "ref" to avoid name collisions with | ||||||
|  | # the module-global ref() function imported from _weakref. | ||||||
|  |  | ||||||
|  | from _weakref import ( | ||||||
|  |      getweakrefcount, | ||||||
|  |      getweakrefs, | ||||||
|  |      ref, | ||||||
|  |      proxy, | ||||||
|  |      CallableProxyType, | ||||||
|  |      ProxyType, | ||||||
|  |      ReferenceType) | ||||||
|  |  | ||||||
|  | from _weakrefset import WeakSet, _IterationGuard | ||||||
|  |  | ||||||
|  | import collections  # Import after _weakref to avoid circular import. | ||||||
|  | import sys | ||||||
|  | import itertools | ||||||
|  |  | ||||||
|  | ProxyTypes = (ProxyType, CallableProxyType) | ||||||
|  |  | ||||||
|  | __all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs", | ||||||
|  |            "WeakKeyDictionary", "ReferenceType", "ProxyType", | ||||||
|  |            "CallableProxyType", "ProxyTypes", "WeakValueDictionary", | ||||||
|  |            "WeakSet", "WeakMethod", "finalize"] | ||||||
|  |  | ||||||
|  |  | ||||||
class WeakMethod(ref):
    """
    A custom `weakref.ref` subclass which simulates a weak reference to
    a bound method, working around the lifetime problem of bound methods.
    """

    # The instance is referenced via the base ref; the function is held
    # separately in _func_ref since bound-method objects are ephemeral.
    __slots__ = "_func_ref", "_meth_type", "_alive", "__weakref__"

    def __new__(cls, meth, callback=None):
        try:
            obj = meth.__self__
            func = meth.__func__
        except AttributeError:
            raise TypeError("argument should be a bound method, not {}"
                            .format(type(meth))) from None
        def _cb(arg):
            # The self-weakref trick is needed to avoid creating a reference
            # cycle.
            self = self_wr()
            if self._alive:
                # Fire the user callback at most once, whichever of the
                # two underlying references dies first.
                self._alive = False
                if callback is not None:
                    callback(self)
        self = ref.__new__(cls, obj, _cb)
        self._func_ref = ref(func, _cb)
        self._meth_type = type(meth)
        self._alive = True
        self_wr = ref(self)
        return self

    def __call__(self):
        # Re-bind the function to the instance on demand; None once either
        # underlying referent has been collected.
        obj = super().__call__()
        func = self._func_ref()
        if obj is None or func is None:
            return None
        return self._meth_type(func, obj)

    def __eq__(self, other):
        # Dead references only compare equal by identity.
        if isinstance(other, WeakMethod):
            if not self._alive or not other._alive:
                return self is other
            return ref.__eq__(self, other) and self._func_ref == other._func_ref
        return False

    def __ne__(self, other):
        if isinstance(other, WeakMethod):
            if not self._alive or not other._alive:
                return self is not other
            return ref.__ne__(self, other) or self._func_ref != other._func_ref
        return True

    __hash__ = ref.__hash__
|  |  | ||||||
|  |  | ||||||
class WeakValueDictionary(collections.MutableMapping):
    """Mapping class that references values weakly.

    Entries in the dictionary will be discarded when no strong
    reference to the value exists anymore
    """
    # We inherit the constructor without worrying about the input
    # dictionary; since it uses our .update() method, we get the right
    # checks (if the other dictionary is a WeakValueDictionary,
    # objects are unwrapped on the way out, and we always wrap on the
    # way in).

    def __init__(*args, **kw):
        # (*args) signature so a caller-supplied keyword 'self' is not
        # shadowed by the instance argument.
        if not args:
            raise TypeError("descriptor '__init__' of 'WeakValueDictionary' "
                            "object needs an argument")
        self, *args = args
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        def remove(wr, selfref=ref(self)):
            # Shared dead-value callback: wr.key identifies the entry.
            # Defer removal while iteration is in progress.
            self = selfref()
            if self is not None:
                if self._iterating:
                    self._pending_removals.append(wr.key)
                else:
                    del self.data[wr.key]
        self._remove = remove
        # A list of keys to be removed
        self._pending_removals = []
        self._iterating = set()
        self.data = d = {}
        self.update(*args, **kw)

    def _commit_removals(self):
        # Flush deferred removals accumulated during iteration.
        l = self._pending_removals
        d = self.data
        # We shouldn't encounter any KeyError, because this method should
        # always be called *before* mutating the dict.
        while l:
            del d[l.pop()]

    def __getitem__(self, key):
        # The stored value is a weakref; a dead one means the entry is gone.
        o = self.data[key]()
        if o is None:
            raise KeyError(key)
        else:
            return o

    def __delitem__(self, key):
        if self._pending_removals:
            self._commit_removals()
        del self.data[key]

    def __len__(self):
        return len(self.data) - len(self._pending_removals)

    def __contains__(self, key):
        try:
            o = self.data[key]()
        except KeyError:
            return False
        return o is not None

    def __repr__(self):
        return "<WeakValueDictionary at %s>" % id(self)

    def __setitem__(self, key, value):
        if self._pending_removals:
            self._commit_removals()
        # KeyedRef remembers the key so the shared _remove callback can
        # find the entry without a per-key closure.
        self.data[key] = KeyedRef(value, self._remove, key)

    def copy(self):
        # Shallow copy; entries whose values have already died are dropped.
        new = WeakValueDictionary()
        for key, wr in self.data.items():
            o = wr()
            if o is not None:
                new[key] = o
        return new

    __copy__ = copy

    def __deepcopy__(self, memo):
        from copy import deepcopy
        new = self.__class__()
        for key, wr in self.data.items():
            o = wr()
            if o is not None:
                # Keys are deep-copied; values stay weakly referenced.
                new[deepcopy(key, memo)] = o
        return new

    def get(self, key, default=None):
        try:
            wr = self.data[key]
        except KeyError:
            return default
        else:
            o = wr()
            if o is None:
                # This should only happen
                return default
            else:
                return o

    def items(self):
        # _IterationGuard defers entry removal for the iteration's duration.
        with _IterationGuard(self):
            for k, wr in self.data.items():
                v = wr()
                if v is not None:
                    yield k, v

    def keys(self):
        with _IterationGuard(self):
            for k, wr in self.data.items():
                if wr() is not None:
                    yield k

    __iter__ = keys

    def itervaluerefs(self):
        """Return an iterator that yields the weak references to the values.

        The references are not guaranteed to be 'live' at the time
        they are used, so the result of calling the references needs
        to be checked before being used.  This can be used to avoid
        creating references that will cause the garbage collector to
        keep the values around longer than needed.

        """
        with _IterationGuard(self):
            yield from self.data.values()

    def values(self):
        with _IterationGuard(self):
            for wr in self.data.values():
                obj = wr()
                if obj is not None:
                    yield obj

    def popitem(self):
        if self._pending_removals:
            self._commit_removals()
        # Loop until an entry with a still-live value is found.
        while True:
            key, wr = self.data.popitem()
            o = wr()
            if o is not None:
                return key, o

    def pop(self, key, *args):
        if self._pending_removals:
            self._commit_removals()
        try:
            o = self.data.pop(key)()
        except KeyError:
            if args:
                return args[0]
            raise
        if o is None:
            # The entry existed but its value is dead: treat as missing.
            raise KeyError(key)
        else:
            return o

    def setdefault(self, key, default=None):
        try:
            wr = self.data[key]
        except KeyError:
            if self._pending_removals:
                self._commit_removals()
            self.data[key] = KeyedRef(default, self._remove, key)
            return default
        else:
            return wr()

    def update(*args, **kwargs):
        # Same (*args) trick as __init__ to avoid shadowing 'self'.
        if not args:
            raise TypeError("descriptor 'update' of 'WeakValueDictionary' "
                            "object needs an argument")
        self, *args = args
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        dict = args[0] if args else None
        if self._pending_removals:
            self._commit_removals()
        d = self.data
        if dict is not None:
            if not hasattr(dict, "items"):
                dict = type({})(dict)
            for key, o in dict.items():
                d[key] = KeyedRef(o, self._remove, key)
        if len(kwargs):
            self.update(kwargs)

    def valuerefs(self):
        """Return a list of weak references to the values.

        The references are not guaranteed to be 'live' at the time
        they are used, so the result of calling the references needs
        to be checked before being used.  This can be used to avoid
        creating references that will cause the garbage collector to
        keep the values around longer than needed.

        """
        return list(self.data.values())
|  |  | ||||||
|  |  | ||||||
class KeyedRef(ref):
    """Weak reference that also remembers the key it was stored under.

    WeakValueDictionary stores one of these per entry so that a single
    shared removal callback can recover the entry's key from the
    reference itself (via the 'key' attribute) instead of creating one
    closure per stored key.
    """

    __slots__ = ("key",)

    def __new__(cls, ob, callback, key):
        # ref construction happens in __new__, so the key is attached here.
        instance = ref.__new__(cls, ob, callback)
        instance.key = key
        return instance

    def __init__(self, ob, callback, key):
        # 'key' was consumed by __new__; forward only what ref expects.
        super().__init__(ob, callback)
|  |  | ||||||
|  |  | ||||||
class WeakKeyDictionary(collections.MutableMapping):
    """ Mapping class that references keys weakly.

    Entries in the dictionary will be discarded when there is no
    longer a strong reference to the key. This can be used to
    associate additional data with an object owned by other parts of
    an application without adding attributes to those objects. This
    can be especially useful with objects that override attribute
    accesses.
    """

    def __init__(self, dict=None):
        self.data = {}
        def remove(k, selfref=ref(self)):
            # Shared dead-key callback; defer removal during iteration.
            self = selfref()
            if self is not None:
                if self._iterating:
                    self._pending_removals.append(k)
                else:
                    del self.data[k]
        self._remove = remove
        # A list of dead weakrefs (keys to be removed)
        self._pending_removals = []
        self._iterating = set()
        # True when _pending_removals may contain entries already deleted
        # explicitly, making the __len__ adjustment inaccurate.
        self._dirty_len = False
        if dict is not None:
            self.update(dict)

    def _commit_removals(self):
        # NOTE: We don't need to call this method before mutating the dict,
        # because a dead weakref never compares equal to a live weakref,
        # even if they happened to refer to equal objects.
        # However, it means keys may already have been removed.
        l = self._pending_removals
        d = self.data
        while l:
            try:
                del d[l.pop()]
            except KeyError:
                pass

    def _scrub_removals(self):
        # Drop pending entries no longer present so __len__ is accurate.
        d = self.data
        self._pending_removals = [k for k in self._pending_removals if k in d]
        self._dirty_len = False

    def __delitem__(self, key):
        self._dirty_len = True
        del self.data[ref(key)]

    def __getitem__(self, key):
        # A temporary ref(key) hashes/compares like the stored live ref.
        return self.data[ref(key)]

    def __len__(self):
        if self._dirty_len and self._pending_removals:
            # self._pending_removals may still contain keys which were
            # explicitly removed, we have to scrub them (see issue #21173).
            self._scrub_removals()
        return len(self.data) - len(self._pending_removals)

    def __repr__(self):
        return "<WeakKeyDictionary at %s>" % id(self)

    def __setitem__(self, key, value):
        self.data[ref(key, self._remove)] = value

    def copy(self):
        # Shallow copy; entries whose keys have already died are dropped.
        new = WeakKeyDictionary()
        for key, value in self.data.items():
            o = key()
            if o is not None:
                new[o] = value
        return new

    __copy__ = copy

    def __deepcopy__(self, memo):
        from copy import deepcopy
        new = self.__class__()
        for key, value in self.data.items():
            o = key()
            if o is not None:
                # Values are deep-copied; keys stay weakly referenced.
                new[o] = deepcopy(value, memo)
        return new

    def get(self, key, default=None):
        return self.data.get(ref(key),default)

    def __contains__(self, key):
        try:
            wr = ref(key)
        except TypeError:
            # Non-weakrefable objects can never be keys here.
            return False
        return wr in self.data

    def items(self):
        # _IterationGuard defers entry removal for the iteration's duration.
        with _IterationGuard(self):
            for wr, value in self.data.items():
                key = wr()
                if key is not None:
                    yield key, value

    def keys(self):
        with _IterationGuard(self):
            for wr in self.data:
                obj = wr()
                if obj is not None:
                    yield obj

    __iter__ = keys

    def values(self):
        with _IterationGuard(self):
            for wr, value in self.data.items():
                if wr() is not None:
                    yield value

    def keyrefs(self):
        """Return a list of weak references to the keys.

        The references are not guaranteed to be 'live' at the time
        they are used, so the result of calling the references needs
        to be checked before being used.  This can be used to avoid
        creating references that will cause the garbage collector to
        keep the keys around longer than needed.

        """
        return list(self.data)

    def popitem(self):
        self._dirty_len = True
        # Loop until an entry with a still-live key is found.
        while True:
            key, value = self.data.popitem()
            o = key()
            if o is not None:
                return o, value

    def pop(self, key, *args):
        self._dirty_len = True
        return self.data.pop(ref(key), *args)

    def setdefault(self, key, default=None):
        return self.data.setdefault(ref(key, self._remove),default)

    def update(self, dict=None, **kwargs):
        d = self.data
        if dict is not None:
            if not hasattr(dict, "items"):
                dict = type({})(dict)
            for key, value in dict.items():
                d[ref(key, self._remove)] = value
        if len(kwargs):
            self.update(kwargs)
|  |  | ||||||
|  |  | ||||||
class finalize:
    """Class for finalization of weakrefable objects

    finalize(obj, func, *args, **kwargs) returns a callable finalizer
    object which will be called when obj is garbage collected. The
    first time the finalizer is called it evaluates func(*arg, **kwargs)
    and returns the result. After this the finalizer is dead, and
    calling it just returns None.

    When the program exits any remaining finalizers for which the
    atexit attribute is true will be run in reverse order of creation.
    By default atexit is true.
    """

    # Finalizer objects don't have any state of their own.  They are
    # just used as keys to lookup _Info objects in the registry.  This
    # ensures that they cannot be part of a ref-cycle.

    __slots__ = ()
    # Maps live finalizer -> _Info; an entry's presence means "alive".
    _registry = {}
    # Set once _exitfunc has finished; afterwards __call__ is a no-op.
    _shutdown = False
    # Monotonic creation counter, used to order atexit execution.
    _index_iter = itertools.count()
    # True whenever _registry changed since _select_for_exit last ran.
    _dirty = False
    # Guards one-time registration of _exitfunc with the atexit module.
    _registered_with_atexit = False

    class _Info:
        # Per-finalizer record kept in _registry (slotted to stay small).
        __slots__ = ("weakref", "func", "args", "kwargs", "atexit", "index")

    def __init__(self, obj, func, *args, **kwargs):
        if not self._registered_with_atexit:
            # We may register the exit function more than once because
            # of a thread race, but that is harmless
            import atexit
            atexit.register(self._exitfunc)
            finalize._registered_with_atexit = True
        info = self._Info()
        # The weakref's callback is this finalizer itself, so collecting
        # obj invokes __call__ automatically.
        info.weakref = ref(obj, self)
        info.func = func
        info.args = args
        # Store None rather than an empty dict to save a little memory.
        info.kwargs = kwargs or None
        info.atexit = True
        info.index = next(self._index_iter)
        self._registry[self] = info
        finalize._dirty = True

    def __call__(self, _=None):
        """If alive then mark as dead and return func(*args, **kwargs);
        otherwise return None"""
        # pop() both checks liveness and marks dead in one step.
        info = self._registry.pop(self, None)
        if info and not self._shutdown:
            return info.func(*info.args, **(info.kwargs or {}))

    def detach(self):
        """If alive then mark as dead and return (obj, func, args, kwargs);
        otherwise return None"""
        info = self._registry.get(self)
        # The local strong reference keeps obj alive across the pop.
        obj = info and info.weakref()
        if obj is not None and self._registry.pop(self, None):
            return (obj, info.func, info.args, info.kwargs or {})

    def peek(self):
        """If alive then return (obj, func, args, kwargs);
        otherwise return None"""
        info = self._registry.get(self)
        obj = info and info.weakref()
        if obj is not None:
            return (obj, info.func, info.args, info.kwargs or {})

    @property
    def alive(self):
        """Whether finalizer is alive"""
        return self in self._registry

    @property
    def atexit(self):
        """Whether finalizer should be called at exit"""
        info = self._registry.get(self)
        return bool(info) and info.atexit

    @atexit.setter
    def atexit(self, value):
        info = self._registry.get(self)
        if info:
            info.atexit = bool(value)

    def __repr__(self):
        info = self._registry.get(self)
        obj = info and info.weakref()
        if obj is None:
            return '<%s object at %#x; dead>' % (type(self).__name__, id(self))
        else:
            return '<%s object at %#x; for %r at %#x>' % \
                (type(self).__name__, id(self), type(obj).__name__, id(obj))

    @classmethod
    def _select_for_exit(cls):
        # Return live finalizers marked for exit, oldest first
        L = [(f,i) for (f,i) in cls._registry.items() if i.atexit]
        L.sort(key=lambda item:item[1].index)
        return [f for (f,i) in L]

    @classmethod
    def _exitfunc(cls):
        # At shutdown invoke finalizers for which atexit is true.
        # This is called once all other non-daemonic threads have been
        # joined.
        reenable_gc = False
        try:
            if cls._registry:
                import gc
                if gc.isenabled():
                    reenable_gc = True
                    gc.disable()
                pending = None
                while True:
                    # Recompute the ordered list whenever a finalizer was
                    # added or removed since the last selection.
                    if pending is None or finalize._dirty:
                        pending = cls._select_for_exit()
                        finalize._dirty = False
                    if not pending:
                        break
                    # pop() from the end -> newest-first (reverse creation).
                    f = pending.pop()
                    try:
                        # gc is disabled, so (assuming no daemonic
                        # threads) the following is the only line in
                        # this function which might trigger creation
                        # of a new finalizer
                        f()
                    except Exception:
                        sys.excepthook(*sys.exc_info())
                    assert f not in cls._registry
        finally:
            # prevent any more finalizers from executing during shutdown
            finalize._shutdown = True
            if reenable_gc:
                gc.enable()
							
								
								
									
										83
									
								
								v1/plexMovies.py
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										83
									
								
								v1/plexMovies.py
									
									
									
									
									
										Executable file
									
								
							| @@ -0,0 +1,83 @@ | |||||||
|  | #!/usr/bin/env python3 | ||||||
|  | # -*- coding: utf-8 -*- | ||||||
|  | # @Author: KevinMidboe | ||||||
|  | # @Date:   2017-01-28 23:21:22 | ||||||
|  | # @Last Modified by:   KevinMidboe | ||||||
|  | # @Last Modified time: 2017-02-06 11:58:31 | ||||||
|  |  | ||||||
|  | from os import system | ||||||
|  | import xml.etree.ElementTree as ET | ||||||
|  |  | ||||||
|  | import sys | ||||||
|  |  | ||||||
|  | from time import time | ||||||
|  |  | ||||||
def getLibraryXML():
	"""Fetch the full Plex movie-library listing and return the parsed
	XML root element, or None if the response could not be parsed.
	"""
	# Dump the library listing to a local file.  NOTE(review): shelling
	# out to curl with a hard-coded host is fragile; consider urllib /
	# requests instead.  Every call saves the listing to xmlMovieLib.xml.
	system('curl --silent http://10.0.0.41:32400/library/sections/1/all > xmlMovieLib.xml')
	# XML parsing, creates a tree and saves the root node as root
	try:
		parser = ET.parse('xmlMovieLib.xml')
		xmlTreeRoot = parser.getroot()
		return xmlTreeRoot
	# BUG FIX: the module is imported as ET, so the previous
	# ``except xml.etree.ElementTree.ParseError`` raised NameError
	# instead of catching parse failures.
	except ET.ParseError:
		return None
|  |  | ||||||
def getMovieExistance():
	# Placeholder: presumably intended to report whether a movie exists
	# in the library — not implemented yet.  TODO(review): implement or
	# remove.
	pass
|  |  | ||||||
def getSpecificMovieInfo(movieTitle, movieYear=None, xmlTreeRoot=None):
	"""Look up *movieTitle* in the Plex movie library.

	movieTitle  -- exact title to match.
	movieYear   -- optional year; NOTE(review): the XML 'year' attribute
	               is a string, so pass e.g. '2016' for the match to work.
	xmlTreeRoot -- optional pre-parsed library root; fetched via
	               getLibraryXML() when omitted (backward compatible).

	Returns a dict of title/year/bitrate/width/height on a match, an
	{'Error': ...} dict when the title matches but the year does not,
	or None when the library is unavailable or the title is not found.
	"""
	if xmlTreeRoot is None:
		xmlTreeRoot = getLibraryXML()

	try:
		treeSize = int(xmlTreeRoot.get('size'))
	# BUG FIX: getLibraryXML() returns None on parse failure, which
	# previously raised an uncaught AttributeError here (only TypeError
	# — from int(None) — was handled).
	except (TypeError, AttributeError):
		return None

	if treeSize > 0:
		for video in xmlTreeRoot.findall('Video'):
			if video.get('title') == movieTitle:
				title = movieTitle
				year = video.get('year')
				if movieYear is None or movieYear == year:
					mediaInfo = video.find('Media')
					bitrate = mediaInfo.get('bitrate')
					width = mediaInfo.get('width')
					height = mediaInfo.get('height')

					return { 'title':title, 'year': year, 'bitrate':bitrate, 
						'width':width, 'height':height }
				else:
					# Title matched but year did not; suggest the stored year.
					return { 'Error': 'Movie matching that year does not exist, did '\
						'you mean ' + title + ' (' + year + ')?'}

	# No matching title found.
	return None
|  |  | ||||||
def plexMovies(xmlTreeRoot, query='title'):
	"""Print the *query* attribute for every movie under *xmlTreeRoot*.

	query -- one of 'title', 'year' (Video attributes) or 'bitrate',
	         'width', 'height' (Media attributes).  Returns None.

	BUG FIX: removed a leftover debug ``sys.exit()`` that made the whole
	body unreachable, and replaced the undefined name ``root`` with the
	actual parameter ``xmlTreeRoot``.
	"""
	# The root node named MediaContainer has a size attribute; '0' means
	# the library section is empty, so there is nothing to print.
	if (xmlTreeRoot.get('size') != '0'):
		# Goes through all the 'video' elements in MediaContainer
		for video in xmlTreeRoot.findall('Video'):
			if query=='title' or query=='year':
				result = video.get(query)
				print(result)

			elif query=='bitrate' or query=='width' or query=='height':
				mediaInfo = video.find('Media')
				result = mediaInfo.get(query)
				print(result)
|  |  | ||||||
if __name__ == '__main__':
	# Manual smoke test: look up one known movie and report how long the
	# round trip took.  Supported query fields: title, year, bitrate,
	# width, height.
	start_time = time()
	info = getSpecificMovieInfo('10 Cloverfield Lane')
	print(info)
	print("--- %s seconds ---" % (time() - start_time))
| @@ -3,7 +3,7 @@ | |||||||
| # @Author: KevinMidboe | # @Author: KevinMidboe | ||||||
| # @Date:   2017-01-27 19:48:42 | # @Date:   2017-01-27 19:48:42 | ||||||
| # @Last Modified by:   KevinMidboe | # @Last Modified by:   KevinMidboe | ||||||
| # @Last Modified time: 2017-01-31 23:13:20 | # @Last Modified time: 2017-02-03 12:33:51 | ||||||
|  |  | ||||||
| # TODO add better error handling to return statements | # TODO add better error handling to return statements | ||||||
|  |  | ||||||
|   | |||||||
							
								
								
									
										9718
									
								
								v1/xmlMovieLib.xml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										9718
									
								
								v1/xmlMovieLib.xml
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
		Reference in New Issue
	
	Block a user