From da5a649215486a71bd65f9e5ec91ab35c411ff13 Mon Sep 17 00:00:00 2001
From: Gavin Aguiar <gaaguiar@microsoft.com>
Date: Fri, 15 Nov 2024 10:41:26 -0600
Subject: [PATCH 01/17] Using werkzeug library

---
 azure/functions/_abc.py                       |    2 +-
 azure/functions/_http.py                      |   30 +-
 azure/functions/_http_wsgi.py                 |    9 +-
 .../_thirdparty/werkzeug/LICENSE.rst          |   28 -
 .../_thirdparty/werkzeug/__init__.py          |    0
 .../functions/_thirdparty/werkzeug/_compat.py |  219 --
 .../_thirdparty/werkzeug/_internal.py         |  459 ---
 .../_thirdparty/werkzeug/datastructures.py    | 2846 -----------------
 .../_thirdparty/werkzeug/exceptions.py        |  763 -----
 .../_thirdparty/werkzeug/formparser.py        |  586 ----
 azure/functions/_thirdparty/werkzeug/http.py  | 1249 --------
 azure/functions/_thirdparty/werkzeug/urls.py  | 1134 -------
 azure/functions/_thirdparty/werkzeug/utils.py |  748 -----
 azure/functions/_thirdparty/werkzeug/wsgi.py  | 1000 ------
 azure/functions/http.py                       |    2 +-
 setup.py                                      |    3 +
 16 files changed, 29 insertions(+), 9049 deletions(-)
 delete mode 100644 azure/functions/_thirdparty/werkzeug/LICENSE.rst
 delete mode 100644 azure/functions/_thirdparty/werkzeug/__init__.py
 delete mode 100644 azure/functions/_thirdparty/werkzeug/_compat.py
 delete mode 100644 azure/functions/_thirdparty/werkzeug/_internal.py
 delete mode 100644 azure/functions/_thirdparty/werkzeug/datastructures.py
 delete mode 100644 azure/functions/_thirdparty/werkzeug/exceptions.py
 delete mode 100644 azure/functions/_thirdparty/werkzeug/formparser.py
 delete mode 100644 azure/functions/_thirdparty/werkzeug/http.py
 delete mode 100644 azure/functions/_thirdparty/werkzeug/urls.py
 delete mode 100644 azure/functions/_thirdparty/werkzeug/utils.py
 delete mode 100644 azure/functions/_thirdparty/werkzeug/wsgi.py

diff --git a/azure/functions/_abc.py b/azure/functions/_abc.py
index 17b4822c..5812787a 100644
--- a/azure/functions/_abc.py
+++ b/azure/functions/_abc.py
@@ -7,7 +7,7 @@
 import threading
 import typing
 
-from azure.functions._thirdparty.werkzeug.datastructures import Headers
+from werkzeug.datastructures import Headers
 
 T = typing.TypeVar('T')
 
diff --git a/azure/functions/_http.py b/azure/functions/_http.py
index ce6ec812..9cae82d7 100644
--- a/azure/functions/_http.py
+++ b/azure/functions/_http.py
@@ -8,11 +8,12 @@
 import types
 import typing
 
+from multidict._multidict import MultiDict
+from werkzeug import formparser as _wk_parser
+from werkzeug import http as _wk_http
+from werkzeug.datastructures import Headers, FileStorage
+
 from . import _abc
-from ._thirdparty.werkzeug import datastructures as _wk_datastructures
-from ._thirdparty.werkzeug import formparser as _wk_parser
-from ._thirdparty.werkzeug import http as _wk_http
-from ._thirdparty.werkzeug.datastructures import Headers
 
 
 class BaseHeaders(collections.abc.Mapping):
@@ -174,8 +175,8 @@ def __init__(self,
         self.__route_params = types.MappingProxyType(route_params or {})
         self.__body_bytes = body
         self.__form_parsed = False
-        self.__form = None
-        self.__files = None
+        self.__form: MultiDict[str, str] = None
+        self.__files:MultiDict[str, FileStorage] = None
 
     @property
     def url(self):
@@ -216,18 +217,21 @@ def get_json(self) -> typing.Any:
     def _parse_form_data(self):
         if self.__form_parsed:
             return
-
+        """
+          stream_factory: TStreamFactory | None = None,
+        max_form_memory_size: int | None = None,
+        max_content_length: int | None = None,
+        cls: type[MultiDict[str, t.Any]] | None = None,
+        silent: bool = True,
+        *,
+        max_form_parts: int | None = None,
+        """
         body = self.get_body()
         content_type = self.headers.get('Content-Type', '')
         content_length = len(body)
         mimetype, options = _wk_http.parse_options_header(content_type)
         parser = _wk_parser.FormDataParser(
-            _wk_parser.default_stream_factory,
-            options.get('charset') or 'utf-8',
-            'replace',
-            None,
-            None,
-            _wk_datastructures.ImmutableMultiDict,
+            _wk_parser.default_stream_factory
         )
 
         body_stream = io.BytesIO(body)
diff --git a/azure/functions/_http_wsgi.py b/azure/functions/_http_wsgi.py
index 48d64733..f3065004 100644
--- a/azure/functions/_http_wsgi.py
+++ b/azure/functions/_http_wsgi.py
@@ -9,7 +9,12 @@
 
 from ._abc import Context
 from ._http import HttpRequest, HttpResponse
-from ._thirdparty.werkzeug._compat import string_types, wsgi_encoding_dance
+
+
+def wsgi_encoding_dance(value):
+    if isinstance(value, str):
+        return value.encode("latin-1")
+    return value
 
 
 class WsgiRequest:
@@ -98,7 +103,7 @@ def to_environ(self, errors_buffer: StringIO) -> Dict[str, Any]:
 
         # Ensure WSGI string fits in IOS-8859-1 code points
         for k, v in environ.items():
-            if isinstance(v, string_types):
+            if isinstance(v, (str,)):
                 environ[k] = wsgi_encoding_dance(v)
 
         # Remove None values
diff --git a/azure/functions/_thirdparty/werkzeug/LICENSE.rst b/azure/functions/_thirdparty/werkzeug/LICENSE.rst
deleted file mode 100644
index c37cae49..00000000
--- a/azure/functions/_thirdparty/werkzeug/LICENSE.rst
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright 2007 Pallets
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-1.  Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-
-2.  Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
-3.  Neither the name of the copyright holder nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
-PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
-TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/azure/functions/_thirdparty/werkzeug/__init__.py b/azure/functions/_thirdparty/werkzeug/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/azure/functions/_thirdparty/werkzeug/_compat.py b/azure/functions/_thirdparty/werkzeug/_compat.py
deleted file mode 100644
index 1097983e..00000000
--- a/azure/functions/_thirdparty/werkzeug/_compat.py
+++ /dev/null
@@ -1,219 +0,0 @@
-# flake8: noqa
-# This whole file is full of lint errors
-import functools
-import operator
-import sys
-
-try:
-    import builtins
-except ImportError:
-    import __builtin__ as builtins
-
-
-PY2 = sys.version_info[0] == 2
-WIN = sys.platform.startswith("win")
-
-_identity = lambda x: x
-
-if PY2:
-    unichr = unichr
-    text_type = unicode
-    string_types = (str, unicode)
-    integer_types = (int, long)
-
-    iterkeys = lambda d, *args, **kwargs: d.iterkeys(*args, **kwargs)
-    itervalues = lambda d, *args, **kwargs: d.itervalues(*args, **kwargs)
-    iteritems = lambda d, *args, **kwargs: d.iteritems(*args, **kwargs)
-
-    iterlists = lambda d, *args, **kwargs: d.iterlists(*args, **kwargs)
-    iterlistvalues = lambda d, *args, **kwargs: d.iterlistvalues(*args, **kwargs)
-
-    int_to_byte = chr
-    iter_bytes = iter
-
-    import collections as collections_abc
-
-    exec("def reraise(tp, value, tb=None):\n raise tp, value, tb")
-
-    def fix_tuple_repr(obj):
-        def __repr__(self):
-            cls = self.__class__
-            return "%s(%s)" % (
-                cls.__name__,
-                ", ".join(
-                    "%s=%r" % (field, self[index])
-                    for index, field in enumerate(cls._fields)
-                ),
-            )
-
-        obj.__repr__ = __repr__
-        return obj
-
-    def implements_iterator(cls):
-        cls.next = cls.__next__
-        del cls.__next__
-        return cls
-
-    def implements_to_string(cls):
-        cls.__unicode__ = cls.__str__
-        cls.__str__ = lambda x: x.__unicode__().encode("utf-8")
-        return cls
-
-    def native_string_result(func):
-        def wrapper(*args, **kwargs):
-            return func(*args, **kwargs).encode("utf-8")
-
-        return functools.update_wrapper(wrapper, func)
-
-    def implements_bool(cls):
-        cls.__nonzero__ = cls.__bool__
-        del cls.__bool__
-        return cls
-
-    from itertools import imap, izip, ifilter
-
-    range_type = xrange
-
-    from StringIO import StringIO
-    from cStringIO import StringIO as BytesIO
-
-    NativeStringIO = BytesIO
-
-    def make_literal_wrapper(reference):
-        return _identity
-
-    def normalize_string_tuple(tup):
-        """Normalizes a string tuple to a common type. Following Python 2
-        rules, upgrades to unicode are implicit.
-        """
-        if any(isinstance(x, text_type) for x in tup):
-            return tuple(to_unicode(x) for x in tup)
-        return tup
-
-    def try_coerce_native(s):
-        """Try to coerce a unicode string to native if possible. Otherwise,
-        leave it as unicode.
-        """
-        try:
-            return to_native(s)
-        except UnicodeError:
-            return s
-
-    wsgi_get_bytes = _identity
-
-    def wsgi_decoding_dance(s, charset="utf-8", errors="replace"):
-        return s.decode(charset, errors)
-
-    def wsgi_encoding_dance(s, charset="utf-8", errors="replace"):
-        if isinstance(s, bytes):
-            return s
-        return s.encode(charset, errors)
-
-    def to_bytes(x, charset=sys.getdefaultencoding(), errors="strict"):
-        if x is None:
-            return None
-        if isinstance(x, (bytes, bytearray, buffer)):
-            return bytes(x)
-        if isinstance(x, unicode):
-            return x.encode(charset, errors)
-        raise TypeError("Expected bytes")
-
-    def to_native(x, charset=sys.getdefaultencoding(), errors="strict"):
-        if x is None or isinstance(x, str):
-            return x
-        return x.encode(charset, errors)
-
-
-else:
-    unichr = chr
-    text_type = str
-    string_types = (str,)
-    integer_types = (int,)
-
-    iterkeys = lambda d, *args, **kwargs: iter(d.keys(*args, **kwargs))
-    itervalues = lambda d, *args, **kwargs: iter(d.values(*args, **kwargs))
-    iteritems = lambda d, *args, **kwargs: iter(d.items(*args, **kwargs))
-
-    iterlists = lambda d, *args, **kwargs: iter(d.lists(*args, **kwargs))
-    iterlistvalues = lambda d, *args, **kwargs: iter(d.listvalues(*args, **kwargs))
-
-    int_to_byte = operator.methodcaller("to_bytes", 1, "big")
-    iter_bytes = functools.partial(map, int_to_byte)
-
-    import collections.abc as collections_abc
-
-    def reraise(tp, value, tb=None):
-        if value.__traceback__ is not tb:
-            raise value.with_traceback(tb)
-        raise value
-
-    fix_tuple_repr = _identity
-    implements_iterator = _identity
-    implements_to_string = _identity
-    implements_bool = _identity
-    native_string_result = _identity
-    imap = map
-    izip = zip
-    ifilter = filter
-    range_type = range
-
-    from io import StringIO, BytesIO
-
-    NativeStringIO = StringIO
-
-    _latin1_encode = operator.methodcaller("encode", "latin1")
-
-    def make_literal_wrapper(reference):
-        if isinstance(reference, text_type):
-            return _identity
-        return _latin1_encode
-
-    def normalize_string_tuple(tup):
-        """Ensures that all types in the tuple are either strings
-        or bytes.
-        """
-        tupiter = iter(tup)
-        is_text = isinstance(next(tupiter, None), text_type)
-        for arg in tupiter:
-            if isinstance(arg, text_type) != is_text:
-                raise TypeError(
-                    "Cannot mix str and bytes arguments (got %s)" % repr(tup)
-                )
-        return tup
-
-    try_coerce_native = _identity
-    wsgi_get_bytes = _latin1_encode
-
-    def wsgi_decoding_dance(s, charset="utf-8", errors="replace"):
-        return s.encode("latin1").decode(charset, errors)
-
-    def wsgi_encoding_dance(s, charset="utf-8", errors="replace"):
-        if isinstance(s, text_type):
-            s = s.encode(charset)
-        return s.decode("latin1", errors)
-
-    def to_bytes(x, charset=sys.getdefaultencoding(), errors="strict"):
-        if x is None:
-            return None
-        if isinstance(x, (bytes, bytearray, memoryview)):  # noqa
-            return bytes(x)
-        if isinstance(x, str):
-            return x.encode(charset, errors)
-        raise TypeError("Expected bytes")
-
-    def to_native(x, charset=sys.getdefaultencoding(), errors="strict"):
-        if x is None or isinstance(x, str):
-            return x
-        return x.decode(charset, errors)
-
-
-def to_unicode(
-    x, charset=sys.getdefaultencoding(), errors="strict", allow_none_charset=False
-):
-    if x is None:
-        return None
-    if not isinstance(x, bytes):
-        return text_type(x)
-    if charset is None and allow_none_charset:
-        return x
-    return x.decode(charset, errors)
diff --git a/azure/functions/_thirdparty/werkzeug/_internal.py b/azure/functions/_thirdparty/werkzeug/_internal.py
deleted file mode 100644
index d8b83363..00000000
--- a/azure/functions/_thirdparty/werkzeug/_internal.py
+++ /dev/null
@@ -1,459 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    werkzeug._internal
-    ~~~~~~~~~~~~~~~~~~
-
-    This module provides internally used helpers and constants.
-
-    :copyright: 2007 Pallets
-    :license: BSD-3-Clause
-"""
-import inspect
-import re
-import string
-from datetime import date
-from datetime import datetime
-from itertools import chain
-from weakref import WeakKeyDictionary
-
-from ._compat import int_to_byte
-from ._compat import integer_types
-from ._compat import iter_bytes
-from ._compat import range_type
-from ._compat import text_type
-
-
-_logger = None
-_signature_cache = WeakKeyDictionary()
-_epoch_ord = date(1970, 1, 1).toordinal()
-_cookie_params = {
-    b"expires",
-    b"path",
-    b"comment",
-    b"max-age",
-    b"secure",
-    b"httponly",
-    b"version",
-}
-_legal_cookie_chars = (
-    string.ascii_letters + string.digits + u"/=!#$%&'*+-.^_`|~:"
-).encode("ascii")
-
-_cookie_quoting_map = {b",": b"\\054", b";": b"\\073", b'"': b'\\"', b"\\": b"\\\\"}
-for _i in chain(range_type(32), range_type(127, 256)):
-    _cookie_quoting_map[int_to_byte(_i)] = ("\\%03o" % _i).encode("latin1")
-
-_octal_re = re.compile(br"\\[0-3][0-7][0-7]")
-_quote_re = re.compile(br"[\\].")
-_legal_cookie_chars_re = br"[\w\d!#%&\'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
-_cookie_re = re.compile(
-    br"""
-    (?P<key>[^=;]+)
-    (?:\s*=\s*
-        (?P<val>
-            "(?:[^\\"]|\\.)*" |
-             (?:.*?)
-        )
-    )?
-    \s*;
-""",
-    flags=re.VERBOSE,
-)
-
-
-class _Missing(object):
-    def __repr__(self):
-        return "no value"
-
-    def __reduce__(self):
-        return "_missing"
-
-
-_missing = _Missing()
-
-
-def _get_environ(obj):
-    env = getattr(obj, "environ", obj)
-    assert isinstance(env, dict), (
-        "%r is not a WSGI environment (has to be a dict)" % type(obj).__name__
-    )
-    return env
-
-
-def _log(type, message, *args, **kwargs):
-    """Log into the internal werkzeug logger."""
-    global _logger
-    if _logger is None:
-        import logging
-
-        _logger = logging.getLogger("werkzeug")
-        if _logger.level == logging.NOTSET:
-            _logger.setLevel(logging.INFO)
-        # Only set up a default log handler if the
-        # end-user application didn't set anything up.
-        if not logging.root.handlers:
-            handler = logging.StreamHandler()
-            _logger.addHandler(handler)
-    getattr(_logger, type)(message.rstrip(), *args, **kwargs)
-
-
-def _parse_signature(func):
-    """Return a signature object for the function."""
-    if hasattr(func, "im_func"):
-        func = func.im_func
-
-    # if we have a cached validator for this function, return it
-    parse = _signature_cache.get(func)
-    if parse is not None:
-        return parse
-
-    # inspect the function signature and collect all the information
-    if hasattr(inspect, "getfullargspec"):
-        tup = inspect.getfullargspec(func)
-    else:
-        tup = inspect.getargspec(func)
-    positional, vararg_var, kwarg_var, defaults = tup[:4]
-    defaults = defaults or ()
-    arg_count = len(positional)
-    arguments = []
-    for idx, name in enumerate(positional):
-        if isinstance(name, list):
-            raise TypeError(
-                "cannot parse functions that unpack tuples in the function signature"
-            )
-        try:
-            default = defaults[idx - arg_count]
-        except IndexError:
-            param = (name, False, None)
-        else:
-            param = (name, True, default)
-        arguments.append(param)
-    arguments = tuple(arguments)
-
-    def parse(args, kwargs):
-        new_args = []
-        missing = []
-        extra = {}
-
-        # consume as many arguments as positional as possible
-        for idx, (name, has_default, default) in enumerate(arguments):
-            try:
-                new_args.append(args[idx])
-            except IndexError:
-                try:
-                    new_args.append(kwargs.pop(name))
-                except KeyError:
-                    if has_default:
-                        new_args.append(default)
-                    else:
-                        missing.append(name)
-            else:
-                if name in kwargs:
-                    extra[name] = kwargs.pop(name)
-
-        # handle extra arguments
-        extra_positional = args[arg_count:]
-        if vararg_var is not None:
-            new_args.extend(extra_positional)
-            extra_positional = ()
-        if kwargs and kwarg_var is None:
-            extra.update(kwargs)
-            kwargs = {}
-
-        return (
-            new_args,
-            kwargs,
-            missing,
-            extra,
-            extra_positional,
-            arguments,
-            vararg_var,
-            kwarg_var,
-        )
-
-    _signature_cache[func] = parse
-    return parse
-
-
-def _date_to_unix(arg):
-    """Converts a timetuple, integer or datetime object into the seconds from
-    epoch in utc.
-    """
-    if isinstance(arg, datetime):
-        arg = arg.utctimetuple()
-    elif isinstance(arg, integer_types + (float,)):
-        return int(arg)
-    year, month, day, hour, minute, second = arg[:6]
-    days = date(year, month, 1).toordinal() - _epoch_ord + day - 1
-    hours = days * 24 + hour
-    minutes = hours * 60 + minute
-    seconds = minutes * 60 + second
-    return seconds
-
-
-class _DictAccessorProperty(object):
-    """Baseclass for `environ_property` and `header_property`."""
-
-    read_only = False
-
-    def __init__(
-        self,
-        name,
-        default=None,
-        load_func=None,
-        dump_func=None,
-        read_only=None,
-        doc=None,
-    ):
-        self.name = name
-        self.default = default
-        self.load_func = load_func
-        self.dump_func = dump_func
-        if read_only is not None:
-            self.read_only = read_only
-        self.__doc__ = doc
-
-    def __get__(self, obj, type=None):
-        if obj is None:
-            return self
-        storage = self.lookup(obj)
-        if self.name not in storage:
-            return self.default
-        rv = storage[self.name]
-        if self.load_func is not None:
-            try:
-                rv = self.load_func(rv)
-            except (ValueError, TypeError):
-                rv = self.default
-        return rv
-
-    def __set__(self, obj, value):
-        if self.read_only:
-            raise AttributeError("read only property")
-        if self.dump_func is not None:
-            value = self.dump_func(value)
-        self.lookup(obj)[self.name] = value
-
-    def __delete__(self, obj):
-        if self.read_only:
-            raise AttributeError("read only property")
-        self.lookup(obj).pop(self.name, None)
-
-    def __repr__(self):
-        return "<%s %s>" % (self.__class__.__name__, self.name)
-
-
-def _cookie_quote(b):
-    buf = bytearray()
-    all_legal = True
-    _lookup = _cookie_quoting_map.get
-    _push = buf.extend
-
-    for char in iter_bytes(b):
-        if char not in _legal_cookie_chars:
-            all_legal = False
-            char = _lookup(char, char)
-        _push(char)
-
-    if all_legal:
-        return bytes(buf)
-    return bytes(b'"' + buf + b'"')
-
-
-def _cookie_unquote(b):
-    if len(b) < 2:
-        return b
-    if b[:1] != b'"' or b[-1:] != b'"':
-        return b
-
-    b = b[1:-1]
-
-    i = 0
-    n = len(b)
-    rv = bytearray()
-    _push = rv.extend
-
-    while 0 <= i < n:
-        o_match = _octal_re.search(b, i)
-        q_match = _quote_re.search(b, i)
-        if not o_match and not q_match:
-            rv.extend(b[i:])
-            break
-        j = k = -1
-        if o_match:
-            j = o_match.start(0)
-        if q_match:
-            k = q_match.start(0)
-        if q_match and (not o_match or k < j):
-            _push(b[i:k])
-            _push(b[k + 1 : k + 2])
-            i = k + 2
-        else:
-            _push(b[i:j])
-            rv.append(int(b[j + 1 : j + 4], 8))
-            i = j + 4
-
-    return bytes(rv)
-
-
-def _cookie_parse_impl(b):
-    """Lowlevel cookie parsing facility that operates on bytes."""
-    i = 0
-    n = len(b)
-
-    while i < n:
-        match = _cookie_re.search(b + b";", i)
-        if not match:
-            break
-
-        key = match.group("key").strip()
-        value = match.group("val") or b""
-        i = match.end(0)
-
-        # Ignore parameters.  We have no interest in them.
-        if key.lower() not in _cookie_params:
-            yield _cookie_unquote(key), _cookie_unquote(value)
-
-
-def _encode_idna(domain):
-    # If we're given bytes, make sure they fit into ASCII
-    if not isinstance(domain, text_type):
-        domain.decode("ascii")
-        return domain
-
-    # Otherwise check if it's already ascii, then return
-    try:
-        return domain.encode("ascii")
-    except UnicodeError:
-        pass
-
-    # Otherwise encode each part separately
-    parts = domain.split(".")
-    for idx, part in enumerate(parts):
-        parts[idx] = part.encode("idna")
-    return b".".join(parts)
-
-
-def _decode_idna(domain):
-    # If the input is a string try to encode it to ascii to
-    # do the idna decoding.  if that fails because of an
-    # unicode error, then we already have a decoded idna domain
-    if isinstance(domain, text_type):
-        try:
-            domain = domain.encode("ascii")
-        except UnicodeError:
-            return domain
-
-    # Decode each part separately.  If a part fails, try to
-    # decode it with ascii and silently ignore errors.  This makes
-    # most sense because the idna codec does not have error handling
-    parts = domain.split(b".")
-    for idx, part in enumerate(parts):
-        try:
-            parts[idx] = part.decode("idna")
-        except UnicodeError:
-            parts[idx] = part.decode("ascii", "ignore")
-
-    return ".".join(parts)
-
-
-def _make_cookie_domain(domain):
-    if domain is None:
-        return None
-    domain = _encode_idna(domain)
-    if b":" in domain:
-        domain = domain.split(b":", 1)[0]
-    if b"." in domain:
-        return domain
-    raise ValueError(
-        "Setting 'domain' for a cookie on a server running locally (ex: "
-        "localhost) is not supported by complying browsers. You should "
-        "have something like: '127.0.0.1 localhost dev.localhost' on "
-        "your hosts file and then point your server to run on "
-        "'dev.localhost' and also set 'domain' for 'dev.localhost'"
-    )
-
-
-def _easteregg(app=None):
-    """Like the name says.  But who knows how it works?"""
-
-    def bzzzzzzz(gyver):
-        import base64
-        import zlib
-
-        return zlib.decompress(base64.b64decode(gyver)).decode("ascii")
-
-    gyver = u"\n".join(
-        [
-            x + (77 - len(x)) * u" "
-            for x in bzzzzzzz(
-                b"""
-eJyFlzuOJDkMRP06xRjymKgDJCDQStBYT8BCgK4gTwfQ2fcFs2a2FzvZk+hvlcRvRJD148efHt9m
-9Xz94dRY5hGt1nrYcXx7us9qlcP9HHNh28rz8dZj+q4rynVFFPdlY4zH873NKCexrDM6zxxRymzz
-4QIxzK4bth1PV7+uHn6WXZ5C4ka/+prFzx3zWLMHAVZb8RRUxtFXI5DTQ2n3Hi2sNI+HK43AOWSY
-jmEzE4naFp58PdzhPMdslLVWHTGUVpSxImw+pS/D+JhzLfdS1j7PzUMxij+mc2U0I9zcbZ/HcZxc
-q1QjvvcThMYFnp93agEx392ZdLJWXbi/Ca4Oivl4h/Y1ErEqP+lrg7Xa4qnUKu5UE9UUA4xeqLJ5
-jWlPKJvR2yhRI7xFPdzPuc6adXu6ovwXwRPXXnZHxlPtkSkqWHilsOrGrvcVWXgGP3daXomCj317
-8P2UOw/NnA0OOikZyFf3zZ76eN9QXNwYdD8f8/LdBRFg0BO3bB+Pe/+G8er8tDJv83XTkj7WeMBJ
-v/rnAfdO51d6sFglfi8U7zbnr0u9tyJHhFZNXYfH8Iafv2Oa+DT6l8u9UYlajV/hcEgk1x8E8L/r
-XJXl2SK+GJCxtnyhVKv6GFCEB1OO3f9YWAIEbwcRWv/6RPpsEzOkXURMN37J0PoCSYeBnJQd9Giu
-LxYQJNlYPSo/iTQwgaihbART7Fcyem2tTSCcwNCs85MOOpJtXhXDe0E7zgZJkcxWTar/zEjdIVCk
-iXy87FW6j5aGZhttDBoAZ3vnmlkx4q4mMmCdLtnHkBXFMCReqthSGkQ+MDXLLCpXwBs0t+sIhsDI
-tjBB8MwqYQpLygZ56rRHHpw+OAVyGgaGRHWy2QfXez+ZQQTTBkmRXdV/A9LwH6XGZpEAZU8rs4pE
-1R4FQ3Uwt8RKEtRc0/CrANUoes3EzM6WYcFyskGZ6UTHJWenBDS7h163Eo2bpzqxNE9aVgEM2CqI
-GAJe9Yra4P5qKmta27VjzYdR04Vc7KHeY4vs61C0nbywFmcSXYjzBHdiEjraS7PGG2jHHTpJUMxN
-Jlxr3pUuFvlBWLJGE3GcA1/1xxLcHmlO+LAXbhrXah1tD6Ze+uqFGdZa5FM+3eHcKNaEarutAQ0A
-QMAZHV+ve6LxAwWnXbbSXEG2DmCX5ijeLCKj5lhVFBrMm+ryOttCAeFpUdZyQLAQkA06RLs56rzG
-8MID55vqr/g64Qr/wqwlE0TVxgoiZhHrbY2h1iuuyUVg1nlkpDrQ7Vm1xIkI5XRKLedN9EjzVchu
-jQhXcVkjVdgP2O99QShpdvXWoSwkp5uMwyjt3jiWCqWGSiaaPAzohjPanXVLbM3x0dNskJsaCEyz
-DTKIs+7WKJD4ZcJGfMhLFBf6hlbnNkLEePF8Cx2o2kwmYF4+MzAxa6i+6xIQkswOqGO+3x9NaZX8
-MrZRaFZpLeVTYI9F/djY6DDVVs340nZGmwrDqTCiiqD5luj3OzwpmQCiQhdRYowUYEA3i1WWGwL4
-GCtSoO4XbIPFeKGU13XPkDf5IdimLpAvi2kVDVQbzOOa4KAXMFlpi/hV8F6IDe0Y2reg3PuNKT3i
-RYhZqtkQZqSB2Qm0SGtjAw7RDwaM1roESC8HWiPxkoOy0lLTRFG39kvbLZbU9gFKFRvixDZBJmpi
-Xyq3RE5lW00EJjaqwp/v3EByMSpVZYsEIJ4APaHmVtpGSieV5CALOtNUAzTBiw81GLgC0quyzf6c
-NlWknzJeCsJ5fup2R4d8CYGN77mu5vnO1UqbfElZ9E6cR6zbHjgsr9ly18fXjZoPeDjPuzlWbFwS
-pdvPkhntFvkc13qb9094LL5NrA3NIq3r9eNnop9DizWOqCEbyRBFJTHn6Tt3CG1o8a4HevYh0XiJ
-sR0AVVHuGuMOIfbuQ/OKBkGRC6NJ4u7sbPX8bG/n5sNIOQ6/Y/BX3IwRlTSabtZpYLB85lYtkkgm
-p1qXK3Du2mnr5INXmT/78KI12n11EFBkJHHp0wJyLe9MvPNUGYsf+170maayRoy2lURGHAIapSpQ
-krEDuNoJCHNlZYhKpvw4mspVWxqo415n8cD62N9+EfHrAvqQnINStetek7RY2Urv8nxsnGaZfRr/
-nhXbJ6m/yl1LzYqscDZA9QHLNbdaSTTr+kFg3bC0iYbX/eQy0Bv3h4B50/SGYzKAXkCeOLI3bcAt
-mj2Z/FM1vQWgDynsRwNvrWnJHlespkrp8+vO1jNaibm+PhqXPPv30YwDZ6jApe3wUjFQobghvW9p
-7f2zLkGNv8b191cD/3vs9Q833z8t"""
-            ).splitlines()
-        ]
-    )
-
-    def easteregged(environ, start_response):
-        def injecting_start_response(status, headers, exc_info=None):
-            headers.append(("X-Powered-By", "Werkzeug"))
-            return start_response(status, headers, exc_info)
-
-        if app is not None and environ.get("QUERY_STRING") != "macgybarchakku":
-            return app(environ, injecting_start_response)
-        injecting_start_response("200 OK", [("Content-Type", "text/html")])
-        return [
-            (
-                u"""
-<!DOCTYPE html>
-<html>
-<head>
-<title>About Werkzeug</title>
-<style type="text/css">
-  body { font: 15px Georgia, serif; text-align: center; }
-  a { color: #333; text-decoration: none; }
-  h1 { font-size: 30px; margin: 20px 0 10px 0; }
-  p { margin: 0 0 30px 0; }
-  pre { font: 11px 'Consolas', 'Monaco', monospace; line-height: 0.95; }
-</style>
-</head>
-<body>
-<h1><a href="http://werkzeug.pocoo.org/">Werkzeug</a></h1>
-<p>the Swiss Army knife of Python web development.</p>
-<pre>%s\n\n\n</pre>
-</body>
-</html>"""
-                % gyver
-            ).encode("latin1")
-        ]
-
-    return easteregged
diff --git a/azure/functions/_thirdparty/werkzeug/datastructures.py b/azure/functions/_thirdparty/werkzeug/datastructures.py
deleted file mode 100644
index 4df573b7..00000000
--- a/azure/functions/_thirdparty/werkzeug/datastructures.py
+++ /dev/null
@@ -1,2846 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    werkzeug.datastructures
-    ~~~~~~~~~~~~~~~~~~~~~~~
-
-    This module provides mixins and classes with an immutable interface.
-
-    :copyright: 2007 Pallets
-    :license: BSD-3-Clause
-"""
-import codecs
-import mimetypes
-import re
-from copy import deepcopy
-from itertools import repeat
-
-from ._compat import BytesIO
-from ._compat import collections_abc
-from ._compat import integer_types
-from ._compat import iteritems
-from ._compat import iterkeys
-from ._compat import iterlists
-from ._compat import itervalues
-from ._compat import make_literal_wrapper
-from ._compat import PY2
-from ._compat import string_types
-from ._compat import text_type
-from ._compat import to_native
-
-_locale_delim_re = re.compile(r"[_-]")
-
-
-class _Missing(object):
-    def __repr__(self):
-        return "no value"
-
-    def __reduce__(self):
-        return "_missing"
-
-
-_missing = _Missing()
-
-
-def is_immutable(self):
-    raise TypeError("%r objects are immutable" % self.__class__.__name__)
-
-
-def iter_multi_items(mapping):
-    """Iterates over the items of a mapping yielding keys and values
-    without dropping any from more complex structures.
-    """
-    if isinstance(mapping, MultiDict):
-        for item in iteritems(mapping, multi=True):
-            yield item
-    elif isinstance(mapping, dict):
-        for key, value in iteritems(mapping):
-            if isinstance(value, (tuple, list)):
-                for value in value:
-                    yield key, value
-            else:
-                yield key, value
-    else:
-        for item in mapping:
-            yield item
-
-
-def native_itermethods(names):
-    if not PY2:
-        return lambda x: x
-
-    def setviewmethod(cls, name):
-        viewmethod_name = "view%s" % name
-        repr_name = "view_%s" % name
-
-        def viewmethod(self, *a, **kw):
-            return ViewItems(self, name, repr_name, *a, **kw)
-
-        viewmethod.__name__ = viewmethod_name
-        viewmethod.__doc__ = "`%s()` object providing a view on %s" % (
-            viewmethod_name,
-            name,
-        )
-        setattr(cls, viewmethod_name, viewmethod)
-
-    def setitermethod(cls, name):
-        itermethod = getattr(cls, name)
-        setattr(cls, "iter%s" % name, itermethod)
-
-        def listmethod(self, *a, **kw):
-            return list(itermethod(self, *a, **kw))
-
-        listmethod.__name__ = name
-        listmethod.__doc__ = "Like :py:meth:`iter%s`, but returns a list." % name
-        setattr(cls, name, listmethod)
-
-    def wrap(cls):
-        for name in names:
-            setitermethod(cls, name)
-            setviewmethod(cls, name)
-        return cls
-
-    return wrap
-
-
-class ImmutableListMixin(object):
-    """Makes a :class:`list` immutable.
-
-    .. versionadded:: 0.5
-
-    :private:
-    """
-
-    _hash_cache = None
-
-    def __hash__(self):
-        if self._hash_cache is not None:
-            return self._hash_cache
-        rv = self._hash_cache = hash(tuple(self))
-        return rv
-
-    def __reduce_ex__(self, protocol):
-        return type(self), (list(self),)
-
-    def __delitem__(self, key):
-        is_immutable(self)
-
-    def __iadd__(self, other):
-        is_immutable(self)
-
-    __imul__ = __iadd__
-
-    def __setitem__(self, key, value):
-        is_immutable(self)
-
-    def append(self, item):
-        is_immutable(self)
-
-    remove = append
-
-    def extend(self, iterable):
-        is_immutable(self)
-
-    def insert(self, pos, value):
-        is_immutable(self)
-
-    def pop(self, index=-1):
-        is_immutable(self)
-
-    def reverse(self):
-        is_immutable(self)
-
-    def sort(self, cmp=None, key=None, reverse=None):
-        is_immutable(self)
-
-
-class ImmutableList(ImmutableListMixin, list):
-    """An immutable :class:`list`.
-
-    .. versionadded:: 0.5
-
-    :private:
-    """
-
-    def __repr__(self):
-        return "%s(%s)" % (self.__class__.__name__, list.__repr__(self))
-
-
-class ImmutableDictMixin(object):
-    """Makes a :class:`dict` immutable.
-
-    .. versionadded:: 0.5
-
-    :private:
-    """
-
-    _hash_cache = None
-
-    @classmethod
-    def fromkeys(cls, keys, value=None):
-        instance = super(cls, cls).__new__(cls)
-        instance.__init__(zip(keys, repeat(value)))
-        return instance
-
-    def __reduce_ex__(self, protocol):
-        return type(self), (dict(self),)
-
-    def _iter_hashitems(self):
-        return iteritems(self)
-
-    def __hash__(self):
-        if self._hash_cache is not None:
-            return self._hash_cache
-        rv = self._hash_cache = hash(frozenset(self._iter_hashitems()))
-        return rv
-
-    def setdefault(self, key, default=None):
-        is_immutable(self)
-
-    def update(self, *args, **kwargs):
-        is_immutable(self)
-
-    def pop(self, key, default=None):
-        is_immutable(self)
-
-    def popitem(self):
-        is_immutable(self)
-
-    def __setitem__(self, key, value):
-        is_immutable(self)
-
-    def __delitem__(self, key):
-        is_immutable(self)
-
-    def clear(self):
-        is_immutable(self)
-
-
-class ImmutableMultiDictMixin(ImmutableDictMixin):
-    """Makes a :class:`MultiDict` immutable.
-
-    .. versionadded:: 0.5
-
-    :private:
-    """
-
-    def __reduce_ex__(self, protocol):
-        return type(self), (list(iteritems(self, multi=True)),)
-
-    def _iter_hashitems(self):
-        return iteritems(self, multi=True)
-
-    def add(self, key, value):
-        is_immutable(self)
-
-    def popitemlist(self):
-        is_immutable(self)
-
-    def poplist(self, key):
-        is_immutable(self)
-
-    def setlist(self, key, new_list):
-        is_immutable(self)
-
-    def setlistdefault(self, key, default_list=None):
-        is_immutable(self)
-
-
-class UpdateDictMixin(object):
-    """Makes dicts call `self.on_update` on modifications.
-
-    .. versionadded:: 0.5
-
-    :private:
-    """
-
-    on_update = None
-
-    def calls_update(name):  # noqa: B902
-        def oncall(self, *args, **kw):
-            rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw)
-            if self.on_update is not None:
-                self.on_update(self)
-            return rv
-
-        oncall.__name__ = name
-        return oncall
-
-    def setdefault(self, key, default=None):
-        modified = key not in self
-        rv = super(UpdateDictMixin, self).setdefault(key, default)
-        if modified and self.on_update is not None:
-            self.on_update(self)
-        return rv
-
-    def pop(self, key, default=_missing):
-        modified = key in self
-        if default is _missing:
-            rv = super(UpdateDictMixin, self).pop(key)
-        else:
-            rv = super(UpdateDictMixin, self).pop(key, default)
-        if modified and self.on_update is not None:
-            self.on_update(self)
-        return rv
-
-    __setitem__ = calls_update("__setitem__")
-    __delitem__ = calls_update("__delitem__")
-    clear = calls_update("clear")
-    popitem = calls_update("popitem")
-    update = calls_update("update")
-    del calls_update
-
-
-class TypeConversionDict(dict):
-    """Works like a regular dict but the :meth:`get` method can perform
-    type conversions.  :class:`MultiDict` and :class:`CombinedMultiDict`
-    are subclasses of this class and provide the same feature.
-
-    .. versionadded:: 0.5
-    """
-
-    def get(self, key, default=None, type=None):
-        """Return the default value if the requested data doesn't exist.
-        If `type` is provided and is a callable it should convert the value,
-        return it or raise a :exc:`ValueError` if that is not possible.  In
-        this case the function will return the default as if the value was not
-        found:
-
-        >>> d = TypeConversionDict(foo='42', bar='blub')
-        >>> d.get('foo', type=int)
-        42
-        >>> d.get('bar', -1, type=int)
-        -1
-
-        :param key: The key to be looked up.
-        :param default: The default value to be returned if the key can't
-                        be looked up.  If not further specified `None` is
-                        returned.
-        :param type: A callable that is used to cast the value in the
-                     :class:`MultiDict`.  If a :exc:`ValueError` is raised
-                     by this callable the default value is returned.
-        """
-        try:
-            rv = self[key]
-        except KeyError:
-            return default
-        if type is not None:
-            try:
-                rv = type(rv)
-            except ValueError:
-                rv = default
-        return rv
-
-
-class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict):
-    """Works like a :class:`TypeConversionDict` but does not support
-    modifications.
-
-    .. versionadded:: 0.5
-    """
-
-    def copy(self):
-        """Return a shallow mutable copy of this object.  Keep in mind that
-        the standard library's :func:`copy` function is a no-op for this class
-        like for any other python immutable type (eg: :class:`tuple`).
-        """
-        return TypeConversionDict(self)
-
-    def __copy__(self):
-        return self
-
-
-class ViewItems(object):
-    def __init__(self, multi_dict, method, repr_name, *a, **kw):
-        self.__multi_dict = multi_dict
-        self.__method = method
-        self.__repr_name = repr_name
-        self.__a = a
-        self.__kw = kw
-
-    def __get_items(self):
-        return getattr(self.__multi_dict, self.__method)(*self.__a, **self.__kw)
-
-    def __repr__(self):
-        return "%s(%r)" % (self.__repr_name, list(self.__get_items()))
-
-    def __iter__(self):
-        return iter(self.__get_items())
-
-
-@native_itermethods(["keys", "values", "items", "lists", "listvalues"])
-class MultiDict(TypeConversionDict):
-    """A :class:`MultiDict` is a dictionary subclass customized to deal with
-    multiple values for the same key which is for example used by the parsing
-    functions in the wrappers.  This is necessary because some HTML form
-    elements pass multiple values for the same key.
-
-    :class:`MultiDict` implements all standard dictionary methods.
-    Internally, it saves all values for a key as a list, but the standard dict
-    access methods will only return the first value for a key. If you want to
-    gain access to the other values, too, you have to use the `list` methods as
-    explained below.
-
-    Basic Usage:
-
-    >>> d = MultiDict([('a', 'b'), ('a', 'c')])
-    >>> d
-    MultiDict([('a', 'b'), ('a', 'c')])
-    >>> d['a']
-    'b'
-    >>> d.getlist('a')
-    ['b', 'c']
-    >>> 'a' in d
-    True
-
-    It behaves like a normal dict thus all dict functions will only return the
-    first value when multiple values for one key are found.
-
-    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
-    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
-    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
-    exceptions.
-
-    A :class:`MultiDict` can be constructed from an iterable of
-    ``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2
-    onwards some keyword parameters.
-
-    :param mapping: the initial value for the :class:`MultiDict`.  Either a
-                    regular dict, an iterable of ``(key, value)`` tuples
-                    or `None`.
-    """
-
-    def __init__(self, mapping=None):
-        if isinstance(mapping, MultiDict):
-            dict.__init__(self, ((k, l[:]) for k, l in iterlists(mapping)))
-        elif isinstance(mapping, dict):
-            tmp = {}
-            for key, value in iteritems(mapping):
-                if isinstance(value, (tuple, list)):
-                    if len(value) == 0:
-                        continue
-                    value = list(value)
-                else:
-                    value = [value]
-                tmp[key] = value
-            dict.__init__(self, tmp)
-        else:
-            tmp = {}
-            for key, value in mapping or ():
-                tmp.setdefault(key, []).append(value)
-            dict.__init__(self, tmp)
-
-    def __getstate__(self):
-        return dict(self.lists())
-
-    def __setstate__(self, value):
-        dict.clear(self)
-        dict.update(self, value)
-
-    def __getitem__(self, key):
-        """Return the first data value for this key;
-        raises KeyError if not found.
-
-        :param key: The key to be looked up.
-        :raise KeyError: if the key does not exist.
-        """
-
-        if key in self:
-            lst = dict.__getitem__(self, key)
-            if len(lst) > 0:
-                return lst[0]
-        raise exceptions.BadRequestKeyError(key)
-
-    def __setitem__(self, key, value):
-        """Like :meth:`add` but removes an existing key first.
-
-        :param key: the key for the value.
-        :param value: the value to set.
-        """
-        dict.__setitem__(self, key, [value])
-
-    def add(self, key, value):
-        """Adds a new value for the key.
-
-        .. versionadded:: 0.6
-
-        :param key: the key for the value.
-        :param value: the value to add.
-        """
-        dict.setdefault(self, key, []).append(value)
-
-    def getlist(self, key, type=None):
-        """Return the list of items for a given key. If that key is not in the
-        `MultiDict`, the return value will be an empty list.  Just as `get`
-        `getlist` accepts a `type` parameter.  All items will be converted
-        with the callable defined there.
-
-        :param key: The key to be looked up.
-        :param type: A callable that is used to cast the value in the
-                     :class:`MultiDict`.  If a :exc:`ValueError` is raised
-                     by this callable the value will be removed from the list.
-        :return: a :class:`list` of all the values for the key.
-        """
-        try:
-            rv = dict.__getitem__(self, key)
-        except KeyError:
-            return []
-        if type is None:
-            return list(rv)
-        result = []
-        for item in rv:
-            try:
-                result.append(type(item))
-            except ValueError:
-                pass
-        return result
-
-    def setlist(self, key, new_list):
-        """Remove the old values for a key and add new ones.  Note that the list
-        you pass the values in will be shallow-copied before it is inserted in
-        the dictionary.
-
-        >>> d = MultiDict()
-        >>> d.setlist('foo', ['1', '2'])
-        >>> d['foo']
-        '1'
-        >>> d.getlist('foo')
-        ['1', '2']
-
-        :param key: The key for which the values are set.
-        :param new_list: An iterable with the new values for the key.  Old values
-                         are removed first.
-        """
-        dict.__setitem__(self, key, list(new_list))
-
-    def setdefault(self, key, default=None):
-        """Returns the value for the key if it is in the dict, otherwise it
-        returns `default` and sets that value for `key`.
-
-        :param key: The key to be looked up.
-        :param default: The default value to be returned if the key is not
-                        in the dict.  If not further specified it's `None`.
-        """
-        if key not in self:
-            self[key] = default
-        else:
-            default = self[key]
-        return default
-
-    def setlistdefault(self, key, default_list=None):
-        """Like `setdefault` but sets multiple values.  The list returned
-        is not a copy, but the list that is actually used internally.  This
-        means that you can put new values into the dict by appending items
-        to the list:
-
-        >>> d = MultiDict({"foo": 1})
-        >>> d.setlistdefault("foo").extend([2, 3])
-        >>> d.getlist("foo")
-        [1, 2, 3]
-
-        :param key: The key to be looked up.
-        :param default_list: An iterable of default values.  It is either copied
-                             (in case it was a list) or converted into a list
-                             before returned.
-        :return: a :class:`list`
-        """
-        if key not in self:
-            default_list = list(default_list or ())
-            dict.__setitem__(self, key, default_list)
-        else:
-            default_list = dict.__getitem__(self, key)
-        return default_list
-
-    def items(self, multi=False):
-        """Return an iterator of ``(key, value)`` pairs.
-
-        :param multi: If set to `True` the iterator returned will have a pair
-                      for each value of each key.  Otherwise it will only
-                      contain pairs for the first value of each key.
-        """
-
-        for key, values in iteritems(dict, self):
-            if multi:
-                for value in values:
-                    yield key, value
-            else:
-                yield key, values[0]
-
-    def lists(self):
-        """Return a iterator of ``(key, values)`` pairs, where values is the list
-        of all values associated with the key."""
-
-        for key, values in iteritems(dict, self):
-            yield key, list(values)
-
-    def keys(self):
-        return iterkeys(dict, self)
-
-    __iter__ = keys
-
-    def values(self):
-        """Returns an iterator of the first value on every key's value list."""
-        for values in itervalues(dict, self):
-            yield values[0]
-
-    def listvalues(self):
-        """Return an iterator of all values associated with a key.  Zipping
-        :meth:`keys` and this is the same as calling :meth:`lists`:
-
-        >>> d = MultiDict({"foo": [1, 2, 3]})
-        >>> zip(d.keys(), d.listvalues()) == d.lists()
-        True
-        """
-
-        return itervalues(dict, self)
-
-    def copy(self):
-        """Return a shallow copy of this object."""
-        return self.__class__(self)
-
-    def deepcopy(self, memo=None):
-        """Return a deep copy of this object."""
-        return self.__class__(deepcopy(self.to_dict(flat=False), memo))
-
-    def to_dict(self, flat=True):
-        """Return the contents as regular dict.  If `flat` is `True` the
-        returned dict will only have the first item present, if `flat` is
-        `False` all values will be returned as lists.
-
-        :param flat: If set to `False` the dict returned will have lists
-                     with all the values in it.  Otherwise it will only
-                     contain the first value for each key.
-        :return: a :class:`dict`
-        """
-        if flat:
-            return dict(iteritems(self))
-        return dict(self.lists())
-
-    def update(self, other_dict):
-        """update() extends rather than replaces existing key lists:
-
-        >>> a = MultiDict({'x': 1})
-        >>> b = MultiDict({'x': 2, 'y': 3})
-        >>> a.update(b)
-        >>> a
-        MultiDict([('y', 3), ('x', 1), ('x', 2)])
-
-        If the value list for a key in ``other_dict`` is empty, no new values
-        will be added to the dict and the key will not be created:
-
-        >>> x = {'empty_list': []}
-        >>> y = MultiDict()
-        >>> y.update(x)
-        >>> y
-        MultiDict([])
-        """
-        for key, value in iter_multi_items(other_dict):
-            MultiDict.add(self, key, value)
-
-    def pop(self, key, default=_missing):
-        """Pop the first item for a list on the dict.  Afterwards the
-        key is removed from the dict, so additional values are discarded:
-
-        >>> d = MultiDict({"foo": [1, 2, 3]})
-        >>> d.pop("foo")
-        1
-        >>> "foo" in d
-        False
-
-        :param key: the key to pop.
-        :param default: if provided the value to return if the key was
-                        not in the dictionary.
-        """
-        try:
-            lst = dict.pop(self, key)
-
-            if len(lst) == 0:
-                raise exceptions.BadRequestKeyError(key)
-
-            return lst[0]
-        except KeyError:
-            if default is not _missing:
-                return default
-            raise exceptions.BadRequestKeyError(key)
-
-    def popitem(self):
-        """Pop an item from the dict."""
-        try:
-            item = dict.popitem(self)
-
-            if len(item[1]) == 0:
-                raise exceptions.BadRequestKeyError(item)
-
-            return (item[0], item[1][0])
-        except KeyError as e:
-            raise exceptions.BadRequestKeyError(e.args[0])
-
-    def poplist(self, key):
-        """Pop the list for a key from the dict.  If the key is not in the dict
-        an empty list is returned.
-
-        .. versionchanged:: 0.5
-           If the key does no longer exist a list is returned instead of
-           raising an error.
-        """
-        return dict.pop(self, key, [])
-
-    def popitemlist(self):
-        """Pop a ``(key, list)`` tuple from the dict."""
-        try:
-            return dict.popitem(self)
-        except KeyError as e:
-            raise exceptions.BadRequestKeyError(e.args[0])
-
-    def __copy__(self):
-        return self.copy()
-
-    def __deepcopy__(self, memo):
-        return self.deepcopy(memo=memo)
-
-    def __repr__(self):
-        return "%s(%r)" % (self.__class__.__name__, list(iteritems(self, multi=True)))
-
-
-class _omd_bucket(object):
-    """Wraps values in the :class:`OrderedMultiDict`.  This makes it
-    possible to keep an order over multiple different keys.  It requires
-    a lot of extra memory and slows down access a lot, but makes it
-    possible to access elements in O(1) and iterate in O(n).
-    """
-
-    __slots__ = ("prev", "key", "value", "next")
-
-    def __init__(self, omd, key, value):
-        self.prev = omd._last_bucket
-        self.key = key
-        self.value = value
-        self.next = None
-
-        if omd._first_bucket is None:
-            omd._first_bucket = self
-        if omd._last_bucket is not None:
-            omd._last_bucket.next = self
-        omd._last_bucket = self
-
-    def unlink(self, omd):
-        if self.prev:
-            self.prev.next = self.next
-        if self.next:
-            self.next.prev = self.prev
-        if omd._first_bucket is self:
-            omd._first_bucket = self.next
-        if omd._last_bucket is self:
-            omd._last_bucket = self.prev
-
-
-@native_itermethods(["keys", "values", "items", "lists", "listvalues"])
-class OrderedMultiDict(MultiDict):
-    """Works like a regular :class:`MultiDict` but preserves the
-    order of the fields.  To convert the ordered multi dict into a
-    list you can use the :meth:`items` method and pass it ``multi=True``.
-
-    In general an :class:`OrderedMultiDict` is an order of magnitude
-    slower than a :class:`MultiDict`.
-
-    .. admonition:: note
-
-       Due to a limitation in Python you cannot convert an ordered
-       multi dict into a regular dict by using ``dict(multidict)``.
-       Instead you have to use the :meth:`to_dict` method, otherwise
-       the internal bucket objects are exposed.
-    """
-
-    def __init__(self, mapping=None):
-        dict.__init__(self)
-        self._first_bucket = self._last_bucket = None
-        if mapping is not None:
-            OrderedMultiDict.update(self, mapping)
-
-    def __eq__(self, other):
-        if not isinstance(other, MultiDict):
-            return NotImplemented
-        if isinstance(other, OrderedMultiDict):
-            iter1 = iteritems(self, multi=True)
-            iter2 = iteritems(other, multi=True)
-            try:
-                for k1, v1 in iter1:
-                    k2, v2 = next(iter2)
-                    if k1 != k2 or v1 != v2:
-                        return False
-            except StopIteration:
-                return False
-            try:
-                next(iter2)
-            except StopIteration:
-                return True
-            return False
-        if len(self) != len(other):
-            return False
-        for key, values in iterlists(self):
-            if other.getlist(key) != values:
-                return False
-        return True
-
-    __hash__ = None
-
-    def __ne__(self, other):
-        return not self.__eq__(other)
-
-    def __reduce_ex__(self, protocol):
-        return type(self), (list(iteritems(self, multi=True)),)
-
-    def __getstate__(self):
-        return list(iteritems(self, multi=True))
-
-    def __setstate__(self, values):
-        dict.clear(self)
-        for key, value in values:
-            self.add(key, value)
-
-    def __getitem__(self, key):
-        if key in self:
-            return dict.__getitem__(self, key)[0].value
-        raise exceptions.BadRequestKeyError(key)
-
-    def __setitem__(self, key, value):
-        self.poplist(key)
-        self.add(key, value)
-
-    def __delitem__(self, key):
-        self.pop(key)
-
-    def keys(self):
-        return (key for key, value in iteritems(self))
-
-    __iter__ = keys
-
-    def values(self):
-        return (value for key, value in iteritems(self))
-
-    def items(self, multi=False):
-        ptr = self._first_bucket
-        if multi:
-            while ptr is not None:
-                yield ptr.key, ptr.value
-                ptr = ptr.next
-        else:
-            returned_keys = set()
-            while ptr is not None:
-                if ptr.key not in returned_keys:
-                    returned_keys.add(ptr.key)
-                    yield ptr.key, ptr.value
-                ptr = ptr.next
-
-    def lists(self):
-        returned_keys = set()
-        ptr = self._first_bucket
-        while ptr is not None:
-            if ptr.key not in returned_keys:
-                yield ptr.key, self.getlist(ptr.key)
-                returned_keys.add(ptr.key)
-            ptr = ptr.next
-
-    def listvalues(self):
-        for _key, values in iterlists(self):
-            yield values
-
-    def add(self, key, value):
-        dict.setdefault(self, key, []).append(_omd_bucket(self, key, value))
-
-    def getlist(self, key, type=None):
-        try:
-            rv = dict.__getitem__(self, key)
-        except KeyError:
-            return []
-        if type is None:
-            return [x.value for x in rv]
-        result = []
-        for item in rv:
-            try:
-                result.append(type(item.value))
-            except ValueError:
-                pass
-        return result
-
-    def setlist(self, key, new_list):
-        self.poplist(key)
-        for value in new_list:
-            self.add(key, value)
-
-    def setlistdefault(self, key, default_list=None):
-        raise TypeError("setlistdefault is unsupported for ordered multi dicts")
-
-    def update(self, mapping):
-        for key, value in iter_multi_items(mapping):
-            OrderedMultiDict.add(self, key, value)
-
-    def poplist(self, key):
-        buckets = dict.pop(self, key, ())
-        for bucket in buckets:
-            bucket.unlink(self)
-        return [x.value for x in buckets]
-
-    def pop(self, key, default=_missing):
-        try:
-            buckets = dict.pop(self, key)
-        except KeyError:
-            if default is not _missing:
-                return default
-            raise exceptions.BadRequestKeyError(key)
-        for bucket in buckets:
-            bucket.unlink(self)
-        return buckets[0].value
-
-    def popitem(self):
-        try:
-            key, buckets = dict.popitem(self)
-        except KeyError as e:
-            raise exceptions.BadRequestKeyError(e.args[0])
-        for bucket in buckets:
-            bucket.unlink(self)
-        return key, buckets[0].value
-
-    def popitemlist(self):
-        try:
-            key, buckets = dict.popitem(self)
-        except KeyError as e:
-            raise exceptions.BadRequestKeyError(e.args[0])
-        for bucket in buckets:
-            bucket.unlink(self)
-        return key, [x.value for x in buckets]
-
-
-def _options_header_vkw(value, kw):
-    return dump_options_header(
-        value, dict((k.replace("_", "-"), v) for k, v in kw.items())
-    )
-
-
-def _unicodify_header_value(value):
-    if isinstance(value, bytes):
-        value = value.decode("latin-1")
-    if not isinstance(value, text_type):
-        value = text_type(value)
-    return value
-
-
-@native_itermethods(["keys", "values", "items"])
-class Headers(object):
-    """An object that stores some headers.  It has a dict-like interface
-    but is ordered and can store the same keys multiple times.
-
-    This data structure is useful if you want a nicer way to handle WSGI
-    headers which are stored as tuples in a list.
-
-    From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is
-    also a subclass of the :class:`~exceptions.BadRequest` HTTP exception
-    and will render a page for a ``400 BAD REQUEST`` if caught in a
-    catch-all for HTTP exceptions.
-
-    Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers`
-    class, with the exception of `__getitem__`.  :mod:`wsgiref` will return
-    `None` for ``headers['missing']``, whereas :class:`Headers` will raise
-    a :class:`KeyError`.
-
-    To create a new :class:`Headers` object pass it a list or dict of headers
-    which are used as default values.  This does not reuse the list passed
-    to the constructor for internal usage.
-
-    :param defaults: The list of default values for the :class:`Headers`.
-
-    .. versionchanged:: 0.9
-       This data structure now stores unicode values similar to how the
-       multi dicts do it.  The main difference is that bytes can be set as
-       well which will automatically be latin1 decoded.
-
-    .. versionchanged:: 0.9
-       The :meth:`linked` function was removed without replacement as it
-       was an API that does not support the changes to the encoding model.
-    """
-
-    def __init__(self, defaults=None):
-        self._list = []
-        if defaults is not None:
-            if isinstance(defaults, (list, Headers)):
-                self._list.extend(defaults)
-            else:
-                self.extend(defaults)
-
-    def __getitem__(self, key, _get_mode=False):
-        if not _get_mode:
-            if isinstance(key, integer_types):
-                return self._list[key]
-            elif isinstance(key, slice):
-                return self.__class__(self._list[key])
-        if not isinstance(key, string_types):
-            raise exceptions.BadRequestKeyError(key)
-        ikey = key.lower()
-        for k, v in self._list:
-            if k.lower() == ikey:
-                return v
-        # micro optimization: if we are in get mode we will catch that
-        # exception one stack level down so we can raise a standard
-        # key error instead of our special one.
-        if _get_mode:
-            raise KeyError()
-        raise exceptions.BadRequestKeyError(key)
-
-    def __eq__(self, other):
-        return other.__class__ is self.__class__ and set(other._list) == set(self._list)
-
-    __hash__ = None
-
-    def __ne__(self, other):
-        return not self.__eq__(other)
-
-    def get(self, key, default=None, type=None, as_bytes=False):
-        """Return the default value if the requested data doesn't exist.
-        If `type` is provided and is a callable it should convert the value,
-        return it or raise a :exc:`ValueError` if that is not possible.  In
-        this case the function will return the default as if the value was not
-        found:
-
-        >>> d = Headers([('Content-Length', '42')])
-        >>> d.get('Content-Length', type=int)
-        42
-
-        If a headers object is bound you must not add unicode strings
-        because no encoding takes place.
-
-        .. versionadded:: 0.9
-           Added support for `as_bytes`.
-
-        :param key: The key to be looked up.
-        :param default: The default value to be returned if the key can't
-                        be looked up.  If not further specified `None` is
-                        returned.
-        :param type: A callable that is used to cast the value in the
-                     :class:`Headers`.  If a :exc:`ValueError` is raised
-                     by this callable the default value is returned.
-        :param as_bytes: return bytes instead of unicode strings.
-        """
-        try:
-            rv = self.__getitem__(key, _get_mode=True)
-        except KeyError:
-            return default
-        if as_bytes:
-            rv = rv.encode("latin1")
-        if type is None:
-            return rv
-        try:
-            return type(rv)
-        except ValueError:
-            return default
-
-    def getlist(self, key, type=None, as_bytes=False):
-        """Return the list of items for a given key. If that key is not in the
-        :class:`Headers`, the return value will be an empty list.  Just as
-        :meth:`get` :meth:`getlist` accepts a `type` parameter.  All items will
-        be converted with the callable defined there.
-
-        .. versionadded:: 0.9
-           Added support for `as_bytes`.
-
-        :param key: The key to be looked up.
-        :param type: A callable that is used to cast the value in the
-                     :class:`Headers`.  If a :exc:`ValueError` is raised
-                     by this callable the value will be removed from the list.
-        :return: a :class:`list` of all the values for the key.
-        :param as_bytes: return bytes instead of unicode strings.
-        """
-        ikey = key.lower()
-        result = []
-        for k, v in self:
-            if k.lower() == ikey:
-                if as_bytes:
-                    v = v.encode("latin1")
-                if type is not None:
-                    try:
-                        v = type(v)
-                    except ValueError:
-                        continue
-                result.append(v)
-        return result
-
-    def get_all(self, name):
-        """Return a list of all the values for the named field.
-
-        This method is compatible with the :mod:`wsgiref`
-        :meth:`~wsgiref.headers.Headers.get_all` method.
-        """
-        return self.getlist(name)
-
-    def items(self, lower=False):
-        for key, value in self:
-            if lower:
-                key = key.lower()
-            yield key, value
-
-    def keys(self, lower=False):
-        for key, _ in iteritems(self, lower):
-            yield key
-
-    def values(self):
-        for _, value in iteritems(self):
-            yield value
-
-    def extend(self, iterable):
-        """Extend the headers with a dict or an iterable yielding keys and
-        values.
-        """
-        if isinstance(iterable, dict):
-            for key, value in iteritems(iterable):
-                if isinstance(value, (tuple, list)):
-                    for v in value:
-                        self.add(key, v)
-                else:
-                    self.add(key, value)
-        else:
-            for key, value in iterable:
-                self.add(key, value)
-
-    def __delitem__(self, key, _index_operation=True):
-        if _index_operation and isinstance(key, (integer_types, slice)):
-            del self._list[key]
-            return
-        key = key.lower()
-        new = []
-        for k, v in self._list:
-            if k.lower() != key:
-                new.append((k, v))
-        self._list[:] = new
-
-    def remove(self, key):
-        """Remove a key.
-
-        :param key: The key to be removed.
-        """
-        return self.__delitem__(key, _index_operation=False)
-
-    def pop(self, key=None, default=_missing):
-        """Removes and returns a key or index.
-
-        :param key: The key to be popped.  If this is an integer the item at
-                    that position is removed, if it's a string the value for
-                    that key is.  If the key is omitted or `None` the last
-                    item is removed.
-        :return: an item.
-        """
-        if key is None:
-            return self._list.pop()
-        if isinstance(key, integer_types):
-            return self._list.pop(key)
-        try:
-            rv = self[key]
-            self.remove(key)
-        except KeyError:
-            if default is not _missing:
-                return default
-            raise
-        return rv
-
-    def popitem(self):
-        """Removes a key or index and returns a (key, value) item."""
-        return self.pop()
-
-    def __contains__(self, key):
-        """Check if a key is present."""
-        try:
-            self.__getitem__(key, _get_mode=True)
-        except KeyError:
-            return False
-        return True
-
-    has_key = __contains__
-
-    def __iter__(self):
-        """Yield ``(key, value)`` tuples."""
-        return iter(self._list)
-
-    def __len__(self):
-        return len(self._list)
-
-    def add(self, _key, _value, **kw):
-        """Add a new header tuple to the list.
-
-        Keyword arguments can specify additional parameters for the header
-        value, with underscores converted to dashes::
-
-        >>> d = Headers()
-        >>> d.add('Content-Type', 'text/plain')
-        >>> d.add('Content-Disposition', 'attachment', filename='foo.png')
-
-        The keyword argument dumping uses :func:`dump_options_header`
-        behind the scenes.
-
-        .. versionadded:: 0.4.1
-            keyword arguments were added for :mod:`wsgiref` compatibility.
-        """
-        if kw:
-            _value = _options_header_vkw(_value, kw)
-        _key = _unicodify_header_value(_key)
-        _value = _unicodify_header_value(_value)
-        self._validate_value(_value)
-        self._list.append((_key, _value))
-
-    def _validate_value(self, value):
-        if not isinstance(value, text_type):
-            raise TypeError("Value should be unicode.")
-        if u"\n" in value or u"\r" in value:
-            raise ValueError(
-                "Detected newline in header value.  This is "
-                "a potential security problem"
-            )
-
-    def add_header(self, _key, _value, **_kw):
-        """Add a new header tuple to the list.
-
-        An alias for :meth:`add` for compatibility with the :mod:`wsgiref`
-        :meth:`~wsgiref.headers.Headers.add_header` method.
-        """
-        self.add(_key, _value, **_kw)
-
-    def clear(self):
-        """Clears all headers."""
-        del self._list[:]
-
-    def set(self, _key, _value, **kw):
-        """Remove all header tuples for `key` and add a new one.  The newly
-        added key either appears at the end of the list if there was no
-        entry or replaces the first one.
-
-        Keyword arguments can specify additional parameters for the header
-        value, with underscores converted to dashes.  See :meth:`add` for
-        more information.
-
-        .. versionchanged:: 0.6.1
-           :meth:`set` now accepts the same arguments as :meth:`add`.
-
-        :param key: The key to be inserted.
-        :param value: The value to be inserted.
-        """
-        if kw:
-            _value = _options_header_vkw(_value, kw)
-        _key = _unicodify_header_value(_key)
-        _value = _unicodify_header_value(_value)
-        self._validate_value(_value)
-        if not self._list:
-            self._list.append((_key, _value))
-            return
-        listiter = iter(self._list)
-        ikey = _key.lower()
-        for idx, (old_key, _old_value) in enumerate(listiter):
-            if old_key.lower() == ikey:
-                # replace first ocurrence
-                self._list[idx] = (_key, _value)
-                break
-        else:
-            self._list.append((_key, _value))
-            return
-        self._list[idx + 1 :] = [t for t in listiter if t[0].lower() != ikey]
-
-    def setdefault(self, key, default):
-        """Returns the value for the key if it is in the dict, otherwise it
-        returns `default` and sets that value for `key`.
-
-        :param key: The key to be looked up.
-        :param default: The default value to be returned if the key is not
-                        in the dict.  If not further specified it's `None`.
-        """
-        if key in self:
-            return self[key]
-        self.set(key, default)
-        return default
-
-    def __setitem__(self, key, value):
-        """Like :meth:`set` but also supports index/slice based setting."""
-        if isinstance(key, (slice, integer_types)):
-            if isinstance(key, integer_types):
-                value = [value]
-            value = [
-                (_unicodify_header_value(k), _unicodify_header_value(v))
-                for (k, v) in value
-            ]
-            [self._validate_value(v) for (k, v) in value]
-            if isinstance(key, integer_types):
-                self._list[key] = value[0]
-            else:
-                self._list[key] = value
-        else:
-            self.set(key, value)
-
-    def to_wsgi_list(self):
-        """Convert the headers into a list suitable for WSGI.
-
-        The values are byte strings in Python 2 converted to latin1 and unicode
-        strings in Python 3 for the WSGI server to encode.
-
-        :return: list
-        """
-        if PY2:
-            return [(to_native(k), v.encode("latin1")) for k, v in self]
-        return list(self)
-
-    def copy(self):
-        return self.__class__(self._list)
-
-    def __copy__(self):
-        return self.copy()
-
-    def __str__(self):
-        """Returns formatted headers suitable for HTTP transmission."""
-        strs = []
-        for key, value in self.to_wsgi_list():
-            strs.append("%s: %s" % (key, value))
-        strs.append("\r\n")
-        return "\r\n".join(strs)
-
-    def __repr__(self):
-        return "%s(%r)" % (self.__class__.__name__, list(self))
-
-
-class ImmutableHeadersMixin(object):
-    """Makes a :class:`Headers` immutable.  We do not mark them as
-    hashable though since the only usecase for this datastructure
-    in Werkzeug is a view on a mutable structure.
-
-    .. versionadded:: 0.5
-
-    :private:
-    """
-
-    def __delitem__(self, key, **kwargs):
-        is_immutable(self)
-
-    def __setitem__(self, key, value):
-        is_immutable(self)
-
-    set = __setitem__
-
-    def add(self, item):
-        is_immutable(self)
-
-    remove = add_header = add
-
-    def extend(self, iterable):
-        is_immutable(self)
-
-    def insert(self, pos, value):
-        is_immutable(self)
-
-    def pop(self, index=-1):
-        is_immutable(self)
-
-    def popitem(self):
-        is_immutable(self)
-
-    def setdefault(self, key, default):
-        is_immutable(self)
-
-
-class EnvironHeaders(ImmutableHeadersMixin, Headers):
-    """Read only version of the headers from a WSGI environment.  This
-    provides the same interface as `Headers` and is constructed from
-    a WSGI environment.
-
-    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
-    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
-    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for
-    HTTP exceptions.
-    """
-
-    def __init__(self, environ):
-        self.environ = environ
-
-    def __eq__(self, other):
-        return self.environ is other.environ
-
-    __hash__ = None
-
-    def __getitem__(self, key, _get_mode=False):
-        # _get_mode is a no-op for this class as there is no index but
-        # used because get() calls it.
-        if not isinstance(key, string_types):
-            raise KeyError(key)
-        key = key.upper().replace("-", "_")
-        if key in ("CONTENT_TYPE", "CONTENT_LENGTH"):
-            return _unicodify_header_value(self.environ[key])
-        return _unicodify_header_value(self.environ["HTTP_" + key])
-
-    def __len__(self):
-        # the iter is necessary because otherwise list calls our
-        # len which would call list again and so forth.
-        return len(list(iter(self)))
-
-    def __iter__(self):
-        for key, value in iteritems(self.environ):
-            if key.startswith("HTTP_") and key not in (
-                "HTTP_CONTENT_TYPE",
-                "HTTP_CONTENT_LENGTH",
-            ):
-                yield (
-                    key[5:].replace("_", "-").title(),
-                    _unicodify_header_value(value),
-                )
-            elif key in ("CONTENT_TYPE", "CONTENT_LENGTH") and value:
-                yield (key.replace("_", "-").title(), _unicodify_header_value(value))
-
-    def copy(self):
-        raise TypeError("cannot create %r copies" % self.__class__.__name__)
-
-
-@native_itermethods(["keys", "values", "items", "lists", "listvalues"])
-class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict):
-    """A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict`
-    instances as sequence and it will combine the return values of all wrapped
-    dicts:
-
-    >>> from werkzeug.datastructures import CombinedMultiDict, MultiDict
-    >>> post = MultiDict([('foo', 'bar')])
-    >>> get = MultiDict([('blub', 'blah')])
-    >>> combined = CombinedMultiDict([get, post])
-    >>> combined['foo']
-    'bar'
-    >>> combined['blub']
-    'blah'
-
-    This works for all read operations and will raise a `TypeError` for
-    methods that usually change data which isn't possible.
-
-    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
-    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
-    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
-    exceptions.
-    """
-
-    def __reduce_ex__(self, protocol):
-        return type(self), (self.dicts,)
-
-    def __init__(self, dicts=None):
-        self.dicts = dicts or []
-
-    @classmethod
-    def fromkeys(cls):
-        raise TypeError("cannot create %r instances by fromkeys" % cls.__name__)
-
-    def __getitem__(self, key):
-        for d in self.dicts:
-            if key in d:
-                return d[key]
-        raise exceptions.BadRequestKeyError(key)
-
-    def get(self, key, default=None, type=None):
-        for d in self.dicts:
-            if key in d:
-                if type is not None:
-                    try:
-                        return type(d[key])
-                    except ValueError:
-                        continue
-                return d[key]
-        return default
-
-    def getlist(self, key, type=None):
-        rv = []
-        for d in self.dicts:
-            rv.extend(d.getlist(key, type))
-        return rv
-
-    def _keys_impl(self):
-        """This function exists so __len__ can be implemented more efficiently,
-        saving one list creation from an iterator.
-
-        Using this for Python 2's ``dict.keys`` behavior would be useless since
-        `dict.keys` in Python 2 returns a list, while we have a set here.
-        """
-        rv = set()
-        for d in self.dicts:
-            rv.update(iterkeys(d))
-        return rv
-
-    def keys(self):
-        return iter(self._keys_impl())
-
-    __iter__ = keys
-
-    def items(self, multi=False):
-        found = set()
-        for d in self.dicts:
-            for key, value in iteritems(d, multi):
-                if multi:
-                    yield key, value
-                elif key not in found:
-                    found.add(key)
-                    yield key, value
-
-    def values(self):
-        for _key, value in iteritems(self):
-            yield value
-
-    def lists(self):
-        rv = {}
-        for d in self.dicts:
-            for key, values in iterlists(d):
-                rv.setdefault(key, []).extend(values)
-        return iteritems(rv)
-
-    def listvalues(self):
-        return (x[1] for x in self.lists())
-
-    def copy(self):
-        """Return a shallow mutable copy of this object.
-
-        This returns a :class:`MultiDict` representing the data at the
-        time of copying. The copy will no longer reflect changes to the
-        wrapped dicts.
-
-        .. versionchanged:: 0.15
-            Return a mutable :class:`MultiDict`.
-        """
-        return MultiDict(self)
-
-    def to_dict(self, flat=True):
-        """Return the contents as regular dict.  If `flat` is `True` the
-        returned dict will only have the first item present, if `flat` is
-        `False` all values will be returned as lists.
-
-        :param flat: If set to `False` the dict returned will have lists
-                     with all the values in it.  Otherwise it will only
-                     contain the first item for each key.
-        :return: a :class:`dict`
-        """
-        rv = {}
-        for d in reversed(self.dicts):
-            rv.update(d.to_dict(flat))
-        return rv
-
-    def __len__(self):
-        return len(self._keys_impl())
-
-    def __contains__(self, key):
-        for d in self.dicts:
-            if key in d:
-                return True
-        return False
-
-    has_key = __contains__
-
-    def __repr__(self):
-        return "%s(%r)" % (self.__class__.__name__, self.dicts)
-
-
-class FileMultiDict(MultiDict):
-    """A special :class:`MultiDict` that has convenience methods to add
-    files to it.  This is used for :class:`EnvironBuilder` and generally
-    useful for unittesting.
-
-    .. versionadded:: 0.5
-    """
-
-    def add_file(self, name, file, filename=None, content_type=None):
-        """Adds a new file to the dict.  `file` can be a file name or
-        a :class:`file`-like or a :class:`FileStorage` object.
-
-        :param name: the name of the field.
-        :param file: a filename or :class:`file`-like object
-        :param filename: an optional filename
-        :param content_type: an optional content type
-        """
-        if isinstance(file, FileStorage):
-            value = file
-        else:
-            if isinstance(file, string_types):
-                if filename is None:
-                    filename = file
-                file = open(file, "rb")
-            if filename and content_type is None:
-                content_type = (
-                    mimetypes.guess_type(filename)[0] or "application/octet-stream"
-                )
-            value = FileStorage(file, filename, name, content_type)
-
-        self.add(name, value)
-
-
-class ImmutableDict(ImmutableDictMixin, dict):
-    """An immutable :class:`dict`.
-
-    .. versionadded:: 0.5
-    """
-
-    def __repr__(self):
-        return "%s(%s)" % (self.__class__.__name__, dict.__repr__(self))
-
-    def copy(self):
-        """Return a shallow mutable copy of this object.  Keep in mind that
-        the standard library's :func:`copy` function is a no-op for this class
-        like for any other python immutable type (eg: :class:`tuple`).
-        """
-        return dict(self)
-
-    def __copy__(self):
-        return self
-
-
-class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict):
-    """An immutable :class:`MultiDict`.
-
-    .. versionadded:: 0.5
-    """
-
-    def copy(self):
-        """Return a shallow mutable copy of this object.  Keep in mind that
-        the standard library's :func:`copy` function is a no-op for this class
-        like for any other python immutable type (eg: :class:`tuple`).
-        """
-        return MultiDict(self)
-
-    def __copy__(self):
-        return self
-
-
-class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, OrderedMultiDict):
-    """An immutable :class:`OrderedMultiDict`.
-
-    .. versionadded:: 0.6
-    """
-
-    def _iter_hashitems(self):
-        return enumerate(iteritems(self, multi=True))
-
-    def copy(self):
-        """Return a shallow mutable copy of this object.  Keep in mind that
-        the standard library's :func:`copy` function is a no-op for this class
-        like for any other python immutable type (eg: :class:`tuple`).
-        """
-        return OrderedMultiDict(self)
-
-    def __copy__(self):
-        return self
-
-
-@native_itermethods(["values"])
-class Accept(ImmutableList):
-    """An :class:`Accept` object is just a list subclass for lists of
-    ``(value, quality)`` tuples.  It is automatically sorted by specificity
-    and quality.
-
-    All :class:`Accept` objects work similar to a list but provide extra
-    functionality for working with the data.  Containment checks are
-    normalized to the rules of that header:
-
-    >>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)])
-    >>> a.best
-    'ISO-8859-1'
-    >>> 'iso-8859-1' in a
-    True
-    >>> 'UTF8' in a
-    True
-    >>> 'utf7' in a
-    False
-
-    To get the quality for an item you can use normal item lookup:
-
-    >>> print a['utf-8']
-    0.7
-    >>> a['utf7']
-    0
-
-    .. versionchanged:: 0.5
-       :class:`Accept` objects are forced immutable now.
-    """
-
-    def __init__(self, values=()):
-        if values is None:
-            list.__init__(self)
-            self.provided = False
-        elif isinstance(values, Accept):
-            self.provided = values.provided
-            list.__init__(self, values)
-        else:
-            self.provided = True
-            values = sorted(
-                values,
-                key=lambda x: (self._specificity(x[0]), x[1], x[0]),
-                reverse=True,
-            )
-            list.__init__(self, values)
-
-    def _specificity(self, value):
-        """Returns a tuple describing the value's specificity."""
-        return (value != "*",)
-
-    def _value_matches(self, value, item):
-        """Check if a value matches a given accept item."""
-        return item == "*" or item.lower() == value.lower()
-
-    def __getitem__(self, key):
-        """Besides index lookup (getting item n) you can also pass it a string
-        to get the quality for the item.  If the item is not in the list, the
-        returned quality is ``0``.
-        """
-        if isinstance(key, string_types):
-            return self.quality(key)
-        return list.__getitem__(self, key)
-
-    def quality(self, key):
-        """Returns the quality of the key.
-
-        .. versionadded:: 0.6
-           In previous versions you had to use the item-lookup syntax
-           (eg: ``obj[key]`` instead of ``obj.quality(key)``)
-        """
-        for item, quality in self:
-            if self._value_matches(key, item):
-                return quality
-        return 0
-
-    def __contains__(self, value):
-        for item, _quality in self:
-            if self._value_matches(value, item):
-                return True
-        return False
-
-    def __repr__(self):
-        return "%s([%s])" % (
-            self.__class__.__name__,
-            ", ".join("(%r, %s)" % (x, y) for x, y in self),
-        )
-
-    def index(self, key):
-        """Get the position of an entry or raise :exc:`ValueError`.
-
-        :param key: The key to be looked up.
-
-        .. versionchanged:: 0.5
-           This used to raise :exc:`IndexError`, which was inconsistent
-           with the list API.
-        """
-        if isinstance(key, string_types):
-            for idx, (item, _quality) in enumerate(self):
-                if self._value_matches(key, item):
-                    return idx
-            raise ValueError(key)
-        return list.index(self, key)
-
-    def find(self, key):
-        """Get the position of an entry or return -1.
-
-        :param key: The key to be looked up.
-        """
-        try:
-            return self.index(key)
-        except ValueError:
-            return -1
-
-    def values(self):
-        """Iterate over all values."""
-        for item in self:
-            yield item[0]
-
-    def to_header(self):
-        """Convert the header set into an HTTP header string."""
-        result = []
-        for value, quality in self:
-            if quality != 1:
-                value = "%s;q=%s" % (value, quality)
-            result.append(value)
-        return ",".join(result)
-
-    def __str__(self):
-        return self.to_header()
-
-    def _best_single_match(self, match):
-        for client_item, quality in self:
-            if self._value_matches(match, client_item):
-                # self is sorted by specificity descending, we can exit
-                return client_item, quality
-
-    def best_match(self, matches, default=None):
-        """Returns the best match from a list of possible matches based
-        on the specificity and quality of the client. If two items have the
-        same quality and specificity, the one is returned that comes first.
-
-        :param matches: a list of matches to check for
-        :param default: the value that is returned if none match
-        """
-        result = default
-        best_quality = -1
-        best_specificity = (-1,)
-        for server_item in matches:
-            match = self._best_single_match(server_item)
-            if not match:
-                continue
-            client_item, quality = match
-            specificity = self._specificity(client_item)
-            if quality <= 0 or quality < best_quality:
-                continue
-            # better quality or same quality but more specific => better match
-            if quality > best_quality or specificity > best_specificity:
-                result = server_item
-                best_quality = quality
-                best_specificity = specificity
-        return result
-
-    @property
-    def best(self):
-        """The best match as value."""
-        if self:
-            return self[0][0]
-
-
-class MIMEAccept(Accept):
-    """Like :class:`Accept` but with special methods and behavior for
-    mimetypes.
-    """
-
-    def _specificity(self, value):
-        return tuple(x != "*" for x in value.split("/", 1))
-
-    def _value_matches(self, value, item):
-        def _normalize(x):
-            x = x.lower()
-            return ("*", "*") if x == "*" else x.split("/", 1)
-
-        # this is from the application which is trusted.  to avoid developer
-        # frustration we actually check these for valid values
-        if "/" not in value:
-            raise ValueError("invalid mimetype %r" % value)
-        value_type, value_subtype = _normalize(value)
-        if value_type == "*" and value_subtype != "*":
-            raise ValueError("invalid mimetype %r" % value)
-
-        if "/" not in item:
-            return False
-        item_type, item_subtype = _normalize(item)
-        if item_type == "*" and item_subtype != "*":
-            return False
-        return (
-            item_type == item_subtype == "*" or value_type == value_subtype == "*"
-        ) or (
-            item_type == value_type
-            and (
-                item_subtype == "*"
-                or value_subtype == "*"
-                or item_subtype == value_subtype
-            )
-        )
-
-    @property
-    def accept_html(self):
-        """True if this object accepts HTML."""
-        return (
-            "text/html" in self or "application/xhtml+xml" in self or self.accept_xhtml
-        )
-
-    @property
-    def accept_xhtml(self):
-        """True if this object accepts XHTML."""
-        return "application/xhtml+xml" in self or "application/xml" in self
-
-    @property
-    def accept_json(self):
-        """True if this object accepts JSON."""
-        return "application/json" in self
-
-
-class LanguageAccept(Accept):
-    """Like :class:`Accept` but with normalization for languages."""
-
-    def _value_matches(self, value, item):
-        def _normalize(language):
-            return _locale_delim_re.split(language.lower())
-
-        return item == "*" or _normalize(value) == _normalize(item)
-
-
-class CharsetAccept(Accept):
-    """Like :class:`Accept` but with normalization for charsets."""
-
-    def _value_matches(self, value, item):
-        def _normalize(name):
-            try:
-                return codecs.lookup(name).name
-            except LookupError:
-                return name.lower()
-
-        return item == "*" or _normalize(value) == _normalize(item)
-
-
-def cache_property(key, empty, type):
-    """Return a new property object for a cache header.  Useful if you
-    want to add support for a cache extension in a subclass."""
-    return property(
-        lambda x: x._get_cache_value(key, empty, type),
-        lambda x, v: x._set_cache_value(key, v, type),
-        lambda x: x._del_cache_value(key),
-        "accessor for %r" % key,
-    )
-
-
-class _CacheControl(UpdateDictMixin, dict):
-    """Subclass of a dict that stores values for a Cache-Control header.  It
-    has accessors for all the cache-control directives specified in RFC 2616.
-    The class does not differentiate between request and response directives.
-
-    Because the cache-control directives in the HTTP header use dashes the
-    python descriptors use underscores for that.
-
-    To get a header of the :class:`CacheControl` object again you can convert
-    the object into a string or call the :meth:`to_header` method.  If you plan
-    to subclass it and add your own items have a look at the sourcecode for
-    that class.
-
-    .. versionchanged:: 0.4
-
-       Setting `no_cache` or `private` to boolean `True` will set the implicit
-       none-value which is ``*``:
-
-       >>> cc = ResponseCacheControl()
-       >>> cc.no_cache = True
-       >>> cc
-       <ResponseCacheControl 'no-cache'>
-       >>> cc.no_cache
-       '*'
-       >>> cc.no_cache = None
-       >>> cc
-       <ResponseCacheControl ''>
-
-       In versions before 0.5 the behavior documented here affected the now
-       no longer existing `CacheControl` class.
-    """
-
-    no_cache = cache_property("no-cache", "*", None)
-    no_store = cache_property("no-store", None, bool)
-    max_age = cache_property("max-age", -1, int)
-    no_transform = cache_property("no-transform", None, None)
-
-    def __init__(self, values=(), on_update=None):
-        dict.__init__(self, values or ())
-        self.on_update = on_update
-        self.provided = values is not None
-
-    def _get_cache_value(self, key, empty, type):
-        """Used internally by the accessor properties."""
-        if type is bool:
-            return key in self
-        if key in self:
-            value = self[key]
-            if value is None:
-                return empty
-            elif type is not None:
-                try:
-                    value = type(value)
-                except ValueError:
-                    pass
-            return value
-
-    def _set_cache_value(self, key, value, type):
-        """Used internally by the accessor properties."""
-        if type is bool:
-            if value:
-                self[key] = None
-            else:
-                self.pop(key, None)
-        else:
-            if value is None:
-                self.pop(key)
-            elif value is True:
-                self[key] = None
-            else:
-                self[key] = value
-
-    def _del_cache_value(self, key):
-        """Used internally by the accessor properties."""
-        if key in self:
-            del self[key]
-
-    def to_header(self):
-        """Convert the stored values into a cache control header."""
-        return dump_header(self)
-
-    def __str__(self):
-        return self.to_header()
-
-    def __repr__(self):
-        return "<%s %s>" % (
-            self.__class__.__name__,
-            " ".join("%s=%r" % (k, v) for k, v in sorted(self.items())),
-        )
-
-
-class RequestCacheControl(ImmutableDictMixin, _CacheControl):
-    """A cache control for requests.  This is immutable and gives access
-    to all the request-relevant cache control headers.
-
-    To get a header of the :class:`RequestCacheControl` object again you can
-    convert the object into a string or call the :meth:`to_header` method.  If
-    you plan to subclass it and add your own items have a look at the sourcecode
-    for that class.
-
-    .. versionadded:: 0.5
-       In previous versions a `CacheControl` class existed that was used
-       both for request and response.
-    """
-
-    max_stale = cache_property("max-stale", "*", int)
-    min_fresh = cache_property("min-fresh", "*", int)
-    no_transform = cache_property("no-transform", None, None)
-    only_if_cached = cache_property("only-if-cached", None, bool)
-
-
-class ResponseCacheControl(_CacheControl):
-    """A cache control for responses.  Unlike :class:`RequestCacheControl`
-    this is mutable and gives access to response-relevant cache control
-    headers.
-
-    To get a header of the :class:`ResponseCacheControl` object again you can
-    convert the object into a string or call the :meth:`to_header` method.  If
-    you plan to subclass it and add your own items have a look at the sourcecode
-    for that class.
-
-    .. versionadded:: 0.5
-       In previous versions a `CacheControl` class existed that was used
-       both for request and response.
-    """
-
-    public = cache_property("public", None, bool)
-    private = cache_property("private", "*", None)
-    must_revalidate = cache_property("must-revalidate", None, bool)
-    proxy_revalidate = cache_property("proxy-revalidate", None, bool)
-    s_maxage = cache_property("s-maxage", None, None)
-
-
-# attach cache_property to the _CacheControl as staticmethod
-# so that others can reuse it.
-_CacheControl.cache_property = staticmethod(cache_property)
-
-
-class CallbackDict(UpdateDictMixin, dict):
-    """A dict that calls a function passed every time something is changed.
-    The function is passed the dict instance.
-    """
-
-    def __init__(self, initial=None, on_update=None):
-        dict.__init__(self, initial or ())
-        self.on_update = on_update
-
-    def __repr__(self):
-        return "<%s %s>" % (self.__class__.__name__, dict.__repr__(self))
-
-
-class HeaderSet(collections_abc.MutableSet):
-    """Similar to the :class:`ETags` class this implements a set-like structure.
-    Unlike :class:`ETags` this is case insensitive and used for vary, allow, and
-    content-language headers.
-
-    If not constructed using the :func:`parse_set_header` function the
-    instantiation works like this:
-
-    >>> hs = HeaderSet(['foo', 'bar', 'baz'])
-    >>> hs
-    HeaderSet(['foo', 'bar', 'baz'])
-    """
-
-    def __init__(self, headers=None, on_update=None):
-        self._headers = list(headers or ())
-        self._set = set([x.lower() for x in self._headers])
-        self.on_update = on_update
-
-    def add(self, header):
-        """Add a new header to the set."""
-        self.update((header,))
-
-    def remove(self, header):
-        """Remove a header from the set.  This raises an :exc:`KeyError` if the
-        header is not in the set.
-
-        .. versionchanged:: 0.5
-            In older versions a :exc:`IndexError` was raised instead of a
-            :exc:`KeyError` if the object was missing.
-
-        :param header: the header to be removed.
-        """
-        key = header.lower()
-        if key not in self._set:
-            raise KeyError(header)
-        self._set.remove(key)
-        for idx, key in enumerate(self._headers):
-            if key.lower() == header:
-                del self._headers[idx]
-                break
-        if self.on_update is not None:
-            self.on_update(self)
-
-    def update(self, iterable):
-        """Add all the headers from the iterable to the set.
-
-        :param iterable: updates the set with the items from the iterable.
-        """
-        inserted_any = False
-        for header in iterable:
-            key = header.lower()
-            if key not in self._set:
-                self._headers.append(header)
-                self._set.add(key)
-                inserted_any = True
-        if inserted_any and self.on_update is not None:
-            self.on_update(self)
-
-    def discard(self, header):
-        """Like :meth:`remove` but ignores errors.
-
-        :param header: the header to be discarded.
-        """
-        try:
-            return self.remove(header)
-        except KeyError:
-            pass
-
-    def find(self, header):
-        """Return the index of the header in the set or return -1 if not found.
-
-        :param header: the header to be looked up.
-        """
-        header = header.lower()
-        for idx, item in enumerate(self._headers):
-            if item.lower() == header:
-                return idx
-        return -1
-
-    def index(self, header):
-        """Return the index of the header in the set or raise an
-        :exc:`IndexError`.
-
-        :param header: the header to be looked up.
-        """
-        rv = self.find(header)
-        if rv < 0:
-            raise IndexError(header)
-        return rv
-
-    def clear(self):
-        """Clear the set."""
-        self._set.clear()
-        del self._headers[:]
-        if self.on_update is not None:
-            self.on_update(self)
-
-    def as_set(self, preserve_casing=False):
-        """Return the set as real python set type.  When calling this, all
-        the items are converted to lowercase and the ordering is lost.
-
-        :param preserve_casing: if set to `True` the items in the set returned
-                                will have the original case like in the
-                                :class:`HeaderSet`, otherwise they will
-                                be lowercase.
-        """
-        if preserve_casing:
-            return set(self._headers)
-        return set(self._set)
-
-    def to_header(self):
-        """Convert the header set into an HTTP header string."""
-        return ", ".join(map(quote_header_value, self._headers))
-
-    def __getitem__(self, idx):
-        return self._headers[idx]
-
-    def __delitem__(self, idx):
-        rv = self._headers.pop(idx)
-        self._set.remove(rv.lower())
-        if self.on_update is not None:
-            self.on_update(self)
-
-    def __setitem__(self, idx, value):
-        old = self._headers[idx]
-        self._set.remove(old.lower())
-        self._headers[idx] = value
-        self._set.add(value.lower())
-        if self.on_update is not None:
-            self.on_update(self)
-
-    def __contains__(self, header):
-        return header.lower() in self._set
-
-    def __len__(self):
-        return len(self._set)
-
-    def __iter__(self):
-        return iter(self._headers)
-
-    def __nonzero__(self):
-        return bool(self._set)
-
-    def __str__(self):
-        return self.to_header()
-
-    def __repr__(self):
-        return "%s(%r)" % (self.__class__.__name__, self._headers)
-
-
-class ETags(collections_abc.Container, collections_abc.Iterable):
-    """A set that can be used to check if one etag is present in a collection
-    of etags.
-    """
-
-    def __init__(self, strong_etags=None, weak_etags=None, star_tag=False):
-        self._strong = frozenset(not star_tag and strong_etags or ())
-        self._weak = frozenset(weak_etags or ())
-        self.star_tag = star_tag
-
-    def as_set(self, include_weak=False):
-        """Convert the `ETags` object into a python set.  Per default all the
-        weak etags are not part of this set."""
-        rv = set(self._strong)
-        if include_weak:
-            rv.update(self._weak)
-        return rv
-
-    def is_weak(self, etag):
-        """Check if an etag is weak."""
-        return etag in self._weak
-
-    def is_strong(self, etag):
-        """Check if an etag is strong."""
-        return etag in self._strong
-
-    def contains_weak(self, etag):
-        """Check if an etag is part of the set including weak and strong tags."""
-        return self.is_weak(etag) or self.contains(etag)
-
-    def contains(self, etag):
-        """Check if an etag is part of the set ignoring weak tags.
-        It is also possible to use the ``in`` operator.
-        """
-        if self.star_tag:
-            return True
-        return self.is_strong(etag)
-
-    def contains_raw(self, etag):
-        """When passed a quoted tag it will check if this tag is part of the
-        set.  If the tag is weak it is checked against weak and strong tags,
-        otherwise strong only."""
-        etag, weak = unquote_etag(etag)
-        if weak:
-            return self.contains_weak(etag)
-        return self.contains(etag)
-
-    def to_header(self):
-        """Convert the etags set into a HTTP header string."""
-        if self.star_tag:
-            return "*"
-        return ", ".join(
-            ['"%s"' % x for x in self._strong] + ['W/"%s"' % x for x in self._weak]
-        )
-
-    def __call__(self, etag=None, data=None, include_weak=False):
-        if [etag, data].count(None) != 1:
-            raise TypeError("either tag or data required, but at least one")
-        if etag is None:
-            etag = generate_etag(data)
-        if include_weak:
-            if etag in self._weak:
-                return True
-        return etag in self._strong
-
-    def __bool__(self):
-        return bool(self.star_tag or self._strong or self._weak)
-
-    __nonzero__ = __bool__
-
-    def __str__(self):
-        return self.to_header()
-
-    def __iter__(self):
-        return iter(self._strong)
-
-    def __contains__(self, etag):
-        return self.contains(etag)
-
-    def __repr__(self):
-        return "<%s %r>" % (self.__class__.__name__, str(self))
-
-
-class IfRange(object):
-    """Very simple object that represents the `If-Range` header in parsed
-    form.  It will either have neither a etag or date or one of either but
-    never both.
-
-    .. versionadded:: 0.7
-    """
-
-    def __init__(self, etag=None, date=None):
-        #: The etag parsed and unquoted.  Ranges always operate on strong
-        #: etags so the weakness information is not necessary.
-        self.etag = etag
-        #: The date in parsed format or `None`.
-        self.date = date
-
-    def to_header(self):
-        """Converts the object back into an HTTP header."""
-        if self.date is not None:
-            return http_date(self.date)
-        if self.etag is not None:
-            return quote_etag(self.etag)
-        return ""
-
-    def __str__(self):
-        return self.to_header()
-
-    def __repr__(self):
-        return "<%s %r>" % (self.__class__.__name__, str(self))
-
-
-class Range(object):
-    """Represents a ``Range`` header. All methods only support only
-    bytes as the unit. Stores a list of ranges if given, but the methods
-    only work if only one range is provided.
-
-    :raise ValueError: If the ranges provided are invalid.
-
-    .. versionchanged:: 0.15
-        The ranges passed in are validated.
-
-    .. versionadded:: 0.7
-    """
-
-    def __init__(self, units, ranges):
-        #: The units of this range.  Usually "bytes".
-        self.units = units
-        #: A list of ``(begin, end)`` tuples for the range header provided.
-        #: The ranges are non-inclusive.
-        self.ranges = ranges
-
-        for start, end in ranges:
-            if start is None or (end is not None and (start < 0 or start >= end)):
-                raise ValueError("{} is not a valid range.".format((start, end)))
-
-    def range_for_length(self, length):
-        """If the range is for bytes, the length is not None and there is
-        exactly one range and it is satisfiable it returns a ``(start, stop)``
-        tuple, otherwise `None`.
-        """
-        if self.units != "bytes" or length is None or len(self.ranges) != 1:
-            return None
-        start, end = self.ranges[0]
-        if end is None:
-            end = length
-            if start < 0:
-                start += length
-        if is_byte_range_valid(start, end, length):
-            return start, min(end, length)
-
-    def make_content_range(self, length):
-        """Creates a :class:`~werkzeug.datastructures.ContentRange` object
-        from the current range and given content length.
-        """
-        rng = self.range_for_length(length)
-        if rng is not None:
-            return ContentRange(self.units, rng[0], rng[1], length)
-
-    def to_header(self):
-        """Converts the object back into an HTTP header."""
-        ranges = []
-        for begin, end in self.ranges:
-            if end is None:
-                ranges.append("%s-" % begin if begin >= 0 else str(begin))
-            else:
-                ranges.append("%s-%s" % (begin, end - 1))
-        return "%s=%s" % (self.units, ",".join(ranges))
-
-    def to_content_range_header(self, length):
-        """Converts the object into `Content-Range` HTTP header,
-        based on given length
-        """
-        range_for_length = self.range_for_length(length)
-        if range_for_length is not None:
-            return "%s %d-%d/%d" % (
-                self.units,
-                range_for_length[0],
-                range_for_length[1] - 1,
-                length,
-            )
-        return None
-
-    def __str__(self):
-        return self.to_header()
-
-    def __repr__(self):
-        return "<%s %r>" % (self.__class__.__name__, str(self))
-
-
-class ContentRange(object):
-    """Represents the content range header.
-
-    .. versionadded:: 0.7
-    """
-
-    def __init__(self, units, start, stop, length=None, on_update=None):
-        assert is_byte_range_valid(start, stop, length), "Bad range provided"
-        self.on_update = on_update
-        self.set(start, stop, length, units)
-
-    def _callback_property(name):  # noqa: B902
-        def fget(self):
-            return getattr(self, name)
-
-        def fset(self, value):
-            setattr(self, name, value)
-            if self.on_update is not None:
-                self.on_update(self)
-
-        return property(fget, fset)
-
-    #: The units to use, usually "bytes"
-    units = _callback_property("_units")
-    #: The start point of the range or `None`.
-    start = _callback_property("_start")
-    #: The stop point of the range (non-inclusive) or `None`.  Can only be
-    #: `None` if also start is `None`.
-    stop = _callback_property("_stop")
-    #: The length of the range or `None`.
-    length = _callback_property("_length")
-    del _callback_property
-
-    def set(self, start, stop, length=None, units="bytes"):
-        """Simple method to update the ranges."""
-        assert is_byte_range_valid(start, stop, length), "Bad range provided"
-        self._units = units
-        self._start = start
-        self._stop = stop
-        self._length = length
-        if self.on_update is not None:
-            self.on_update(self)
-
-    def unset(self):
-        """Sets the units to `None` which indicates that the header should
-        no longer be used.
-        """
-        self.set(None, None, units=None)
-
-    def to_header(self):
-        if self.units is None:
-            return ""
-        if self.length is None:
-            length = "*"
-        else:
-            length = self.length
-        if self.start is None:
-            return "%s */%s" % (self.units, length)
-        return "%s %s-%s/%s" % (self.units, self.start, self.stop - 1, length)
-
-    def __nonzero__(self):
-        return self.units is not None
-
-    __bool__ = __nonzero__
-
-    def __str__(self):
-        return self.to_header()
-
-    def __repr__(self):
-        return "<%s %r>" % (self.__class__.__name__, str(self))
-
-
-class Authorization(ImmutableDictMixin, dict):
-    """Represents an `Authorization` header sent by the client.  You should
-    not create this kind of object yourself but use it when it's returned by
-    the `parse_authorization_header` function.
-
-    This object is a dict subclass and can be altered by setting dict items
-    but it should be considered immutable as it's returned by the client and
-    not meant for modifications.
-
-    .. versionchanged:: 0.5
-       This object became immutable.
-    """
-
-    def __init__(self, auth_type, data=None):
-        dict.__init__(self, data or {})
-        self.type = auth_type
-
-    username = property(
-        lambda self: self.get("username"),
-        doc="""
-        The username transmitted.  This is set for both basic and digest
-        auth all the time.""",
-    )
-    password = property(
-        lambda self: self.get("password"),
-        doc="""
-        When the authentication type is basic this is the password
-        transmitted by the client, else `None`.""",
-    )
-    realm = property(
-        lambda self: self.get("realm"),
-        doc="""
-        This is the server realm sent back for HTTP digest auth.""",
-    )
-    nonce = property(
-        lambda self: self.get("nonce"),
-        doc="""
-        The nonce the server sent for digest auth, sent back by the client.
-        A nonce should be unique for every 401 response for HTTP digest
-        auth.""",
-    )
-    uri = property(
-        lambda self: self.get("uri"),
-        doc="""
-        The URI from Request-URI of the Request-Line; duplicated because
-        proxies are allowed to change the Request-Line in transit.  HTTP
-        digest auth only.""",
-    )
-    nc = property(
-        lambda self: self.get("nc"),
-        doc="""
-        The nonce count value transmitted by clients if a qop-header is
-        also transmitted.  HTTP digest auth only.""",
-    )
-    cnonce = property(
-        lambda self: self.get("cnonce"),
-        doc="""
-        If the server sent a qop-header in the ``WWW-Authenticate``
-        header, the client has to provide this value for HTTP digest auth.
-        See the RFC for more details.""",
-    )
-    response = property(
-        lambda self: self.get("response"),
-        doc="""
-        A string of 32 hex digits computed as defined in RFC 2617, which
-        proves that the user knows a password.  Digest auth only.""",
-    )
-    opaque = property(
-        lambda self: self.get("opaque"),
-        doc="""
-        The opaque header from the server returned unchanged by the client.
-        It is recommended that this string be base64 or hexadecimal data.
-        Digest auth only.""",
-    )
-    qop = property(
-        lambda self: self.get("qop"),
-        doc="""
-        Indicates what "quality of protection" the client has applied to
-        the message for HTTP digest auth. Note that this is a single token,
-        not a quoted list of alternatives as in WWW-Authenticate.""",
-    )
-
-
-class WWWAuthenticate(UpdateDictMixin, dict):
-    """Provides simple access to `WWW-Authenticate` headers."""
-
-    #: list of keys that require quoting in the generated header
-    _require_quoting = frozenset(["domain", "nonce", "opaque", "realm", "qop"])
-
-    def __init__(self, auth_type=None, values=None, on_update=None):
-        dict.__init__(self, values or ())
-        if auth_type:
-            self["__auth_type__"] = auth_type
-        self.on_update = on_update
-
-    def set_basic(self, realm="authentication required"):
-        """Clear the auth info and enable basic auth."""
-        dict.clear(self)
-        dict.update(self, {"__auth_type__": "basic", "realm": realm})
-        if self.on_update:
-            self.on_update(self)
-
-    def set_digest(
-        self, realm, nonce, qop=("auth",), opaque=None, algorithm=None, stale=False
-    ):
-        """Clear the auth info and enable digest auth."""
-        d = {
-            "__auth_type__": "digest",
-            "realm": realm,
-            "nonce": nonce,
-            "qop": dump_header(qop),
-        }
-        if stale:
-            d["stale"] = "TRUE"
-        if opaque is not None:
-            d["opaque"] = opaque
-        if algorithm is not None:
-            d["algorithm"] = algorithm
-        dict.clear(self)
-        dict.update(self, d)
-        if self.on_update:
-            self.on_update(self)
-
-    def to_header(self):
-        """Convert the stored values into a WWW-Authenticate header."""
-        d = dict(self)
-        auth_type = d.pop("__auth_type__", None) or "basic"
-        return "%s %s" % (
-            auth_type.title(),
-            ", ".join(
-                [
-                    "%s=%s"
-                    % (
-                        key,
-                        quote_header_value(
-                            value, allow_token=key not in self._require_quoting
-                        ),
-                    )
-                    for key, value in iteritems(d)
-                ]
-            ),
-        )
-
-    def __str__(self):
-        return self.to_header()
-
-    def __repr__(self):
-        return "<%s %r>" % (self.__class__.__name__, self.to_header())
-
-    def auth_property(name, doc=None):  # noqa: B902
-        """A static helper function for subclasses to add extra authentication
-        system properties onto a class::
-
-            class FooAuthenticate(WWWAuthenticate):
-                special_realm = auth_property('special_realm')
-
-        For more information have a look at the sourcecode to see how the
-        regular properties (:attr:`realm` etc.) are implemented.
-        """
-
-        def _set_value(self, value):
-            if value is None:
-                self.pop(name, None)
-            else:
-                self[name] = str(value)
-
-        return property(lambda x: x.get(name), _set_value, doc=doc)
-
-    def _set_property(name, doc=None):  # noqa: B902
-        def fget(self):
-            def on_update(header_set):
-                if not header_set and name in self:
-                    del self[name]
-                elif header_set:
-                    self[name] = header_set.to_header()
-
-            return parse_set_header(self.get(name), on_update)
-
-        return property(fget, doc=doc)
-
-    type = auth_property(
-        "__auth_type__",
-        doc="""The type of the auth mechanism. HTTP currently specifies
-        ``Basic`` and ``Digest``.""",
-    )
-    realm = auth_property(
-        "realm",
-        doc="""A string to be displayed to users so they know which
-        username and password to use. This string should contain at
-        least the name of the host performing the authentication and
-        might additionally indicate the collection of users who might
-        have access.""",
-    )
-    domain = _set_property(
-        "domain",
-        doc="""A list of URIs that define the protection space. If a URI
-        is an absolute path, it is relative to the canonical root URL of
-        the server being accessed.""",
-    )
-    nonce = auth_property(
-        "nonce",
-        doc="""
-        A server-specified data string which should be uniquely generated
-        each time a 401 response is made. It is recommended that this
-        string be base64 or hexadecimal data.""",
-    )
-    opaque = auth_property(
-        "opaque",
-        doc="""A string of data, specified by the server, which should
-        be returned by the client unchanged in the Authorization header
-        of subsequent requests with URIs in the same protection space.
-        It is recommended that this string be base64 or hexadecimal
-        data.""",
-    )
-    algorithm = auth_property(
-        "algorithm",
-        doc="""A string indicating a pair of algorithms used to produce
-        the digest and a checksum. If this is not present it is assumed
-        to be "MD5". If the algorithm is not understood, the challenge
-        should be ignored (and a different one used, if there is more
-        than one).""",
-    )
-    qop = _set_property(
-        "qop",
-        doc="""A set of quality-of-privacy directives such as auth and
-        auth-int.""",
-    )
-
-    @property
-    def stale(self):
-        """A flag, indicating that the previous request from the client
-        was rejected because the nonce value was stale.
-        """
-        val = self.get("stale")
-        if val is not None:
-            return val.lower() == "true"
-
-    @stale.setter
-    def stale(self, value):
-        if value is None:
-            self.pop("stale", None)
-        else:
-            self["stale"] = "TRUE" if value else "FALSE"
-
-    auth_property = staticmethod(auth_property)
-    del _set_property
-
-
-class FileStorage(object):
-    """The :class:`FileStorage` class is a thin wrapper over incoming files.
-    It is used by the request object to represent uploaded files.  All the
-    attributes of the wrapper stream are proxied by the file storage so
-    it's possible to do ``storage.read()`` instead of the long form
-    ``storage.stream.read()``.
-    """
-
-    def __init__(
-        self,
-        stream=None,
-        filename=None,
-        name=None,
-        content_type=None,
-        content_length=None,
-        headers=None,
-    ):
-        self.name = name
-        self.stream = stream or BytesIO()
-
-        # if no filename is provided we can attempt to get the filename
-        # from the stream object passed.  There we have to be careful to
-        # skip things like <fdopen>, <stderr> etc.  Python marks these
-        # special filenames with angular brackets.
-        if filename is None:
-            filename = getattr(stream, "name", None)
-            s = make_literal_wrapper(filename)
-            if filename and filename[0] == s("<") and filename[-1] == s(">"):
-                filename = None
-
-            # On Python 3 we want to make sure the filename is always unicode.
-            # This might not be if the name attribute is bytes due to the
-            # file being opened from the bytes API.
-            if not PY2 and isinstance(filename, bytes):
-                filename = filename.decode(sys.getfilesystemencoding(), "replace")
-
-        self.filename = filename
-        if headers is None:
-            headers = Headers()
-        self.headers = headers
-        if content_type is not None:
-            headers["Content-Type"] = content_type
-        if content_length is not None:
-            headers["Content-Length"] = str(content_length)
-
-    def _parse_content_type(self):
-        if not hasattr(self, "_parsed_content_type"):
-            self._parsed_content_type = parse_options_header(self.content_type)
-
-    @property
-    def content_type(self):
-        """The content-type sent in the header.  Usually not available"""
-        return self.headers.get("content-type")
-
-    @property
-    def content_length(self):
-        """The content-length sent in the header.  Usually not available"""
-        return int(self.headers.get("content-length") or 0)
-
-    @property
-    def mimetype(self):
-        """Like :attr:`content_type`, but without parameters (eg, without
-        charset, type etc.) and always lowercase.  For example if the content
-        type is ``text/HTML; charset=utf-8`` the mimetype would be
-        ``'text/html'``.
-
-        .. versionadded:: 0.7
-        """
-        self._parse_content_type()
-        return self._parsed_content_type[0].lower()
-
-    @property
-    def mimetype_params(self):
-        """The mimetype parameters as dict.  For example if the content
-        type is ``text/html; charset=utf-8`` the params would be
-        ``{'charset': 'utf-8'}``.
-
-        .. versionadded:: 0.7
-        """
-        self._parse_content_type()
-        return self._parsed_content_type[1]
-
-    def save(self, dst, buffer_size=16384):
-        """Save the file to a destination path or file object.  If the
-        destination is a file object you have to close it yourself after the
-        call.  The buffer size is the number of bytes held in memory during
-        the copy process.  It defaults to 16KB.
-
-        For secure file saving also have a look at :func:`secure_filename`.
-
-        :param dst: a filename or open file object the uploaded file
-                    is saved to.
-        :param buffer_size: the size of the buffer.  This works the same as
-                            the `length` parameter of
-                            :func:`shutil.copyfileobj`.
-        """
-        from shutil import copyfileobj
-
-        close_dst = False
-        if isinstance(dst, string_types):
-            dst = open(dst, "wb")
-            close_dst = True
-        try:
-            copyfileobj(self.stream, dst, buffer_size)
-        finally:
-            if close_dst:
-                dst.close()
-
-    def close(self):
-        """Close the underlying file if possible."""
-        try:
-            self.stream.close()
-        except Exception:
-            pass
-
-    def __nonzero__(self):
-        return bool(self.filename)
-
-    __bool__ = __nonzero__
-
-    def __getattr__(self, name):
-        try:
-            return getattr(self.stream, name)
-        except AttributeError:
-            # SpooledTemporaryFile doesn't implement IOBase, get the
-            # attribute from its backing file instead.
-            # https://github.com/python/cpython/pull/3249
-            if hasattr(self.stream, "_file"):
-                return getattr(self.stream._file, name)
-            raise
-
-    def __iter__(self):
-        return iter(self.stream)
-
-    def __repr__(self):
-        return "<%s: %r (%r)>" % (
-            self.__class__.__name__,
-            self.filename,
-            self.content_type,
-        )
-
-
-# circular dependencies
-from . import exceptions
-from .http import dump_header
-from .http import dump_options_header
-from .http import generate_etag
-from .http import http_date
-from .http import is_byte_range_valid
-from .http import parse_options_header
-from .http import parse_set_header
-from .http import quote_etag
-from .http import quote_header_value
-from .http import unquote_etag
diff --git a/azure/functions/_thirdparty/werkzeug/exceptions.py b/azure/functions/_thirdparty/werkzeug/exceptions.py
deleted file mode 100644
index c1ec8f2d..00000000
--- a/azure/functions/_thirdparty/werkzeug/exceptions.py
+++ /dev/null
@@ -1,763 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    werkzeug.exceptions
-    ~~~~~~~~~~~~~~~~~~~
-
-    This module implements a number of Python exceptions you can raise from
-    within your views to trigger a standard non-200 response.
-
-
-    Usage Example
-    -------------
-
-    ::
-
-        from werkzeug.wrappers import BaseRequest
-        from werkzeug.wsgi import responder
-        from werkzeug.exceptions import HTTPException, NotFound
-
-        def view(request):
-            raise NotFound()
-
-        @responder
-        def application(environ, start_response):
-            request = BaseRequest(environ)
-            try:
-                return view(request)
-            except HTTPException as e:
-                return e
-
-
-    As you can see from this example those exceptions are callable WSGI
-    applications.  Because of Python 2.4 compatibility those do not extend
-    from the response objects but only from the python exception class.
-
-    As a matter of fact they are not Werkzeug response objects.  However you
-    can get a response object by calling ``get_response()`` on a HTTP
-    exception.
-
-    Keep in mind that you have to pass an environment to ``get_response()``
-    because some errors fetch additional information from the WSGI
-    environment.
-
-    If you want to hook in a different exception page to say, a 404 status
-    code, you can add a second except for a specific subclass of an error::
-
-        @responder
-        def application(environ, start_response):
-            request = BaseRequest(environ)
-            try:
-                return view(request)
-            except NotFound, e:
-                return not_found(request)
-            except HTTPException, e:
-                return e
-
-
-    :copyright: 2007 Pallets
-    :license: BSD-3-Clause
-"""
-import sys
-
-# import werkzeug
-
-# Because of bootstrapping reasons we need to manually patch ourselves
-# onto our parent module.
-# werkzeug.exceptions = sys.modules[__name__]
-
-from ._compat import implements_to_string
-from ._compat import integer_types
-from ._compat import iteritems
-from ._compat import text_type
-from ._internal import _get_environ
-
-
-@implements_to_string
-class HTTPException(Exception):
-    """Baseclass for all HTTP exceptions.  This exception can be called as WSGI
-    application to render a default error page or you can catch the subclasses
-    of it independently and render nicer error messages.
-    """
-
-    code = None
-    description = None
-
-    def __init__(self, description=None, response=None):
-        super(Exception, self).__init__()
-        if description is not None:
-            self.description = description
-        self.response = response
-
-    @classmethod
-    def wrap(cls, exception, name=None):
-        """Create an exception that is a subclass of the calling HTTP
-        exception and the ``exception`` argument.
-
-        The first argument to the class will be passed to the
-        wrapped ``exception``, the rest to the HTTP exception. If
-        ``self.args`` is not empty, the wrapped exception message is
-        added to the HTTP exception description.
-
-        .. versionchanged:: 0.15
-            The description includes the wrapped exception message.
-        """
-
-        class newcls(cls, exception):
-            def __init__(self, arg=None, *args, **kwargs):
-                super(cls, self).__init__(*args, **kwargs)
-
-                if arg is None:
-                    exception.__init__(self)
-                else:
-                    exception.__init__(self, arg)
-
-            def get_description(self, environ=None):
-                out = super(cls, self).get_description(environ=environ)
-
-                if self.args:
-                    out += "<p><pre><code>{}: {}</code></pre></p>".format(
-                        exception.__name__, escape(exception.__str__(self))
-                    )
-
-                return out
-
-        newcls.__module__ = sys._getframe(1).f_globals.get("__name__")
-        newcls.__name__ = name or cls.__name__ + exception.__name__
-        return newcls
-
-    @property
-    def name(self):
-        """The status name."""
-        return HTTP_STATUS_CODES.get(self.code, "Unknown Error")
-
-    def get_description(self, environ=None):
-        """Get the description."""
-        return u"<p>%s</p>" % escape(self.description)
-
-    def get_body(self, environ=None):
-        """Get the HTML body."""
-        return text_type(
-            (
-                u'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
-                u"<title>%(code)s %(name)s</title>\n"
-                u"<h1>%(name)s</h1>\n"
-                u"%(description)s\n"
-            )
-            % {
-                "code": self.code,
-                "name": escape(self.name),
-                "description": self.get_description(environ),
-            }
-        )
-
-    def get_headers(self, environ=None):
-        """Get a list of headers."""
-        return [("Content-Type", "text/html")]
-
-    def get_response(self, environ=None):
-        """Get a response object.  If one was passed to the exception
-        it's returned directly.
-
-        :param environ: the optional environ for the request.  This
-                        can be used to modify the response depending
-                        on how the request looked like.
-        :return: a :class:`Response` object or a subclass thereof.
-        """
-        if self.response is not None:
-            return self.response
-        if environ is not None:
-            environ = _get_environ(environ)
-        headers = self.get_headers(environ)
-        return Response(self.get_body(environ), self.code, headers)
-
-    def __call__(self, environ, start_response):
-        """Call the exception as WSGI application.
-
-        :param environ: the WSGI environment.
-        :param start_response: the response callable provided by the WSGI
-                               server.
-        """
-        response = self.get_response(environ)
-        return response(environ, start_response)
-
-    def __str__(self):
-        code = self.code if self.code is not None else "???"
-        return "%s %s: %s" % (code, self.name, self.description)
-
-    def __repr__(self):
-        code = self.code if self.code is not None else "???"
-        return "<%s '%s: %s'>" % (self.__class__.__name__, code, self.name)
-
-
-class BadRequest(HTTPException):
-    """*400* `Bad Request`
-
-    Raise if the browser sends something to the application the application
-    or server cannot handle.
-    """
-
-    code = 400
-    description = (
-        "The browser (or proxy) sent a request that this server could "
-        "not understand."
-    )
-
-
-class ClientDisconnected(BadRequest):
-    """Internal exception that is raised if Werkzeug detects a disconnected
-    client.  Since the client is already gone at that point attempting to
-    send the error message to the client might not work and might ultimately
-    result in another exception in the server.  Mainly this is here so that
-    it is silenced by default as far as Werkzeug is concerned.
-
-    Since disconnections cannot be reliably detected and are unspecified
-    by WSGI to a large extent this might or might not be raised if a client
-    is gone.
-
-    .. versionadded:: 0.8
-    """
-
-
-class SecurityError(BadRequest):
-    """Raised if something triggers a security error.  This is otherwise
-    exactly like a bad request error.
-
-    .. versionadded:: 0.9
-    """
-
-
-class BadHost(BadRequest):
-    """Raised if the submitted host is badly formatted.
-
-    .. versionadded:: 0.11.2
-    """
-
-
-class Unauthorized(HTTPException):
-    """*401* ``Unauthorized``
-
-    Raise if the user is not authorized to access a resource.
-
-    The ``www_authenticate`` argument should be used to set the
-    ``WWW-Authenticate`` header. This is used for HTTP basic auth and
-    other schemes. Use :class:`~werkzeug.datastructures.WWWAuthenticate`
-    to create correctly formatted values. Strictly speaking a 401
-    response is invalid if it doesn't provide at least one value for
-    this header, although real clients typically don't care.
-
-    :param description: Override the default message used for the body
-        of the response.
-    :param www-authenticate: A single value, or list of values, for the
-        WWW-Authenticate header.
-
-    .. versionchanged:: 0.15.1
-        ``description`` was moved back as the first argument, restoring
-         its previous position.
-
-    .. versionchanged:: 0.15.0
-        ``www_authenticate`` was added as the first argument, ahead of
-        ``description``.
-    """
-
-    code = 401
-    description = (
-        "The server could not verify that you are authorized to access"
-        " the URL requested. You either supplied the wrong credentials"
-        " (e.g. a bad password), or your browser doesn't understand"
-        " how to supply the credentials required."
-    )
-
-    def __init__(self, description=None, www_authenticate=None):
-        HTTPException.__init__(self, description)
-        if not isinstance(www_authenticate, (tuple, list)):
-            www_authenticate = (www_authenticate,)
-        self.www_authenticate = www_authenticate
-
-    def get_headers(self, environ=None):
-        headers = HTTPException.get_headers(self, environ)
-        if self.www_authenticate:
-            headers.append(
-                ("WWW-Authenticate", ", ".join([str(x) for x in self.www_authenticate]))
-            )
-        return headers
-
-
-class Forbidden(HTTPException):
-    """*403* `Forbidden`
-
-    Raise if the user doesn't have the permission for the requested resource
-    but was authenticated.
-    """
-
-    code = 403
-    description = (
-        "You don't have the permission to access the requested"
-        " resource. It is either read-protected or not readable by the"
-        " server."
-    )
-
-
-class NotFound(HTTPException):
-    """*404* `Not Found`
-
-    Raise if a resource does not exist and never existed.
-    """
-
-    code = 404
-    description = (
-        "The requested URL was not found on the server. If you entered"
-        " the URL manually please check your spelling and try again."
-    )
-
-
-class MethodNotAllowed(HTTPException):
-    """*405* `Method Not Allowed`
-
-    Raise if the server used a method the resource does not handle.  For
-    example `POST` if the resource is view only.  Especially useful for REST.
-
-    The first argument for this exception should be a list of allowed methods.
-    Strictly speaking the response would be invalid if you don't provide valid
-    methods in the header which you can do with that list.
-    """
-
-    code = 405
-    description = "The method is not allowed for the requested URL."
-
-    def __init__(self, valid_methods=None, description=None):
-        """Takes an optional list of valid http methods
-        starting with werkzeug 0.3 the list will be mandatory."""
-        HTTPException.__init__(self, description)
-        self.valid_methods = valid_methods
-
-    def get_headers(self, environ=None):
-        headers = HTTPException.get_headers(self, environ)
-        if self.valid_methods:
-            headers.append(("Allow", ", ".join(self.valid_methods)))
-        return headers
-
-
-class NotAcceptable(HTTPException):
-    """*406* `Not Acceptable`
-
-    Raise if the server can't return any content conforming to the
-    `Accept` headers of the client.
-    """
-
-    code = 406
-
-    description = (
-        "The resource identified by the request is only capable of"
-        " generating response entities which have content"
-        " characteristics not acceptable according to the accept"
-        " headers sent in the request."
-    )
-
-
-class RequestTimeout(HTTPException):
-    """*408* `Request Timeout`
-
-    Raise to signalize a timeout.
-    """
-
-    code = 408
-    description = (
-        "The server closed the network connection because the browser"
-        " didn't finish the request within the specified time."
-    )
-
-
-class Conflict(HTTPException):
-    """*409* `Conflict`
-
-    Raise to signal that a request cannot be completed because it conflicts
-    with the current state on the server.
-
-    .. versionadded:: 0.7
-    """
-
-    code = 409
-    description = (
-        "A conflict happened while processing the request. The"
-        " resource might have been modified while the request was being"
-        " processed."
-    )
-
-
-class Gone(HTTPException):
-    """*410* `Gone`
-
-    Raise if a resource existed previously and went away without new location.
-    """
-
-    code = 410
-    description = (
-        "The requested URL is no longer available on this server and"
-        " there is no forwarding address. If you followed a link from a"
-        " foreign page, please contact the author of this page."
-    )
-
-
-class LengthRequired(HTTPException):
-    """*411* `Length Required`
-
-    Raise if the browser submitted data but no ``Content-Length`` header which
-    is required for the kind of processing the server does.
-    """
-
-    code = 411
-    description = (
-        "A request with this method requires a valid <code>Content-"
-        "Length</code> header."
-    )
-
-
-class PreconditionFailed(HTTPException):
-    """*412* `Precondition Failed`
-
-    Status code used in combination with ``If-Match``, ``If-None-Match``, or
-    ``If-Unmodified-Since``.
-    """
-
-    code = 412
-    description = (
-        "The precondition on the request for the URL failed positive evaluation."
-    )
-
-
-class RequestEntityTooLarge(HTTPException):
-    """*413* `Request Entity Too Large`
-
-    The status code one should return if the data submitted exceeded a given
-    limit.
-    """
-
-    code = 413
-    description = "The data value transmitted exceeds the capacity limit."
-
-
-class RequestURITooLarge(HTTPException):
-    """*414* `Request URI Too Large`
-
-    Like *413* but for too long URLs.
-    """
-
-    code = 414
-    description = (
-        "The length of the requested URL exceeds the capacity limit for"
-        " this server. The request cannot be processed."
-    )
-
-
-class UnsupportedMediaType(HTTPException):
-    """*415* `Unsupported Media Type`
-
-    The status code returned if the server is unable to handle the media type
-    the client transmitted.
-    """
-
-    code = 415
-    description = (
-        "The server does not support the media type transmitted in the request."
-    )
-
-
-class RequestedRangeNotSatisfiable(HTTPException):
-    """*416* `Requested Range Not Satisfiable`
-
-    The client asked for an invalid part of the file.
-
-    .. versionadded:: 0.7
-    """
-
-    code = 416
-    description = "The server cannot provide the requested range."
-
-    def __init__(self, length=None, units="bytes", description=None):
-        """Takes an optional `Content-Range` header value based on ``length``
-        parameter.
-        """
-        HTTPException.__init__(self, description)
-        self.length = length
-        self.units = units
-
-    def get_headers(self, environ=None):
-        headers = HTTPException.get_headers(self, environ)
-        if self.length is not None:
-            headers.append(("Content-Range", "%s */%d" % (self.units, self.length)))
-        return headers
-
-
-class ExpectationFailed(HTTPException):
-    """*417* `Expectation Failed`
-
-    The server cannot meet the requirements of the Expect request-header.
-
-    .. versionadded:: 0.7
-    """
-
-    code = 417
-    description = "The server could not meet the requirements of the Expect header"
-
-
-class ImATeapot(HTTPException):
-    """*418* `I'm a teapot`
-
-    The server should return this if it is a teapot and someone attempted
-    to brew coffee with it.
-
-    .. versionadded:: 0.7
-    """
-
-    code = 418
-    description = "This server is a teapot, not a coffee machine"
-
-
-class UnprocessableEntity(HTTPException):
-    """*422* `Unprocessable Entity`
-
-    Used if the request is well formed, but the instructions are otherwise
-    incorrect.
-    """
-
-    code = 422
-    description = (
-        "The request was well-formed but was unable to be followed due"
-        " to semantic errors."
-    )
-
-
-class Locked(HTTPException):
-    """*423* `Locked`
-
-    Used if the resource that is being accessed is locked.
-    """
-
-    code = 423
-    description = "The resource that is being accessed is locked."
-
-
-class FailedDependency(HTTPException):
-    """*424* `Failed Dependency`
-
-    Used if the method could not be performed on the resource
-    because the requested action depended on another action and that action failed.
-    """
-
-    code = 424
-    description = (
-        "The method could not be performed on the resource because the"
-        " requested action depended on another action and that action"
-        " failed."
-    )
-
-
-class PreconditionRequired(HTTPException):
-    """*428* `Precondition Required`
-
-    The server requires this request to be conditional, typically to prevent
-    the lost update problem, which is a race condition between two or more
-    clients attempting to update a resource through PUT or DELETE. By requiring
-    each client to include a conditional header ("If-Match" or "If-Unmodified-
-    Since") with the proper value retained from a recent GET request, the
-    server ensures that each client has at least seen the previous revision of
-    the resource.
-    """
-
-    code = 428
-    description = (
-        "This request is required to be conditional; try using"
-        ' "If-Match" or "If-Unmodified-Since".'
-    )
-
-
-class TooManyRequests(HTTPException):
-    """*429* `Too Many Requests`
-
-    The server is limiting the rate at which this user receives responses, and
-    this request exceeds that rate. (The server may use any convenient method
-    to identify users and their request rates). The server may include a
-    "Retry-After" header to indicate how long the user should wait before
-    retrying.
-    """
-
-    code = 429
-    description = "This user has exceeded an allotted request count. Try again later."
-
-
-class RequestHeaderFieldsTooLarge(HTTPException):
-    """*431* `Request Header Fields Too Large`
-
-    The server refuses to process the request because the header fields are too
-    large. One or more individual fields may be too large, or the set of all
-    headers is too large.
-    """
-
-    code = 431
-    description = "One or more header fields exceeds the maximum size."
-
-
-class UnavailableForLegalReasons(HTTPException):
-    """*451* `Unavailable For Legal Reasons`
-
-    This status code indicates that the server is denying access to the
-    resource as a consequence of a legal demand.
-    """
-
-    code = 451
-    description = "Unavailable for legal reasons."
-
-
-class InternalServerError(HTTPException):
-    """*500* `Internal Server Error`
-
-    Raise if an internal server error occurred.  This is a good fallback if an
-    unknown error occurred in the dispatcher.
-    """
-
-    code = 500
-    description = (
-        "The server encountered an internal error and was unable to"
-        " complete your request. Either the server is overloaded or"
-        " there is an error in the application."
-    )
-
-
-class NotImplemented(HTTPException):
-    """*501* `Not Implemented`
-
-    Raise if the application does not support the action requested by the
-    browser.
-    """
-
-    code = 501
-    description = "The server does not support the action requested by the browser."
-
-
-class BadGateway(HTTPException):
-    """*502* `Bad Gateway`
-
-    If you do proxying in your application you should return this status code
-    if you received an invalid response from the upstream server it accessed
-    in attempting to fulfill the request.
-    """
-
-    code = 502
-    description = (
-        "The proxy server received an invalid response from an upstream server."
-    )
-
-
-class ServiceUnavailable(HTTPException):
-    """*503* `Service Unavailable`
-
-    Status code you should return if a service is temporarily unavailable.
-    """
-
-    code = 503
-    description = (
-        "The server is temporarily unable to service your request due"
-        " to maintenance downtime or capacity problems. Please try"
-        " again later."
-    )
-
-
-class GatewayTimeout(HTTPException):
-    """*504* `Gateway Timeout`
-
-    Status code you should return if a connection to an upstream server
-    times out.
-    """
-
-    code = 504
-    description = "The connection to an upstream server timed out."
-
-
-class HTTPVersionNotSupported(HTTPException):
-    """*505* `HTTP Version Not Supported`
-
-    The server does not support the HTTP protocol version used in the request.
-    """
-
-    code = 505
-    description = (
-        "The server does not support the HTTP protocol version used in the request."
-    )
-
-
-default_exceptions = {}
-__all__ = ["HTTPException"]
-
-
-def _find_exceptions():
-    for _name, obj in iteritems(globals()):
-        try:
-            is_http_exception = issubclass(obj, HTTPException)
-        except TypeError:
-            is_http_exception = False
-        if not is_http_exception or obj.code is None:
-            continue
-        __all__.append(obj.__name__)
-        old_obj = default_exceptions.get(obj.code, None)
-        if old_obj is not None and issubclass(obj, old_obj):
-            continue
-        default_exceptions[obj.code] = obj
-
-
-_find_exceptions()
-del _find_exceptions
-
-
-class Aborter(object):
-    """When passed a dict of code -> exception items it can be used as
-    callable that raises exceptions.  If the first argument to the
-    callable is an integer it will be looked up in the mapping, if it's
-    a WSGI application it will be raised in a proxy exception.
-
-    The rest of the arguments are forwarded to the exception constructor.
-    """
-
-    def __init__(self, mapping=None, extra=None):
-        if mapping is None:
-            mapping = default_exceptions
-        self.mapping = dict(mapping)
-        if extra is not None:
-            self.mapping.update(extra)
-
-    def __call__(self, code, *args, **kwargs):
-        if not args and not kwargs and not isinstance(code, integer_types):
-            raise HTTPException(response=code)
-        if code not in self.mapping:
-            raise LookupError("no exception for %r" % code)
-        raise self.mapping[code](*args, **kwargs)
-
-
-def abort(status, *args, **kwargs):
-    """Raises an :py:exc:`HTTPException` for the given status code or WSGI
-    application::
-
-        abort(404)  # 404 Not Found
-        abort(Response('Hello World'))
-
-    Can be passed a WSGI application or a status code.  If a status code is
-    given it's looked up in the list of exceptions and will raise that
-    exception, if passed a WSGI application it will wrap it in a proxy WSGI
-    exception and raise that::
-
-       abort(404)
-       abort(Response('Hello World'))
-
-    """
-    return _aborter(status, *args, **kwargs)
-
-
-_aborter = Aborter()
-
-
-#: an exception that is used internally to signal both a key error and a
-#: bad request.  Used by a lot of the datastructures.
-BadRequestKeyError = BadRequest.wrap(KeyError)
-
-# imported here because of circular dependencies of werkzeug.utils
-from .http import HTTP_STATUS_CODES
-from .utils import escape
diff --git a/azure/functions/_thirdparty/werkzeug/formparser.py b/azure/functions/_thirdparty/werkzeug/formparser.py
deleted file mode 100644
index 0ddc5c8f..00000000
--- a/azure/functions/_thirdparty/werkzeug/formparser.py
+++ /dev/null
@@ -1,586 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    werkzeug.formparser
-    ~~~~~~~~~~~~~~~~~~~
-
-    This module implements the form parsing.  It supports url-encoded forms
-    as well as non-nested multipart uploads.
-
-    :copyright: 2007 Pallets
-    :license: BSD-3-Clause
-"""
-import codecs
-import re
-from functools import update_wrapper
-from itertools import chain
-from itertools import repeat
-from itertools import tee
-
-from ._compat import BytesIO
-from ._compat import text_type
-from ._compat import to_native
-from .datastructures import FileStorage
-from .datastructures import Headers
-from .datastructures import MultiDict
-from .http import parse_options_header
-from .urls import url_decode_stream
-from .wsgi import get_content_length
-from .wsgi import get_input_stream
-from .wsgi import make_line_iter
-
-# there are some platforms where SpooledTemporaryFile is not available.
-# In that case we need to provide a fallback.
-try:
-    from tempfile import SpooledTemporaryFile
-except ImportError:
-    from tempfile import TemporaryFile
-
-    SpooledTemporaryFile = None
-
-
-#: an iterator that yields empty strings
-_empty_string_iter = repeat("")
-
-#: a regular expression for multipart boundaries
-_multipart_boundary_re = re.compile("^[ -~]{0,200}[!-~]$")
-
-#: supported http encodings that are also available in python we support
-#: for multipart messages.
-_supported_multipart_encodings = frozenset(["base64", "quoted-printable"])
-
-
-def default_stream_factory(
-    total_content_length, filename, content_type, content_length=None
-):
-    """The stream factory that is used per default."""
-    max_size = 1024 * 500
-    if SpooledTemporaryFile is not None:
-        return SpooledTemporaryFile(max_size=max_size, mode="wb+")
-    if total_content_length is None or total_content_length > max_size:
-        return TemporaryFile("wb+")
-    return BytesIO()
-
-
-def parse_form_data(
-    environ,
-    stream_factory=None,
-    charset="utf-8",
-    errors="replace",
-    max_form_memory_size=None,
-    max_content_length=None,
-    cls=None,
-    silent=True,
-):
-    """Parse the form data in the environ and return it as tuple in the form
-    ``(stream, form, files)``.  You should only call this method if the
-    transport method is `POST`, `PUT`, or `PATCH`.
-
-    If the mimetype of the data transmitted is `multipart/form-data` the
-    files multidict will be filled with `FileStorage` objects.  If the
-    mimetype is unknown the input stream is wrapped and returned as first
-    argument, else the stream is empty.
-
-    This is a shortcut for the common usage of :class:`FormDataParser`.
-
-    Have a look at :ref:`dealing-with-request-data` for more details.
-
-    .. versionadded:: 0.5
-       The `max_form_memory_size`, `max_content_length` and
-       `cls` parameters were added.
-
-    .. versionadded:: 0.5.1
-       The optional `silent` flag was added.
-
-    :param environ: the WSGI environment to be used for parsing.
-    :param stream_factory: An optional callable that returns a new read and
-                           writeable file descriptor.  This callable works
-                           the same as :meth:`~BaseResponse._get_file_stream`.
-    :param charset: The character set for URL and url encoded form data.
-    :param errors: The encoding error behavior.
-    :param max_form_memory_size: the maximum number of bytes to be accepted for
-                           in-memory stored form data.  If the data
-                           exceeds the value specified an
-                           :exc:`~exceptions.RequestEntityTooLarge`
-                           exception is raised.
-    :param max_content_length: If this is provided and the transmitted data
-                               is longer than this value an
-                               :exc:`~exceptions.RequestEntityTooLarge`
-                               exception is raised.
-    :param cls: an optional dict class to use.  If this is not specified
-                       or `None` the default :class:`MultiDict` is used.
-    :param silent: If set to False parsing errors will not be caught.
-    :return: A tuple in the form ``(stream, form, files)``.
-    """
-    return FormDataParser(
-        stream_factory,
-        charset,
-        errors,
-        max_form_memory_size,
-        max_content_length,
-        cls,
-        silent,
-    ).parse_from_environ(environ)
-
-
-def exhaust_stream(f):
-    """Helper decorator for methods that exhausts the stream on return."""
-
-    def wrapper(self, stream, *args, **kwargs):
-        try:
-            return f(self, stream, *args, **kwargs)
-        finally:
-            exhaust = getattr(stream, "exhaust", None)
-            if exhaust is not None:
-                exhaust()
-            else:
-                while 1:
-                    chunk = stream.read(1024 * 64)
-                    if not chunk:
-                        break
-
-    return update_wrapper(wrapper, f)
-
-
-class FormDataParser(object):
-    """This class implements parsing of form data for Werkzeug.  By itself
-    it can parse multipart and url encoded form data.  It can be subclassed
-    and extended but for most mimetypes it is a better idea to use the
-    untouched stream and expose it as separate attributes on a request
-    object.
-
-    .. versionadded:: 0.8
-
-    :param stream_factory: An optional callable that returns a new read and
-                           writeable file descriptor.  This callable works
-                           the same as :meth:`~BaseResponse._get_file_stream`.
-    :param charset: The character set for URL and url encoded form data.
-    :param errors: The encoding error behavior.
-    :param max_form_memory_size: the maximum number of bytes to be accepted for
-                           in-memory stored form data.  If the data
-                           exceeds the value specified an
-                           :exc:`~exceptions.RequestEntityTooLarge`
-                           exception is raised.
-    :param max_content_length: If this is provided and the transmitted data
-                               is longer than this value an
-                               :exc:`~exceptions.RequestEntityTooLarge`
-                               exception is raised.
-    :param cls: an optional dict class to use.  If this is not specified
-                       or `None` the default :class:`MultiDict` is used.
-    :param silent: If set to False parsing errors will not be caught.
-    """
-
-    def __init__(
-        self,
-        stream_factory=None,
-        charset="utf-8",
-        errors="replace",
-        max_form_memory_size=None,
-        max_content_length=None,
-        cls=None,
-        silent=True,
-    ):
-        if stream_factory is None:
-            stream_factory = default_stream_factory
-        self.stream_factory = stream_factory
-        self.charset = charset
-        self.errors = errors
-        self.max_form_memory_size = max_form_memory_size
-        self.max_content_length = max_content_length
-        if cls is None:
-            cls = MultiDict
-        self.cls = cls
-        self.silent = silent
-
-    def get_parse_func(self, mimetype, options):
-        return self.parse_functions.get(mimetype)
-
-    def parse_from_environ(self, environ):
-        """Parses the information from the environment as form data.
-
-        :param environ: the WSGI environment to be used for parsing.
-        :return: A tuple in the form ``(stream, form, files)``.
-        """
-        content_type = environ.get("CONTENT_TYPE", "")
-        content_length = get_content_length(environ)
-        mimetype, options = parse_options_header(content_type)
-        return self.parse(get_input_stream(environ), mimetype, content_length, options)
-
-    def parse(self, stream, mimetype, content_length, options=None):
-        """Parses the information from the given stream, mimetype,
-        content length and mimetype parameters.
-
-        :param stream: an input stream
-        :param mimetype: the mimetype of the data
-        :param content_length: the content length of the incoming data
-        :param options: optional mimetype parameters (used for
-                        the multipart boundary for instance)
-        :return: A tuple in the form ``(stream, form, files)``.
-        """
-        if (
-            self.max_content_length is not None
-            and content_length is not None
-            and content_length > self.max_content_length
-        ):
-            raise exceptions.RequestEntityTooLarge()
-        if options is None:
-            options = {}
-
-        parse_func = self.get_parse_func(mimetype, options)
-        if parse_func is not None:
-            try:
-                return parse_func(self, stream, mimetype, content_length, options)
-            except ValueError:
-                if not self.silent:
-                    raise
-
-        return stream, self.cls(), self.cls()
-
-    @exhaust_stream
-    def _parse_multipart(self, stream, mimetype, content_length, options):
-        parser = MultiPartParser(
-            self.stream_factory,
-            self.charset,
-            self.errors,
-            max_form_memory_size=self.max_form_memory_size,
-            cls=self.cls,
-        )
-        boundary = options.get("boundary")
-        if boundary is None:
-            raise ValueError("Missing boundary")
-        if isinstance(boundary, text_type):
-            boundary = boundary.encode("ascii")
-        form, files = parser.parse(stream, boundary, content_length)
-        return stream, form, files
-
-    @exhaust_stream
-    def _parse_urlencoded(self, stream, mimetype, content_length, options):
-        if (
-            self.max_form_memory_size is not None
-            and content_length is not None
-            and content_length > self.max_form_memory_size
-        ):
-            raise exceptions.RequestEntityTooLarge()
-        form = url_decode_stream(stream, self.charset, errors=self.errors, cls=self.cls)
-        return stream, form, self.cls()
-
-    #: mapping of mimetypes to parsing functions
-    parse_functions = {
-        "multipart/form-data": _parse_multipart,
-        "application/x-www-form-urlencoded": _parse_urlencoded,
-        "application/x-url-encoded": _parse_urlencoded,
-    }
-
-
-def is_valid_multipart_boundary(boundary):
-    """Checks if the string given is a valid multipart boundary."""
-    return _multipart_boundary_re.match(boundary) is not None
-
-
-def _line_parse(line):
-    """Removes line ending characters and returns a tuple (`stripped_line`,
-    `is_terminated`).
-    """
-    if line[-2:] in ["\r\n", b"\r\n"]:
-        return line[:-2], True
-    elif line[-1:] in ["\r", "\n", b"\r", b"\n"]:
-        return line[:-1], True
-    return line, False
-
-
-def parse_multipart_headers(iterable):
-    """Parses multipart headers from an iterable that yields lines (including
-    the trailing newline symbol).  The iterable has to be newline terminated.
-
-    The iterable will stop at the line where the headers ended so it can be
-    further consumed.
-
-    :param iterable: iterable of strings that are newline terminated
-    """
-    result = []
-    for line in iterable:
-        line = to_native(line)
-        line, line_terminated = _line_parse(line)
-        if not line_terminated:
-            raise ValueError("unexpected end of line in multipart header")
-        if not line:
-            break
-        elif line[0] in " \t" and result:
-            key, value = result[-1]
-            result[-1] = (key, value + "\n " + line[1:])
-        else:
-            parts = line.split(":", 1)
-            if len(parts) == 2:
-                result.append((parts[0].strip(), parts[1].strip()))
-
-    # we link the list to the headers, no need to create a copy, the
-    # list was not shared anyways.
-    return Headers(result)
-
-
-_begin_form = "begin_form"
-_begin_file = "begin_file"
-_cont = "cont"
-_end = "end"
-
-
-class MultiPartParser(object):
-    def __init__(
-        self,
-        stream_factory=None,
-        charset="utf-8",
-        errors="replace",
-        max_form_memory_size=None,
-        cls=None,
-        buffer_size=64 * 1024,
-    ):
-        self.charset = charset
-        self.errors = errors
-        self.max_form_memory_size = max_form_memory_size
-        self.stream_factory = (
-            default_stream_factory if stream_factory is None else stream_factory
-        )
-        self.cls = MultiDict if cls is None else cls
-
-        # make sure the buffer size is divisible by four so that we can base64
-        # decode chunk by chunk
-        assert buffer_size % 4 == 0, "buffer size has to be divisible by 4"
-        # also the buffer size has to be at least 1024 bytes long or long headers
-        # will freak out the system
-        assert buffer_size >= 1024, "buffer size has to be at least 1KB"
-
-        self.buffer_size = buffer_size
-
-    def _fix_ie_filename(self, filename):
-        """Internet Explorer 6 transmits the full file name if a file is
-        uploaded.  This function strips the full path if it thinks the
-        filename is Windows-like absolute.
-        """
-        if filename[1:3] == ":\\" or filename[:2] == "\\\\":
-            return filename.split("\\")[-1]
-        return filename
-
-    def _find_terminator(self, iterator):
-        """The terminator might have some additional newlines before it.
-        There is at least one application that sends additional newlines
-        before headers (the python setuptools package).
-        """
-        for line in iterator:
-            if not line:
-                break
-            line = line.strip()
-            if line:
-                return line
-        return b""
-
-    def fail(self, message):
-        raise ValueError(message)
-
-    def get_part_encoding(self, headers):
-        transfer_encoding = headers.get("content-transfer-encoding")
-        if (
-            transfer_encoding is not None
-            and transfer_encoding in _supported_multipart_encodings
-        ):
-            return transfer_encoding
-
-    def get_part_charset(self, headers):
-        # Figure out input charset for current part
-        content_type = headers.get("content-type")
-        if content_type:
-            mimetype, ct_params = parse_options_header(content_type)
-            return ct_params.get("charset", self.charset)
-        return self.charset
-
-    def start_file_streaming(self, filename, headers, total_content_length):
-        if isinstance(filename, bytes):
-            filename = filename.decode(self.charset, self.errors)
-        filename = self._fix_ie_filename(filename)
-        content_type = headers.get("content-type")
-        try:
-            content_length = int(headers["content-length"])
-        except (KeyError, ValueError):
-            content_length = 0
-        container = self.stream_factory(
-            total_content_length=total_content_length,
-            filename=filename,
-            content_type=content_type,
-            content_length=content_length,
-        )
-        return filename, container
-
-    def in_memory_threshold_reached(self, bytes):
-        raise exceptions.RequestEntityTooLarge()
-
-    def validate_boundary(self, boundary):
-        if not boundary:
-            self.fail("Missing boundary")
-        if not is_valid_multipart_boundary(boundary):
-            self.fail("Invalid boundary: %s" % boundary)
-        if len(boundary) > self.buffer_size:  # pragma: no cover
-            # this should never happen because we check for a minimum size
-            # of 1024 and boundaries may not be longer than 200.  The only
-            # situation when this happens is for non debug builds where
-            # the assert is skipped.
-            self.fail("Boundary longer than buffer size")
-
-    def parse_lines(self, file, boundary, content_length, cap_at_buffer=True):
-        """Generate parts of
-        ``('begin_form', (headers, name))``
-        ``('begin_file', (headers, name, filename))``
-        ``('cont', bytestring)``
-        ``('end', None)``
-
-        Always obeys the grammar
-        parts = ( begin_form cont* end |
-                  begin_file cont* end )*
-        """
-        next_part = b"--" + boundary
-        last_part = next_part + b"--"
-
-        iterator = chain(
-            make_line_iter(
-                file,
-                limit=content_length,
-                buffer_size=self.buffer_size,
-                cap_at_buffer=cap_at_buffer,
-            ),
-            _empty_string_iter,
-        )
-
-        terminator = self._find_terminator(iterator)
-
-        if terminator == last_part:
-            return
-        elif terminator != next_part:
-            self.fail("Expected boundary at start of multipart data")
-
-        while terminator != last_part:
-            headers = parse_multipart_headers(iterator)
-
-            disposition = headers.get("content-disposition")
-            if disposition is None:
-                self.fail("Missing Content-Disposition header")
-            disposition, extra = parse_options_header(disposition)
-            transfer_encoding = self.get_part_encoding(headers)
-            name = extra.get("name")
-            filename = extra.get("filename")
-
-            # if no content type is given we stream into memory.  A list is
-            # used as a temporary container.
-            if filename is None:
-                yield _begin_form, (headers, name)
-
-            # otherwise we parse the rest of the headers and ask the stream
-            # factory for something we can write in.
-            else:
-                yield _begin_file, (headers, name, filename)
-
-            buf = b""
-            for line in iterator:
-                if not line:
-                    self.fail("unexpected end of stream")
-
-                if line[:2] == b"--":
-                    terminator = line.rstrip()
-                    if terminator in (next_part, last_part):
-                        break
-
-                if transfer_encoding is not None:
-                    if transfer_encoding == "base64":
-                        transfer_encoding = "base64_codec"
-                    try:
-                        line = codecs.decode(line, transfer_encoding)
-                    except Exception:
-                        self.fail("could not decode transfer encoded chunk")
-
-                # we have something in the buffer from the last iteration.
-                # this is usually a newline delimiter.
-                if buf:
-                    yield _cont, buf
-                    buf = b""
-
-                # If the line ends with windows CRLF we write everything except
-                # the last two bytes.  In all other cases however we write
-                # everything except the last byte.  If it was a newline, that's
-                # fine, otherwise it does not matter because we will write it
-                # the next iteration.  this ensures we do not write the
-                # final newline into the stream.  That way we do not have to
-                # truncate the stream.  However we do have to make sure that
-                # if something else than a newline is in there we write it
-                # out.
-                if line[-2:] == b"\r\n":
-                    buf = b"\r\n"
-                    cutoff = -2
-                else:
-                    buf = line[-1:]
-                    cutoff = -1
-                yield _cont, line[:cutoff]
-
-            else:  # pragma: no cover
-                raise ValueError("unexpected end of part")
-
-            # if we have a leftover in the buffer that is not a newline
-            # character we have to flush it, otherwise we will chop of
-            # certain values.
-            if buf not in (b"", b"\r", b"\n", b"\r\n"):
-                yield _cont, buf
-
-            yield _end, None
-
-    def parse_parts(self, file, boundary, content_length):
-        """Generate ``('file', (name, val))`` and
-        ``('form', (name, val))`` parts.
-        """
-        in_memory = 0
-
-        for ellt, ell in self.parse_lines(file, boundary, content_length):
-            if ellt == _begin_file:
-                headers, name, filename = ell
-                is_file = True
-                guard_memory = False
-                filename, container = self.start_file_streaming(
-                    filename, headers, content_length
-                )
-                _write = container.write
-
-            elif ellt == _begin_form:
-                headers, name = ell
-                is_file = False
-                container = []
-                _write = container.append
-                guard_memory = self.max_form_memory_size is not None
-
-            elif ellt == _cont:
-                _write(ell)
-                # if we write into memory and there is a memory size limit we
-                # count the number of bytes in memory and raise an exception if
-                # there is too much data in memory.
-                if guard_memory:
-                    in_memory += len(ell)
-                    if in_memory > self.max_form_memory_size:
-                        self.in_memory_threshold_reached(in_memory)
-
-            elif ellt == _end:
-                if is_file:
-                    container.seek(0)
-                    yield (
-                        "file",
-                        (name, FileStorage(container, filename, name, headers=headers)),
-                    )
-                else:
-                    part_charset = self.get_part_charset(headers)
-                    yield (
-                        "form",
-                        (name, b"".join(container).decode(part_charset, self.errors)),
-                    )
-
-    def parse(self, file, boundary, content_length):
-        formstream, filestream = tee(
-            self.parse_parts(file, boundary, content_length), 2
-        )
-        form = (p[1] for p in formstream if p[0] == "form")
-        files = (p[1] for p in filestream if p[0] == "file")
-        return self.cls(form), self.cls(files)
-
-
-from . import exceptions
diff --git a/azure/functions/_thirdparty/werkzeug/http.py b/azure/functions/_thirdparty/werkzeug/http.py
deleted file mode 100644
index 3f40b308..00000000
--- a/azure/functions/_thirdparty/werkzeug/http.py
+++ /dev/null
@@ -1,1249 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    werkzeug.http
-    ~~~~~~~~~~~~~
-
-    Werkzeug comes with a bunch of utilities that help Werkzeug to deal with
-    HTTP data.  Most of the classes and functions provided by this module are
-    used by the wrappers, but they are useful on their own, too, especially if
-    the response and request objects are not used.
-
-    This covers some of the more HTTP centric features of WSGI, some other
-    utilities such as cookie handling are documented in the `werkzeug.utils`
-    module.
-
-
-    :copyright: 2007 Pallets
-    :license: BSD-3-Clause
-"""
-import base64
-import re
-import warnings
-from datetime import datetime
-from datetime import timedelta
-from hashlib import md5
-from time import gmtime
-from time import time
-
-from ._compat import integer_types
-from ._compat import iteritems
-from ._compat import PY2
-from ._compat import string_types
-from ._compat import text_type
-from ._compat import to_bytes
-from ._compat import to_unicode
-from ._compat import try_coerce_native
-from ._internal import _cookie_parse_impl
-from ._internal import _cookie_quote
-from ._internal import _make_cookie_domain
-
-try:
-    from email.utils import parsedate_tz
-except ImportError:
-    from email.Utils import parsedate_tz
-
-try:
-    from urllib.request import parse_http_list as _parse_list_header
-    from urllib.parse import unquote_to_bytes as _unquote
-except ImportError:
-    from urllib2 import parse_http_list as _parse_list_header
-    from urllib2 import unquote as _unquote
-
-_cookie_charset = "latin1"
-_basic_auth_charset = "utf-8"
-# for explanation of "media-range", etc. see Sections 5.3.{1,2} of RFC 7231
-_accept_re = re.compile(
-    r"""
-    (                       # media-range capturing-parenthesis
-      [^\s;,]+              # type/subtype
-      (?:[ \t]*;[ \t]*      # ";"
-        (?:                 # parameter non-capturing-parenthesis
-          [^\s;,q][^\s;,]*  # token that doesn't start with "q"
-        |                   # or
-          q[^\s;,=][^\s;,]* # token that is more than just "q"
-        )
-      )*                    # zero or more parameters
-    )                       # end of media-range
-    (?:[ \t]*;[ \t]*q=      # weight is a "q" parameter
-      (\d*(?:\.\d+)?)       # qvalue capturing-parentheses
-      [^,]*                 # "extension" accept params: who cares?
-    )?                      # accept params are optional
-    """,
-    re.VERBOSE,
-)
-_token_chars = frozenset(
-    "!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ^_`abcdefghijklmnopqrstuvwxyz|~"
-)
-_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)')
-_unsafe_header_chars = set('()<>@,;:"/[]?={} \t')
-_option_header_piece_re = re.compile(
-    r"""
-    ;\s*,?\s*  # newlines were replaced with commas
-    (?P<key>
-        "[^"\\]*(?:\\.[^"\\]*)*"  # quoted string
-    |
-        [^\s;,=*]+  # token
-    )
-    (?:\*(?P<count>\d+))?  # *1, optional continuation index
-    \s*
-    (?:  # optionally followed by =value
-        (?:  # equals sign, possibly with encoding
-            \*\s*=\s*  # * indicates extended notation
-            (?:  # optional encoding
-                (?P<encoding>[^\s]+?)
-                '(?P<language>[^\s]*?)'
-            )?
-        |
-            =\s*  # basic notation
-        )
-        (?P<value>
-            "[^"\\]*(?:\\.[^"\\]*)*"  # quoted string
-        |
-            [^;,]+  # token
-        )?
-    )?
-    \s*
-    """,
-    flags=re.VERBOSE,
-)
-_option_header_start_mime_type = re.compile(r",\s*([^;,\s]+)([;,]\s*.+)?")
-
-_entity_headers = frozenset(
-    [
-        "allow",
-        "content-encoding",
-        "content-language",
-        "content-length",
-        "content-location",
-        "content-md5",
-        "content-range",
-        "content-type",
-        "expires",
-        "last-modified",
-    ]
-)
-_hop_by_hop_headers = frozenset(
-    [
-        "connection",
-        "keep-alive",
-        "proxy-authenticate",
-        "proxy-authorization",
-        "te",
-        "trailer",
-        "transfer-encoding",
-        "upgrade",
-    ]
-)
-
-
-HTTP_STATUS_CODES = {
-    100: "Continue",
-    101: "Switching Protocols",
-    102: "Processing",
-    200: "OK",
-    201: "Created",
-    202: "Accepted",
-    203: "Non Authoritative Information",
-    204: "No Content",
-    205: "Reset Content",
-    206: "Partial Content",
-    207: "Multi Status",
-    226: "IM Used",  # see RFC 3229
-    300: "Multiple Choices",
-    301: "Moved Permanently",
-    302: "Found",
-    303: "See Other",
-    304: "Not Modified",
-    305: "Use Proxy",
-    307: "Temporary Redirect",
-    308: "Permanent Redirect",
-    400: "Bad Request",
-    401: "Unauthorized",
-    402: "Payment Required",  # unused
-    403: "Forbidden",
-    404: "Not Found",
-    405: "Method Not Allowed",
-    406: "Not Acceptable",
-    407: "Proxy Authentication Required",
-    408: "Request Timeout",
-    409: "Conflict",
-    410: "Gone",
-    411: "Length Required",
-    412: "Precondition Failed",
-    413: "Request Entity Too Large",
-    414: "Request URI Too Long",
-    415: "Unsupported Media Type",
-    416: "Requested Range Not Satisfiable",
-    417: "Expectation Failed",
-    418: "I'm a teapot",  # see RFC 2324
-    421: "Misdirected Request",  # see RFC 7540
-    422: "Unprocessable Entity",
-    423: "Locked",
-    424: "Failed Dependency",
-    426: "Upgrade Required",
-    428: "Precondition Required",  # see RFC 6585
-    429: "Too Many Requests",
-    431: "Request Header Fields Too Large",
-    449: "Retry With",  # proprietary MS extension
-    451: "Unavailable For Legal Reasons",
-    500: "Internal Server Error",
-    501: "Not Implemented",
-    502: "Bad Gateway",
-    503: "Service Unavailable",
-    504: "Gateway Timeout",
-    505: "HTTP Version Not Supported",
-    507: "Insufficient Storage",
-    510: "Not Extended",
-}
-
-
-def wsgi_to_bytes(data):
-    """coerce wsgi unicode represented bytes to real ones"""
-    if isinstance(data, bytes):
-        return data
-    return data.encode("latin1")  # XXX: utf8 fallback?
-
-
-def bytes_to_wsgi(data):
-    assert isinstance(data, bytes), "data must be bytes"
-    if isinstance(data, str):
-        return data
-    else:
-        return data.decode("latin1")
-
-
-def quote_header_value(value, extra_chars="", allow_token=True):
-    """Quote a header value if necessary.
-
-    .. versionadded:: 0.5
-
-    :param value: the value to quote.
-    :param extra_chars: a list of extra characters to skip quoting.
-    :param allow_token: if this is enabled token values are returned
-                        unchanged.
-    """
-    if isinstance(value, bytes):
-        value = bytes_to_wsgi(value)
-    value = str(value)
-    if allow_token:
-        token_chars = _token_chars | set(extra_chars)
-        if set(value).issubset(token_chars):
-            return value
-    return '"%s"' % value.replace("\\", "\\\\").replace('"', '\\"')
-
-
-def unquote_header_value(value, is_filename=False):
-    r"""Unquotes a header value.  (Reversal of :func:`quote_header_value`).
-    This does not use the real unquoting but what browsers are actually
-    using for quoting.
-
-    .. versionadded:: 0.5
-
-    :param value: the header value to unquote.
-    """
-    if value and value[0] == value[-1] == '"':
-        # this is not the real unquoting, but fixing this so that the
-        # RFC is met will result in bugs with internet explorer and
-        # probably some other browsers as well.  IE for example is
-        # uploading files with "C:\foo\bar.txt" as filename
-        value = value[1:-1]
-
-        # if this is a filename and the starting characters look like
-        # a UNC path, then just return the value without quotes.  Using the
-        # replace sequence below on a UNC path has the effect of turning
-        # the leading double slash into a single slash and then
-        # _fix_ie_filename() doesn't work correctly.  See #458.
-        if not is_filename or value[:2] != "\\\\":
-            return value.replace("\\\\", "\\").replace('\\"', '"')
-    return value
-
-
-def dump_options_header(header, options):
-    """The reverse function to :func:`parse_options_header`.
-
-    :param header: the header to dump
-    :param options: a dict of options to append.
-    """
-    segments = []
-    if header is not None:
-        segments.append(header)
-    for key, value in iteritems(options):
-        if value is None:
-            segments.append(key)
-        else:
-            segments.append("%s=%s" % (key, quote_header_value(value)))
-    return "; ".join(segments)
-
-
-def dump_header(iterable, allow_token=True):
-    """Dump an HTTP header again.  This is the reversal of
-    :func:`parse_list_header`, :func:`parse_set_header` and
-    :func:`parse_dict_header`.  This also quotes strings that include an
-    equals sign unless you pass it as dict of key, value pairs.
-
-    >>> dump_header({'foo': 'bar baz'})
-    'foo="bar baz"'
-    >>> dump_header(('foo', 'bar baz'))
-    'foo, "bar baz"'
-
-    :param iterable: the iterable or dict of values to quote.
-    :param allow_token: if set to `False` tokens as values are disallowed.
-                        See :func:`quote_header_value` for more details.
-    """
-    if isinstance(iterable, dict):
-        items = []
-        for key, value in iteritems(iterable):
-            if value is None:
-                items.append(key)
-            else:
-                items.append(
-                    "%s=%s" % (key, quote_header_value(value, allow_token=allow_token))
-                )
-    else:
-        items = [quote_header_value(x, allow_token=allow_token) for x in iterable]
-    return ", ".join(items)
-
-
-def parse_list_header(value):
-    """Parse lists as described by RFC 2068 Section 2.
-
-    In particular, parse comma-separated lists where the elements of
-    the list may include quoted-strings.  A quoted-string could
-    contain a comma.  A non-quoted string could have quotes in the
-    middle.  Quotes are removed automatically after parsing.
-
-    It basically works like :func:`parse_set_header` just that items
-    may appear multiple times and case sensitivity is preserved.
-
-    The return value is a standard :class:`list`:
-
-    >>> parse_list_header('token, "quoted value"')
-    ['token', 'quoted value']
-
-    To create a header from the :class:`list` again, use the
-    :func:`dump_header` function.
-
-    :param value: a string with a list header.
-    :return: :class:`list`
-    """
-    result = []
-    for item in _parse_list_header(value):
-        if item[:1] == item[-1:] == '"':
-            item = unquote_header_value(item[1:-1])
-        result.append(item)
-    return result
-
-
-def parse_dict_header(value, cls=dict):
-    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
-    convert them into a python dict (or any other mapping object created from
-    the type with a dict like interface provided by the `cls` argument):
-
-    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
-    >>> type(d) is dict
-    True
-    >>> sorted(d.items())
-    [('bar', 'as well'), ('foo', 'is a fish')]
-
-    If there is no value for a key it will be `None`:
-
-    >>> parse_dict_header('key_without_value')
-    {'key_without_value': None}
-
-    To create a header from the :class:`dict` again, use the
-    :func:`dump_header` function.
-
-    .. versionchanged:: 0.9
-       Added support for `cls` argument.
-
-    :param value: a string with a dict header.
-    :param cls: callable to use for storage of parsed results.
-    :return: an instance of `cls`
-    """
-    result = cls()
-    if not isinstance(value, text_type):
-        # XXX: validate
-        value = bytes_to_wsgi(value)
-    for item in _parse_list_header(value):
-        if "=" not in item:
-            result[item] = None
-            continue
-        name, value = item.split("=", 1)
-        if value[:1] == value[-1:] == '"':
-            value = unquote_header_value(value[1:-1])
-        result[name] = value
-    return result
-
-
-def parse_options_header(value, multiple=False):
-    """Parse a ``Content-Type`` like header into a tuple with the content
-    type and the options:
-
-    >>> parse_options_header('text/html; charset=utf8')
-    ('text/html', {'charset': 'utf8'})
-
-    This should not be used to parse ``Cache-Control`` like headers that use
-    a slightly different format.  For these headers use the
-    :func:`parse_dict_header` function.
-
-    .. versionchanged:: 0.15
-        :rfc:`2231` parameter continuations are handled.
-
-    .. versionadded:: 0.5
-
-    :param value: the header to parse.
-    :param multiple: Whether try to parse and return multiple MIME types
-    :return: (mimetype, options) or (mimetype, options, mimetype, options, …)
-             if multiple=True
-    """
-    if not value:
-        return "", {}
-
-    result = []
-
-    value = "," + value.replace("\n", ",")
-    while value:
-        match = _option_header_start_mime_type.match(value)
-        if not match:
-            break
-        result.append(match.group(1))  # mimetype
-        options = {}
-        # Parse options
-        rest = match.group(2)
-        continued_encoding = None
-        while rest:
-            optmatch = _option_header_piece_re.match(rest)
-            if not optmatch:
-                break
-            option, count, encoding, language, option_value = optmatch.groups()
-            # Continuations don't have to supply the encoding after the
-            # first line. If we're in a continuation, track the current
-            # encoding to use for subsequent lines. Reset it when the
-            # continuation ends.
-            if not count:
-                continued_encoding = None
-            else:
-                if not encoding:
-                    encoding = continued_encoding
-                continued_encoding = encoding
-            option = unquote_header_value(option)
-            if option_value is not None:
-                option_value = unquote_header_value(option_value, option == "filename")
-                if encoding is not None:
-                    option_value = _unquote(option_value).decode(encoding)
-            if count:
-                # Continuations append to the existing value. For
-                # simplicity, this ignores the possibility of
-                # out-of-order indices, which shouldn't happen anyway.
-                options[option] = options.get(option, "") + option_value
-            else:
-                options[option] = option_value
-            rest = rest[optmatch.end() :]
-        result.append(options)
-        if multiple is False:
-            return tuple(result)
-        value = rest
-
-    return tuple(result) if result else ("", {})
-
-
-def parse_accept_header(value, cls=None):
-    """Parses an HTTP Accept-* header.  This does not implement a complete
-    valid algorithm but one that supports at least value and quality
-    extraction.
-
-    Returns a new :class:`Accept` object (basically a list of ``(value, quality)``
-    tuples sorted by the quality with some additional accessor methods).
-
-    The second parameter can be a subclass of :class:`Accept` that is created
-    with the parsed values and returned.
-
-    :param value: the accept header string to be parsed.
-    :param cls: the wrapper class for the return value (can be
-                         :class:`Accept` or a subclass thereof)
-    :return: an instance of `cls`.
-    """
-    if cls is None:
-        cls = Accept
-
-    if not value:
-        return cls(None)
-
-    result = []
-    for match in _accept_re.finditer(value):
-        quality = match.group(2)
-        if not quality:
-            quality = 1
-        else:
-            quality = max(min(float(quality), 1), 0)
-        result.append((match.group(1), quality))
-    return cls(result)
-
-
-def parse_cache_control_header(value, on_update=None, cls=None):
-    """Parse a cache control header.  The RFC differs between response and
-    request cache control, this method does not.  It's your responsibility
-    to not use the wrong control statements.
-
-    .. versionadded:: 0.5
-       The `cls` was added.  If not specified an immutable
-       :class:`~werkzeug.datastructures.RequestCacheControl` is returned.
-
-    :param value: a cache control header to be parsed.
-    :param on_update: an optional callable that is called every time a value
-                      on the :class:`~werkzeug.datastructures.CacheControl`
-                      object is changed.
-    :param cls: the class for the returned object.  By default
-                :class:`~werkzeug.datastructures.RequestCacheControl` is used.
-    :return: a `cls` object.
-    """
-    if cls is None:
-        cls = RequestCacheControl
-    if not value:
-        return cls(None, on_update)
-    return cls(parse_dict_header(value), on_update)
-
-
-def parse_set_header(value, on_update=None):
-    """Parse a set-like header and return a
-    :class:`~werkzeug.datastructures.HeaderSet` object:
-
-    >>> hs = parse_set_header('token, "quoted value"')
-
-    The return value is an object that treats the items case-insensitively
-    and keeps the order of the items:
-
-    >>> 'TOKEN' in hs
-    True
-    >>> hs.index('quoted value')
-    1
-    >>> hs
-    HeaderSet(['token', 'quoted value'])
-
-    To create a header from the :class:`HeaderSet` again, use the
-    :func:`dump_header` function.
-
-    :param value: a set header to be parsed.
-    :param on_update: an optional callable that is called every time a
-                      value on the :class:`~werkzeug.datastructures.HeaderSet`
-                      object is changed.
-    :return: a :class:`~werkzeug.datastructures.HeaderSet`
-    """
-    if not value:
-        return HeaderSet(None, on_update)
-    return HeaderSet(parse_list_header(value), on_update)
-
-
-def parse_authorization_header(value):
-    """Parse an HTTP basic/digest authorization header transmitted by the web
-    browser.  The return value is either `None` if the header was invalid or
-    not given, otherwise an :class:`~werkzeug.datastructures.Authorization`
-    object.
-
-    :param value: the authorization header to parse.
-    :return: a :class:`~werkzeug.datastructures.Authorization` object or `None`.
-    """
-    if not value:
-        return
-    value = wsgi_to_bytes(value)
-    try:
-        auth_type, auth_info = value.split(None, 1)
-        auth_type = auth_type.lower()
-    except ValueError:
-        return
-    if auth_type == b"basic":
-        try:
-            username, password = base64.b64decode(auth_info).split(b":", 1)
-        except Exception:
-            return
-        return Authorization(
-            "basic",
-            {
-                "username": to_unicode(username, _basic_auth_charset),
-                "password": to_unicode(password, _basic_auth_charset),
-            },
-        )
-    elif auth_type == b"digest":
-        auth_map = parse_dict_header(auth_info)
-        for key in "username", "realm", "nonce", "uri", "response":
-            if key not in auth_map:
-                return
-        if "qop" in auth_map:
-            if not auth_map.get("nc") or not auth_map.get("cnonce"):
-                return
-        return Authorization("digest", auth_map)
-
-
-def parse_www_authenticate_header(value, on_update=None):
-    """Parse an HTTP WWW-Authenticate header into a
-    :class:`~werkzeug.datastructures.WWWAuthenticate` object.
-
-    :param value: a WWW-Authenticate header to parse.
-    :param on_update: an optional callable that is called every time a value
-                      on the :class:`~werkzeug.datastructures.WWWAuthenticate`
-                      object is changed.
-    :return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object.
-    """
-    if not value:
-        return WWWAuthenticate(on_update=on_update)
-    try:
-        auth_type, auth_info = value.split(None, 1)
-        auth_type = auth_type.lower()
-    except (ValueError, AttributeError):
-        return WWWAuthenticate(value.strip().lower(), on_update=on_update)
-    return WWWAuthenticate(auth_type, parse_dict_header(auth_info), on_update)
-
-
-def parse_if_range_header(value):
-    """Parses an if-range header which can be an etag or a date.  Returns
-    a :class:`~werkzeug.datastructures.IfRange` object.
-
-    .. versionadded:: 0.7
-    """
-    if not value:
-        return IfRange()
-    date = parse_date(value)
-    if date is not None:
-        return IfRange(date=date)
-    # drop weakness information
-    return IfRange(unquote_etag(value)[0])
-
-
-def parse_range_header(value, make_inclusive=True):
-    """Parses a range header into a :class:`~werkzeug.datastructures.Range`
-    object.  If the header is missing or malformed `None` is returned.
-    `ranges` is a list of ``(start, stop)`` tuples where the ranges are
-    non-inclusive.
-
-    .. versionadded:: 0.7
-    """
-    if not value or "=" not in value:
-        return None
-
-    ranges = []
-    last_end = 0
-    units, rng = value.split("=", 1)
-    units = units.strip().lower()
-
-    for item in rng.split(","):
-        item = item.strip()
-        if "-" not in item:
-            return None
-        if item.startswith("-"):
-            if last_end < 0:
-                return None
-            try:
-                begin = int(item)
-            except ValueError:
-                return None
-            end = None
-            last_end = -1
-        elif "-" in item:
-            begin, end = item.split("-", 1)
-            begin = begin.strip()
-            end = end.strip()
-            if not begin.isdigit():
-                return None
-            begin = int(begin)
-            if begin < last_end or last_end < 0:
-                return None
-            if end:
-                if not end.isdigit():
-                    return None
-                end = int(end) + 1
-                if begin >= end:
-                    return None
-            else:
-                end = None
-            last_end = end
-        ranges.append((begin, end))
-
-    return Range(units, ranges)
-
-
-def parse_content_range_header(value, on_update=None):
-    """Parses a range header into a
-    :class:`~werkzeug.datastructures.ContentRange` object or `None` if
-    parsing is not possible.
-
-    .. versionadded:: 0.7
-
-    :param value: a content range header to be parsed.
-    :param on_update: an optional callable that is called every time a value
-                      on the :class:`~werkzeug.datastructures.ContentRange`
-                      object is changed.
-    """
-    if value is None:
-        return None
-    try:
-        units, rangedef = (value or "").strip().split(None, 1)
-    except ValueError:
-        return None
-
-    if "/" not in rangedef:
-        return None
-    rng, length = rangedef.split("/", 1)
-    if length == "*":
-        length = None
-    elif length.isdigit():
-        length = int(length)
-    else:
-        return None
-
-    if rng == "*":
-        return ContentRange(units, None, None, length, on_update=on_update)
-    elif "-" not in rng:
-        return None
-
-    start, stop = rng.split("-", 1)
-    try:
-        start = int(start)
-        stop = int(stop) + 1
-    except ValueError:
-        return None
-
-    if is_byte_range_valid(start, stop, length):
-        return ContentRange(units, start, stop, length, on_update=on_update)
-
-
-def quote_etag(etag, weak=False):
-    """Quote an etag.
-
-    :param etag: the etag to quote.
-    :param weak: set to `True` to tag it "weak".
-    """
-    if '"' in etag:
-        raise ValueError("invalid etag")
-    etag = '"%s"' % etag
-    if weak:
-        etag = "W/" + etag
-    return etag
-
-
-def unquote_etag(etag):
-    """Unquote a single etag:
-
-    >>> unquote_etag('W/"bar"')
-    ('bar', True)
-    >>> unquote_etag('"bar"')
-    ('bar', False)
-
-    :param etag: the etag identifier to unquote.
-    :return: a ``(etag, weak)`` tuple.
-    """
-    if not etag:
-        return None, None
-    etag = etag.strip()
-    weak = False
-    if etag.startswith(("W/", "w/")):
-        weak = True
-        etag = etag[2:]
-    if etag[:1] == etag[-1:] == '"':
-        etag = etag[1:-1]
-    return etag, weak
-
-
-def parse_etags(value):
-    """Parse an etag header.
-
-    :param value: the tag header to parse
-    :return: an :class:`~werkzeug.datastructures.ETags` object.
-    """
-    if not value:
-        return ETags()
-    strong = []
-    weak = []
-    end = len(value)
-    pos = 0
-    while pos < end:
-        match = _etag_re.match(value, pos)
-        if match is None:
-            break
-        is_weak, quoted, raw = match.groups()
-        if raw == "*":
-            return ETags(star_tag=True)
-        elif quoted:
-            raw = quoted
-        if is_weak:
-            weak.append(raw)
-        else:
-            strong.append(raw)
-        pos = match.end()
-    return ETags(strong, weak)
-
-
-def generate_etag(data):
-    """Generate an etag for some data."""
-    return md5(data).hexdigest()
-
-
-def parse_date(value):
-    """Parse one of the following date formats into a datetime object:
-
-    .. sourcecode:: text
-
-        Sun, 06 Nov 1994 08:49:37 GMT  ; RFC 822, updated by RFC 1123
-        Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
-        Sun Nov  6 08:49:37 1994       ; ANSI C's asctime() format
-
-    If parsing fails the return value is `None`.
-
-    :param value: a string with a supported date format.
-    :return: a :class:`datetime.datetime` object.
-    """
-    if value:
-        t = parsedate_tz(value.strip())
-        if t is not None:
-            try:
-                year = t[0]
-                # unfortunately that function does not tell us if two digit
-                # years were part of the string, or if they were prefixed
-                # with two zeroes.  So what we do is to assume that 69-99
-                # refer to 1900, and everything below to 2000
-                if year >= 0 and year <= 68:
-                    year += 2000
-                elif year >= 69 and year <= 99:
-                    year += 1900
-                return datetime(*((year,) + t[1:7])) - timedelta(seconds=t[-1] or 0)
-            except (ValueError, OverflowError):
-                return None
-
-
-def _dump_date(d, delim):
-    """Used for `http_date` and `cookie_date`."""
-    if d is None:
-        d = gmtime()
-    elif isinstance(d, datetime):
-        d = d.utctimetuple()
-    elif isinstance(d, (integer_types, float)):
-        d = gmtime(d)
-    return "%s, %02d%s%s%s%s %02d:%02d:%02d GMT" % (
-        ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")[d.tm_wday],
-        d.tm_mday,
-        delim,
-        (
-            "Jan",
-            "Feb",
-            "Mar",
-            "Apr",
-            "May",
-            "Jun",
-            "Jul",
-            "Aug",
-            "Sep",
-            "Oct",
-            "Nov",
-            "Dec",
-        )[d.tm_mon - 1],
-        delim,
-        str(d.tm_year),
-        d.tm_hour,
-        d.tm_min,
-        d.tm_sec,
-    )
-
-
-def cookie_date(expires=None):
-    """Formats the time to ensure compatibility with Netscape's cookie
-    standard.
-
-    Accepts a floating point number expressed in seconds since the epoch in, a
-    datetime object or a timetuple.  All times in UTC.  The :func:`parse_date`
-    function can be used to parse such a date.
-
-    Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``.
-
-    :param expires: If provided that date is used, otherwise the current.
-    """
-    return _dump_date(expires, "-")
-
-
-def http_date(timestamp=None):
-    """Formats the time to match the RFC1123 date format.
-
-    Accepts a floating point number expressed in seconds since the epoch in, a
-    datetime object or a timetuple.  All times in UTC.  The :func:`parse_date`
-    function can be used to parse such a date.
-
-    Outputs a string in the format ``Wdy, DD Mon YYYY HH:MM:SS GMT``.
-
-    :param timestamp: If provided that date is used, otherwise the current.
-    """
-    return _dump_date(timestamp, " ")
-
-
-def parse_age(value=None):
-    """Parses a base-10 integer count of seconds into a timedelta.
-
-    If parsing fails, the return value is `None`.
-
-    :param value: a string consisting of an integer represented in base-10
-    :return: a :class:`datetime.timedelta` object or `None`.
-    """
-    if not value:
-        return None
-    try:
-        seconds = int(value)
-    except ValueError:
-        return None
-    if seconds < 0:
-        return None
-    try:
-        return timedelta(seconds=seconds)
-    except OverflowError:
-        return None
-
-
-def dump_age(age=None):
-    """Formats the duration as a base-10 integer.
-
-    :param age: should be an integer number of seconds,
-                a :class:`datetime.timedelta` object, or,
-                if the age is unknown, `None` (default).
-    """
-    if age is None:
-        return
-    if isinstance(age, timedelta):
-        # do the equivalent of Python 2.7's timedelta.total_seconds(),
-        # but disregarding fractional seconds
-        age = age.seconds + (age.days * 24 * 3600)
-
-    age = int(age)
-    if age < 0:
-        raise ValueError("age cannot be negative")
-
-    return str(age)
-
-
-def is_resource_modified(
-    environ, etag=None, data=None, last_modified=None, ignore_if_range=True
-):
-    """Convenience method for conditional requests.
-
-    :param environ: the WSGI environment of the request to be checked.
-    :param etag: the etag for the response for comparison.
-    :param data: or alternatively the data of the response to automatically
-                 generate an etag using :func:`generate_etag`.
-    :param last_modified: an optional date of the last modification.
-    :param ignore_if_range: If `False`, `If-Range` header will be taken into
-                            account.
-    :return: `True` if the resource was modified, otherwise `False`.
-    """
-    if etag is None and data is not None:
-        etag = generate_etag(data)
-    elif data is not None:
-        raise TypeError("both data and etag given")
-    if environ["REQUEST_METHOD"] not in ("GET", "HEAD"):
-        return False
-
-    unmodified = False
-    if isinstance(last_modified, string_types):
-        last_modified = parse_date(last_modified)
-
-    # ensure that microsecond is zero because the HTTP spec does not transmit
-    # that either and we might have some false positives.  See issue #39
-    if last_modified is not None:
-        last_modified = last_modified.replace(microsecond=0)
-
-    if_range = None
-    if not ignore_if_range and "HTTP_RANGE" in environ:
-        # https://tools.ietf.org/html/rfc7233#section-3.2
-        # A server MUST ignore an If-Range header field received in a request
-        # that does not contain a Range header field.
-        if_range = parse_if_range_header(environ.get("HTTP_IF_RANGE"))
-
-    if if_range is not None and if_range.date is not None:
-        modified_since = if_range.date
-    else:
-        modified_since = parse_date(environ.get("HTTP_IF_MODIFIED_SINCE"))
-
-    if modified_since and last_modified and last_modified <= modified_since:
-        unmodified = True
-
-    if etag:
-        etag, _ = unquote_etag(etag)
-        if if_range is not None and if_range.etag is not None:
-            unmodified = parse_etags(if_range.etag).contains(etag)
-        else:
-            if_none_match = parse_etags(environ.get("HTTP_IF_NONE_MATCH"))
-            if if_none_match:
-                # https://tools.ietf.org/html/rfc7232#section-3.2
-                # "A recipient MUST use the weak comparison function when comparing
-                # entity-tags for If-None-Match"
-                unmodified = if_none_match.contains_weak(etag)
-
-            # https://tools.ietf.org/html/rfc7232#section-3.1
-            # "Origin server MUST use the strong comparison function when
-            # comparing entity-tags for If-Match"
-            if_match = parse_etags(environ.get("HTTP_IF_MATCH"))
-            if if_match:
-                unmodified = not if_match.is_strong(etag)
-
-    return not unmodified
-
-
-def remove_entity_headers(headers, allowed=("expires", "content-location")):
-    """Remove all entity headers from a list or :class:`Headers` object.  This
-    operation works in-place.  `Expires` and `Content-Location` headers are
-    by default not removed.  The reason for this is :rfc:`2616` section
-    10.3.5 which specifies some entity headers that should be sent.
-
-    .. versionchanged:: 0.5
-       added `allowed` parameter.
-
-    :param headers: a list or :class:`Headers` object.
-    :param allowed: a list of headers that should still be allowed even though
-                    they are entity headers.
-    """
-    allowed = set(x.lower() for x in allowed)
-    headers[:] = [
-        (key, value)
-        for key, value in headers
-        if not is_entity_header(key) or key.lower() in allowed
-    ]
-
-
-def remove_hop_by_hop_headers(headers):
-    """Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or
-    :class:`Headers` object.  This operation works in-place.
-
-    .. versionadded:: 0.5
-
-    :param headers: a list or :class:`Headers` object.
-    """
-    headers[:] = [
-        (key, value) for key, value in headers if not is_hop_by_hop_header(key)
-    ]
-
-
-def is_entity_header(header):
-    """Check if a header is an entity header.
-
-    .. versionadded:: 0.5
-
-    :param header: the header to test.
-    :return: `True` if it's an entity header, `False` otherwise.
-    """
-    return header.lower() in _entity_headers
-
-
-def is_hop_by_hop_header(header):
-    """Check if a header is an HTTP/1.1 "Hop-by-Hop" header.
-
-    .. versionadded:: 0.5
-
-    :param header: the header to test.
-    :return: `True` if it's an HTTP/1.1 "Hop-by-Hop" header, `False` otherwise.
-    """
-    return header.lower() in _hop_by_hop_headers
-
-
-def parse_cookie(header, charset="utf-8", errors="replace", cls=None):
-    """Parse a cookie.  Either from a string or WSGI environ.
-
-    Per default encoding errors are ignored.  If you want a different behavior
-    you can set `errors` to ``'replace'`` or ``'strict'``.  In strict mode a
-    :exc:`HTTPUnicodeError` is raised.
-
-    .. versionchanged:: 0.5
-       This function now returns a :class:`TypeConversionDict` instead of a
-       regular dict.  The `cls` parameter was added.
-
-    :param header: the header to be used to parse the cookie.  Alternatively
-                   this can be a WSGI environment.
-    :param charset: the charset for the cookie values.
-    :param errors: the error behavior for the charset decoding.
-    :param cls: an optional dict class to use.  If this is not specified
-                       or `None` the default :class:`TypeConversionDict` is
-                       used.
-    """
-    if isinstance(header, dict):
-        header = header.get("HTTP_COOKIE", "")
-    elif header is None:
-        header = ""
-
-    # If the value is an unicode string it's mangled through latin1.  This
-    # is done because on PEP 3333 on Python 3 all headers are assumed latin1
-    # which however is incorrect for cookies, which are sent in page encoding.
-    # As a result we
-    if isinstance(header, text_type):
-        header = header.encode("latin1", "replace")
-
-    if cls is None:
-        cls = TypeConversionDict
-
-    def _parse_pairs():
-        for key, val in _cookie_parse_impl(header):
-            key = to_unicode(key, charset, errors, allow_none_charset=True)
-            if not key:
-                continue
-            val = to_unicode(val, charset, errors, allow_none_charset=True)
-            yield try_coerce_native(key), val
-
-    return cls(_parse_pairs())
-
-
-def dump_cookie(
-    key,
-    value="",
-    max_age=None,
-    expires=None,
-    path="/",
-    domain=None,
-    secure=False,
-    httponly=False,
-    charset="utf-8",
-    sync_expires=True,
-    max_size=4093,
-    samesite=None,
-):
-    """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix
-    The parameters are the same as in the cookie Morsel object in the
-    Python standard library but it accepts unicode data, too.
-
-    On Python 3 the return value of this function will be a unicode
-    string, on Python 2 it will be a native string.  In both cases the
-    return value is usually restricted to ascii as the vast majority of
-    values are properly escaped, but that is no guarantee.  If a unicode
-    string is returned it's tunneled through latin1 as required by
-    PEP 3333.
-
-    The return value is not ASCII safe if the key contains unicode
-    characters.  This is technically against the specification but
-    happens in the wild.  It's strongly recommended to not use
-    non-ASCII values for the keys.
-
-    :param max_age: should be a number of seconds, or `None` (default) if
-                    the cookie should last only as long as the client's
-                    browser session.  Additionally `timedelta` objects
-                    are accepted, too.
-    :param expires: should be a `datetime` object or unix timestamp.
-    :param path: limits the cookie to a given path, per default it will
-                 span the whole domain.
-    :param domain: Use this if you want to set a cross-domain cookie. For
-                   example, ``domain=".example.com"`` will set a cookie
-                   that is readable by the domain ``www.example.com``,
-                   ``foo.example.com`` etc. Otherwise, a cookie will only
-                   be readable by the domain that set it.
-    :param secure: The cookie will only be available via HTTPS
-    :param httponly: disallow JavaScript to access the cookie.  This is an
-                     extension to the cookie standard and probably not
-                     supported by all browsers.
-    :param charset: the encoding for unicode values.
-    :param sync_expires: automatically set expires if max_age is defined
-                         but expires not.
-    :param max_size: Warn if the final header value exceeds this size. The
-        default, 4093, should be safely `supported by most browsers
-        <cookie_>`_. Set to 0 to disable this check.
-    :param samesite: Limits the scope of the cookie such that it will only
-                     be attached to requests if those requests are "same-site".
-
-    .. _`cookie`: http://browsercookielimits.squawky.net/
-    """
-    key = to_bytes(key, charset)
-    value = to_bytes(value, charset)
-
-    if path is not None:
-        path = iri_to_uri(path, charset)
-    domain = _make_cookie_domain(domain)
-    if isinstance(max_age, timedelta):
-        max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
-    if expires is not None:
-        if not isinstance(expires, string_types):
-            expires = cookie_date(expires)
-    elif max_age is not None and sync_expires:
-        expires = to_bytes(cookie_date(time() + max_age))
-
-    samesite = samesite.title() if samesite else None
-    if samesite not in ("Strict", "Lax", None):
-        raise ValueError("invalid SameSite value; must be 'Strict', 'Lax' or None")
-
-    buf = [key + b"=" + _cookie_quote(value)]
-
-    # XXX: In theory all of these parameters that are not marked with `None`
-    # should be quoted.  Because stdlib did not quote it before I did not
-    # want to introduce quoting there now.
-    for k, v, q in (
-        (b"Domain", domain, True),
-        (b"Expires", expires, False),
-        (b"Max-Age", max_age, False),
-        (b"Secure", secure, None),
-        (b"HttpOnly", httponly, None),
-        (b"Path", path, False),
-        (b"SameSite", samesite, False),
-    ):
-        if q is None:
-            if v:
-                buf.append(k)
-            continue
-
-        if v is None:
-            continue
-
-        tmp = bytearray(k)
-        if not isinstance(v, (bytes, bytearray)):
-            v = to_bytes(text_type(v), charset)
-        if q:
-            v = _cookie_quote(v)
-        tmp += b"=" + v
-        buf.append(bytes(tmp))
-
-    # The return value will be an incorrectly encoded latin1 header on
-    # Python 3 for consistency with the headers object and a bytestring
-    # on Python 2 because that's how the API makes more sense.
-    rv = b"; ".join(buf)
-    if not PY2:
-        rv = rv.decode("latin1")
-
-    # Warn if the final value of the cookie is less than the limit. If the
-    # cookie is too large, then it may be silently ignored, which can be quite
-    # hard to debug.
-    cookie_size = len(rv)
-
-    if max_size and cookie_size > max_size:
-        value_size = len(value)
-        warnings.warn(
-            'The "{key}" cookie is too large: the value was {value_size} bytes'
-            " but the header required {extra_size} extra bytes. The final size"
-            " was {cookie_size} bytes but the limit is {max_size} bytes."
-            " Browsers may silently ignore cookies larger than this.".format(
-                key=key,
-                value_size=value_size,
-                extra_size=cookie_size - value_size,
-                cookie_size=cookie_size,
-                max_size=max_size,
-            ),
-            stacklevel=2,
-        )
-
-    return rv
-
-
-def is_byte_range_valid(start, stop, length):
-    """Checks if a given byte content range is valid for the given length.
-
-    .. versionadded:: 0.7
-    """
-    if (start is None) != (stop is None):
-        return False
-    elif start is None:
-        return length is None or length >= 0
-    elif length is None:
-        return 0 <= start < stop
-    elif start >= stop:
-        return False
-    return 0 <= start < length
-
-
-# circular dependency fun
-from .datastructures import Accept
-from .datastructures import Authorization
-from .datastructures import ContentRange
-from .datastructures import ETags
-from .datastructures import HeaderSet
-from .datastructures import IfRange
-from .datastructures import Range
-from .datastructures import RequestCacheControl
-from .datastructures import TypeConversionDict
-from .datastructures import WWWAuthenticate
-from .urls import iri_to_uri
diff --git a/azure/functions/_thirdparty/werkzeug/urls.py b/azure/functions/_thirdparty/werkzeug/urls.py
deleted file mode 100644
index 38e9e5ad..00000000
--- a/azure/functions/_thirdparty/werkzeug/urls.py
+++ /dev/null
@@ -1,1134 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    werkzeug.urls
-    ~~~~~~~~~~~~~
-
-    ``werkzeug.urls`` used to provide several wrapper functions for Python 2
-    urlparse, whose main purpose were to work around the behavior of the Py2
-    stdlib and its lack of unicode support. While this was already a somewhat
-    inconvenient situation, it got even more complicated because Python 3's
-    ``urllib.parse`` actually does handle unicode properly. In other words,
-    this module would wrap two libraries with completely different behavior. So
-    now this module contains a 2-and-3-compatible backport of Python 3's
-    ``urllib.parse``, which is mostly API-compatible.
-
-    :copyright: 2007 Pallets
-    :license: BSD-3-Clause
-"""
-import codecs
-import os
-import re
-from collections import namedtuple
-
-from ._compat import fix_tuple_repr
-from ._compat import implements_to_string
-from ._compat import make_literal_wrapper
-from ._compat import normalize_string_tuple
-from ._compat import PY2
-from ._compat import text_type
-from ._compat import to_native
-from ._compat import to_unicode
-from ._compat import try_coerce_native
-from ._internal import _decode_idna
-from ._internal import _encode_idna
-from .datastructures import iter_multi_items
-from .datastructures import MultiDict
-
-# A regular expression for what a valid schema looks like
-_scheme_re = re.compile(r"^[a-zA-Z0-9+-.]+$")
-
-# Characters that are safe in any part of an URL.
-_always_safe = frozenset(
-    bytearray(
-        b"abcdefghijklmnopqrstuvwxyz"
-        b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-        b"0123456789"
-        b"-._~"
-    )
-)
-
-_hexdigits = "0123456789ABCDEFabcdef"
-_hextobyte = dict(
-    ((a + b).encode(), int(a + b, 16)) for a in _hexdigits for b in _hexdigits
-)
-_bytetohex = [("%%%02X" % char).encode("ascii") for char in range(256)]
-
-
-_URLTuple = fix_tuple_repr(
-    namedtuple("_URLTuple", ["scheme", "netloc", "path", "query", "fragment"])
-)
-
-
-class BaseURL(_URLTuple):
-    """Superclass of :py:class:`URL` and :py:class:`BytesURL`."""
-
-    __slots__ = ()
-
-    def replace(self, **kwargs):
-        """Return an URL with the same values, except for those parameters
-        given new values by whichever keyword arguments are specified."""
-        return self._replace(**kwargs)
-
-    @property
-    def host(self):
-        """The host part of the URL if available, otherwise `None`.  The
-        host is either the hostname or the IP address mentioned in the
-        URL.  It will not contain the port.
-        """
-        return self._split_host()[0]
-
-    @property
-    def ascii_host(self):
-        """Works exactly like :attr:`host` but will return a result that
-        is restricted to ASCII.  If it finds a netloc that is not ASCII
-        it will attempt to idna decode it.  This is useful for socket
-        operations when the URL might include internationalized characters.
-        """
-        rv = self.host
-        if rv is not None and isinstance(rv, text_type):
-            try:
-                rv = _encode_idna(rv)
-            except UnicodeError:
-                rv = rv.encode("ascii", "ignore")
-        return to_native(rv, "ascii", "ignore")
-
-    @property
-    def port(self):
-        """The port in the URL as an integer if it was present, `None`
-        otherwise.  This does not fill in default ports.
-        """
-        try:
-            rv = int(to_native(self._split_host()[1]))
-            if 0 <= rv <= 65535:
-                return rv
-        except (ValueError, TypeError):
-            pass
-
-    @property
-    def auth(self):
-        """The authentication part in the URL if available, `None`
-        otherwise.
-        """
-        return self._split_netloc()[0]
-
-    @property
-    def username(self):
-        """The username if it was part of the URL, `None` otherwise.
-        This undergoes URL decoding and will always be a unicode string.
-        """
-        rv = self._split_auth()[0]
-        if rv is not None:
-            return _url_unquote_legacy(rv)
-
-    @property
-    def raw_username(self):
-        """The username if it was part of the URL, `None` otherwise.
-        Unlike :attr:`username` this one is not being decoded.
-        """
-        return self._split_auth()[0]
-
-    @property
-    def password(self):
-        """The password if it was part of the URL, `None` otherwise.
-        This undergoes URL decoding and will always be a unicode string.
-        """
-        rv = self._split_auth()[1]
-        if rv is not None:
-            return _url_unquote_legacy(rv)
-
-    @property
-    def raw_password(self):
-        """The password if it was part of the URL, `None` otherwise.
-        Unlike :attr:`password` this one is not being decoded.
-        """
-        return self._split_auth()[1]
-
-    def decode_query(self, *args, **kwargs):
-        """Decodes the query part of the URL.  Ths is a shortcut for
-        calling :func:`url_decode` on the query argument.  The arguments and
-        keyword arguments are forwarded to :func:`url_decode` unchanged.
-        """
-        return url_decode(self.query, *args, **kwargs)
-
-    def join(self, *args, **kwargs):
-        """Joins this URL with another one.  This is just a convenience
-        function for calling into :meth:`url_join` and then parsing the
-        return value again.
-        """
-        return url_parse(url_join(self, *args, **kwargs))
-
-    def to_url(self):
-        """Returns a URL string or bytes depending on the type of the
-        information stored.  This is just a convenience function
-        for calling :meth:`url_unparse` for this URL.
-        """
-        return url_unparse(self)
-
-    def decode_netloc(self):
-        """Decodes the netloc part into a string."""
-        rv = _decode_idna(self.host or "")
-
-        if ":" in rv:
-            rv = "[%s]" % rv
-        port = self.port
-        if port is not None:
-            rv = "%s:%d" % (rv, port)
-        auth = ":".join(
-            filter(
-                None,
-                [
-                    _url_unquote_legacy(self.raw_username or "", "/:%@"),
-                    _url_unquote_legacy(self.raw_password or "", "/:%@"),
-                ],
-            )
-        )
-        if auth:
-            rv = "%s@%s" % (auth, rv)
-        return rv
-
-    def to_uri_tuple(self):
-        """Returns a :class:`BytesURL` tuple that holds a URI.  This will
-        encode all the information in the URL properly to ASCII using the
-        rules a web browser would follow.
-
-        It's usually more interesting to directly call :meth:`iri_to_uri` which
-        will return a string.
-        """
-        return url_parse(iri_to_uri(self).encode("ascii"))
-
-    def to_iri_tuple(self):
-        """Returns a :class:`URL` tuple that holds a IRI.  This will try
-        to decode as much information as possible in the URL without
-        losing information similar to how a web browser does it for the
-        URL bar.
-
-        It's usually more interesting to directly call :meth:`uri_to_iri` which
-        will return a string.
-        """
-        return url_parse(uri_to_iri(self))
-
-    def get_file_location(self, pathformat=None):
-        """Returns a tuple with the location of the file in the form
-        ``(server, location)``.  If the netloc is empty in the URL or
-        points to localhost, it's represented as ``None``.
-
-        The `pathformat` by default is autodetection but needs to be set
-        when working with URLs of a specific system.  The supported values
-        are ``'windows'`` when working with Windows or DOS paths and
-        ``'posix'`` when working with posix paths.
-
-        If the URL does not point to a local file, the server and location
-        are both represented as ``None``.
-
-        :param pathformat: The expected format of the path component.
-                           Currently ``'windows'`` and ``'posix'`` are
-                           supported.  Defaults to ``None`` which is
-                           autodetect.
-        """
-        if self.scheme != "file":
-            return None, None
-
-        path = url_unquote(self.path)
-        host = self.netloc or None
-
-        if pathformat is None:
-            if os.name == "nt":
-                pathformat = "windows"
-            else:
-                pathformat = "posix"
-
-        if pathformat == "windows":
-            if path[:1] == "/" and path[1:2].isalpha() and path[2:3] in "|:":
-                path = path[1:2] + ":" + path[3:]
-            windows_share = path[:3] in ("\\" * 3, "/" * 3)
-            import ntpath
-
-            path = ntpath.normpath(path)
-            # Windows shared drives are represented as ``\\host\\directory``.
-            # That results in a URL like ``file://///host/directory``, and a
-            # path like ``///host/directory``. We need to special-case this
-            # because the path contains the hostname.
-            if windows_share and host is None:
-                parts = path.lstrip("\\").split("\\", 1)
-                if len(parts) == 2:
-                    host, path = parts
-                else:
-                    host = parts[0]
-                    path = ""
-        elif pathformat == "posix":
-            import posixpath
-
-            path = posixpath.normpath(path)
-        else:
-            raise TypeError("Invalid path format %s" % repr(pathformat))
-
-        if host in ("127.0.0.1", "::1", "localhost"):
-            host = None
-
-        return host, path
-
-    def _split_netloc(self):
-        if self._at in self.netloc:
-            return self.netloc.split(self._at, 1)
-        return None, self.netloc
-
-    def _split_auth(self):
-        auth = self._split_netloc()[0]
-        if not auth:
-            return None, None
-        if self._colon not in auth:
-            return auth, None
-        return auth.split(self._colon, 1)
-
-    def _split_host(self):
-        rv = self._split_netloc()[1]
-        if not rv:
-            return None, None
-
-        if not rv.startswith(self._lbracket):
-            if self._colon in rv:
-                return rv.split(self._colon, 1)
-            return rv, None
-
-        idx = rv.find(self._rbracket)
-        if idx < 0:
-            return rv, None
-
-        host = rv[1:idx]
-        rest = rv[idx + 1 :]
-        if rest.startswith(self._colon):
-            return host, rest[1:]
-        return host, None
-
-
-@implements_to_string
-class URL(BaseURL):
-    """Represents a parsed URL.  This behaves like a regular tuple but
-    also has some extra attributes that give further insight into the
-    URL.
-    """
-
-    __slots__ = ()
-    _at = "@"
-    _colon = ":"
-    _lbracket = "["
-    _rbracket = "]"
-
-    def __str__(self):
-        return self.to_url()
-
-    def encode_netloc(self):
-        """Encodes the netloc part to an ASCII safe URL as bytes."""
-        rv = self.ascii_host or ""
-        if ":" in rv:
-            rv = "[%s]" % rv
-        port = self.port
-        if port is not None:
-            rv = "%s:%d" % (rv, port)
-        auth = ":".join(
-            filter(
-                None,
-                [
-                    url_quote(self.raw_username or "", "utf-8", "strict", "/:%"),
-                    url_quote(self.raw_password or "", "utf-8", "strict", "/:%"),
-                ],
-            )
-        )
-        if auth:
-            rv = "%s@%s" % (auth, rv)
-        return to_native(rv)
-
-    def encode(self, charset="utf-8", errors="replace"):
-        """Encodes the URL to a tuple made out of bytes.  The charset is
-        only being used for the path, query and fragment.
-        """
-        return BytesURL(
-            self.scheme.encode("ascii"),
-            self.encode_netloc(),
-            self.path.encode(charset, errors),
-            self.query.encode(charset, errors),
-            self.fragment.encode(charset, errors),
-        )
-
-
-class BytesURL(BaseURL):
-    """Represents a parsed URL in bytes."""
-
-    __slots__ = ()
-    _at = b"@"
-    _colon = b":"
-    _lbracket = b"["
-    _rbracket = b"]"
-
-    def __str__(self):
-        return self.to_url().decode("utf-8", "replace")
-
-    def encode_netloc(self):
-        """Returns the netloc unchanged as bytes."""
-        return self.netloc
-
-    def decode(self, charset="utf-8", errors="replace"):
-        """Decodes the URL to a tuple made out of strings.  The charset is
-        only being used for the path, query and fragment.
-        """
-        return URL(
-            self.scheme.decode("ascii"),
-            self.decode_netloc(),
-            self.path.decode(charset, errors),
-            self.query.decode(charset, errors),
-            self.fragment.decode(charset, errors),
-        )
-
-
-_unquote_maps = {frozenset(): _hextobyte}
-
-
-def _unquote_to_bytes(string, unsafe=""):
-    if isinstance(string, text_type):
-        string = string.encode("utf-8")
-
-    if isinstance(unsafe, text_type):
-        unsafe = unsafe.encode("utf-8")
-
-    unsafe = frozenset(bytearray(unsafe))
-    groups = iter(string.split(b"%"))
-    result = bytearray(next(groups, b""))
-
-    try:
-        hex_to_byte = _unquote_maps[unsafe]
-    except KeyError:
-        hex_to_byte = _unquote_maps[unsafe] = {
-            h: b for h, b in _hextobyte.items() if b not in unsafe
-        }
-
-    for group in groups:
-        code = group[:2]
-
-        if code in hex_to_byte:
-            result.append(hex_to_byte[code])
-            result.extend(group[2:])
-        else:
-            result.append(37)  # %
-            result.extend(group)
-
-    return bytes(result)
-
-
-def _url_encode_impl(obj, charset, encode_keys, sort, key):
-    iterable = iter_multi_items(obj)
-    if sort:
-        iterable = sorted(iterable, key=key)
-    for key, value in iterable:
-        if value is None:
-            continue
-        if not isinstance(key, bytes):
-            key = text_type(key).encode(charset)
-        if not isinstance(value, bytes):
-            value = text_type(value).encode(charset)
-        yield _fast_url_quote_plus(key) + "=" + _fast_url_quote_plus(value)
-
-
-def _url_unquote_legacy(value, unsafe=""):
-    try:
-        return url_unquote(value, charset="utf-8", errors="strict", unsafe=unsafe)
-    except UnicodeError:
-        return url_unquote(value, charset="latin1", unsafe=unsafe)
-
-
-def url_parse(url, scheme=None, allow_fragments=True):
-    """Parses a URL from a string into a :class:`URL` tuple.  If the URL
-    is lacking a scheme it can be provided as second argument. Otherwise,
-    it is ignored.  Optionally fragments can be stripped from the URL
-    by setting `allow_fragments` to `False`.
-
-    The inverse of this function is :func:`url_unparse`.
-
-    :param url: the URL to parse.
-    :param scheme: the default schema to use if the URL is schemaless.
-    :param allow_fragments: if set to `False` a fragment will be removed
-                            from the URL.
-    """
-    s = make_literal_wrapper(url)
-    is_text_based = isinstance(url, text_type)
-
-    if scheme is None:
-        scheme = s("")
-    netloc = query = fragment = s("")
-    i = url.find(s(":"))
-    if i > 0 and _scheme_re.match(to_native(url[:i], errors="replace")):
-        # make sure "iri" is not actually a port number (in which case
-        # "scheme" is really part of the path)
-        rest = url[i + 1 :]
-        if not rest or any(c not in s("0123456789") for c in rest):
-            # not a port number
-            scheme, url = url[:i].lower(), rest
-
-    if url[:2] == s("//"):
-        delim = len(url)
-        for c in s("/?#"):
-            wdelim = url.find(c, 2)
-            if wdelim >= 0:
-                delim = min(delim, wdelim)
-        netloc, url = url[2:delim], url[delim:]
-        if (s("[") in netloc and s("]") not in netloc) or (
-            s("]") in netloc and s("[") not in netloc
-        ):
-            raise ValueError("Invalid IPv6 URL")
-
-    if allow_fragments and s("#") in url:
-        url, fragment = url.split(s("#"), 1)
-    if s("?") in url:
-        url, query = url.split(s("?"), 1)
-
-    result_type = URL if is_text_based else BytesURL
-    return result_type(scheme, netloc, url, query, fragment)
-
-
-def _make_fast_url_quote(charset="utf-8", errors="strict", safe="/:", unsafe=""):
-    """Precompile the translation table for a URL encoding function.
-
-    Unlike :func:`url_quote`, the generated function only takes the
-    string to quote.
-
-    :param charset: The charset to encode the result with.
-    :param errors: How to handle encoding errors.
-    :param safe: An optional sequence of safe characters to never encode.
-    :param unsafe: An optional sequence of unsafe characters to always encode.
-    """
-    if isinstance(safe, text_type):
-        safe = safe.encode(charset, errors)
-
-    if isinstance(unsafe, text_type):
-        unsafe = unsafe.encode(charset, errors)
-
-    safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))
-    table = [chr(c) if c in safe else "%%%02X" % c for c in range(256)]
-
-    if not PY2:
-
-        def quote(string):
-            return "".join([table[c] for c in string])
-
-    else:
-
-        def quote(string):
-            return "".join([table[c] for c in bytearray(string)])
-
-    return quote
-
-
-_fast_url_quote = _make_fast_url_quote()
-_fast_quote_plus = _make_fast_url_quote(safe=" ", unsafe="+")
-
-
-def _fast_url_quote_plus(string):
-    return _fast_quote_plus(string).replace(" ", "+")
-
-
-def url_quote(string, charset="utf-8", errors="strict", safe="/:", unsafe=""):
-    """URL encode a single string with a given encoding.
-
-    :param s: the string to quote.
-    :param charset: the charset to be used.
-    :param safe: an optional sequence of safe characters.
-    :param unsafe: an optional sequence of unsafe characters.
-
-    .. versionadded:: 0.9.2
-       The `unsafe` parameter was added.
-    """
-    if not isinstance(string, (text_type, bytes, bytearray)):
-        string = text_type(string)
-    if isinstance(string, text_type):
-        string = string.encode(charset, errors)
-    if isinstance(safe, text_type):
-        safe = safe.encode(charset, errors)
-    if isinstance(unsafe, text_type):
-        unsafe = unsafe.encode(charset, errors)
-    safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))
-    rv = bytearray()
-    for char in bytearray(string):
-        if char in safe:
-            rv.append(char)
-        else:
-            rv.extend(_bytetohex[char])
-    return to_native(bytes(rv))
-
-
-def url_quote_plus(string, charset="utf-8", errors="strict", safe=""):
-    """URL encode a single string with the given encoding and convert
-    whitespace to "+".
-
-    :param s: The string to quote.
-    :param charset: The charset to be used.
-    :param safe: An optional sequence of safe characters.
-    """
-    return url_quote(string, charset, errors, safe + " ", "+").replace(" ", "+")
-
-
-def url_unparse(components):
-    """The reverse operation to :meth:`url_parse`.  This accepts arbitrary
-    as well as :class:`URL` tuples and returns a URL as a string.
-
-    :param components: the parsed URL as tuple which should be converted
-                       into a URL string.
-    """
-    scheme, netloc, path, query, fragment = normalize_string_tuple(components)
-    s = make_literal_wrapper(scheme)
-    url = s("")
-
-    # We generally treat file:///x and file:/x the same which is also
-    # what browsers seem to do.  This also allows us to ignore a schema
-    # register for netloc utilization or having to differenciate between
-    # empty and missing netloc.
-    if netloc or (scheme and path.startswith(s("/"))):
-        if path and path[:1] != s("/"):
-            path = s("/") + path
-        url = s("//") + (netloc or s("")) + path
-    elif path:
-        url += path
-    if scheme:
-        url = scheme + s(":") + url
-    if query:
-        url = url + s("?") + query
-    if fragment:
-        url = url + s("#") + fragment
-    return url
-
-
-def url_unquote(string, charset="utf-8", errors="replace", unsafe=""):
-    """URL decode a single string with a given encoding.  If the charset
-    is set to `None` no unicode decoding is performed and raw bytes
-    are returned.
-
-    :param s: the string to unquote.
-    :param charset: the charset of the query string.  If set to `None`
-                    no unicode decoding will take place.
-    :param errors: the error handling for the charset decoding.
-    """
-    rv = _unquote_to_bytes(string, unsafe)
-    if charset is not None:
-        rv = rv.decode(charset, errors)
-    return rv
-
-
-def url_unquote_plus(s, charset="utf-8", errors="replace"):
-    """URL decode a single string with the given `charset` and decode "+" to
-    whitespace.
-
-    Per default encoding errors are ignored.  If you want a different behavior
-    you can set `errors` to ``'replace'`` or ``'strict'``.  In strict mode a
-    :exc:`HTTPUnicodeError` is raised.
-
-    :param s: The string to unquote.
-    :param charset: the charset of the query string.  If set to `None`
-                    no unicode decoding will take place.
-    :param errors: The error handling for the `charset` decoding.
-    """
-    if isinstance(s, text_type):
-        s = s.replace(u"+", u" ")
-    else:
-        s = s.replace(b"+", b" ")
-    return url_unquote(s, charset, errors)
-
-
-def url_fix(s, charset="utf-8"):
-    r"""Sometimes you get an URL by a user that just isn't a real URL because
-    it contains unsafe characters like ' ' and so on. This function can fix
-    some of the problems in a similar way browsers handle data entered by the
-    user:
-
-    >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
-    'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'
-
-    :param s: the string with the URL to fix.
-    :param charset: The target charset for the URL if the url was given as
-                    unicode string.
-    """
-    # First step is to switch to unicode processing and to convert
-    # backslashes (which are invalid in URLs anyways) to slashes.  This is
-    # consistent with what Chrome does.
-    s = to_unicode(s, charset, "replace").replace("\\", "/")
-
-    # For the specific case that we look like a malformed windows URL
-    # we want to fix this up manually:
-    if s.startswith("file://") and s[7:8].isalpha() and s[8:10] in (":/", "|/"):
-        s = "file:///" + s[7:]
-
-    url = url_parse(s)
-    path = url_quote(url.path, charset, safe="/%+$!*'(),")
-    qs = url_quote_plus(url.query, charset, safe=":&%=+$!*'(),")
-    anchor = url_quote_plus(url.fragment, charset, safe=":&%=+$!*'(),")
-    return to_native(url_unparse((url.scheme, url.encode_netloc(), path, qs, anchor)))
-
-
-# not-unreserved characters remain quoted when unquoting to IRI
-_to_iri_unsafe = "".join([chr(c) for c in range(128) if c not in _always_safe])
-
-
-def _codec_error_url_quote(e):
-    """Used in :func:`uri_to_iri` after unquoting to re-quote any
-    invalid bytes.
-    """
-    out = _fast_url_quote(e.object[e.start : e.end])
-
-    if PY2:
-        out = out.decode("utf-8")
-
-    return out, e.end
-
-
-codecs.register_error("werkzeug.url_quote", _codec_error_url_quote)
-
-
-def uri_to_iri(uri, charset="utf-8", errors="werkzeug.url_quote"):
-    """Convert a URI to an IRI. All valid UTF-8 characters are unquoted,
-    leaving all reserved and invalid characters quoted. If the URL has
-    a domain, it is decoded from Punycode.
-
-    >>> uri_to_iri("http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF")
-    'http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF'
-
-    :param uri: The URI to convert.
-    :param charset: The encoding to encode unquoted bytes with.
-    :param errors: Error handler to use during ``bytes.encode``. By
-        default, invalid bytes are left quoted.
-
-    .. versionchanged:: 0.15
-        All reserved and invalid characters remain quoted. Previously,
-        only some reserved characters were preserved, and invalid bytes
-        were replaced instead of left quoted.
-
-    .. versionadded:: 0.6
-    """
-    if isinstance(uri, tuple):
-        uri = url_unparse(uri)
-
-    uri = url_parse(to_unicode(uri, charset))
-    path = url_unquote(uri.path, charset, errors, _to_iri_unsafe)
-    query = url_unquote(uri.query, charset, errors, _to_iri_unsafe)
-    fragment = url_unquote(uri.fragment, charset, errors, _to_iri_unsafe)
-    return url_unparse((uri.scheme, uri.decode_netloc(), path, query, fragment))
-
-
-# reserved characters remain unquoted when quoting to URI
-_to_uri_safe = ":/?#[]@!$&'()*+,;=%"
-
-
-def iri_to_uri(iri, charset="utf-8", errors="strict", safe_conversion=False):
-    """Convert an IRI to a URI. All non-ASCII and unsafe characters are
-    quoted. If the URL has a domain, it is encoded to Punycode.
-
-    >>> iri_to_uri('http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF')
-    'http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF'
-
-    :param iri: The IRI to convert.
-    :param charset: The encoding of the IRI.
-    :param errors: Error handler to use during ``bytes.encode``.
-    :param safe_conversion: Return the URL unchanged if it only contains
-        ASCII characters and no whitespace. See the explanation below.
-
-    There is a general problem with IRI conversion with some protocols
-    that are in violation of the URI specification. Consider the
-    following two IRIs::
-
-        magnet:?xt=uri:whatever
-        itms-services://?action=download-manifest
-
-    After parsing, we don't know if the scheme requires the ``//``,
-    which is dropped if empty, but conveys different meanings in the
-    final URL if it's present or not. In this case, you can use
-    ``safe_conversion``, which will return the URL unchanged if it only
-    contains ASCII characters and no whitespace. This can result in a
-    URI with unquoted characters if it was not already quoted correctly,
-    but preserves the URL's semantics. Werkzeug uses this for the
-    ``Location`` header for redirects.
-
-    .. versionchanged:: 0.15
-        All reserved characters remain unquoted. Previously, only some
-        reserved characters were left unquoted.
-
-    .. versionchanged:: 0.9.6
-       The ``safe_conversion`` parameter was added.
-
-    .. versionadded:: 0.6
-    """
-    if isinstance(iri, tuple):
-        iri = url_unparse(iri)
-
-    if safe_conversion:
-        # If we're not sure if it's safe to convert the URL, and it only
-        # contains ASCII characters, return it unconverted.
-        try:
-            native_iri = to_native(iri)
-            ascii_iri = native_iri.encode("ascii")
-
-            # Only return if it doesn't have whitespace. (Why?)
-            if len(ascii_iri.split()) == 1:
-                return native_iri
-        except UnicodeError:
-            pass
-
-    iri = url_parse(to_unicode(iri, charset, errors))
-    path = url_quote(iri.path, charset, errors, _to_uri_safe)
-    query = url_quote(iri.query, charset, errors, _to_uri_safe)
-    fragment = url_quote(iri.fragment, charset, errors, _to_uri_safe)
-    return to_native(
-        url_unparse((iri.scheme, iri.encode_netloc(), path, query, fragment))
-    )
-
-
-def url_decode(
-    s,
-    charset="utf-8",
-    decode_keys=False,
-    include_empty=True,
-    errors="replace",
-    separator="&",
-    cls=None,
-):
-    """
-    Parse a querystring and return it as :class:`MultiDict`.  There is a
-    difference in key decoding on different Python versions.  On Python 3
-    keys will always be fully decoded whereas on Python 2, keys will
-    remain bytestrings if they fit into ASCII.  On 2.x keys can be forced
-    to be unicode by setting `decode_keys` to `True`.
-
-    If the charset is set to `None` no unicode decoding will happen and
-    raw bytes will be returned.
-
-    Per default a missing value for a key will default to an empty key.  If
-    you don't want that behavior you can set `include_empty` to `False`.
-
-    Per default encoding errors are ignored.  If you want a different behavior
-    you can set `errors` to ``'replace'`` or ``'strict'``.  In strict mode a
-    `HTTPUnicodeError` is raised.
-
-    .. versionchanged:: 0.5
-       In previous versions ";" and "&" could be used for url decoding.
-       This changed in 0.5 where only "&" is supported.  If you want to
-       use ";" instead a different `separator` can be provided.
-
-       The `cls` parameter was added.
-
-    :param s: a string with the query string to decode.
-    :param charset: the charset of the query string.  If set to `None`
-                    no unicode decoding will take place.
-    :param decode_keys: Used on Python 2.x to control whether keys should
-                        be forced to be unicode objects.  If set to `True`
-                        then keys will be unicode in all cases. Otherwise,
-                        they remain `str` if they fit into ASCII.
-    :param include_empty: Set to `False` if you don't want empty values to
-                          appear in the dict.
-    :param errors: the decoding error behavior.
-    :param separator: the pair separator to be used, defaults to ``&``
-    :param cls: an optional dict class to use.  If this is not specified
-                       or `None` the default :class:`MultiDict` is used.
-    """
-    if cls is None:
-        cls = MultiDict
-    if isinstance(s, text_type) and not isinstance(separator, text_type):
-        separator = separator.decode(charset or "ascii")
-    elif isinstance(s, bytes) and not isinstance(separator, bytes):
-        separator = separator.encode(charset or "ascii")
-    return cls(
-        _url_decode_impl(
-            s.split(separator), charset, decode_keys, include_empty, errors
-        )
-    )
-
-
-def url_decode_stream(
-    stream,
-    charset="utf-8",
-    decode_keys=False,
-    include_empty=True,
-    errors="replace",
-    separator="&",
-    cls=None,
-    limit=None,
-    return_iterator=False,
-):
-    """Works like :func:`url_decode` but decodes a stream.  The behavior
-    of stream and limit follows functions like
-    :func:`~werkzeug.wsgi.make_line_iter`.  The generator of pairs is
-    directly fed to the `cls` so you can consume the data while it's
-    parsed.
-
-    .. versionadded:: 0.8
-
-    :param stream: a stream with the encoded querystring
-    :param charset: the charset of the query string.  If set to `None`
-                    no unicode decoding will take place.
-    :param decode_keys: Used on Python 2.x to control whether keys should
-                        be forced to be unicode objects.  If set to `True`,
-                        keys will be unicode in all cases. Otherwise, they
-                        remain `str` if they fit into ASCII.
-    :param include_empty: Set to `False` if you don't want empty values to
-                          appear in the dict.
-    :param errors: the decoding error behavior.
-    :param separator: the pair separator to be used, defaults to ``&``
-    :param cls: an optional dict class to use.  If this is not specified
-                       or `None` the default :class:`MultiDict` is used.
-    :param limit: the content length of the URL data.  Not necessary if
-                  a limited stream is provided.
-    :param return_iterator: if set to `True` the `cls` argument is ignored
-                            and an iterator over all decoded pairs is
-                            returned
-    """
-    from .wsgi import make_chunk_iter
-
-    pair_iter = make_chunk_iter(stream, separator, limit)
-    decoder = _url_decode_impl(pair_iter, charset, decode_keys, include_empty, errors)
-
-    if return_iterator:
-        return decoder
-
-    if cls is None:
-        cls = MultiDict
-
-    return cls(decoder)
-
-
-def _url_decode_impl(pair_iter, charset, decode_keys, include_empty, errors):
-    for pair in pair_iter:
-        if not pair:
-            continue
-        s = make_literal_wrapper(pair)
-        equal = s("=")
-        if equal in pair:
-            key, value = pair.split(equal, 1)
-        else:
-            if not include_empty:
-                continue
-            key = pair
-            value = s("")
-        key = url_unquote_plus(key, charset, errors)
-        if charset is not None and PY2 and not decode_keys:
-            key = try_coerce_native(key)
-        yield key, url_unquote_plus(value, charset, errors)
-
-
-def url_encode(
-    obj, charset="utf-8", encode_keys=False, sort=False, key=None, separator=b"&"
-):
-    """URL encode a dict/`MultiDict`.  If a value is `None` it will not appear
-    in the result string.  Per default only values are encoded into the target
-    charset strings.  If `encode_keys` is set to ``True`` unicode keys are
-    supported too.
-
-    If `sort` is set to `True` the items are sorted by `key` or the default
-    sorting algorithm.
-
-    .. versionadded:: 0.5
-        `sort`, `key`, and `separator` were added.
-
-    :param obj: the object to encode into a query string.
-    :param charset: the charset of the query string.
-    :param encode_keys: set to `True` if you have unicode keys. (Ignored on
-                        Python 3.x)
-    :param sort: set to `True` if you want parameters to be sorted by `key`.
-    :param separator: the separator to be used for the pairs.
-    :param key: an optional function to be used for sorting.  For more details
-                check out the :func:`sorted` documentation.
-    """
-    separator = to_native(separator, "ascii")
-    return separator.join(_url_encode_impl(obj, charset, encode_keys, sort, key))
-
-
-def url_encode_stream(
-    obj,
-    stream=None,
-    charset="utf-8",
-    encode_keys=False,
-    sort=False,
-    key=None,
-    separator=b"&",
-):
-    """Like :meth:`url_encode` but writes the results to a stream
-    object.  If the stream is `None` a generator over all encoded
-    pairs is returned.
-
-    .. versionadded:: 0.8
-
-    :param obj: the object to encode into a query string.
-    :param stream: a stream to write the encoded object into or `None` if
-                   an iterator over the encoded pairs should be returned.  In
-                   that case the separator argument is ignored.
-    :param charset: the charset of the query string.
-    :param encode_keys: set to `True` if you have unicode keys. (Ignored on
-                        Python 3.x)
-    :param sort: set to `True` if you want parameters to be sorted by `key`.
-    :param separator: the separator to be used for the pairs.
-    :param key: an optional function to be used for sorting.  For more details
-                check out the :func:`sorted` documentation.
-    """
-    separator = to_native(separator, "ascii")
-    gen = _url_encode_impl(obj, charset, encode_keys, sort, key)
-    if stream is None:
-        return gen
-    for idx, chunk in enumerate(gen):
-        if idx:
-            stream.write(separator)
-        stream.write(chunk)
-
-
-def url_join(base, url, allow_fragments=True):
-    """Join a base URL and a possibly relative URL to form an absolute
-    interpretation of the latter.
-
-    :param base: the base URL for the join operation.
-    :param url: the URL to join.
-    :param allow_fragments: indicates whether fragments should be allowed.
-    """
-    if isinstance(base, tuple):
-        base = url_unparse(base)
-    if isinstance(url, tuple):
-        url = url_unparse(url)
-
-    base, url = normalize_string_tuple((base, url))
-    s = make_literal_wrapper(base)
-
-    if not base:
-        return url
-    if not url:
-        return base
-
-    bscheme, bnetloc, bpath, bquery, bfragment = url_parse(
-        base, allow_fragments=allow_fragments
-    )
-    scheme, netloc, path, query, fragment = url_parse(url, bscheme, allow_fragments)
-    if scheme != bscheme:
-        return url
-    if netloc:
-        return url_unparse((scheme, netloc, path, query, fragment))
-    netloc = bnetloc
-
-    if path[:1] == s("/"):
-        segments = path.split(s("/"))
-    elif not path:
-        segments = bpath.split(s("/"))
-        if not query:
-            query = bquery
-    else:
-        segments = bpath.split(s("/"))[:-1] + path.split(s("/"))
-
-    # If the rightmost part is "./" we want to keep the slash but
-    # remove the dot.
-    if segments[-1] == s("."):
-        segments[-1] = s("")
-
-    # Resolve ".." and "."
-    segments = [segment for segment in segments if segment != s(".")]
-    while 1:
-        i = 1
-        n = len(segments) - 1
-        while i < n:
-            if segments[i] == s("..") and segments[i - 1] not in (s(""), s("..")):
-                del segments[i - 1 : i + 1]
-                break
-            i += 1
-        else:
-            break
-
-    # Remove trailing ".." if the URL is absolute
-    unwanted_marker = [s(""), s("..")]
-    while segments[:2] == unwanted_marker:
-        del segments[1]
-
-    path = s("/").join(segments)
-    return url_unparse((scheme, netloc, path, query, fragment))
-
-
-class Href(object):
-    """Implements a callable that constructs URLs with the given base. The
-    function can be called with any number of positional and keyword
-    arguments which than are used to assemble the URL.  Works with URLs
-    and posix paths.
-
-    Positional arguments are appended as individual segments to
-    the path of the URL:
-
-    >>> href = Href('/foo')
-    >>> href('bar', 23)
-    '/foo/bar/23'
-    >>> href('foo', bar=23)
-    '/foo/foo?bar=23'
-
-    If any of the arguments (positional or keyword) evaluates to `None` it
-    will be skipped.  If no keyword arguments are given the last argument
-    can be a :class:`dict` or :class:`MultiDict` (or any other dict subclass),
-    otherwise the keyword arguments are used for the query parameters, cutting
-    off the first trailing underscore of the parameter name:
-
-    >>> href(is_=42)
-    '/foo?is=42'
-    >>> href({'foo': 'bar'})
-    '/foo?foo=bar'
-
-    Combining of both methods is not allowed:
-
-    >>> href({'foo': 'bar'}, bar=42)
-    Traceback (most recent call last):
-      ...
-    TypeError: keyword arguments and query-dicts can't be combined
-
-    Accessing attributes on the href object creates a new href object with
-    the attribute name as prefix:
-
-    >>> bar_href = href.bar
-    >>> bar_href("blub")
-    '/foo/bar/blub'
-
-    If `sort` is set to `True` the items are sorted by `key` or the default
-    sorting algorithm:
-
-    >>> href = Href("/", sort=True)
-    >>> href(a=1, b=2, c=3)
-    '/?a=1&b=2&c=3'
-
-    .. versionadded:: 0.5
-        `sort` and `key` were added.
-    """
-
-    def __init__(self, base="./", charset="utf-8", sort=False, key=None):
-        if not base:
-            base = "./"
-        self.base = base
-        self.charset = charset
-        self.sort = sort
-        self.key = key
-
-    def __getattr__(self, name):
-        if name[:2] == "__":
-            raise AttributeError(name)
-        base = self.base
-        if base[-1:] != "/":
-            base += "/"
-        return Href(url_join(base, name), self.charset, self.sort, self.key)
-
-    def __call__(self, *path, **query):
-        if path and isinstance(path[-1], dict):
-            if query:
-                raise TypeError("keyword arguments and query-dicts can't be combined")
-            query, path = path[-1], path[:-1]
-        elif query:
-            query = dict(
-                [(k.endswith("_") and k[:-1] or k, v) for k, v in query.items()]
-            )
-        path = "/".join(
-            [
-                to_unicode(url_quote(x, self.charset), "ascii")
-                for x in path
-                if x is not None
-            ]
-        ).lstrip("/")
-        rv = self.base
-        if path:
-            if not rv.endswith("/"):
-                rv += "/"
-            rv = url_join(rv, "./" + path)
-        if query:
-            rv += "?" + to_unicode(
-                url_encode(query, self.charset, sort=self.sort, key=self.key), "ascii"
-            )
-        return to_native(rv)
diff --git a/azure/functions/_thirdparty/werkzeug/utils.py b/azure/functions/_thirdparty/werkzeug/utils.py
deleted file mode 100644
index 2504380e..00000000
--- a/azure/functions/_thirdparty/werkzeug/utils.py
+++ /dev/null
@@ -1,748 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    werkzeug.utils
-    ~~~~~~~~~~~~~~
-
-    This module implements various utilities for WSGI applications.  Most of
-    them are used by the request and response wrappers but especially for
-    middleware development it makes sense to use them without the wrappers.
-
-    :copyright: 2007 Pallets
-    :license: BSD-3-Clause
-"""
-import codecs
-import os
-import pkgutil
-import re
-import sys
-
-from ._compat import iteritems
-from ._compat import PY2
-from ._compat import reraise
-from ._compat import string_types
-from ._compat import text_type
-from ._compat import unichr
-from ._internal import _DictAccessorProperty
-from ._internal import _missing
-from ._internal import _parse_signature
-
-try:
-    from html.entities import name2codepoint
-except ImportError:
-    from htmlentitydefs import name2codepoint
-
-
-_format_re = re.compile(r"\$(?:(%s)|\{(%s)\})" % (("[a-zA-Z_][a-zA-Z0-9_]*",) * 2))
-_entity_re = re.compile(r"&([^;]+);")
-_filename_ascii_strip_re = re.compile(r"[^A-Za-z0-9_.-]")
-_windows_device_files = (
-    "CON",
-    "AUX",
-    "COM1",
-    "COM2",
-    "COM3",
-    "COM4",
-    "LPT1",
-    "LPT2",
-    "LPT3",
-    "PRN",
-    "NUL",
-)
-
-
-class cached_property(property):
-    """A decorator that converts a function into a lazy property.  The
-    function wrapped is called the first time to retrieve the result
-    and then that calculated result is used the next time you access
-    the value::
-
-        class Foo(object):
-
-            @cached_property
-            def foo(self):
-                # calculate something important here
-                return 42
-
-    The class has to have a `__dict__` in order for this property to
-    work.
-    """
-
-    # implementation detail: A subclass of python's builtin property
-    # decorator, we override __get__ to check for a cached value. If one
-    # chooses to invoke __get__ by hand the property will still work as
-    # expected because the lookup logic is replicated in __get__ for
-    # manual invocation.
-
-    def __init__(self, func, name=None, doc=None):
-        self.__name__ = name or func.__name__
-        self.__module__ = func.__module__
-        self.__doc__ = doc or func.__doc__
-        self.func = func
-
-    def __set__(self, obj, value):
-        obj.__dict__[self.__name__] = value
-
-    def __get__(self, obj, type=None):
-        if obj is None:
-            return self
-        value = obj.__dict__.get(self.__name__, _missing)
-        if value is _missing:
-            value = self.func(obj)
-            obj.__dict__[self.__name__] = value
-        return value
-
-
-class environ_property(_DictAccessorProperty):
-    """Maps request attributes to environment variables. This works not only
-    for the Werzeug request object, but also any other class with an
-    environ attribute:
-
-    >>> class Test(object):
-    ...     environ = {'key': 'value'}
-    ...     test = environ_property('key')
-    >>> var = Test()
-    >>> var.test
-    'value'
-
-    If you pass it a second value it's used as default if the key does not
-    exist, the third one can be a converter that takes a value and converts
-    it.  If it raises :exc:`ValueError` or :exc:`TypeError` the default value
-    is used. If no default value is provided `None` is used.
-
-    Per default the property is read only.  You have to explicitly enable it
-    by passing ``read_only=False`` to the constructor.
-    """
-
-    read_only = True
-
-    def lookup(self, obj):
-        return obj.environ
-
-
-class header_property(_DictAccessorProperty):
-    """Like `environ_property` but for headers."""
-
-    def lookup(self, obj):
-        return obj.headers
-
-
-class HTMLBuilder(object):
-    """Helper object for HTML generation.
-
-    Per default there are two instances of that class.  The `html` one, and
-    the `xhtml` one for those two dialects.  The class uses keyword parameters
-    and positional parameters to generate small snippets of HTML.
-
-    Keyword parameters are converted to XML/SGML attributes, positional
-    arguments are used as children.  Because Python accepts positional
-    arguments before keyword arguments it's a good idea to use a list with the
-    star-syntax for some children:
-
-    >>> html.p(class_='foo', *[html.a('foo', href='foo.html'), ' ',
-    ...                        html.a('bar', href='bar.html')])
-    u'<p class="foo"><a href="foo.html">foo</a> <a href="bar.html">bar</a></p>'
-
-    This class works around some browser limitations and can not be used for
-    arbitrary SGML/XML generation.  For that purpose lxml and similar
-    libraries exist.
-
-    Calling the builder escapes the string passed:
-
-    >>> html.p(html("<foo>"))
-    u'<p>&lt;foo&gt;</p>'
-    """
-
-    _entity_re = re.compile(r"&([^;]+);")
-    _entities = name2codepoint.copy()
-    _entities["apos"] = 39
-    _empty_elements = {
-        "area",
-        "base",
-        "basefont",
-        "br",
-        "col",
-        "command",
-        "embed",
-        "frame",
-        "hr",
-        "img",
-        "input",
-        "keygen",
-        "isindex",
-        "link",
-        "meta",
-        "param",
-        "source",
-        "wbr",
-    }
-    _boolean_attributes = {
-        "selected",
-        "checked",
-        "compact",
-        "declare",
-        "defer",
-        "disabled",
-        "ismap",
-        "multiple",
-        "nohref",
-        "noresize",
-        "noshade",
-        "nowrap",
-    }
-    _plaintext_elements = {"textarea"}
-    _c_like_cdata = {"script", "style"}
-
-    def __init__(self, dialect):
-        self._dialect = dialect
-
-    def __call__(self, s):
-        return escape(s)
-
-    def __getattr__(self, tag):
-        if tag[:2] == "__":
-            raise AttributeError(tag)
-
-        def proxy(*children, **arguments):
-            buffer = "<" + tag
-            for key, value in iteritems(arguments):
-                if value is None:
-                    continue
-                if key[-1] == "_":
-                    key = key[:-1]
-                if key in self._boolean_attributes:
-                    if not value:
-                        continue
-                    if self._dialect == "xhtml":
-                        value = '="' + key + '"'
-                    else:
-                        value = ""
-                else:
-                    value = '="' + escape(value) + '"'
-                buffer += " " + key + value
-            if not children and tag in self._empty_elements:
-                if self._dialect == "xhtml":
-                    buffer += " />"
-                else:
-                    buffer += ">"
-                return buffer
-            buffer += ">"
-
-            children_as_string = "".join(
-                [text_type(x) for x in children if x is not None]
-            )
-
-            if children_as_string:
-                if tag in self._plaintext_elements:
-                    children_as_string = escape(children_as_string)
-                elif tag in self._c_like_cdata and self._dialect == "xhtml":
-                    children_as_string = (
-                        "/*<![CDATA[*/" + children_as_string + "/*]]>*/"
-                    )
-            buffer += children_as_string + "</" + tag + ">"
-            return buffer
-
-        return proxy
-
-    def __repr__(self):
-        return "<%s for %r>" % (self.__class__.__name__, self._dialect)
-
-
-html = HTMLBuilder("html")
-xhtml = HTMLBuilder("xhtml")
-
-# https://cgit.freedesktop.org/xdg/shared-mime-info/tree/freedesktop.org.xml.in
-# https://www.iana.org/assignments/media-types/media-types.xhtml
-# Types listed in the XDG mime info that have a charset in the IANA registration.
-_charset_mimetypes = {
-    "application/ecmascript",
-    "application/javascript",
-    "application/sql",
-    "application/xml",
-    "application/xml-dtd",
-    "application/xml-external-parsed-entity",
-}
-
-
-def get_content_type(mimetype, charset):
-    """Returns the full content type string with charset for a mimetype.
-
-    If the mimetype represents text, the charset parameter will be
-    appended, otherwise the mimetype is returned unchanged.
-
-    :param mimetype: The mimetype to be used as content type.
-    :param charset: The charset to be appended for text mimetypes.
-    :return: The content type.
-
-    .. verionchanged:: 0.15
-        Any type that ends with ``+xml`` gets a charset, not just those
-        that start with ``application/``. Known text types such as
-        ``application/javascript`` are also given charsets.
-    """
-    if (
-        mimetype.startswith("text/")
-        or mimetype in _charset_mimetypes
-        or mimetype.endswith("+xml")
-    ):
-        mimetype += "; charset=" + charset
-
-    return mimetype
-
-
-def detect_utf_encoding(data):
-    """Detect which UTF encoding was used to encode the given bytes.
-
-    The latest JSON standard (:rfc:`8259`) suggests that only UTF-8 is
-    accepted. Older documents allowed 8, 16, or 32. 16 and 32 can be big
-    or little endian. Some editors or libraries may prepend a BOM.
-
-    :internal:
-
-    :param data: Bytes in unknown UTF encoding.
-    :return: UTF encoding name
-
-    .. versionadded:: 0.15
-    """
-    head = data[:4]
-
-    if head[:3] == codecs.BOM_UTF8:
-        return "utf-8-sig"
-
-    if b"\x00" not in head:
-        return "utf-8"
-
-    if head in (codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE):
-        return "utf-32"
-
-    if head[:2] in (codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE):
-        return "utf-16"
-
-    if len(head) == 4:
-        if head[:3] == b"\x00\x00\x00":
-            return "utf-32-be"
-
-        if head[::2] == b"\x00\x00":
-            return "utf-16-be"
-
-        if head[1:] == b"\x00\x00\x00":
-            return "utf-32-le"
-
-        if head[1::2] == b"\x00\x00":
-            return "utf-16-le"
-
-    if len(head) == 2:
-        return "utf-16-be" if head.startswith(b"\x00") else "utf-16-le"
-
-    return "utf-8"
-
-
-def format_string(string, context):
-    """String-template format a string:
-
-    >>> format_string('$foo and ${foo}s', dict(foo=42))
-    '42 and 42s'
-
-    This does not do any attribute lookup etc.  For more advanced string
-    formattings have a look at the `werkzeug.template` module.
-
-    :param string: the format string.
-    :param context: a dict with the variables to insert.
-    """
-
-    def lookup_arg(match):
-        x = context[match.group(1) or match.group(2)]
-        if not isinstance(x, string_types):
-            x = type(string)(x)
-        return x
-
-    return _format_re.sub(lookup_arg, string)
-
-
-def secure_filename(filename):
-    r"""Pass it a filename and it will return a secure version of it.  This
-    filename can then safely be stored on a regular file system and passed
-    to :func:`os.path.join`.  The filename returned is an ASCII only string
-    for maximum portability.
-
-    On windows systems the function also makes sure that the file is not
-    named after one of the special device files.
-
-    >>> secure_filename("My cool movie.mov")
-    'My_cool_movie.mov'
-    >>> secure_filename("../../../etc/passwd")
-    'etc_passwd'
-    >>> secure_filename(u'i contain cool \xfcml\xe4uts.txt')
-    'i_contain_cool_umlauts.txt'
-
-    The function might return an empty filename.  It's your responsibility
-    to ensure that the filename is unique and that you abort or
-    generate a random filename if the function returned an empty one.
-
-    .. versionadded:: 0.5
-
-    :param filename: the filename to secure
-    """
-    if isinstance(filename, text_type):
-        from unicodedata import normalize
-
-        filename = normalize("NFKD", filename).encode("ascii", "ignore")
-        if not PY2:
-            filename = filename.decode("ascii")
-    for sep in os.path.sep, os.path.altsep:
-        if sep:
-            filename = filename.replace(sep, " ")
-    filename = str(_filename_ascii_strip_re.sub("", "_".join(filename.split()))).strip(
-        "._"
-    )
-
-    # on nt a couple of special files are present in each folder.  We
-    # have to ensure that the target file is not such a filename.  In
-    # this case we prepend an underline
-    if (
-        os.name == "nt"
-        and filename
-        and filename.split(".")[0].upper() in _windows_device_files
-    ):
-        filename = "_" + filename
-
-    return filename
-
-
-def escape(s):
-    """Replace special characters "&", "<", ">" and (") to HTML-safe sequences.
-
-    There is a special handling for `None` which escapes to an empty string.
-
-    .. versionchanged:: 0.9
-       `quote` is now implicitly on.
-
-    :param s: the string to escape.
-    :param quote: ignored.
-    """
-    if s is None:
-        return ""
-    elif hasattr(s, "__html__"):
-        return text_type(s.__html__())
-
-    if not isinstance(s, string_types):
-        s = text_type(s)
-
-    return (
-        s.replace("&", "&amp;")
-        .replace("<", "&lt;")
-        .replace(">", "&gt;")
-        .replace('"', "&quot;")
-    )
-
-
-def unescape(s):
-    """The reverse function of `escape`.  This unescapes all the HTML
-    entities, not only the XML entities inserted by `escape`.
-
-    :param s: the string to unescape.
-    """
-
-    def handle_match(m):
-        name = m.group(1)
-        if name in HTMLBuilder._entities:
-            return unichr(HTMLBuilder._entities[name])
-        try:
-            if name[:2] in ("#x", "#X"):
-                return unichr(int(name[2:], 16))
-            elif name.startswith("#"):
-                return unichr(int(name[1:]))
-        except ValueError:
-            pass
-        return u""
-
-    return _entity_re.sub(handle_match, s)
-
-
-def redirect(location, code=302, Response=None):
-    """Returns a response object (a WSGI application) that, if called,
-    redirects the client to the target location. Supported codes are
-    301, 302, 303, 305, 307, and 308. 300 is not supported because
-    it's not a real redirect and 304 because it's the answer for a
-    request with a request with defined If-Modified-Since headers.
-
-    .. versionadded:: 0.6
-       The location can now be a unicode string that is encoded using
-       the :func:`iri_to_uri` function.
-
-    .. versionadded:: 0.10
-        The class used for the Response object can now be passed in.
-
-    :param location: the location the response should redirect to.
-    :param code: the redirect status code. defaults to 302.
-    :param class Response: a Response class to use when instantiating a
-        response. The default is :class:`werkzeug.wrappers.Response` if
-        unspecified.
-    """
-    if Response is None:
-        from .wrappers import Response
-
-    display_location = escape(location)
-    if isinstance(location, text_type):
-        # Safe conversion is necessary here as we might redirect
-        # to a broken URI scheme (for instance itms-services).
-        from .urls import iri_to_uri
-
-        location = iri_to_uri(location, safe_conversion=True)
-    response = Response(
-        '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
-        "<title>Redirecting...</title>\n"
-        "<h1>Redirecting...</h1>\n"
-        "<p>You should be redirected automatically to target URL: "
-        '<a href="%s">%s</a>.  If not click the link.'
-        % (escape(location), display_location),
-        code,
-        mimetype="text/html",
-    )
-    response.headers["Location"] = location
-    return response
-
-
-def append_slash_redirect(environ, code=301):
-    """Redirects to the same URL but with a slash appended.  The behavior
-    of this function is undefined if the path ends with a slash already.
-
-    :param environ: the WSGI environment for the request that triggers
-                    the redirect.
-    :param code: the status code for the redirect.
-    """
-    new_path = environ["PATH_INFO"].strip("/") + "/"
-    query_string = environ.get("QUERY_STRING")
-    if query_string:
-        new_path += "?" + query_string
-    return redirect(new_path, code)
-
-
-def import_string(import_name, silent=False):
-    """Imports an object based on a string.  This is useful if you want to
-    use import paths as endpoints or something similar.  An import path can
-    be specified either in dotted notation (``xml.sax.saxutils.escape``)
-    or with a colon as object delimiter (``xml.sax.saxutils:escape``).
-
-    If `silent` is True the return value will be `None` if the import fails.
-
-    :param import_name: the dotted name for the object to import.
-    :param silent: if set to `True` import errors are ignored and
-                   `None` is returned instead.
-    :return: imported object
-    """
-    # force the import name to automatically convert to strings
-    # __import__ is not able to handle unicode strings in the fromlist
-    # if the module is a package
-    import_name = str(import_name).replace(":", ".")
-    try:
-        try:
-            __import__(import_name)
-        except ImportError:
-            if "." not in import_name:
-                raise
-        else:
-            return sys.modules[import_name]
-
-        module_name, obj_name = import_name.rsplit(".", 1)
-        module = __import__(module_name, globals(), locals(), [obj_name])
-        try:
-            return getattr(module, obj_name)
-        except AttributeError as e:
-            raise ImportError(e)
-
-    except ImportError as e:
-        if not silent:
-            reraise(
-                ImportStringError, ImportStringError(import_name, e), sys.exc_info()[2]
-            )
-
-
-def find_modules(import_path, include_packages=False, recursive=False):
-    """Finds all the modules below a package.  This can be useful to
-    automatically import all views / controllers so that their metaclasses /
-    function decorators have a chance to register themselves on the
-    application.
-
-    Packages are not returned unless `include_packages` is `True`.  This can
-    also recursively list modules but in that case it will import all the
-    packages to get the correct load path of that module.
-
-    :param import_path: the dotted name for the package to find child modules.
-    :param include_packages: set to `True` if packages should be returned, too.
-    :param recursive: set to `True` if recursion should happen.
-    :return: generator
-    """
-    module = import_string(import_path)
-    path = getattr(module, "__path__", None)
-    if path is None:
-        raise ValueError("%r is not a package" % import_path)
-    basename = module.__name__ + "."
-    for _importer, modname, ispkg in pkgutil.iter_modules(path):
-        modname = basename + modname
-        if ispkg:
-            if include_packages:
-                yield modname
-            if recursive:
-                for item in find_modules(modname, include_packages, True):
-                    yield item
-        else:
-            yield modname
-
-
-def validate_arguments(func, args, kwargs, drop_extra=True):
-    """Checks if the function accepts the arguments and keyword arguments.
-    Returns a new ``(args, kwargs)`` tuple that can safely be passed to
-    the function without causing a `TypeError` because the function signature
-    is incompatible.  If `drop_extra` is set to `True` (which is the default)
-    any extra positional or keyword arguments are dropped automatically.
-
-    The exception raised provides three attributes:
-
-    `missing`
-        A set of argument names that the function expected but where
-        missing.
-
-    `extra`
-        A dict of keyword arguments that the function can not handle but
-        where provided.
-
-    `extra_positional`
-        A list of values that where given by positional argument but the
-        function cannot accept.
-
-    This can be useful for decorators that forward user submitted data to
-    a view function::
-
-        from werkzeug.utils import ArgumentValidationError, validate_arguments
-
-        def sanitize(f):
-            def proxy(request):
-                data = request.values.to_dict()
-                try:
-                    args, kwargs = validate_arguments(f, (request,), data)
-                except ArgumentValidationError:
-                    raise BadRequest('The browser failed to transmit all '
-                                     'the data expected.')
-                return f(*args, **kwargs)
-            return proxy
-
-    :param func: the function the validation is performed against.
-    :param args: a tuple of positional arguments.
-    :param kwargs: a dict of keyword arguments.
-    :param drop_extra: set to `False` if you don't want extra arguments
-                       to be silently dropped.
-    :return: tuple in the form ``(args, kwargs)``.
-    """
-    parser = _parse_signature(func)
-    args, kwargs, missing, extra, extra_positional = parser(args, kwargs)[:5]
-    if missing:
-        raise ArgumentValidationError(tuple(missing))
-    elif (extra or extra_positional) and not drop_extra:
-        raise ArgumentValidationError(None, extra, extra_positional)
-    return tuple(args), kwargs
-
-
-def bind_arguments(func, args, kwargs):
-    """Bind the arguments provided into a dict.  When passed a function,
-    a tuple of arguments and a dict of keyword arguments `bind_arguments`
-    returns a dict of names as the function would see it.  This can be useful
-    to implement a cache decorator that uses the function arguments to build
-    the cache key based on the values of the arguments.
-
-    :param func: the function the arguments should be bound for.
-    :param args: tuple of positional arguments.
-    :param kwargs: a dict of keyword arguments.
-    :return: a :class:`dict` of bound keyword arguments.
-    """
-    (
-        args,
-        kwargs,
-        missing,
-        extra,
-        extra_positional,
-        arg_spec,
-        vararg_var,
-        kwarg_var,
-    ) = _parse_signature(func)(args, kwargs)
-    values = {}
-    for (name, _has_default, _default), value in zip(arg_spec, args):
-        values[name] = value
-    if vararg_var is not None:
-        values[vararg_var] = tuple(extra_positional)
-    elif extra_positional:
-        raise TypeError("too many positional arguments")
-    if kwarg_var is not None:
-        multikw = set(extra) & set([x[0] for x in arg_spec])
-        if multikw:
-            raise TypeError(
-                "got multiple values for keyword argument " + repr(next(iter(multikw)))
-            )
-        values[kwarg_var] = extra
-    elif extra:
-        raise TypeError("got unexpected keyword argument " + repr(next(iter(extra))))
-    return values
-
-
-class ArgumentValidationError(ValueError):
-
-    """Raised if :func:`validate_arguments` fails to validate"""
-
-    def __init__(self, missing=None, extra=None, extra_positional=None):
-        self.missing = set(missing or ())
-        self.extra = extra or {}
-        self.extra_positional = extra_positional or []
-        ValueError.__init__(
-            self,
-            "function arguments invalid. (%d missing, %d additional)"
-            % (len(self.missing), len(self.extra) + len(self.extra_positional)),
-        )
-
-
-class ImportStringError(ImportError):
-    """Provides information about a failed :func:`import_string` attempt."""
-
-    #: String in dotted notation that failed to be imported.
-    import_name = None
-    #: Wrapped exception.
-    exception = None
-
-    def __init__(self, import_name, exception):
-        self.import_name = import_name
-        self.exception = exception
-
-        msg = (
-            "import_string() failed for %r. Possible reasons are:\n\n"
-            "- missing __init__.py in a package;\n"
-            "- package or module path not included in sys.path;\n"
-            "- duplicated package or module name taking precedence in "
-            "sys.path;\n"
-            "- missing module, class, function or variable;\n\n"
-            "Debugged import:\n\n%s\n\n"
-            "Original exception:\n\n%s: %s"
-        )
-
-        name = ""
-        tracked = []
-        for part in import_name.replace(":", ".").split("."):
-            name += (name and ".") + part
-            imported = import_string(name, silent=True)
-            if imported:
-                tracked.append((name, getattr(imported, "__file__", None)))
-            else:
-                track = ["- %r found in %r." % (n, i) for n, i in tracked]
-                track.append("- %r not found." % name)
-                msg = msg % (
-                    import_name,
-                    "\n".join(track),
-                    exception.__class__.__name__,
-                    str(exception),
-                )
-                break
-
-        ImportError.__init__(self, msg)
-
-    def __repr__(self):
-        return "<%s(%r, %r)>" % (
-            self.__class__.__name__,
-            self.import_name,
-            self.exception,
-        )
diff --git a/azure/functions/_thirdparty/werkzeug/wsgi.py b/azure/functions/_thirdparty/werkzeug/wsgi.py
deleted file mode 100644
index 807b462a..00000000
--- a/azure/functions/_thirdparty/werkzeug/wsgi.py
+++ /dev/null
@@ -1,1000 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    werkzeug.wsgi
-    ~~~~~~~~~~~~~
-
-    This module implements WSGI related helpers.
-
-    :copyright: 2007 Pallets
-    :license: BSD-3-Clause
-"""
-import io
-import re
-from functools import partial
-from functools import update_wrapper
-from itertools import chain
-
-from ._compat import BytesIO
-from ._compat import implements_iterator
-from ._compat import make_literal_wrapper
-from ._compat import string_types
-from ._compat import text_type
-from ._compat import to_bytes
-from ._compat import to_unicode
-from ._compat import try_coerce_native
-from ._compat import wsgi_get_bytes
-from ._internal import _encode_idna
-from .urls import uri_to_iri
-from .urls import url_join
-from .urls import url_parse
-from .urls import url_quote
-
-
-def responder(f):
-    """Marks a function as responder.  Decorate a function with it and it
-    will automatically call the return value as WSGI application.
-
-    Example::
-
-        @responder
-        def application(environ, start_response):
-            return Response('Hello World!')
-    """
-    return update_wrapper(lambda *a: f(*a)(*a[-2:]), f)
-
-
-def get_current_url(
-    environ,
-    root_only=False,
-    strip_querystring=False,
-    host_only=False,
-    trusted_hosts=None,
-):
-    """A handy helper function that recreates the full URL as IRI for the
-    current request or parts of it.  Here's an example:
-
-    >>> from werkzeug.test import create_environ
-    >>> env = create_environ("/?param=foo", "http://localhost/script")
-    >>> get_current_url(env)
-    'http://localhost/script/?param=foo'
-    >>> get_current_url(env, root_only=True)
-    'http://localhost/script/'
-    >>> get_current_url(env, host_only=True)
-    'http://localhost/'
-    >>> get_current_url(env, strip_querystring=True)
-    'http://localhost/script/'
-
-    This optionally it verifies that the host is in a list of trusted hosts.
-    If the host is not in there it will raise a
-    :exc:`~werkzeug.exceptions.SecurityError`.
-
-    Note that the string returned might contain unicode characters as the
-    representation is an IRI not an URI.  If you need an ASCII only
-    representation you can use the :func:`~werkzeug.urls.iri_to_uri`
-    function:
-
-    >>> from werkzeug.urls import iri_to_uri
-    >>> iri_to_uri(get_current_url(env))
-    'http://localhost/script/?param=foo'
-
-    :param environ: the WSGI environment to get the current URL from.
-    :param root_only: set `True` if you only want the root URL.
-    :param strip_querystring: set to `True` if you don't want the querystring.
-    :param host_only: set to `True` if the host URL should be returned.
-    :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
-                          for more information.
-    """
-    tmp = [environ["wsgi.url_scheme"], "://", get_host(environ, trusted_hosts)]
-    cat = tmp.append
-    if host_only:
-        return uri_to_iri("".join(tmp) + "/")
-    cat(url_quote(wsgi_get_bytes(environ.get("SCRIPT_NAME", ""))).rstrip("/"))
-    cat("/")
-    if not root_only:
-        cat(url_quote(wsgi_get_bytes(environ.get("PATH_INFO", "")).lstrip(b"/")))
-        if not strip_querystring:
-            qs = get_query_string(environ)
-            if qs:
-                cat("?" + qs)
-    return uri_to_iri("".join(tmp))
-
-
-def host_is_trusted(hostname, trusted_list):
-    """Checks if a host is trusted against a list.  This also takes care
-    of port normalization.
-
-    .. versionadded:: 0.9
-
-    :param hostname: the hostname to check
-    :param trusted_list: a list of hostnames to check against.  If a
-                         hostname starts with a dot it will match against
-                         all subdomains as well.
-    """
-    if not hostname:
-        return False
-
-    if isinstance(trusted_list, string_types):
-        trusted_list = [trusted_list]
-
-    def _normalize(hostname):
-        if ":" in hostname:
-            hostname = hostname.rsplit(":", 1)[0]
-        return _encode_idna(hostname)
-
-    try:
-        hostname = _normalize(hostname)
-    except UnicodeError:
-        return False
-    for ref in trusted_list:
-        if ref.startswith("."):
-            ref = ref[1:]
-            suffix_match = True
-        else:
-            suffix_match = False
-        try:
-            ref = _normalize(ref)
-        except UnicodeError:
-            return False
-        if ref == hostname:
-            return True
-        if suffix_match and hostname.endswith(b"." + ref):
-            return True
-    return False
-
-
-def get_host(environ, trusted_hosts=None):
-    """Return the host for the given WSGI environment. This first checks
-    the ``Host`` header. If it's not present, then ``SERVER_NAME`` and
-    ``SERVER_PORT`` are used. The host will only contain the port if it
-    is different than the standard port for the protocol.
-
-    Optionally, verify that the host is trusted using
-    :func:`host_is_trusted` and raise a
-    :exc:`~werkzeug.exceptions.SecurityError` if it is not.
-
-    :param environ: The WSGI environment to get the host from.
-    :param trusted_hosts: A list of trusted hosts.
-    :return: Host, with port if necessary.
-    :raise ~werkzeug.exceptions.SecurityError: If the host is not
-        trusted.
-    """
-    if "HTTP_HOST" in environ:
-        rv = environ["HTTP_HOST"]
-        if environ["wsgi.url_scheme"] == "http" and rv.endswith(":80"):
-            rv = rv[:-3]
-        elif environ["wsgi.url_scheme"] == "https" and rv.endswith(":443"):
-            rv = rv[:-4]
-    else:
-        rv = environ["SERVER_NAME"]
-        if (environ["wsgi.url_scheme"], environ["SERVER_PORT"]) not in (
-            ("https", "443"),
-            ("http", "80"),
-        ):
-            rv += ":" + environ["SERVER_PORT"]
-    if trusted_hosts is not None:
-        if not host_is_trusted(rv, trusted_hosts):
-            from .exceptions import SecurityError
-
-            raise SecurityError('Host "%s" is not trusted' % rv)
-    return rv
-
-
-def get_content_length(environ):
-    """Returns the content length from the WSGI environment as
-    integer. If it's not available or chunked transfer encoding is used,
-    ``None`` is returned.
-
-    .. versionadded:: 0.9
-
-    :param environ: the WSGI environ to fetch the content length from.
-    """
-    if environ.get("HTTP_TRANSFER_ENCODING", "") == "chunked":
-        return None
-
-    content_length = environ.get("CONTENT_LENGTH")
-    if content_length is not None:
-        try:
-            return max(0, int(content_length))
-        except (ValueError, TypeError):
-            pass
-
-
-def get_input_stream(environ, safe_fallback=True):
-    """Returns the input stream from the WSGI environment and wraps it
-    in the most sensible way possible. The stream returned is not the
-    raw WSGI stream in most cases but one that is safe to read from
-    without taking into account the content length.
-
-    If content length is not set, the stream will be empty for safety reasons.
-    If the WSGI server supports chunked or infinite streams, it should set
-    the ``wsgi.input_terminated`` value in the WSGI environ to indicate that.
-
-    .. versionadded:: 0.9
-
-    :param environ: the WSGI environ to fetch the stream from.
-    :param safe_fallback: use an empty stream as a safe fallback when the
-        content length is not set. Disabling this allows infinite streams,
-        which can be a denial-of-service risk.
-    """
-    stream = environ["wsgi.input"]
-    content_length = get_content_length(environ)
-
-    # A wsgi extension that tells us if the input is terminated.  In
-    # that case we return the stream unchanged as we know we can safely
-    # read it until the end.
-    if environ.get("wsgi.input_terminated"):
-        return stream
-
-    # If the request doesn't specify a content length, returning the stream is
-    # potentially dangerous because it could be infinite, malicious or not. If
-    # safe_fallback is true, return an empty stream instead for safety.
-    if content_length is None:
-        return BytesIO() if safe_fallback else stream
-
-    # Otherwise limit the stream to the content length
-    return LimitedStream(stream, content_length)
-
-
-def get_query_string(environ):
-    """Returns the `QUERY_STRING` from the WSGI environment.  This also takes
-    care about the WSGI decoding dance on Python 3 environments as a
-    native string.  The string returned will be restricted to ASCII
-    characters.
-
-    .. versionadded:: 0.9
-
-    :param environ: the WSGI environment object to get the query string from.
-    """
-    qs = wsgi_get_bytes(environ.get("QUERY_STRING", ""))
-    # QUERY_STRING really should be ascii safe but some browsers
-    # will send us some unicode stuff (I am looking at you IE).
-    # In that case we want to urllib quote it badly.
-    return try_coerce_native(url_quote(qs, safe=":&%=+$!*'(),"))
-
-
-def get_path_info(environ, charset="utf-8", errors="replace"):
-    """Returns the `PATH_INFO` from the WSGI environment and properly
-    decodes it.  This also takes care about the WSGI decoding dance
-    on Python 3 environments.  if the `charset` is set to `None` a
-    bytestring is returned.
-
-    .. versionadded:: 0.9
-
-    :param environ: the WSGI environment object to get the path from.
-    :param charset: the charset for the path info, or `None` if no
-                    decoding should be performed.
-    :param errors: the decoding error handling.
-    """
-    path = wsgi_get_bytes(environ.get("PATH_INFO", ""))
-    return to_unicode(path, charset, errors, allow_none_charset=True)
-
-
-def get_script_name(environ, charset="utf-8", errors="replace"):
-    """Returns the `SCRIPT_NAME` from the WSGI environment and properly
-    decodes it.  This also takes care about the WSGI decoding dance
-    on Python 3 environments.  if the `charset` is set to `None` a
-    bytestring is returned.
-
-    .. versionadded:: 0.9
-
-    :param environ: the WSGI environment object to get the path from.
-    :param charset: the charset for the path, or `None` if no
-                    decoding should be performed.
-    :param errors: the decoding error handling.
-    """
-    path = wsgi_get_bytes(environ.get("SCRIPT_NAME", ""))
-    return to_unicode(path, charset, errors, allow_none_charset=True)
-
-
-def pop_path_info(environ, charset="utf-8", errors="replace"):
-    """Removes and returns the next segment of `PATH_INFO`, pushing it onto
-    `SCRIPT_NAME`.  Returns `None` if there is nothing left on `PATH_INFO`.
-
-    If the `charset` is set to `None` a bytestring is returned.
-
-    If there are empty segments (``'/foo//bar``) these are ignored but
-    properly pushed to the `SCRIPT_NAME`:
-
-    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
-    >>> pop_path_info(env)
-    'a'
-    >>> env['SCRIPT_NAME']
-    '/foo/a'
-    >>> pop_path_info(env)
-    'b'
-    >>> env['SCRIPT_NAME']
-    '/foo/a/b'
-
-    .. versionadded:: 0.5
-
-    .. versionchanged:: 0.9
-       The path is now decoded and a charset and encoding
-       parameter can be provided.
-
-    :param environ: the WSGI environment that is modified.
-    """
-    path = environ.get("PATH_INFO")
-    if not path:
-        return None
-
-    script_name = environ.get("SCRIPT_NAME", "")
-
-    # shift multiple leading slashes over
-    old_path = path
-    path = path.lstrip("/")
-    if path != old_path:
-        script_name += "/" * (len(old_path) - len(path))
-
-    if "/" not in path:
-        environ["PATH_INFO"] = ""
-        environ["SCRIPT_NAME"] = script_name + path
-        rv = wsgi_get_bytes(path)
-    else:
-        segment, path = path.split("/", 1)
-        environ["PATH_INFO"] = "/" + path
-        environ["SCRIPT_NAME"] = script_name + segment
-        rv = wsgi_get_bytes(segment)
-
-    return to_unicode(rv, charset, errors, allow_none_charset=True)
-
-
-def peek_path_info(environ, charset="utf-8", errors="replace"):
-    """Returns the next segment on the `PATH_INFO` or `None` if there
-    is none.  Works like :func:`pop_path_info` without modifying the
-    environment:
-
-    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
-    >>> peek_path_info(env)
-    'a'
-    >>> peek_path_info(env)
-    'a'
-
-    If the `charset` is set to `None` a bytestring is returned.
-
-    .. versionadded:: 0.5
-
-    .. versionchanged:: 0.9
-       The path is now decoded and a charset and encoding
-       parameter can be provided.
-
-    :param environ: the WSGI environment that is checked.
-    """
-    segments = environ.get("PATH_INFO", "").lstrip("/").split("/", 1)
-    if segments:
-        return to_unicode(
-            wsgi_get_bytes(segments[0]), charset, errors, allow_none_charset=True
-        )
-
-
-def extract_path_info(
-    environ_or_baseurl,
-    path_or_url,
-    charset="utf-8",
-    errors="werkzeug.url_quote",
-    collapse_http_schemes=True,
-):
-    """Extracts the path info from the given URL (or WSGI environment) and
-    path.  The path info returned is a unicode string, not a bytestring
-    suitable for a WSGI environment.  The URLs might also be IRIs.
-
-    If the path info could not be determined, `None` is returned.
-
-    Some examples:
-
-    >>> extract_path_info('http://example.com/app', '/app/hello')
-    u'/hello'
-    >>> extract_path_info('http://example.com/app',
-    ...                   'https://example.com/app/hello')
-    u'/hello'
-    >>> extract_path_info('http://example.com/app',
-    ...                   'https://example.com/app/hello',
-    ...                   collapse_http_schemes=False) is None
-    True
-
-    Instead of providing a base URL you can also pass a WSGI environment.
-
-    :param environ_or_baseurl: a WSGI environment dict, a base URL or
-                               base IRI.  This is the root of the
-                               application.
-    :param path_or_url: an absolute path from the server root, a
-                        relative path (in which case it's the path info)
-                        or a full URL.  Also accepts IRIs and unicode
-                        parameters.
-    :param charset: the charset for byte data in URLs
-    :param errors: the error handling on decode
-    :param collapse_http_schemes: if set to `False` the algorithm does
-                                  not assume that http and https on the
-                                  same server point to the same
-                                  resource.
-
-    .. versionchanged:: 0.15
-        The ``errors`` parameter defaults to leaving invalid bytes
-        quoted instead of replacing them.
-
-    .. versionadded:: 0.6
-    """
-
-    def _normalize_netloc(scheme, netloc):
-        parts = netloc.split(u"@", 1)[-1].split(u":", 1)
-        if len(parts) == 2:
-            netloc, port = parts
-            if (scheme == u"http" and port == u"80") or (
-                scheme == u"https" and port == u"443"
-            ):
-                port = None
-        else:
-            netloc = parts[0]
-            port = None
-        if port is not None:
-            netloc += u":" + port
-        return netloc
-
-    # make sure whatever we are working on is a IRI and parse it
-    path = uri_to_iri(path_or_url, charset, errors)
-    if isinstance(environ_or_baseurl, dict):
-        environ_or_baseurl = get_current_url(environ_or_baseurl, root_only=True)
-    base_iri = uri_to_iri(environ_or_baseurl, charset, errors)
-    base_scheme, base_netloc, base_path = url_parse(base_iri)[:3]
-    cur_scheme, cur_netloc, cur_path, = url_parse(url_join(base_iri, path))[:3]
-
-    # normalize the network location
-    base_netloc = _normalize_netloc(base_scheme, base_netloc)
-    cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)
-
-    # is that IRI even on a known HTTP scheme?
-    if collapse_http_schemes:
-        for scheme in base_scheme, cur_scheme:
-            if scheme not in (u"http", u"https"):
-                return None
-    else:
-        if not (base_scheme in (u"http", u"https") and base_scheme == cur_scheme):
-            return None
-
-    # are the netlocs compatible?
-    if base_netloc != cur_netloc:
-        return None
-
-    # are we below the application path?
-    base_path = base_path.rstrip(u"/")
-    if not cur_path.startswith(base_path):
-        return None
-
-    return u"/" + cur_path[len(base_path) :].lstrip(u"/")
-
-
-@implements_iterator
-class ClosingIterator(object):
-    """The WSGI specification requires that all middlewares and gateways
-    respect the `close` callback of the iterable returned by the application.
-    Because it is useful to add another close action to a returned iterable
-    and adding a custom iterable is a boring task this class can be used for
-    that::
-
-        return ClosingIterator(app(environ, start_response), [cleanup_session,
-                                                              cleanup_locals])
-
-    If there is just one close function it can be passed instead of the list.
-
-    A closing iterator is not needed if the application uses response objects
-    and finishes the processing if the response is started::
-
-        try:
-            return response(environ, start_response)
-        finally:
-            cleanup_session()
-            cleanup_locals()
-    """
-
-    def __init__(self, iterable, callbacks=None):
-        iterator = iter(iterable)
-        self._next = partial(next, iterator)
-        if callbacks is None:
-            callbacks = []
-        elif callable(callbacks):
-            callbacks = [callbacks]
-        else:
-            callbacks = list(callbacks)
-        iterable_close = getattr(iterable, "close", None)
-        if iterable_close:
-            callbacks.insert(0, iterable_close)
-        self._callbacks = callbacks
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        return self._next()
-
-    def close(self):
-        for callback in self._callbacks:
-            callback()
-
-
-def wrap_file(environ, file, buffer_size=8192):
-    """Wraps a file.  This uses the WSGI server's file wrapper if available
-    or otherwise the generic :class:`FileWrapper`.
-
-    .. versionadded:: 0.5
-
-    If the file wrapper from the WSGI server is used it's important to not
-    iterate over it from inside the application but to pass it through
-    unchanged.  If you want to pass out a file wrapper inside a response
-    object you have to set :attr:`~BaseResponse.direct_passthrough` to `True`.
-
-    More information about file wrappers are available in :pep:`333`.
-
-    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
-    :param buffer_size: number of bytes for one iteration.
-    """
-    return environ.get("wsgi.file_wrapper", FileWrapper)(file, buffer_size)
-
-
-@implements_iterator
-class FileWrapper(object):
-    """This class can be used to convert a :class:`file`-like object into
-    an iterable.  It yields `buffer_size` blocks until the file is fully
-    read.
-
-    You should not use this class directly but rather use the
-    :func:`wrap_file` function that uses the WSGI server's file wrapper
-    support if it's available.
-
-    .. versionadded:: 0.5
-
-    If you're using this object together with a :class:`BaseResponse` you have
-    to use the `direct_passthrough` mode.
-
-    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
-    :param buffer_size: number of bytes for one iteration.
-    """
-
-    def __init__(self, file, buffer_size=8192):
-        self.file = file
-        self.buffer_size = buffer_size
-
-    def close(self):
-        if hasattr(self.file, "close"):
-            self.file.close()
-
-    def seekable(self):
-        if hasattr(self.file, "seekable"):
-            return self.file.seekable()
-        if hasattr(self.file, "seek"):
-            return True
-        return False
-
-    def seek(self, *args):
-        if hasattr(self.file, "seek"):
-            self.file.seek(*args)
-
-    def tell(self):
-        if hasattr(self.file, "tell"):
-            return self.file.tell()
-        return None
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        data = self.file.read(self.buffer_size)
-        if data:
-            return data
-        raise StopIteration()
-
-
-@implements_iterator
-class _RangeWrapper(object):
-    # private for now, but should we make it public in the future ?
-
-    """This class can be used to convert an iterable object into
-    an iterable that will only yield a piece of the underlying content.
-    It yields blocks until the underlying stream range is fully read.
-    The yielded blocks will have a size that can't exceed the original
-    iterator defined block size, but that can be smaller.
-
-    If you're using this object together with a :class:`BaseResponse` you have
-    to use the `direct_passthrough` mode.
-
-    :param iterable: an iterable object with a :meth:`__next__` method.
-    :param start_byte: byte from which read will start.
-    :param byte_range: how many bytes to read.
-    """
-
-    def __init__(self, iterable, start_byte=0, byte_range=None):
-        self.iterable = iter(iterable)
-        self.byte_range = byte_range
-        self.start_byte = start_byte
-        self.end_byte = None
-        if byte_range is not None:
-            self.end_byte = self.start_byte + self.byte_range
-        self.read_length = 0
-        self.seekable = hasattr(iterable, "seekable") and iterable.seekable()
-        self.end_reached = False
-
-    def __iter__(self):
-        return self
-
-    def _next_chunk(self):
-        try:
-            chunk = next(self.iterable)
-            self.read_length += len(chunk)
-            return chunk
-        except StopIteration:
-            self.end_reached = True
-            raise
-
-    def _first_iteration(self):
-        chunk = None
-        if self.seekable:
-            self.iterable.seek(self.start_byte)
-            self.read_length = self.iterable.tell()
-            contextual_read_length = self.read_length
-        else:
-            while self.read_length <= self.start_byte:
-                chunk = self._next_chunk()
-            if chunk is not None:
-                chunk = chunk[self.start_byte - self.read_length :]
-            contextual_read_length = self.start_byte
-        return chunk, contextual_read_length
-
-    def _next(self):
-        if self.end_reached:
-            raise StopIteration()
-        chunk = None
-        contextual_read_length = self.read_length
-        if self.read_length == 0:
-            chunk, contextual_read_length = self._first_iteration()
-        if chunk is None:
-            chunk = self._next_chunk()
-        if self.end_byte is not None and self.read_length >= self.end_byte:
-            self.end_reached = True
-            return chunk[: self.end_byte - contextual_read_length]
-        return chunk
-
-    def __next__(self):
-        chunk = self._next()
-        if chunk:
-            return chunk
-        self.end_reached = True
-        raise StopIteration()
-
-    def close(self):
-        if hasattr(self.iterable, "close"):
-            self.iterable.close()
-
-
-def _make_chunk_iter(stream, limit, buffer_size):
-    """Helper for the line and chunk iter functions."""
-    if isinstance(stream, (bytes, bytearray, text_type)):
-        raise TypeError(
-            "Passed a string or byte object instead of true iterator or stream."
-        )
-    if not hasattr(stream, "read"):
-        for item in stream:
-            if item:
-                yield item
-        return
-    if not isinstance(stream, LimitedStream) and limit is not None:
-        stream = LimitedStream(stream, limit)
-    _read = stream.read
-    while 1:
-        item = _read(buffer_size)
-        if not item:
-            break
-        yield item
-
-
-def make_line_iter(stream, limit=None, buffer_size=10 * 1024, cap_at_buffer=False):
-    """Safely iterates line-based over an input stream.  If the input stream
-    is not a :class:`LimitedStream` the `limit` parameter is mandatory.
-
-    This uses the stream's :meth:`~file.read` method internally as opposite
-    to the :meth:`~file.readline` method that is unsafe and can only be used
-    in violation of the WSGI specification.  The same problem applies to the
-    `__iter__` function of the input stream which calls :meth:`~file.readline`
-    without arguments.
-
-    If you need line-by-line processing it's strongly recommended to iterate
-    over the input stream using this helper function.
-
-    .. versionchanged:: 0.8
-       This function now ensures that the limit was reached.
-
-    .. versionadded:: 0.9
-       added support for iterators as input stream.
-
-    .. versionadded:: 0.11.10
-       added support for the `cap_at_buffer` parameter.
-
-    :param stream: the stream or iterate to iterate over.
-    :param limit: the limit in bytes for the stream.  (Usually
-                  content length.  Not necessary if the `stream`
-                  is a :class:`LimitedStream`.
-    :param buffer_size: The optional buffer size.
-    :param cap_at_buffer: if this is set chunks are split if they are longer
-                          than the buffer size.  Internally this is implemented
-                          that the buffer size might be exhausted by a factor
-                          of two however.
-    """
-    _iter = _make_chunk_iter(stream, limit, buffer_size)
-
-    first_item = next(_iter, "")
-    if not first_item:
-        return
-
-    s = make_literal_wrapper(first_item)
-    empty = s("")
-    cr = s("\r")
-    lf = s("\n")
-    crlf = s("\r\n")
-
-    _iter = chain((first_item,), _iter)
-
-    def _iter_basic_lines():
-        _join = empty.join
-        buffer = []
-        while 1:
-            new_data = next(_iter, "")
-            if not new_data:
-                break
-            new_buf = []
-            buf_size = 0
-            for item in chain(buffer, new_data.splitlines(True)):
-                new_buf.append(item)
-                buf_size += len(item)
-                if item and item[-1:] in crlf:
-                    yield _join(new_buf)
-                    new_buf = []
-                elif cap_at_buffer and buf_size >= buffer_size:
-                    rv = _join(new_buf)
-                    while len(rv) >= buffer_size:
-                        yield rv[:buffer_size]
-                        rv = rv[buffer_size:]
-                    new_buf = [rv]
-            buffer = new_buf
-        if buffer:
-            yield _join(buffer)
-
-    # This hackery is necessary to merge 'foo\r' and '\n' into one item
-    # of 'foo\r\n' if we were unlucky and we hit a chunk boundary.
-    previous = empty
-    for item in _iter_basic_lines():
-        if item == lf and previous[-1:] == cr:
-            previous += item
-            item = empty
-        if previous:
-            yield previous
-        previous = item
-    if previous:
-        yield previous
-
-
-def make_chunk_iter(
-    stream, separator, limit=None, buffer_size=10 * 1024, cap_at_buffer=False
-):
-    """Works like :func:`make_line_iter` but accepts a separator
-    which divides chunks.  If you want newline based processing
-    you should use :func:`make_line_iter` instead as it
-    supports arbitrary newline markers.
-
-    .. versionadded:: 0.8
-
-    .. versionadded:: 0.9
-       added support for iterators as input stream.
-
-    .. versionadded:: 0.11.10
-       added support for the `cap_at_buffer` parameter.
-
-    :param stream: the stream or iterate to iterate over.
-    :param separator: the separator that divides chunks.
-    :param limit: the limit in bytes for the stream.  (Usually
-                  content length.  Not necessary if the `stream`
-                  is otherwise already limited).
-    :param buffer_size: The optional buffer size.
-    :param cap_at_buffer: if this is set chunks are split if they are longer
-                          than the buffer size.  Internally this is implemented
-                          that the buffer size might be exhausted by a factor
-                          of two however.
-    """
-    _iter = _make_chunk_iter(stream, limit, buffer_size)
-
-    first_item = next(_iter, "")
-    if not first_item:
-        return
-
-    _iter = chain((first_item,), _iter)
-    if isinstance(first_item, text_type):
-        separator = to_unicode(separator)
-        _split = re.compile(r"(%s)" % re.escape(separator)).split
-        _join = u"".join
-    else:
-        separator = to_bytes(separator)
-        _split = re.compile(b"(" + re.escape(separator) + b")").split
-        _join = b"".join
-
-    buffer = []
-    while 1:
-        new_data = next(_iter, "")
-        if not new_data:
-            break
-        chunks = _split(new_data)
-        new_buf = []
-        buf_size = 0
-        for item in chain(buffer, chunks):
-            if item == separator:
-                yield _join(new_buf)
-                new_buf = []
-                buf_size = 0
-            else:
-                buf_size += len(item)
-                new_buf.append(item)
-
-                if cap_at_buffer and buf_size >= buffer_size:
-                    rv = _join(new_buf)
-                    while len(rv) >= buffer_size:
-                        yield rv[:buffer_size]
-                        rv = rv[buffer_size:]
-                    new_buf = [rv]
-                    buf_size = len(rv)
-
-        buffer = new_buf
-    if buffer:
-        yield _join(buffer)
-
-
-@implements_iterator
-class LimitedStream(io.IOBase):
-    """Wraps a stream so that it doesn't read more than n bytes.  If the
-    stream is exhausted and the caller tries to get more bytes from it
-    :func:`on_exhausted` is called which by default returns an empty
-    string.  The return value of that function is forwarded
-    to the reader function.  So if it returns an empty string
-    :meth:`read` will return an empty string as well.
-
-    The limit however must never be higher than what the stream can
-    output.  Otherwise :meth:`readlines` will try to read past the
-    limit.
-
-    .. admonition:: Note on WSGI compliance
-
-       calls to :meth:`readline` and :meth:`readlines` are not
-       WSGI compliant because it passes a size argument to the
-       readline methods.  Unfortunately the WSGI PEP is not safely
-       implementable without a size argument to :meth:`readline`
-       because there is no EOF marker in the stream.  As a result
-       of that the use of :meth:`readline` is discouraged.
-
-       For the same reason iterating over the :class:`LimitedStream`
-       is not portable.  It internally calls :meth:`readline`.
-
-       We strongly suggest using :meth:`read` only or using the
-       :func:`make_line_iter` which safely iterates line-based
-       over a WSGI input stream.
-
-    :param stream: the stream to wrap.
-    :param limit: the limit for the stream, must not be longer than
-                  what the string can provide if the stream does not
-                  end with `EOF` (like `wsgi.input`)
-    """
-
-    def __init__(self, stream, limit):
-        self._read = stream.read
-        self._readline = stream.readline
-        self._pos = 0
-        self.limit = limit
-
-    def __iter__(self):
-        return self
-
-    @property
-    def is_exhausted(self):
-        """If the stream is exhausted this attribute is `True`."""
-        return self._pos >= self.limit
-
-    def on_exhausted(self):
-        """This is called when the stream tries to read past the limit.
-        The return value of this function is returned from the reading
-        function.
-        """
-        # Read null bytes from the stream so that we get the
-        # correct end of stream marker.
-        return self._read(0)
-
-    def on_disconnect(self):
-        """What should happen if a disconnect is detected?  The return
-        value of this function is returned from read functions in case
-        the client went away.  By default a
-        :exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised.
-        """
-        from .exceptions import ClientDisconnected
-
-        raise ClientDisconnected()
-
-    def exhaust(self, chunk_size=1024 * 64):
-        """Exhaust the stream.  This consumes all the data left until the
-        limit is reached.
-
-        :param chunk_size: the size for a chunk.  It will read the chunk
-                           until the stream is exhausted and throw away
-                           the results.
-        """
-        to_read = self.limit - self._pos
-        chunk = chunk_size
-        while to_read > 0:
-            chunk = min(to_read, chunk)
-            self.read(chunk)
-            to_read -= chunk
-
-    def read(self, size=None):
-        """Read `size` bytes or if size is not provided everything is read.
-
-        :param size: the number of bytes read.
-        """
-        if self._pos >= self.limit:
-            return self.on_exhausted()
-        if size is None or size == -1:  # -1 is for consistence with file
-            size = self.limit
-        to_read = min(self.limit - self._pos, size)
-        try:
-            read = self._read(to_read)
-        except (IOError, ValueError):
-            return self.on_disconnect()
-        if to_read and len(read) != to_read:
-            return self.on_disconnect()
-        self._pos += len(read)
-        return read
-
-    def readline(self, size=None):
-        """Reads one line from the stream."""
-        if self._pos >= self.limit:
-            return self.on_exhausted()
-        if size is None:
-            size = self.limit - self._pos
-        else:
-            size = min(size, self.limit - self._pos)
-        try:
-            line = self._readline(size)
-        except (ValueError, IOError):
-            return self.on_disconnect()
-        if size and not line:
-            return self.on_disconnect()
-        self._pos += len(line)
-        return line
-
-    def readlines(self, size=None):
-        """Reads a file into a list of strings.  It calls :meth:`readline`
-        until the file is read to the end.  It does support the optional
-        `size` argument if the underlaying stream supports it for
-        `readline`.
-        """
-        last_pos = self._pos
-        result = []
-        if size is not None:
-            end = min(self.limit, last_pos + size)
-        else:
-            end = self.limit
-        while 1:
-            if size is not None:
-                size -= last_pos - self._pos
-            if self._pos >= end:
-                break
-            result.append(self.readline(size))
-            if size is not None:
-                last_pos = self._pos
-        return result
-
-    def tell(self):
-        """Returns the position of the stream.
-
-        .. versionadded:: 0.9
-        """
-        return self._pos
-
-    def __next__(self):
-        line = self.readline()
-        if not line:
-            raise StopIteration()
-        return line
-
-    def readable(self):
-        return True
diff --git a/azure/functions/http.py b/azure/functions/http.py
index 61b303ec..734f43eb 100644
--- a/azure/functions/http.py
+++ b/azure/functions/http.py
@@ -10,7 +10,7 @@
 from azure.functions import _abc as azf_abc
 from azure.functions import _http as azf_http
 from . import meta
-from ._thirdparty.werkzeug.datastructures import Headers
+from werkzeug.datastructures import Headers
 
 
 class HttpRequest(azf_http.HttpRequest):
diff --git a/setup.py b/setup.py
index b9e84ac5..43d96c05 100644
--- a/setup.py
+++ b/setup.py
@@ -6,6 +6,8 @@
 from setuptools import find_packages, setup
 from azure.functions import __version__
 
+INSTALL_REQUIRES = ["werkzeug"]
+
 EXTRA_REQUIRES = {
     'dev': [
         'flake8-logging-format',
@@ -59,6 +61,7 @@
     package_data={
         'azure.functions': ['py.typed']
     },
+    install_requires=INSTALL_REQUIRES,
     extras_require=EXTRA_REQUIRES,
     include_package_data=True,
     test_suite='tests'

From b2f4595ed6edbb53b494e24a5939a596c8aa73be Mon Sep 17 00:00:00 2001
From: Gavin Aguiar <gaaguiar@microsoft.com>
Date: Fri, 15 Nov 2024 11:05:51 -0600
Subject: [PATCH 02/17] Updating build templates

---
 eng/templates/build.yml      | 5 ++++-
 eng/templates/jobs/build.yml | 1 +
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/eng/templates/build.yml b/eng/templates/build.yml
index 44603de8..605b3722 100644
--- a/eng/templates/build.yml
+++ b/eng/templates/build.yml
@@ -15,7 +15,10 @@ jobs:
           python --version
         displayName: 'Check python version'
       - bash: |
+          python -m venv .env
+          source .env/bin/activate
           python -m pip install -U pip
-          pip install twine wheel
+          python -m pip install .
+          python -m pip install twine wheel
           python setup.py sdist bdist_wheel
         displayName: 'Build Python SDK'
diff --git a/eng/templates/jobs/build.yml b/eng/templates/jobs/build.yml
index faf01484..5c67bc73 100644
--- a/eng/templates/jobs/build.yml
+++ b/eng/templates/jobs/build.yml
@@ -11,6 +11,7 @@ jobs:
         displayName: 'Check python version'
       - bash: |
           python -m pip install -U pip
+          python -m pip install .
           pip install twine wheel
           python setup.py sdist bdist_wheel
         displayName: 'Build Python SDK'

From b955a6face5882467d3fa3eecc425c0234ed7486 Mon Sep 17 00:00:00 2001
From: Gavin Aguiar <gaaguiar@microsoft.com>
Date: Fri, 15 Nov 2024 15:37:35 -0600
Subject: [PATCH 03/17] Added a venv

---
 eng/templates/jobs/build.yml | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/eng/templates/jobs/build.yml b/eng/templates/jobs/build.yml
index 5c67bc73..42da6905 100644
--- a/eng/templates/jobs/build.yml
+++ b/eng/templates/jobs/build.yml
@@ -10,8 +10,10 @@ jobs:
           python --version
         displayName: 'Check python version'
       - bash: |
+           python -m venv .env
+          source .env/bin/activate
           python -m pip install -U pip
           python -m pip install .
-          pip install twine wheel
+          python -m pip install twine wheel
           python setup.py sdist bdist_wheel
         displayName: 'Build Python SDK'

From 69fbd0821c467c4fb96a5964ddc736d8a88aff1f Mon Sep 17 00:00:00 2001
From: Gavin Aguiar <gaaguiar@microsoft.com>
Date: Fri, 15 Nov 2024 15:51:04 -0600
Subject: [PATCH 04/17] Fixed indentation

---
 eng/templates/jobs/build.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/eng/templates/jobs/build.yml b/eng/templates/jobs/build.yml
index 42da6905..0cedc143 100644
--- a/eng/templates/jobs/build.yml
+++ b/eng/templates/jobs/build.yml
@@ -10,7 +10,7 @@ jobs:
           python --version
         displayName: 'Check python version'
       - bash: |
-           python -m venv .env
+          python -m venv .env
           .env\Scripts\Activate.ps1
           python -m pip install -U pip
           python -m pip install .

From 005b1989f74e827afa8e79e00681741b2f1db4ca Mon Sep 17 00:00:00 2001
From: Gavin Aguiar <gaaguiar@microsoft.com>
Date: Fri, 15 Nov 2024 16:01:06 -0600
Subject: [PATCH 05/17] Changed to editable install

---
 eng/templates/build.yml      | 2 +-
 eng/templates/jobs/build.yml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/eng/templates/build.yml b/eng/templates/build.yml
index 605b3722..d3e9983d 100644
--- a/eng/templates/build.yml
+++ b/eng/templates/build.yml
@@ -18,7 +18,7 @@ jobs:
           python -m venv .env
           .env\Scripts\Activate.ps1
           python -m pip install -U pip
-          python -m pip install .
+          python -m pip install -e .
           python -m pip install twine wheel
           python setup.py sdist bdist_wheel
         displayName: 'Build Python SDK'
diff --git a/eng/templates/jobs/build.yml b/eng/templates/jobs/build.yml
index 0cedc143..eba4fd0f 100644
--- a/eng/templates/jobs/build.yml
+++ b/eng/templates/jobs/build.yml
@@ -13,7 +13,7 @@ jobs:
           python -m venv .env
           .env\Scripts\Activate.ps1
           python -m pip install -U pip
-          python -m pip install .
+          python -m pip install -e .
           python -m pip install twine wheel
           python setup.py sdist bdist_wheel
         displayName: 'Build Python SDK'

From 7a1b9950483c600252392dd4a8a5e3f50a20e27a Mon Sep 17 00:00:00 2001
From: Gavin Aguiar <gaaguiar@microsoft.com>
Date: Fri, 15 Nov 2024 16:09:21 -0600
Subject: [PATCH 06/17] Installing werkzeug directly

---
 eng/templates/build.yml      | 3 +--
 eng/templates/jobs/build.yml | 3 +--
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/eng/templates/build.yml b/eng/templates/build.yml
index d3e9983d..47013de5 100644
--- a/eng/templates/build.yml
+++ b/eng/templates/build.yml
@@ -18,7 +18,6 @@ jobs:
           python -m venv .env
           .env\Scripts\Activate.ps1
           python -m pip install -U pip
-          python -m pip install -e .
-          python -m pip install twine wheel
+          python -m pip install werkzeug twine wheel
           python setup.py sdist bdist_wheel
         displayName: 'Build Python SDK'
diff --git a/eng/templates/jobs/build.yml b/eng/templates/jobs/build.yml
index eba4fd0f..54d71e92 100644
--- a/eng/templates/jobs/build.yml
+++ b/eng/templates/jobs/build.yml
@@ -13,7 +13,6 @@ jobs:
           python -m venv .env
           .env\Scripts\Activate.ps1
           python -m pip install -U pip
-          python -m pip install -e .
-          python -m pip install twine wheel
+          python -m pip install werkzeug twine wheel
           python setup.py sdist bdist_wheel
         displayName: 'Build Python SDK'

From da567628dc513266a81f0cd320380487c2da16d7 Mon Sep 17 00:00:00 2001
From: Gavin Aguiar <gaaguiar@microsoft.com>
Date: Mon, 18 Nov 2024 11:24:44 -0600
Subject: [PATCH 07/17] Updated dependencies based on version

---
 azure/functions/_http.py | 11 ++++++-----
 setup.py                 |  7 ++++++-
 2 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/azure/functions/_http.py b/azure/functions/_http.py
index 9cae82d7..49faea60 100644
--- a/azure/functions/_http.py
+++ b/azure/functions/_http.py
@@ -8,10 +8,10 @@
 import types
 import typing
 
-from multidict._multidict import MultiDict
+import werkzeug.datastructures
 from werkzeug import formparser as _wk_parser
 from werkzeug import http as _wk_http
-from werkzeug.datastructures import Headers, FileStorage
+from werkzeug.datastructures import Headers, FileStorage, MultiDict
 
 from . import _abc
 
@@ -175,8 +175,8 @@ def __init__(self,
         self.__route_params = types.MappingProxyType(route_params or {})
         self.__body_bytes = body
         self.__form_parsed = False
-        self.__form: MultiDict[str, str] = None
-        self.__files:MultiDict[str, FileStorage] = None
+        self.__form: MultiDict[str, str]
+        self.__files: MultiDict[str, FileStorage]
 
     @property
     def url(self):
@@ -231,7 +231,8 @@ def _parse_form_data(self):
         content_length = len(body)
         mimetype, options = _wk_http.parse_options_header(content_type)
         parser = _wk_parser.FormDataParser(
-            _wk_parser.default_stream_factory
+            _wk_parser.default_stream_factory, None, None,
+            werkzeug.datastructures.ImmutableMultiDict
         )
 
         body_stream = io.BytesIO(body)
diff --git a/setup.py b/setup.py
index 43d96c05..1e3af5b1 100644
--- a/setup.py
+++ b/setup.py
@@ -6,7 +6,12 @@
 from setuptools import find_packages, setup
 from azure.functions import __version__
 
-INSTALL_REQUIRES = ["werkzeug"]
+if sys.version_info[:2] >= (3, 9):
+    INSTALL_REQUIRES = ["werkzeug~=3.1.3"]
+elif sys.version_info[:2] == (3, 8):
+    INSTALL_REQUIRES = ["werkzeug~=3.0.6"]
+else:
+    INSTALL_REQUIRES = ["werkzeug"]
 
 EXTRA_REQUIRES = {
     'dev': [

From 5a3d7e66e7583d4d948817d54aa2b8c9fe7ca9b8 Mon Sep 17 00:00:00 2001
From: Gavin Aguiar <gaaguiar@microsoft.com>
Date: Mon, 18 Nov 2024 14:04:42 -0600
Subject: [PATCH 08/17] Adding werkzeug in tests pipelines

---
 eng/templates/ci-tests.yml      | 1 +
 eng/templates/jobs/ci-tests.yml | 1 +
 2 files changed, 2 insertions(+)

diff --git a/eng/templates/ci-tests.yml b/eng/templates/ci-tests.yml
index 883486ff..ec7de5e5 100644
--- a/eng/templates/ci-tests.yml
+++ b/eng/templates/ci-tests.yml
@@ -23,6 +23,7 @@ jobs:
       - bash: |
           python -m pip install --upgrade pip
           python -m pip install -U -e .[dev]
+          python -m pip install werkzeug
         displayName: 'Install dependencies'
       - bash: |
           python -m pytest --cache-clear --cov=./azure --cov-report=xml --cov-branch tests
diff --git a/eng/templates/jobs/ci-tests.yml b/eng/templates/jobs/ci-tests.yml
index 883486ff..ec7de5e5 100644
--- a/eng/templates/jobs/ci-tests.yml
+++ b/eng/templates/jobs/ci-tests.yml
@@ -23,6 +23,7 @@ jobs:
       - bash: |
           python -m pip install --upgrade pip
           python -m pip install -U -e .[dev]
+          python -m pip install werkzeug
         displayName: 'Install dependencies'
       - bash: |
           python -m pytest --cache-clear --cov=./azure --cov-report=xml --cov-branch tests

From f32b152daa29d93d6eeb0c37fbe7881afd5d4112 Mon Sep 17 00:00:00 2001
From: Gavin Aguiar <gaaguiar@microsoft.com>
Date: Tue, 19 Nov 2024 10:36:56 -0600
Subject: [PATCH 09/17] Updated tests pipeline

---
 eng/templates/ci-tests.yml      | 2 +-
 eng/templates/jobs/ci-tests.yml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/eng/templates/ci-tests.yml b/eng/templates/ci-tests.yml
index ec7de5e5..d632c85b 100644
--- a/eng/templates/ci-tests.yml
+++ b/eng/templates/ci-tests.yml
@@ -22,8 +22,8 @@ jobs:
           versionSpec: $(PYTHON_VERSION)
       - bash: |
           python -m pip install --upgrade pip
-          python -m pip install -U -e .[dev]
           python -m pip install werkzeug
+          python -m pip install -U -e .[dev]
         displayName: 'Install dependencies'
       - bash: |
           python -m pytest --cache-clear --cov=./azure --cov-report=xml --cov-branch tests
diff --git a/eng/templates/jobs/ci-tests.yml b/eng/templates/jobs/ci-tests.yml
index ec7de5e5..d632c85b 100644
--- a/eng/templates/jobs/ci-tests.yml
+++ b/eng/templates/jobs/ci-tests.yml
@@ -22,8 +22,8 @@ jobs:
           versionSpec: $(PYTHON_VERSION)
       - bash: |
           python -m pip install --upgrade pip
-          python -m pip install -U -e .[dev]
           python -m pip install werkzeug
+          python -m pip install -U -e .[dev]
         displayName: 'Install dependencies'
       - bash: |
           python -m pytest --cache-clear --cov=./azure --cov-report=xml --cov-branch tests

From 94d234c39718f4f13cede18fd3be6ee5c685cdb9 Mon Sep 17 00:00:00 2001
From: Gavin Aguiar <gaaguiar@microsoft.com>
Date: Tue, 19 Nov 2024 12:24:48 -0600
Subject: [PATCH 10/17] Added pyproject.toml

---
 pyproject.toml | 45 +++++++++++++++++++++++++++++++
 setup.py       | 73 --------------------------------------------------
 2 files changed, 45 insertions(+), 73 deletions(-)
 create mode 100644 pyproject.toml
 delete mode 100644 setup.py

diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..f42ce157
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,45 @@
+[build-system]
+requires = ["setuptools >= 61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "azure-functions"
+dynamic = ["version"]
+requires-python = ">=3.7"
+authors = [{ name = "Azure Functions team at Microsoft Corp.", email = "azurefunctions@microsoft.com" }]
+description = "Python library for Azure Functions."
+readme = "README.md"
+license = { text = "MIT License" }
+classifiers = [
+    'License :: OSI Approved :: MIT License',
+    'Intended Audience :: Developers',
+    'Programming Language :: Python :: 3',
+    'Programming Language :: Python :: 3.9',
+    'Programming Language :: Python :: 3.10',
+    'Programming Language :: Python :: 3.11',
+    'Operating System :: Microsoft :: Windows',
+    'Operating System :: POSIX',
+    'Operating System :: MacOS :: MacOS X',
+    'Environment :: Web Environment',
+    'Development Status :: 5 - Production/Stable',
+]
+dependencies = [
+    'werkzeug~=3.1.3; python_version >= "3.9"',
+    'werkzeug~=3.0.6; python_version == "3.8"',
+    'werkzeug; python_version < "3.8"'
+]
+[project.optional-dependencies]
+dev = [
+    'pytest',
+    'pytest-cov',
+    'coverage',
+    'pytest-instafail',
+    'pre-commit',
+    'flake8~=4.0.1; python_version < "3.11"',
+    'flake8~=7.1.1; python_version >= "3.11"'
+]
+
+[tool.setuptools.packages.find]
+exclude = [
+    'azure', 'eng', 'docs', 'tests'
+]
diff --git a/setup.py b/setup.py
deleted file mode 100644
index 1e3af5b1..00000000
--- a/setup.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License.
-
-import sys
-
-from setuptools import find_packages, setup
-from azure.functions import __version__
-
-if sys.version_info[:2] >= (3, 9):
-    INSTALL_REQUIRES = ["werkzeug~=3.1.3"]
-elif sys.version_info[:2] == (3, 8):
-    INSTALL_REQUIRES = ["werkzeug~=3.0.6"]
-else:
-    INSTALL_REQUIRES = ["werkzeug"]
-
-EXTRA_REQUIRES = {
-    'dev': [
-        'flake8-logging-format',
-        'mypy',
-        'pytest',
-        'pytest-cov',
-        'requests==2.*',
-        'coverage',
-        'azure-functions-durable'
-    ]
-}
-
-if sys.version_info[:2] <= (3, 11):
-    EXTRA_REQUIRES.get('dev').append(
-        "flake8~=4.0.1"
-    )
-else:
-    EXTRA_REQUIRES.get('dev').append(
-        "flake8~=7.1.1"
-    )
-
-with open("README.md") as readme:
-    long_description = readme.read()
-
-setup(
-    name='azure-functions',
-    version=__version__,
-    description='Azure Functions for Python',
-    long_description=long_description,
-    long_description_content_type='text/markdown',
-    author='Microsoft Corporation',
-    author_email='azpysdkhelp@microsoft.com',
-    classifiers=[
-        'License :: OSI Approved :: MIT License',
-        'Intended Audience :: Developers',
-        'Programming Language :: Python :: 3',
-        'Programming Language :: Python :: 3.6',
-        'Programming Language :: Python :: 3.7',
-        'Programming Language :: Python :: 3.8',
-        'Programming Language :: Python :: 3.9',
-        'Operating System :: Microsoft :: Windows',
-        'Operating System :: POSIX',
-        'Operating System :: MacOS :: MacOS X',
-        'Environment :: Web Environment',
-        'Development Status :: 5 - Production/Stable',
-    ],
-    license='MIT',
-    packages=find_packages(exclude=[
-        'azure', 'tests'
-    ]),
-    package_data={
-        'azure.functions': ['py.typed']
-    },
-    install_requires=INSTALL_REQUIRES,
-    extras_require=EXTRA_REQUIRES,
-    include_package_data=True,
-    test_suite='tests'
-)

From 770e477a5e435156a973dcd23edde27cf1cee269 Mon Sep 17 00:00:00 2001
From: Gavin Aguiar <gaaguiar@microsoft.com>
Date: Tue, 19 Nov 2024 14:58:28 -0600
Subject: [PATCH 11/17] Updated build files

---
 eng/templates/build.yml      | 4 ++--
 eng/templates/jobs/build.yml | 4 ++--
 pyproject.toml               | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/eng/templates/build.yml b/eng/templates/build.yml
index 47013de5..0897d4c8 100644
--- a/eng/templates/build.yml
+++ b/eng/templates/build.yml
@@ -18,6 +18,6 @@ jobs:
           python -m venv .env
           .env\Scripts\Activate.ps1
           python -m pip install -U pip
-          python -m pip install werkzeug twine wheel
-          python setup.py sdist bdist_wheel
+          python -m pip install build
+          python -m build
         displayName: 'Build Python SDK'
diff --git a/eng/templates/jobs/build.yml b/eng/templates/jobs/build.yml
index 54d71e92..cdfba871 100644
--- a/eng/templates/jobs/build.yml
+++ b/eng/templates/jobs/build.yml
@@ -13,6 +13,6 @@ jobs:
           python -m venv .env
           .env\Scripts\Activate.ps1
           python -m pip install -U pip
-          python -m pip install werkzeug twine wheel
-          python setup.py sdist bdist_wheel
+          python -m pip install build
+          python -m build
         displayName: 'Build Python SDK'
diff --git a/pyproject.toml b/pyproject.toml
index f42ce157..c5069f51 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,5 +1,5 @@
 [build-system]
-requires = ["setuptools >= 61.0"]
+requires = ["setuptools >= 61.0", "wheel", "build"]
 build-backend = "setuptools.build_meta"
 
 [project]

From bc464b02dab650df570c2032c97a0ecc68acf673 Mon Sep 17 00:00:00 2001
From: Gavin Aguiar <gaaguiar@microsoft.com>
Date: Tue, 19 Nov 2024 15:09:01 -0600
Subject: [PATCH 12/17] Added missing dependency

---
 pyproject.toml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/pyproject.toml b/pyproject.toml
index c5069f51..afb4e812 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -35,8 +35,10 @@ dev = [
     'coverage',
     'pytest-instafail',
     'pre-commit',
+    'azure-functions-durable',
     'flake8~=4.0.1; python_version < "3.11"',
     'flake8~=7.1.1; python_version >= "3.11"'
+
 ]
 
 [tool.setuptools.packages.find]

From 5c97f0768165fc9849e04d2497367a5bf747d020 Mon Sep 17 00:00:00 2001
From: Gavin Aguiar <gaaguiar@microsoft.com>
Date: Tue, 19 Nov 2024 15:21:36 -0600
Subject: [PATCH 13/17] Updated version in pyproject

---
 pyproject.toml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/pyproject.toml b/pyproject.toml
index afb4e812..150ac917 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -45,3 +45,6 @@ dev = [
 exclude = [
     'azure', 'eng', 'docs', 'tests'
 ]
+
+[tool.setuptools.dynamic]
+version = {attr = "azure.functions.__version__"}

From 72f6ddd62bd0f2ffe5e086f452afa5baa4e8f8f4 Mon Sep 17 00:00:00 2001
From: Gavin Aguiar <gaaguiar@microsoft.com>
Date: Tue, 19 Nov 2024 15:50:15 -0600
Subject: [PATCH 14/17] Updated http_wsgi

---
 azure/functions/_http_wsgi.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/azure/functions/_http_wsgi.py b/azure/functions/_http_wsgi.py
index f3065004..f51317e1 100644
--- a/azure/functions/_http_wsgi.py
+++ b/azure/functions/_http_wsgi.py
@@ -1,9 +1,9 @@
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # Licensed under the MIT License.
-from typing import Dict, List, Optional, Any
 import logging
 from io import BytesIO, StringIO
 from os import linesep
+from typing import Dict, List, Optional, Any
 from urllib.parse import ParseResult, urlparse, unquote_to_bytes
 from wsgiref.headers import Headers
 
@@ -12,9 +12,7 @@
 
 
 def wsgi_encoding_dance(value):
-    if isinstance(value, str):
-        return value.encode("latin-1")
-    return value
+    return value.encode().decode("latin1")
 
 
 class WsgiRequest:

From dace0ca757d6d7edbddb5f8e9c3fe7f894d89e62 Mon Sep 17 00:00:00 2001
From: Gavin Aguiar <gaaguiar@microsoft.com>
Date: Tue, 19 Nov 2024 16:54:25 -0600
Subject: [PATCH 15/17] Fixed http test error

---
 azure/functions/_http.py        | 5 +++--
 eng/templates/ci-tests.yml      | 1 -
 eng/templates/jobs/ci-tests.yml | 1 -
 pyproject.toml                  | 1 -
 4 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/azure/functions/_http.py b/azure/functions/_http.py
index 49faea60..3f9f25a9 100644
--- a/azure/functions/_http.py
+++ b/azure/functions/_http.py
@@ -231,8 +231,9 @@ def _parse_form_data(self):
         content_length = len(body)
         mimetype, options = _wk_http.parse_options_header(content_type)
         parser = _wk_parser.FormDataParser(
-            _wk_parser.default_stream_factory, None, None,
-            werkzeug.datastructures.ImmutableMultiDict
+            _wk_parser.default_stream_factory, max_form_memory_size=None,
+            max_content_length=None,
+            cls=werkzeug.datastructures.ImmutableMultiDict
         )
 
         body_stream = io.BytesIO(body)
diff --git a/eng/templates/ci-tests.yml b/eng/templates/ci-tests.yml
index d632c85b..883486ff 100644
--- a/eng/templates/ci-tests.yml
+++ b/eng/templates/ci-tests.yml
@@ -22,7 +22,6 @@ jobs:
           versionSpec: $(PYTHON_VERSION)
       - bash: |
           python -m pip install --upgrade pip
-          python -m pip install werkzeug
           python -m pip install -U -e .[dev]
         displayName: 'Install dependencies'
       - bash: |
diff --git a/eng/templates/jobs/ci-tests.yml b/eng/templates/jobs/ci-tests.yml
index d632c85b..883486ff 100644
--- a/eng/templates/jobs/ci-tests.yml
+++ b/eng/templates/jobs/ci-tests.yml
@@ -22,7 +22,6 @@ jobs:
           versionSpec: $(PYTHON_VERSION)
       - bash: |
           python -m pip install --upgrade pip
-          python -m pip install werkzeug
           python -m pip install -U -e .[dev]
         displayName: 'Install dependencies'
       - bash: |
diff --git a/pyproject.toml b/pyproject.toml
index 150ac917..c2d95618 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -38,7 +38,6 @@ dev = [
     'azure-functions-durable',
     'flake8~=4.0.1; python_version < "3.11"',
     'flake8~=7.1.1; python_version >= "3.11"'
-
 ]
 
 [tool.setuptools.packages.find]

From 23c1824662e4c51a7bd6681f62a2520f7171b42e Mon Sep 17 00:00:00 2001
From: Gavin Aguiar <gaaguiar@microsoft.com>
Date: Wed, 20 Nov 2024 10:33:13 -0600
Subject: [PATCH 16/17] Minor fixes and refactoring

---
 azure/functions/_http.py                        | 15 +++------------
 eng/templates/build.yml                         |  2 --
 eng/templates/jobs/build.yml                    |  2 --
 eng/templates/official/jobs/build-artifacts.yml |  4 ++--
 pyproject.toml                                  |  2 ++
 5 files changed, 7 insertions(+), 18 deletions(-)

diff --git a/azure/functions/_http.py b/azure/functions/_http.py
index 3f9f25a9..4303fca3 100644
--- a/azure/functions/_http.py
+++ b/azure/functions/_http.py
@@ -8,10 +8,9 @@
 import types
 import typing
 
-import werkzeug.datastructures
 from werkzeug import formparser as _wk_parser
 from werkzeug import http as _wk_http
-from werkzeug.datastructures import Headers, FileStorage, MultiDict
+from werkzeug.datastructures import Headers, FileStorage, MultiDict, ImmutableMultiDict
 
 from . import _abc
 
@@ -217,15 +216,7 @@ def get_json(self) -> typing.Any:
     def _parse_form_data(self):
         if self.__form_parsed:
             return
-        """
-          stream_factory: TStreamFactory | None = None,
-        max_form_memory_size: int | None = None,
-        max_content_length: int | None = None,
-        cls: type[MultiDict[str, t.Any]] | None = None,
-        silent: bool = True,
-        *,
-        max_form_parts: int | None = None,
-        """
+
         body = self.get_body()
         content_type = self.headers.get('Content-Type', '')
         content_length = len(body)
@@ -233,7 +224,7 @@ def _parse_form_data(self):
         parser = _wk_parser.FormDataParser(
             _wk_parser.default_stream_factory, max_form_memory_size=None,
             max_content_length=None,
-            cls=werkzeug.datastructures.ImmutableMultiDict
+            cls=ImmutableMultiDict
         )
 
         body_stream = io.BytesIO(body)
diff --git a/eng/templates/build.yml b/eng/templates/build.yml
index 0897d4c8..55888017 100644
--- a/eng/templates/build.yml
+++ b/eng/templates/build.yml
@@ -15,8 +15,6 @@ jobs:
           python --version
         displayName: 'Check python version'
       - bash: |
-          python -m venv .env
-          .env\Scripts\Activate.ps1
           python -m pip install -U pip
           python -m pip install build
           python -m build
diff --git a/eng/templates/jobs/build.yml b/eng/templates/jobs/build.yml
index cdfba871..19b94dea 100644
--- a/eng/templates/jobs/build.yml
+++ b/eng/templates/jobs/build.yml
@@ -10,8 +10,6 @@ jobs:
           python --version
         displayName: 'Check python version'
       - bash: |
-          python -m venv .env
-          .env\Scripts\Activate.ps1
           python -m pip install -U pip
           python -m pip install build
           python -m build
diff --git a/eng/templates/official/jobs/build-artifacts.yml b/eng/templates/official/jobs/build-artifacts.yml
index bb2171e6..b7ff4ef9 100644
--- a/eng/templates/official/jobs/build-artifacts.yml
+++ b/eng/templates/official/jobs/build-artifacts.yml
@@ -23,6 +23,6 @@ jobs:
         displayName: 'Check python version'
       - bash: |
           python -m pip install -U pip
-          pip install twine wheel
-          python setup.py sdist bdist_wheel
+          python -m pip install build
+          python -m build
         displayName: 'Build Python SDK'
diff --git a/pyproject.toml b/pyproject.toml
index c2d95618..68187961 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -14,6 +14,8 @@ classifiers = [
     'License :: OSI Approved :: MIT License',
     'Intended Audience :: Developers',
     'Programming Language :: Python :: 3',
+    'Programming Language :: Python :: 3.7',
+    'Programming Language :: Python :: 3.8',
     'Programming Language :: Python :: 3.9',
     'Programming Language :: Python :: 3.10',
     'Programming Language :: Python :: 3.11',

From 9ccb3e71c8845c87f91d60c6f57dedfd4efc5c09 Mon Sep 17 00:00:00 2001
From: Gavin Aguiar <gaaguiar@microsoft.com>
Date: Thu, 21 Nov 2024 15:08:46 -0600
Subject: [PATCH 17/17] Flake8 fixes

---
 azure/functions/_http.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/azure/functions/_http.py b/azure/functions/_http.py
index 4303fca3..7e349b1b 100644
--- a/azure/functions/_http.py
+++ b/azure/functions/_http.py
@@ -10,7 +10,8 @@
 
 from werkzeug import formparser as _wk_parser
 from werkzeug import http as _wk_http
-from werkzeug.datastructures import Headers, FileStorage, MultiDict, ImmutableMultiDict
+from werkzeug.datastructures import (Headers, FileStorage, MultiDict,
+                                     ImmutableMultiDict)
 
 from . import _abc