[cleanup] Mark some compat variables for removal (#2173)

Authored by fstirlitz, pukkandan
pukkandan committed 2022-04-12 01:39:26 +05:30
parent cfb0511d82
commit f9934b9614
No known key found for this signature in database. GPG Key ID: 7EEE9E1E817D0A39
26 changed files with 134 additions and 199 deletions
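
The change itself is mechanical: every compat_* name that is a plain alias on Python 3 is replaced at its call sites by the standard-library object it aliases, and compat.py regroups what is left. A minimal sketch of the pattern (illustrative only, not a line from this diff):

    # Before: indirection through the compat layer
    from yt_dlp.compat import compat_str, compat_urllib_parse_unquote
    assert isinstance('x', compat_str)
    assert compat_urllib_parse_unquote('a%20b') == 'a b'

    # After: the stdlib objects the aliases point to on Python 3
    import urllib.parse
    assert isinstance('x', str)
    assert urllib.parse.unquote('a%20b') == 'a b'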

View File

@@ -13,14 +13,10 @@
 from yt_dlp.compat import (
     compat_getenv,
     compat_setenv,
-    compat_etree_Element,
     compat_etree_fromstring,
     compat_expanduser,
-    compat_shlex_split,
     compat_str,
     compat_struct_unpack,
-    compat_urllib_parse_quote,
-    compat_urllib_parse_quote_plus,
     compat_urllib_parse_unquote,
     compat_urllib_parse_unquote_plus,
     compat_urllib_parse_urlencode,
@@ -55,27 +51,6 @@ def test_all_present(self):
             dir(yt_dlp.compat))) - set(['unicode_literals'])
         self.assertEqual(all_names, sorted(present_names))
 
-    def test_compat_urllib_parse_quote(self):
-        self.assertEqual(compat_urllib_parse_quote('abc def'), 'abc%20def')
-        self.assertEqual(compat_urllib_parse_quote('/user/abc+def'), '/user/abc%2Bdef')
-        self.assertEqual(compat_urllib_parse_quote('/user/abc+def', safe='+'), '%2Fuser%2Fabc+def')
-        self.assertEqual(compat_urllib_parse_quote(''), '')
-        self.assertEqual(compat_urllib_parse_quote('%'), '%25')
-        self.assertEqual(compat_urllib_parse_quote('%', safe='%'), '%')
-        self.assertEqual(compat_urllib_parse_quote('津波'), '%E6%B4%A5%E6%B3%A2')
-        self.assertEqual(
-            compat_urllib_parse_quote('''<meta property="og:description" content="▁▂▃▄%▅▆▇█" />
-%<a href="https://ar.wikipedia.org/wiki/تسونامي">%a''', safe='<>=":%/ \r\n'),
-            '''<meta property="og:description" content="%E2%96%81%E2%96%82%E2%96%83%E2%96%84%%E2%96%85%E2%96%86%E2%96%87%E2%96%88" />
-%<a href="https://ar.wikipedia.org/wiki/%D8%AA%D8%B3%D9%88%D9%86%D8%A7%D9%85%D9%8A">%a''')
-        self.assertEqual(
-            compat_urllib_parse_quote('''(^◣_◢^)っ︻デ═一 ⇀ ⇀ ⇀ ⇀ ⇀ ↶%I%Break%25Things%''', safe='% '),
-            '''%28%5E%E2%97%A3_%E2%97%A2%5E%29%E3%81%A3%EF%B8%BB%E3%83%87%E2%95%90%E4%B8%80 %E2%87%80 %E2%87%80 %E2%87%80 %E2%87%80 %E2%87%80 %E2%86%B6%I%Break%25Things%''')
-
-    def test_compat_urllib_parse_quote_plus(self):
-        self.assertEqual(compat_urllib_parse_quote_plus('abc def'), 'abc+def')
-        self.assertEqual(compat_urllib_parse_quote_plus('/abc def'), '%2Fabc+def')
-
     def test_compat_urllib_parse_unquote(self):
         self.assertEqual(compat_urllib_parse_unquote('abc%20def'), 'abc def')
         self.assertEqual(compat_urllib_parse_unquote('%7e/abc+def'), '~/abc+def')
@@ -109,17 +84,6 @@ def test_compat_urllib_parse_urlencode(self):
         self.assertEqual(compat_urllib_parse_urlencode([(b'abc', 'def')]), 'abc=def')
         self.assertEqual(compat_urllib_parse_urlencode([(b'abc', b'def')]), 'abc=def')
 
-    def test_compat_shlex_split(self):
-        self.assertEqual(compat_shlex_split('-option "one two"'), ['-option', 'one two'])
-        self.assertEqual(compat_shlex_split('-option "one\ntwo" \n -flag'), ['-option', 'one\ntwo', '-flag'])
-        self.assertEqual(compat_shlex_split('-val 中文'), ['-val', '中文'])
-
-    def test_compat_etree_Element(self):
-        try:
-            compat_etree_Element.items
-        except AttributeError:
-            self.fail('compat_etree_Element is not a type')
-
     def test_compat_etree_fromstring(self):
         xml = '''
             <root foo="bar" spam="中文">
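
The deleted tests only exercised standard-library behaviour, so the same guarantees hold without the compat indirection (equivalent assertions, taken from the removed test bodies):

    import shlex
    import urllib.parse

    assert urllib.parse.quote('abc def') == 'abc%20def'        # space -> %20
    assert urllib.parse.quote_plus('abc def') == 'abc+def'     # space -> '+'
    assert shlex.split('-option "one two"') == ['-option', 'one two']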

View File

@@ -11,11 +11,12 @@
 import io
 import re
 import string
+import urllib.request
 
 from test.helper import FakeYDL, is_download_test
 from yt_dlp.extractor import YoutubeIE
 from yt_dlp.jsinterp import JSInterpreter
-from yt_dlp.compat import compat_str, compat_urlretrieve
+from yt_dlp.compat import compat_str
 
 _SIG_TESTS = [
     (
@@ -147,7 +148,7 @@ def test_func(self):
         fn = os.path.join(self.TESTDATA_DIR, basename)
         if not os.path.exists(fn):
-            compat_urlretrieve(url, fn)
+            urllib.request.urlretrieve(url, fn)
         with io.open(fn, encoding='utf-8') as testf:
             jscode = testf.read()
         self.assertEqual(sig_func(jscode, sig_input), expected_sig)
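
compat_urlretrieve was a direct alias of urllib.request.urlretrieve, so the call shape is unchanged (sketch; the URL and path here are placeholders):

    import urllib.request

    # Download the resource at the given URL to the given local file,
    # exactly as the old compat_urlretrieve(url, fn) did
    urllib.request.urlretrieve('https://example.com/player.js', '/tmp/player.js')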

View File

@@ -26,24 +26,20 @@
 import traceback
 import random
 import unicodedata
+import urllib.request
 
 from enum import Enum
 from string import ascii_letters
 
 from .compat import (
-    compat_basestring,
     compat_brotli,
     compat_get_terminal_size,
-    compat_kwargs,
-    compat_numeric_types,
     compat_os_name,
     compat_pycrypto_AES,
     compat_shlex_quote,
     compat_str,
-    compat_tokenize_tokenize,
     compat_urllib_error,
     compat_urllib_request,
-    compat_urllib_request_DataHandler,
     windows_enable_vt_mode,
 )
 from .cookies import load_cookies
@@ -682,7 +678,7 @@ def check_deprecated(param, option, suggestion):
             pp_def = dict(pp_def_raw)
             when = pp_def.pop('when', 'post_process')
             self.add_post_processor(
-                get_postprocessor(pp_def.pop('key'))(self, **compat_kwargs(pp_def)),
+                get_postprocessor(pp_def.pop('key'))(self, **pp_def),
                 when=when)
 
         self._setup_opener()
@@ -2244,7 +2240,7 @@ def final_selector(ctx):
         stream = io.BytesIO(format_spec.encode('utf-8'))
         try:
-            tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
+            tokens = list(_remove_unused_ops(tokenize.tokenize(stream.readline)))
         except tokenize.TokenError:
             raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
@@ -2406,7 +2402,7 @@ def sanitize_string_field(info, string_field):
         def sanitize_numeric_fields(info):
             for numeric_field in self._NUMERIC_FIELDS:
                 field = info.get(numeric_field)
-                if field is None or isinstance(field, compat_numeric_types):
+                if field is None or isinstance(field, (int, float)):
                     continue
                 report_force_conversion(numeric_field, 'numeric', 'int')
                 info[numeric_field] = int_or_none(field)
@@ -3589,7 +3585,7 @@ def list_subtitles(self, video_id, subtitles, name='subtitles'):
     def urlopen(self, req):
         """ Start an HTTP download """
-        if isinstance(req, compat_basestring):
+        if isinstance(req, str):
             req = sanitized_Request(req)
         return self._opener.open(req, timeout=self._socket_timeout)
@@ -3739,7 +3735,7 @@ def _setup_opener(self):
         https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
         ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
         redirect_handler = YoutubeDLRedirectHandler()
-        data_handler = compat_urllib_request_DataHandler()
+        data_handler = urllib.request.DataHandler()
 
         # When passing our own FileHandler instance, build_opener won't add the
         # default FileHandler and allows us to disable the file protocol, which
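
urllib.request.DataHandler (Python 3.4+) is what compat_urllib_request_DataHandler aliased; it serves data: URLs from within an opener. A standalone sketch, separate from yt-dlp's full opener setup:

    import urllib.request

    opener = urllib.request.build_opener(urllib.request.DataHandler())
    with opener.open('data:text/plain,hello') as resp:
        assert resp.read() == b'hello'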

View File

@@ -81,10 +81,6 @@ def compat_realpath(path):
     compat_realpath = os.path.realpath
 
 
-def compat_print(s):
-    assert isinstance(s, compat_str)
-    print(s)
-
-
 try:
     compat_Pattern = re.Pattern
 except AttributeError:
@@ -173,62 +169,65 @@ def windows_enable_vt_mode():  # TODO: Do this the proper way https://bugs.pytho
 
 # Deprecated
 
-compat_basestring = str
+compat_b64decode = base64.b64decode
 compat_chr = chr
+compat_cookiejar = http.cookiejar
+compat_cookiejar_Cookie = http.cookiejar.Cookie
+compat_cookies_SimpleCookie = http.cookies.SimpleCookie
+compat_get_terminal_size = shutil.get_terminal_size
+compat_getenv = os.getenv
+compat_getpass = getpass.getpass
+compat_html_entities = html.entities
+compat_html_entities_html5 = html.entities.html5
+compat_HTMLParser = html.parser.HTMLParser
+compat_http_client = http.client
+compat_http_server = http.server
+compat_HTTPError = urllib.error.HTTPError
+compat_itertools_count = itertools.count
+compat_parse_qs = urllib.parse.parse_qs
+compat_str = str
+compat_struct_pack = struct.pack
+compat_struct_unpack = struct.unpack
+compat_tokenize_tokenize = tokenize.tokenize
+compat_urllib_error = urllib.error
+compat_urllib_parse_unquote = urllib.parse.unquote
+compat_urllib_parse_unquote_plus = urllib.parse.unquote_plus
+compat_urllib_parse_urlencode = urllib.parse.urlencode
+compat_urllib_parse_urlparse = urllib.parse.urlparse
+compat_urllib_request = urllib.request
+compat_urlparse = compat_urllib_parse = urllib.parse
+
+
+# To be removed
+
+compat_basestring = str
+compat_collections_abc = collections.abc
+compat_cookies = http.cookies
+compat_etree_Element = etree.Element
+compat_etree_register_namespace = etree.register_namespace
 compat_filter = filter
 compat_input = input
 compat_integer_types = (int, )
 compat_kwargs = lambda kwargs: kwargs
 compat_map = map
 compat_numeric_types = (int, float, complex)
-compat_str = str
+compat_print = print
+compat_shlex_split = shlex.split
+compat_socket_create_connection = socket.create_connection
+compat_Struct = struct.Struct
+compat_subprocess_get_DEVNULL = lambda: DEVNULL
+compat_urllib_parse_quote = urllib.parse.quote
+compat_urllib_parse_quote_plus = urllib.parse.quote_plus
+compat_urllib_parse_unquote_to_bytes = urllib.parse.unquote_to_bytes
+compat_urllib_parse_urlunparse = urllib.parse.urlunparse
+compat_urllib_request_DataHandler = urllib.request.DataHandler
+compat_urllib_response = urllib.response
+compat_urlretrieve = urllib.request.urlretrieve
+compat_xml_parse_error = etree.ParseError
 compat_xpath = lambda xpath: xpath
 compat_zip = zip
 workaround_optparse_bug9161 = lambda: None
 
-compat_collections_abc = collections.abc
-compat_HTMLParser = html.parser.HTMLParser
-compat_HTTPError = urllib.error.HTTPError
-compat_Struct = struct.Struct
-compat_b64decode = base64.b64decode
-compat_cookiejar = http.cookiejar
-compat_cookiejar_Cookie = compat_cookiejar.Cookie
-compat_cookies = http.cookies
-compat_cookies_SimpleCookie = compat_cookies.SimpleCookie
-compat_etree_Element = etree.Element
-compat_etree_register_namespace = etree.register_namespace
-compat_get_terminal_size = shutil.get_terminal_size
-compat_getenv = os.getenv
-compat_getpass = getpass.getpass
-compat_html_entities = html.entities
-compat_html_entities_html5 = compat_html_entities.html5
-compat_http_client = http.client
-compat_http_server = http.server
-compat_itertools_count = itertools.count
-compat_parse_qs = urllib.parse.parse_qs
-compat_shlex_split = shlex.split
-compat_socket_create_connection = socket.create_connection
-compat_struct_pack = struct.pack
-compat_struct_unpack = struct.unpack
-compat_subprocess_get_DEVNULL = lambda: DEVNULL
-compat_tokenize_tokenize = tokenize.tokenize
-compat_urllib_error = urllib.error
-compat_urllib_parse = urllib.parse
-compat_urllib_parse_quote = urllib.parse.quote
-compat_urllib_parse_quote_plus = urllib.parse.quote_plus
-compat_urllib_parse_unquote = urllib.parse.unquote
-compat_urllib_parse_unquote_plus = urllib.parse.unquote_plus
-compat_urllib_parse_unquote_to_bytes = urllib.parse.unquote_to_bytes
-compat_urllib_parse_urlencode = urllib.parse.urlencode
-compat_urllib_parse_urlparse = urllib.parse.urlparse
-compat_urllib_parse_urlunparse = urllib.parse.urlunparse
-compat_urllib_request = urllib.request
-compat_urllib_request_DataHandler = urllib.request.DataHandler
-compat_urllib_response = urllib.response
-compat_urlparse = urllib.parse
-compat_urlretrieve = urllib.request.urlretrieve
-compat_xml_parse_error = etree.ParseError
 
 
 # Set public objects
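
The regrouping splits the legacy names into two tiers: "# Deprecated" aliases that may still be imported for now, and "# To be removed" names with no remaining in-tree users after this commit. Code that still relies on the second tier migrates by using the definition directly, e.g. (hypothetical caller):

    # compat_shlex_split was defined as shlex.split, so
    from yt_dlp.compat import compat_shlex_split
    args = compat_shlex_split('-f "bv+ba" --no-mtime')

    # becomes simply
    import shlex
    args = shlex.split('-f "bv+ba" --no-mtime')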

View File

@@ -3,25 +3,25 @@
 import time
 import binascii
 import io
+import struct
 
 from .fragment import FragmentFD
 from ..compat import (
-    compat_Struct,
     compat_urllib_error,
 )
 
-u8 = compat_Struct('>B')
-u88 = compat_Struct('>Bx')
-u16 = compat_Struct('>H')
-u1616 = compat_Struct('>Hxx')
-u32 = compat_Struct('>I')
-u64 = compat_Struct('>Q')
+u8 = struct.Struct('>B')
+u88 = struct.Struct('>Bx')
+u16 = struct.Struct('>H')
+u1616 = struct.Struct('>Hxx')
+u32 = struct.Struct('>I')
+u64 = struct.Struct('>Q')
 
-s88 = compat_Struct('>bx')
-s16 = compat_Struct('>h')
-s1616 = compat_Struct('>hxx')
-s32 = compat_Struct('>i')
+s88 = struct.Struct('>bx')
+s16 = struct.Struct('>h')
+s1616 = struct.Struct('>hxx')
+s32 = struct.Struct('>i')
 
 unity_matrix = (s32.pack(0x10000) + s32.pack(0) * 3) * 2 + s32.pack(0x40000000)
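
compat_Struct was an alias of struct.Struct, so the pre-compiled big-endian packers behave identically; for instance (sketch):

    import struct

    u16 = struct.Struct('>H')  # big-endian unsigned 16-bit
    assert u16.pack(0x1234) == b'\x12\x34'
    assert u16.unpack(b'\x12\x34') == (0x1234,)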

View File

@@ -5,13 +5,14 @@
 import hmac
 import re
 import struct
+import urllib.response
+import uuid
 
 from base64 import urlsafe_b64encode
 from binascii import unhexlify
 
 from .common import InfoExtractor
 from ..aes import aes_ecb_decrypt
 from ..compat import (
-    compat_urllib_response,
     compat_urllib_parse_urlparse,
     compat_urllib_request,
 )
@@ -19,7 +20,6 @@
     ExtractorError,
     decode_base,
     int_or_none,
-    random_uuidv4,
     request_to_url,
     time_seconds,
     update_url_query,
@@ -141,7 +141,7 @@ def abematv_license_open(self, url):
         url = request_to_url(url)
         ticket = compat_urllib_parse_urlparse(url).netloc
         response_data = self._get_videokey_from_ticket(ticket)
-        return compat_urllib_response.addinfourl(io.BytesIO(response_data), headers={
+        return urllib.response.addinfourl(io.BytesIO(response_data), headers={
             'Content-Length': len(response_data),
         }, url=url, code=200)
@@ -253,7 +253,7 @@ def _get_device_token(self):
         if self._USERTOKEN:
             return self._USERTOKEN
 
-        self._DEVICE_ID = random_uuidv4()
+        self._DEVICE_ID = str(uuid.uuid4())
        aks = self._generate_aks(self._DEVICE_ID)
         user_data = self._download_json(
             'https://api.abema.io/v1/users', None, note='Authorizing',
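
Both substitutions here are one-for-one: random_uuidv4() produced a v4-style UUID string, now generated with the stdlib as str(uuid.uuid4()); and compat_urllib_response.addinfourl is urllib.response.addinfourl, which wraps raw bytes as a file-like HTTP response for the custom license handler. A sketch under those assumptions (payload and URL are placeholders):

    import io
    import urllib.response
    import uuid

    device_id = str(uuid.uuid4())  # random UUID rendered as a string

    payload = b'decrypted-license-bytes'
    resp = urllib.response.addinfourl(
        io.BytesIO(payload), headers={'Content-Length': len(payload)},
        url='abematv-license://ticket', code=200)
    assert resp.read() == payload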

View File

@@ -8,7 +8,6 @@
 from .common import InfoExtractor
 from ..compat import (
-    compat_kwargs,
     compat_urlparse,
     compat_getpass
 )
@@ -1365,7 +1364,7 @@ def _download_webpage_handle(self, *args, **kwargs):
         headers.update(kwargs.get('headers', {}))
         kwargs['headers'] = headers
         return super(AdobePassIE, self)._download_webpage_handle(
-            *args, **compat_kwargs(kwargs))
+            *args, **kwargs)
 
     @staticmethod
     def _get_mvpd_resource(provider_id, title, guid, rating):

View File

@@ -5,7 +5,6 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_xpath
 from ..utils import (
     ExtractorError,
     OnDemandPagedList,
@@ -282,7 +281,7 @@ def _real_extract(self, url):
             else:
                 raise ExtractorError('Unable to download video info')
 
-        video_element = video_xml.findall(compat_xpath('./track/video'))[-1]
+        video_element = video_xml.findall('./track/video')[-1]
         if video_element is None or video_element.text is None:
             raise ExtractorError(
                 'Video %s does not exist' % video_id, expected=True)
@@ -312,7 +311,7 @@ def _real_extract(self, url):
         if not video_url:
             entries = []
 
-            file_elements = video_element.findall(compat_xpath('./file'))
+            file_elements = video_element.findall('./file')
             one = len(file_elements) == 1
             for file_num, file_element in enumerate(file_elements, start=1):
                 file_url = url_or_none(file_element.text)

View File

@@ -1,6 +1,7 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import xml.etree.ElementTree
 import functools
 import itertools
 import json
@@ -8,7 +9,6 @@
 from .common import InfoExtractor
 from ..compat import (
-    compat_etree_Element,
     compat_HTTPError,
     compat_str,
     compat_urllib_error,
@@ -318,7 +318,7 @@ def _get_subtitles(self, media, programme_id):
                 continue
             captions = self._download_xml(
                 cc_url, programme_id, 'Downloading captions', fatal=False)
-            if not isinstance(captions, compat_etree_Element):
+            if not isinstance(captions, xml.etree.ElementTree.Element):
                 continue
             subtitles['en'] = [
                 {

View File

@@ -4,6 +4,7 @@
 import base64
 import re
 import struct
+import xml.etree.ElementTree
 
 from .adobepass import AdobePassIE
 from .common import InfoExtractor
@@ -12,7 +13,6 @@
     compat_HTTPError,
     compat_parse_qs,
     compat_urlparse,
-    compat_xml_parse_error,
 )
 from ..utils import (
     clean_html,
@@ -166,7 +166,7 @@ def _build_brightcove_url(cls, object_str):
         try:
             object_doc = compat_etree_fromstring(object_str.encode('utf-8'))
-        except compat_xml_parse_error:
+        except xml.etree.ElementTree.ParseError:
             return
 
         fv_el = find_xpath_attr(object_doc, './param', 'name', 'flashVars')

View File

@@ -3,6 +3,7 @@
 
 import base64
 import collections
+import xml.etree.ElementTree
 import hashlib
 import itertools
 import json
@@ -17,7 +18,6 @@
 from ..compat import (
     compat_cookiejar_Cookie,
     compat_cookies_SimpleCookie,
-    compat_etree_Element,
     compat_etree_fromstring,
     compat_expanduser,
     compat_getpass,
@@ -30,7 +30,6 @@
     compat_urllib_parse_urlencode,
     compat_urllib_request,
     compat_urlparse,
-    compat_xml_parse_error,
 )
 from ..downloader import FileDownloader
 from ..downloader.f4m import (
@@ -951,7 +950,7 @@ def _download_xml_handle(
             fatal=True, encoding=None, data=None, headers={}, query={},
             expected_status=None):
         """
-        Return a tuple (xml as an compat_etree_Element, URL handle).
+        Return a tuple (xml as an xml.etree.ElementTree.Element, URL handle).
 
         See _download_webpage docstring for arguments specification.
         """
@@ -972,7 +971,7 @@ def _download_xml(
             transform_source=None, fatal=True, encoding=None,
             data=None, headers={}, query={}, expected_status=None):
         """
-        Return the xml as an compat_etree_Element.
+        Return the xml as an xml.etree.ElementTree.Element.
 
         See _download_webpage docstring for arguments specification.
         """
@@ -988,7 +987,7 @@ def _parse_xml(self, xml_string, video_id, transform_source=None, fatal=True):
             xml_string = transform_source(xml_string)
         try:
             return compat_etree_fromstring(xml_string.encode('utf-8'))
-        except compat_xml_parse_error as ve:
+        except xml.etree.ElementTree.ParseError as ve:
             errmsg = '%s: Failed to parse XML ' % video_id
             if fatal:
                 raise ExtractorError(errmsg, cause=ve)
@@ -2008,7 +2007,7 @@ def _extract_f4m_formats(self, manifest_url, video_id, preference=None, quality=
     def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, quality=None, f4m_id=None,
                            transform_source=lambda s: fix_xml_ampersands(s).strip(),
                            fatal=True, m3u8_id=None):
-        if not isinstance(manifest, compat_etree_Element) and not fatal:
+        if not isinstance(manifest, xml.etree.ElementTree.Element) and not fatal:
             return []
 
         # currently yt-dlp cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
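
compat_etree_Element existed because on old Pythons xml.etree.ElementTree.Element was a factory function rather than a class, which broke isinstance checks; on the Python 3 versions yt-dlp supports it is a real type and can be used directly (sketch):

    import xml.etree.ElementTree

    doc = xml.etree.ElementTree.fromstring('<root foo="bar"/>')
    assert isinstance(doc, xml.etree.ElementTree.Element)
    assert doc.get('foo') == 'bar'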

View File

@@ -6,13 +6,13 @@
 import json
 import zlib
+import xml.etree.ElementTree
 
 from hashlib import sha1
 from math import pow, sqrt, floor
 from .common import InfoExtractor
 from .vrv import VRVBaseIE
 from ..compat import (
     compat_b64decode,
-    compat_etree_Element,
     compat_etree_fromstring,
     compat_str,
     compat_urllib_parse_urlencode,
@@ -395,7 +395,7 @@ def _get_subtitles(self, video_id, webpage):
                 'Downloading subtitles for ' + sub_name, data={
                     'subtitle_script_id': sub_id,
                 })
-            if not isinstance(sub_doc, compat_etree_Element):
+            if not isinstance(sub_doc, xml.etree.ElementTree.Element):
                 continue
             sid = sub_doc.get('id')
             iv = xpath_text(sub_doc, 'iv', 'subtitle iv')
@@ -525,7 +525,7 @@ def _real_extract(self, url):
                     'video_quality': stream_quality,
                     'current_page': url,
                 })
-            if isinstance(streamdata, compat_etree_Element):
+            if isinstance(streamdata, xml.etree.ElementTree.Element):
                 stream_info = streamdata.find('./{default}preload/stream_info')
                 if stream_info is not None:
                     stream_infos.append(stream_info)
@@ -536,7 +536,7 @@ def _real_extract(self, url):
                     'video_format': stream_format,
                     'video_encode_quality': stream_quality,
                 })
-            if isinstance(stream_info, compat_etree_Element):
+            if isinstance(stream_info, xml.etree.ElementTree.Element):
                 stream_infos.append(stream_info)
             for stream_info in stream_infos:
                 video_encode_id = xpath_text(stream_info, './video_encode_id')
@@ -611,7 +611,7 @@ def _real_extract(self, url):
         season = episode = episode_number = duration = None
 
-        if isinstance(metadata, compat_etree_Element):
+        if isinstance(metadata, xml.etree.ElementTree.Element):
             season = xpath_text(metadata, 'series_title')
             episode = xpath_text(metadata, 'episode_title')
             episode_number = int_or_none(xpath_text(metadata, 'episode_number'))

View File

@@ -4,6 +4,7 @@
 
 import os
 import re
+import xml.etree.ElementTree
 
 from .common import InfoExtractor
 from .youtube import YoutubeIE
@@ -12,7 +13,6 @@
     compat_str,
     compat_urllib_parse_unquote,
     compat_urlparse,
-    compat_xml_parse_error,
 )
 from ..utils import (
     determine_ext,
@@ -2827,7 +2827,7 @@ def _real_extract(self, url):
         try:
             try:
                 doc = compat_etree_fromstring(webpage)
-            except compat_xml_parse_error:
+            except xml.etree.ElementTree.ParseError:
                 doc = compat_etree_fromstring(webpage.encode('utf-8'))
             if doc.tag == 'rss':
                 self.report_detected('RSS feed')
@@ -2862,7 +2862,7 @@ def _real_extract(self, url):
                 self.report_detected('F4M manifest')
                 self._sort_formats(info_dict['formats'])
                 return info_dict
-        except compat_xml_parse_error:
+        except xml.etree.ElementTree.ParseError:
             pass
 
     # Is it a Camtasia project?
# Is it a Camtasia project? # Is it a Camtasia project?

View File

@@ -3,9 +3,6 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_xpath,
-)
 from ..utils import (
     int_or_none,
     parse_duration,
@@ -70,9 +67,9 @@ def _real_extract(self, url):
 
         formats = []
 
-        for sources in settings.findall(compat_xpath('.//MediaSources')):
+        for sources in settings.findall('.//MediaSources'):
             sources_type = sources.get('videoType')
-            for source in sources.findall(compat_xpath('./MediaSource')):
+            for source in sources.findall('./MediaSource'):
                 video_url = source.text
                 if not video_url or not video_url.startswith('http'):
                     continue
@@ -101,7 +98,7 @@ def _real_extract(self, url):
         self._sort_formats(formats)
 
         subtitles = {}
-        for source in settings.findall(compat_xpath('.//MarkerResourceSource')):
+        for source in settings.findall('.//MarkerResourceSource'):
             subtitle_url = source.text
             if not subtitle_url:
                 continue

View File

@@ -3,6 +3,7 @@
 
 import functools
 import json
+import uuid
 
 from .common import InfoExtractor
 from ..utils import (
@@ -11,7 +12,6 @@
     ExtractorError,
     float_or_none,
     OnDemandPagedList,
-    random_uuidv4,
     traverse_obj,
 )
 
@@ -21,7 +21,7 @@ class MildomBaseIE(InfoExtractor):
     def _call_api(self, url, video_id, query=None, note='Downloading JSON metadata', body=None):
         if not self._GUEST_ID:
-            self._GUEST_ID = f'pc-gp-{random_uuidv4()}'
+            self._GUEST_ID = f'pc-gp-{str(uuid.uuid4())}'
 
         content = self._download_json(
             url, video_id, note=note, data=json.dumps(body).encode() if body else None,

View File

@@ -9,7 +9,6 @@
     compat_ord,
     compat_str,
     compat_urllib_parse_unquote,
-    compat_zip
 )
 from ..utils import (
     ExtractorError,
@@ -76,7 +75,7 @@ def _decrypt_xor_cipher(key, ciphertext):
         """Encrypt/Decrypt XOR cipher. Both ways are possible because it's XOR."""
         return ''.join([
             compat_chr(compat_ord(ch) ^ compat_ord(k))
-            for ch, k in compat_zip(ciphertext, itertools.cycle(key))])
+            for ch, k in zip(ciphertext, itertools.cycle(key))])
 
     def _real_extract(self, url):
         username, slug = self._match_valid_url(url).groups()
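
zip is a drop-in replacement for compat_zip, and itertools.cycle repeats the key to the length of the ciphertext, which is what makes the XOR cipher its own inverse. A self-contained version of the idea (helper name simplified from the extractor's _decrypt_xor_cipher):

    import itertools

    def xor_cipher(key, text):
        # XOR each character with the repeating key; applying the
        # same function twice yields the original input
        return ''.join(
            chr(ord(ch) ^ ord(k))
            for ch, k in zip(text, itertools.cycle(key)))

    assert xor_cipher('key', xor_cipher('key', 'secret url')) == 'secret url'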

View File

@@ -6,7 +6,6 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_xpath,
 )
 from ..utils import (
     ExtractorError,
@@ -167,9 +166,9 @@ def _get_video_info(self, itemdoc, use_hls=True):
             itemdoc, './/{http://search.yahoo.com/mrss/}category',
             'scheme', 'urn:mtvn:video_title')
         if title_el is None:
-            title_el = itemdoc.find(compat_xpath('.//{http://search.yahoo.com/mrss/}title'))
+            title_el = itemdoc.find('.//{http://search.yahoo.com/mrss/}title')
         if title_el is None:
-            title_el = itemdoc.find(compat_xpath('.//title'))
+            title_el = itemdoc.find('.//title')
             if title_el.text is None:
                 title_el = None

View File

@@ -4,7 +4,6 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_urllib_parse_unquote,
-    compat_xpath,
 )
 from ..utils import (
     int_or_none,
@@ -50,7 +49,7 @@ def _real_extract(self, url):
         duration = int_or_none(xpath_text(
             doc, './/article/movie/file/duration'))
         formats = []
-        for qnode in doc.findall(compat_xpath('.//article/movie/file/qualities/qual')):
+        for qnode in doc.findall('.//article/movie/file/qualities/qual'):
             http_url_ele = find_xpath_attr(
                 qnode, './html_urls/video_url', 'format', 'video/mp4')
             http_url = http_url_ele.text if http_url_ele is not None else None

View File

@@ -8,7 +8,6 @@
 from ..compat import (
     compat_urlparse,
-    compat_kwargs,
 )
 from ..utils import (
     check_executable,
@@ -158,7 +157,7 @@ def _load_cookies(self):
                 cookie['rest'] = {'httpOnly': None}
             if 'expiry' in cookie:
                 cookie['expire_time'] = cookie['expiry']
-            self.extractor._set_cookie(**compat_kwargs(cookie))
+            self.extractor._set_cookie(**cookie)
 
     def get(self, url, html=None, video_id=None, note=None, note2='Executing JS on webpage', headers={}, jscode='saveAndExit();'):
         """
""" """

View File

@@ -12,7 +12,6 @@
 )
 from ..compat import (
     compat_HTTPError,
-    compat_kwargs,
     compat_str,
 )
 from ..utils import (
@@ -96,7 +95,7 @@ def _download_json(self, *args, **kwargs):
             query['client_id'] = self._CLIENT_ID
             kwargs['query'] = query
             try:
-                return super()._download_json(*args, **compat_kwargs(kwargs))
+                return super()._download_json(*args, **kwargs)
             except ExtractorError as e:
                 if isinstance(e.cause, compat_HTTPError) and e.cause.code in (401, 403):
                     self._store_client_id(None)

View File

@@ -5,7 +5,6 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_HTTPError,
-    compat_kwargs,
     compat_str,
     compat_urllib_request,
     compat_urlparse,
@@ -132,7 +131,7 @@ def _download_webpage_handle(self, *args, **kwargs):
             headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36'
             kwargs['headers'] = headers
             ret = super(UdemyIE, self)._download_webpage_handle(
-                *args, **compat_kwargs(kwargs))
+                *args, **kwargs)
             if not ret:
                 return ret
             webpage, _ = ret

View File

@@ -8,7 +8,6 @@
 from .common import InfoExtractor
 from ..compat import (
-    compat_kwargs,
     compat_HTTPError,
     compat_str,
     compat_urlparse,
@@ -109,7 +108,7 @@ def _extract_xsrft_and_vuid(self, webpage):
     def _extract_vimeo_config(self, webpage, video_id, *args, **kwargs):
         vimeo_config = self._search_regex(
             r'vimeo\.config\s*=\s*(?:({.+?})|_extend\([^,]+,\s+({.+?})\));',
-            webpage, 'vimeo config', *args, **compat_kwargs(kwargs))
+            webpage, 'vimeo config', *args, **kwargs)
         if vimeo_config:
             return self._parse_json(vimeo_config, video_id)

View File

@@ -3,14 +3,13 @@
 import os.path
 import optparse
 import re
+import shlex
 import sys
 
 from .compat import (
     compat_expanduser,
     compat_get_terminal_size,
     compat_getenv,
-    compat_kwargs,
-    compat_shlex_split,
 )
 from .utils import (
     Config,
@@ -223,14 +222,12 @@ def _dict_from_options_callback(
     fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
     fmt.format_option_strings = _format_option_string
 
-    kw = {
-        'version': __version__,
-        'formatter': fmt,
-        'usage': '%prog [OPTIONS] URL [URL...]',
-        'conflict_handler': 'resolve',
-    }
-
-    parser = _YoutubeDLOptionParser(**compat_kwargs(kw))
+    parser = _YoutubeDLOptionParser(
+        version=__version__,
+        formatter=fmt,
+        usage='%prog [OPTIONS] URL [URL...]',
+        conflict_handler='resolve'
+    )
 
     general = optparse.OptionGroup(parser, 'General Options')
     general.add_option(
@@ -833,7 +830,7 @@ def _dict_from_options_callback(
         callback_kwargs={
             'allowed_keys': r'ffmpeg_[io]\d*|%s' % '|'.join(map(re.escape, list_external_downloaders())),
             'default_key': 'default',
-            'process': compat_shlex_split
+            'process': shlex.split
         }, help=(
             'Give these arguments to the external downloader. '
             'Specify the downloader name and the arguments separated by a colon ":". '
@@ -1339,7 +1336,7 @@ def _dict_from_options_callback(
         callback_kwargs={
             'allowed_keys': r'\w+(?:\+\w+)?',
             'default_key': 'default-compat',
-            'process': compat_shlex_split,
+            'process': shlex.split,
             'multiple_keys': False
         }, help=(
             'Give these arguments to the postprocessors. '
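
compat_kwargs dates from Python 2, where ** expansion could require byte-string keyword names; on Python 3 it was the identity function (see compat.py above), so the temporary kw dict adds nothing and the keywords are passed inline. The equivalence, sketched with optparse directly:

    import optparse

    # parser = OptionParser(**compat_kwargs(kw)) with kw built by hand
    # is the same as passing the keywords directly:
    parser = optparse.OptionParser(
        usage='%prog [OPTIONS] URL [URL...]',
        conflict_handler='resolve')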

View File

@@ -1,9 +1,9 @@
 from __future__ import unicode_literals
 
 import os
+import shlex
 import subprocess
 
 from .common import PostProcessor
-from ..compat import compat_shlex_split
 from ..utils import (
     check_executable,
     cli_option,
@@ -79,7 +79,7 @@ def run(self, information):
             if not self.cutout:
                 cmd += ['-chapter']
             cmd += cli_option(self._downloader.params, '-proxy', 'proxy')
-            cmd += compat_shlex_split(self.args)  # For backward compatibility
+            cmd += shlex.split(self.args)  # For backward compatibility
             cmd += self._configuration_args(self._exe_name, use_compat=False)
             cmd += ['--', information['id'], filename, temp_filename]
             cmd = [encodeArgument(i) for i in cmd]

View File

@@ -41,12 +41,13 @@
 import xml.etree.ElementTree
 import zlib
 import mimetypes
+import urllib.parse
+import shlex
 
 from .compat import (
     compat_HTMLParseError,
     compat_HTMLParser,
     compat_HTTPError,
-    compat_basestring,
     compat_brotli,
     compat_chr,
     compat_cookiejar,
@@ -55,28 +56,19 @@
     compat_html_entities,
     compat_html_entities_html5,
     compat_http_client,
-    compat_integer_types,
-    compat_numeric_types,
-    compat_kwargs,
     compat_os_name,
     compat_parse_qs,
-    compat_shlex_split,
     compat_shlex_quote,
     compat_str,
     compat_struct_pack,
     compat_struct_unpack,
     compat_urllib_error,
-    compat_urllib_parse,
     compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
-    compat_urllib_parse_urlunparse,
-    compat_urllib_parse_quote,
-    compat_urllib_parse_quote_plus,
     compat_urllib_parse_unquote_plus,
     compat_urllib_request,
     compat_urlparse,
     compat_websockets,
-    compat_xpath,
 )
 
 from .socks import (
@@ -340,7 +332,7 @@ def xpath_with_ns(path, ns_map):
 def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
     def _find_xpath(xpath):
-        return node.find(compat_xpath(xpath))
+        return node.find(xpath)
 
     if isinstance(xpath, (str, compat_str)):
         n = _find_xpath(xpath)
@@ -1193,7 +1185,7 @@ class XAttrUnavailableError(YoutubeDLError):
 def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
-    hc = http_class(*args, **compat_kwargs(kwargs))
+    hc = http_class(*args, **kwargs)
     source_address = ydl_handler._params.get('source_address')
 
     if source_address is not None:
@@ -2401,7 +2393,7 @@ def str_or_none(v, default=None):
 def str_to_int(int_str):
     """ A more relaxed version of int_or_none """
-    if isinstance(int_str, compat_integer_types):
+    if isinstance(int_str, int):
         return int_str
     elif isinstance(int_str, compat_str):
         int_str = re.sub(r'[,\.\+]', '', int_str)
@@ -2442,7 +2434,7 @@ def request_to_url(req):
 def strftime_or_none(timestamp, date_format, default=None):
     datetime_object = None
     try:
-        if isinstance(timestamp, compat_numeric_types):  # unix timestamp
+        if isinstance(timestamp, (int, float)):  # unix timestamp
             datetime_object = datetime.datetime.utcfromtimestamp(timestamp)
         elif isinstance(timestamp, compat_str):  # assume YYYYMMDD
             datetime_object = datetime.datetime.strptime(timestamp, '%Y%m%d')
@@ -2452,7 +2444,7 @@ def strftime_or_none(timestamp, date_format, default=None):
 def parse_duration(s):
-    if not isinstance(s, compat_basestring):
+    if not isinstance(s, str):
         return None
 
     s = s.strip()
     if not s:
@@ -2789,7 +2781,7 @@ def lowercase_escape(s):
 def escape_rfc3986(s):
     """Escape non-ASCII characters as suggested by RFC 3986"""
-    return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
+    return urllib.parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
 
 
 def escape_url(url):
@@ -2975,7 +2967,7 @@ def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
 def parse_age_limit(s):
     if type(s) == int:
         return s if 0 <= s <= 21 else None
-    if not isinstance(s, compat_basestring):
+    if not isinstance(s, str):
         return None
     m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
     if m:
@@ -3405,7 +3397,7 @@ def _match_one(filter_part, dct, incomplete):
         comparison_value = comparison_value.replace(r'\%s' % m['quote'], m['quote'])
     actual_value = dct.get(m['key'])
     numeric_comparison = None
-    if isinstance(actual_value, compat_numeric_types):
+    if isinstance(actual_value, (int, float)):
         # If the original field is a string and matching comparisonvalue is
         # a number we should respect the origin of the original field
         # and process comparison value as a string (see
@@ -4859,9 +4851,9 @@ def iri_to_uri(iri):
     net_location = ''
     if iri_parts.username:
-        net_location += compat_urllib_parse_quote(iri_parts.username, safe=r"!$%&'()*+,~")
+        net_location += urllib.parse.quote(iri_parts.username, safe=r"!$%&'()*+,~")
         if iri_parts.password is not None:
-            net_location += ':' + compat_urllib_parse_quote(iri_parts.password, safe=r"!$%&'()*+,~")
+            net_location += ':' + urllib.parse.quote(iri_parts.password, safe=r"!$%&'()*+,~")
         net_location += '@'
 
     net_location += iri_parts.hostname.encode('idna').decode('utf-8')  # Punycode for Unicode hostnames.
@@ -4869,19 +4861,19 @@ def iri_to_uri(iri):
     if iri_parts.port is not None and iri_parts.port != 80:
         net_location += ':' + str(iri_parts.port)
 
-    return compat_urllib_parse_urlunparse(
+    return urllib.parse.urlunparse(
         (iri_parts.scheme,
             net_location,
 
-            compat_urllib_parse_quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"),
+            urllib.parse.quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"),
 
             # Unsure about the `safe` argument, since this is a legacy way of handling parameters.
-            compat_urllib_parse_quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"),
+            urllib.parse.quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"),
 
             # Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component.
-            compat_urllib_parse_quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"),
+            urllib.parse.quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"),
 
-            compat_urllib_parse_quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))
+            urllib.parse.quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))
 
     # Source for `safe` arguments: https://url.spec.whatwg.org/#percent-encoded-bytes.
@@ -5233,7 +5225,7 @@ def read_file(filename, default=[]):
         try:
             # FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
             contents = optionf.read()
-            res = compat_shlex_split(contents, comments=True)
+            res = shlex.split(contents, comments=True)
         finally:
             optionf.close()
         return res
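
iri_to_uri percent-encodes each URL component separately, choosing safe characters per component; the underlying quote behaviour is the same one the deleted test_compat_urllib_parse_quote pinned down, e.g.:

    import urllib.parse

    # UTF-8 percent-escaping of non-ASCII text
    assert urllib.parse.quote('津波') == '%E6%B4%A5%E6%B3%A2'
    # `safe` keeps the listed reserved characters unescaped
    assert urllib.parse.quote('/user/abc+def', safe='+') == '%2Fuser%2Fabc+def'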

View File

@@ -15,7 +15,6 @@
 import io
 from .utils import int_or_none, timetuple_from_msec
 from .compat import (
-    compat_str as str,
     compat_Pattern,
     compat_Match,
 )