Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2024-11-23 00:52:01 +00:00)

[extractor] Add write_debug and get_param

commit a06916d98e, parent 681de68e9d
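
This commit adds two helpers to the extractor base class, write_debug() and get_param(), and switches extractor code from reading self._downloader.params.get(...) directly to the new self.get_param(...) accessor. A minimal sketch of the before/after pattern inside a hypothetical extractor (the class name, URL pattern and option names below are illustrative, not part of this diff):

    from yt_dlp.extractor.common import InfoExtractor

    class ExampleIE(InfoExtractor):  # hypothetical extractor, for illustration only
        _VALID_URL = r'https?://example\.com/video/(?P<id>\d+)'

        def _real_extract(self, url):
            video_id = self._match_id(url)

            # Old pattern: reach into the downloader's params dict directly.
            # noplaylist = self._downloader.params.get('noplaylist')

            # New pattern: go through the base-class accessor, which falls back
            # to the supplied default when no downloader is attached.
            noplaylist = self.get_param('noplaylist', False)

            # New debug helper: the message is prefixed with the extractor name
            # and forwarded to the downloader's write_debug().
            self.write_debug('noplaylist is %s for %s' % (noplaylist, video_id))

            return {'id': video_id, 'title': 'Example video %s' % video_id}
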
@@ -1332,6 +1332,7 @@ #### Not recommended
--list-formats-as-table --compat-options -list-formats [Default] (Alias: --no-list-formats-old)
--sponskrub-args ARGS --ppa "sponskrub:ARGS"
--test Used by developers for testing extractors. Not intended for the end user
+--youtube-print-sig-code Used for testing youtube signatures


#### Old aliases
@@ -1362,7 +1363,6 @@ #### No longer supported
--no-call-home Default
--include-ads No longer supported
--no-include-ads Default
---youtube-print-sig-code No longer supported

#### Removed
These options were deprecated since 2014 and have now been entirely removed
@@ -48,7 +48,6 @@
date_from_str,
DateRange,
DEFAULT_OUTTMPL,
-OUTTMPL_TYPES,
determine_ext,
determine_protocol,
DOT_DESKTOP_LINK_TEMPLATE,
@@ -57,8 +56,8 @@
DownloadError,
encode_compat_str,
encodeFilename,
-error_to_compat_str,
EntryNotInPlaylist,
+error_to_compat_str,
ExistingVideoReached,
expand_path,
ExtractorError,
@@ -77,6 +76,7 @@
MaxDownloadsReached,
network_exceptions,
orderedSet,
+OUTTMPL_TYPES,
PagedList,
parse_filesize,
PerRequestProxyHandler,
@@ -84,11 +84,12 @@
PostProcessingError,
preferredencoding,
prepend_extension,
+process_communicate_or_kill,
random_uuidv4,
register_socks_protocols,
+RejectedVideoReached,
render_table,
replace_extension,
-RejectedVideoReached,
SameFileError,
sanitize_filename,
sanitize_path,
@@ -109,7 +110,6 @@
YoutubeDLCookieProcessor,
YoutubeDLHandler,
YoutubeDLRedirectHandler,
-process_communicate_or_kill,
)
from .cache import Cache
from .extractor import (
@@ -1414,7 +1414,7 @@ def extract_redirect_url(html, url=None, fatal=False):
authn_token = None
if not authn_token:
# TODO add support for other TV Providers
-mso_id = self._downloader.params.get('ap_mso')
+mso_id = self.get_param('ap_mso')
if not mso_id:
raise_mvpd_required()
username, password = self._get_login_info('ap_username', 'ap_password', mso_id)
@@ -323,7 +323,7 @@ def _real_extract(self, url):
'url': file_url,
'format_id': 'http',
}]
-if not formats and not self._downloader.params.get('ignore_no_formats'):
+if not formats and not self.get_param('ignore_no_formats'):
continue
self._sort_formats(formats)
file_info = common_entry.copy()
@@ -1271,7 +1271,7 @@ def extract_all(pattern):
entries = []
for num, media_meta in enumerate(medias, start=1):
formats, subtitles = self._extract_from_media_meta(media_meta, playlist_id)
-if not formats and not self._downloader.params.get('ignore_no_formats'):
+if not formats and not self.get_param('ignore_no_formats'):
continue
self._sort_formats(formats)

@@ -153,7 +153,7 @@ def _real_extract(self, url):
# Bilibili anthologies are similar to playlists but all videos share the same video ID as the anthology itself.
# If the video has no page argument, check to see if it's an anthology
if page_id is None:
-if not self._downloader.params.get('noplaylist'):
+if not self.get_param('noplaylist'):
r = self._extract_anthology_entries(bv_id, video_id, webpage)
if r is not None:
self.to_screen('Downloading anthology %s - add --no-playlist to just download video' % video_id)
@@ -299,7 +299,7 @@ def _real_extract(self, url):
'tags': tags,
'raw_tags': raw_tags,
}
-if self._downloader.params.get('getcomments', False):
+if self.get_param('getcomments', False):
def get_comments():
comments = self._get_all_comment_pages(video_id)
return {
@@ -478,7 +478,7 @@ def _parse_brightcove_metadata(self, json_data, video_id, headers={}):
container = source.get('container')
ext = mimetype2ext(source.get('type'))
src = source.get('src')
-skip_unplayable = not self._downloader.params.get('allow_unplayable_formats')
+skip_unplayable = not self.get_param('allow_unplayable_formats')
# https://support.brightcove.com/playback-api-video-fields-reference#key_systems_object
if skip_unplayable and (container == 'WVM' or source.get('key_systems')):
num_drm_sources += 1
@@ -547,7 +547,7 @@ def build_format_id(kind):
error = errors[0]
self.raise_no_formats(
error.get('message') or error.get('error_subcode') or error['error_code'], expected=True)
-elif (not self._downloader.params.get('allow_unplayable_formats')
+elif (not self.get_param('allow_unplayable_formats')
and sources and num_drm_sources == len(sources)):
raise ExtractorError('This video is DRM protected.', expected=True)

@@ -147,7 +147,7 @@ def _real_extract(self, url):
is_live = item.get('type') == 'LIVE'
formats = []
for format_id, stream_url in item.get('streamUrls', {}).items():
-if (not self._downloader.params.get('allow_unplayable_formats')
+if (not self.get_param('allow_unplayable_formats')
and 'drmOnly=true' in stream_url):
continue
if 'playerType=flash' in stream_url:
@@ -491,7 +491,7 @@ def _initialize_geo_bypass(self, geo_bypass_context):
if not self._x_forwarded_for_ip:

# Geo bypass mechanism is explicitly disabled by user
-if not self._downloader.params.get('geo_bypass', True):
+if not self.get_param('geo_bypass', True):
return

if not geo_bypass_context:
@@ -513,7 +513,7 @@ def _initialize_geo_bypass(self, geo_bypass_context):

# Explicit IP block specified by user, use it right away
# regardless of whether extractor is geo bypassable or not
-ip_block = self._downloader.params.get('geo_bypass_ip_block', None)
+ip_block = self.get_param('geo_bypass_ip_block', None)

# Otherwise use random IP block from geo bypass context but only
# if extractor is known as geo bypassable
@@ -532,7 +532,7 @@ def _initialize_geo_bypass(self, geo_bypass_context):

# Explicit country code specified by user, use it right away
# regardless of whether extractor is geo bypassable or not
-country = self._downloader.params.get('geo_bypass_country', None)
+country = self.get_param('geo_bypass_country', None)

# Otherwise use random country code from geo bypass context but
# only if extractor is known as geo bypassable
@@ -552,12 +552,13 @@ def extract(self, url):
for _ in range(2):
try:
self.initialize()
+self.write_debug('Extracting URL: %s' % url)
ie_result = self._real_extract(url)
if self._x_forwarded_for_ip:
ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
subtitles = ie_result.get('subtitles')
if (subtitles and 'live_chat' in subtitles
-and 'no-live-chat' in self._downloader.params.get('compat_opts', [])):
+and 'no-live-chat' in self.get_param('compat_opts', [])):
del subtitles['live_chat']
return ie_result
except GeoRestrictedError as e:
@@ -572,9 +573,9 @@ def extract(self, url):
raise ExtractorError('An extractor error has occurred.', cause=e)

def __maybe_fake_ip_and_retry(self, countries):
-if (not self._downloader.params.get('geo_bypass_country', None)
+if (not self.get_param('geo_bypass_country', None)
and self._GEO_BYPASS
-and self._downloader.params.get('geo_bypass', True)
+and self.get_param('geo_bypass', True)
and not self._x_forwarded_for_ip
and countries):
country_code = random.choice(countries)
@@ -628,7 +629,7 @@ def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fa
See _download_webpage docstring for arguments specification.
"""
if not self._downloader._first_webpage_request:
-sleep_interval = float_or_none(self._downloader.params.get('sleep_interval_requests')) or 0
+sleep_interval = float_or_none(self.get_param('sleep_interval_requests')) or 0
if sleep_interval > 0:
self.to_screen('Sleeping %s seconds ...' % sleep_interval)
time.sleep(sleep_interval)
@@ -753,11 +754,11 @@ def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errno
webpage_bytes = prefix + webpage_bytes
if not encoding:
encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
-if self._downloader.params.get('dump_intermediate_pages', False):
+if self.get_param('dump_intermediate_pages', False):
self.to_screen('Dumping request to ' + urlh.geturl())
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
-if self._downloader.params.get('write_pages', False):
+if self.get_param('write_pages', False):
basen = '%s_%s' % (video_id, urlh.geturl())
if len(basen) > 240:
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
@@ -941,14 +942,22 @@ def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
else:
self.report_warning(errmsg + str(ve))

-def report_warning(self, msg, video_id=None):
+def report_warning(self, msg, video_id=None, *args, **kwargs):
idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
-'[%s] %s%s' % (self.IE_NAME, idstr, msg))
+'[%s] %s%s' % (self.IE_NAME, idstr, msg), *args, **kwargs)

-def to_screen(self, msg):
+def to_screen(self, msg, *args, **kwargs):
"""Print msg to screen, prefixing it with '[ie_name]'"""
-self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
+self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg), *args, **kwargs)
+
+def write_debug(self, msg, *args, **kwargs):
+self._downloader.write_debug('[%s] %s' % (self.IE_NAME, msg), *args, **kwargs)
+
+def get_param(self, name, default=None, *args, **kwargs):
+if self._downloader:
+return self._downloader.params.get(name, default, *args, **kwargs)
+return default
+
def report_extraction(self, id_or_name):
"""Report information extraction."""
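
The hunk above contains the new definitions themselves: write_debug() forwards a message prefixed with the extractor name to the downloader's write_debug(), and get_param() reads self._downloader.params when a downloader is attached and otherwise returns the supplied default. A quick sketch of that fallback path (instantiating the base class directly like this is only for illustration; extractors are normally constructed by YoutubeDL):

    from yt_dlp.extractor.common import InfoExtractor

    # No downloader attached, so get_param() just hands back the default.
    ie = InfoExtractor(downloader=None)
    print(ie.get_param('geo_bypass', True))    # -> True (the default)
    print(ie.get_param('videopassword'))       # -> None
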
@@ -968,7 +977,7 @@ def report_login(self):

def raise_login_required(
self, msg='This video is only available for registered users', metadata_available=False):
-if metadata_available and self._downloader.params.get('ignore_no_formats_error'):
+if metadata_available and self.get_param('ignore_no_formats_error'):
self.report_warning(msg)
raise ExtractorError(
'%s. Use --cookies, --username and --password or --netrc to provide account credentials' % msg,
@@ -977,13 +986,13 @@ def raise_login_required(
def raise_geo_restricted(
self, msg='This video is not available from your location due to geo restriction',
countries=None, metadata_available=False):
-if metadata_available and self._downloader.params.get('ignore_no_formats_error'):
+if metadata_available and self.get_param('ignore_no_formats_error'):
self.report_warning(msg)
else:
raise GeoRestrictedError(msg, countries=countries)

def raise_no_formats(self, msg, expected=False, video_id=None):
-if expected and self._downloader.params.get('ignore_no_formats_error'):
+if expected and self.get_param('ignore_no_formats_error'):
self.report_warning(msg, video_id)
else:
raise ExtractorError(msg, expected=expected, video_id=video_id)
@@ -1038,7 +1047,7 @@ def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, f
if mobj:
break

-if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
+if not self.get_param('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
_name = '\033[0;34m%s\033[0m' % name
else:
_name = name
@@ -1072,7 +1081,7 @@ def _get_netrc_login_info(self, netrc_machine=None):
password = None
netrc_machine = netrc_machine or self._NETRC_MACHINE

-if self._downloader.params.get('usenetrc', False):
+if self.get_param('usenetrc', False):
try:
info = netrc.netrc().authenticators(netrc_machine)
if info is not None:
@@ -1096,15 +1105,11 @@ def _get_login_info(self, username_option='username', password_option='password'
value.
If there's no info available, return (None, None)
"""
-if self._downloader is None:
-return (None, None)
-
-downloader_params = self._downloader.params

# Attempt to use provided username and password or .netrc data
-if downloader_params.get(username_option) is not None:
-username = downloader_params[username_option]
-password = downloader_params[password_option]
+username = self.get_param(username_option)
+if username is not None:
+password = self.get_param(password_option)
else:
username, password = self._get_netrc_login_info(netrc_machine)

@@ -1117,12 +1122,10 @@ def _get_tfa_info(self, note='two-factor verification code'):
currently just uses the command line option
If there's no info available, return None
"""
-if self._downloader is None:
-return None
-downloader_params = self._downloader.params

-if downloader_params.get('twofactor') is not None:
-return downloader_params['twofactor']
+tfa = self.get_param('twofactor')
+if tfa is not None:
+return tfa

return compat_getpass('Type %s and press [Return]: ' % note)

@@ -1683,12 +1686,12 @@ def calculate_preference(self, format):

def _sort_formats(self, formats, field_preference=[]):
if not formats:
-if self._downloader.params.get('ignore_no_formats_error'):
+if self.get_param('ignore_no_formats_error'):
return
raise ExtractorError('No video formats found')
format_sort = self.FormatSort() # params and to_screen are taken from the downloader
format_sort.evaluate_params(self._downloader.params, field_preference)
-if self._downloader.params.get('verbose', False):
+if self.get_param('verbose', False):
format_sort.print_verbose_info(self._downloader.write_debug)
formats.sort(key=lambda f: format_sort.calculate_preference(f))

@@ -1728,7 +1731,7 @@ def http_scheme(self):
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
-if self._downloader.params.get('prefer_insecure', False)
+if self.get_param('prefer_insecure', False)
else 'https:')

def _proto_relative_url(self, url, scheme=None):
@@ -1922,7 +1925,7 @@ def _parse_m3u8_formats_and_subtitles(
if '#EXT-X-FAXS-CM:' in m3u8_doc: # Adobe Flash Access
return [], {}

-if (not self._downloader.params.get('allow_unplayable_formats')
+if (not self.get_param('allow_unplayable_formats')
and re.search(r'#EXT-X-SESSION-KEY:.*?URI="skd://', m3u8_doc)): # Apple FairPlay
return [], {}

@@ -1935,7 +1938,7 @@ def _parse_m3u8_formats_and_subtitles(
if re.match(r'^https?://', u)
else compat_urlparse.urljoin(m3u8_url, u))

-split_discontinuity = self._downloader.params.get('hls_split_discontinuity', False)
+split_discontinuity = self.get_param('hls_split_discontinuity', False)

# References:
# 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-21
@@ -2478,7 +2481,7 @@ def _parse_mpd_formats_and_subtitles(
http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
"""
-if not self._downloader.params.get('dynamic_mpd', True):
+if not self.get_param('dynamic_mpd', True):
if mpd_doc.get('type') == 'dynamic':
return [], {}

@@ -2548,7 +2551,7 @@ def extract_Initialization(source):
extract_Initialization(segment_template)
return ms_info

-skip_unplayable = not self._downloader.params.get('allow_unplayable_formats')
+skip_unplayable = not self.get_param('allow_unplayable_formats')

mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
formats = []
@@ -2797,7 +2800,7 @@ def _parse_ism_formats_and_subtitles(self, ism_doc, ism_url, ism_id=None):
"""
if ism_doc.get('IsLive') == 'TRUE':
return [], {}
-if (not self._downloader.params.get('allow_unplayable_formats')
+if (not self.get_param('allow_unplayable_formats')
and ism_doc.find('Protection') is not None):
return [], {}

@@ -3402,8 +3405,8 @@ def is_suitable(self, age_limit):
return not any_restricted

def extract_subtitles(self, *args, **kwargs):
-if (self._downloader.params.get('writesubtitles', False)
-or self._downloader.params.get('listsubtitles')):
+if (self.get_param('writesubtitles', False)
+or self.get_param('listsubtitles')):
return self._get_subtitles(*args, **kwargs)
return {}

@@ -3438,8 +3441,8 @@ def _merge_subtitles(cls, *dicts, **kwargs):
return target

def extract_automatic_captions(self, *args, **kwargs):
-if (self._downloader.params.get('writeautomaticsub', False)
-or self._downloader.params.get('listsubtitles')):
+if (self.get_param('writeautomaticsub', False)
+or self.get_param('listsubtitles')):
return self._get_automatic_captions(*args, **kwargs)
return {}

@@ -3447,9 +3450,9 @@ def _get_automatic_captions(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')

def mark_watched(self, *args, **kwargs):
-if (self._downloader.params.get('mark_watched', False)
+if (self.get_param('mark_watched', False)
and (self._get_login_info()[0] is not None
-or self._downloader.params.get('cookiefile') is not None)):
+or self.get_param('cookiefile') is not None)):
self._mark_watched(*args, **kwargs)

def _mark_watched(self, *args, **kwargs):
@@ -3457,7 +3460,7 @@ def _mark_watched(self, *args, **kwargs):

def geo_verification_headers(self):
headers = {}
-geo_verification_proxy = self._downloader.params.get('geo_verification_proxy')
+geo_verification_proxy = self.get_param('geo_verification_proxy')
if geo_verification_proxy:
headers['Ytdl-request-proxy'] = geo_verification_proxy
return headers
@@ -26,7 +26,7 @@ def _real_extract(self, url):
'That doesn\'t make any sense. '
'Simply remove the parameter in your command or configuration.'
) % url
-if not self._downloader.params.get('verbose'):
+if not self.get_param('verbose'):
msg += ' Add -v to the command line to see what arguments and configuration yt-dlp has'
raise ExtractorError(msg, expected=True)

@@ -81,7 +81,7 @@ def _download_json(self, url, *args, **kwargs):
def _real_extract(self, url):
video_id = self._match_id(url)

-geo_bypass_country = self._downloader.params.get('geo_bypass_country', None)
+geo_bypass_country = self.get_param('geo_bypass_country', None)
countries = orderedSet((geo_bypass_country, 'US', 'AU', 'CA', 'AS', 'FM', 'GU', 'MP', 'PR', 'PW', 'MH', 'VI', ''))
num_countries, num = len(countries) - 1, 0

@@ -128,8 +128,8 @@ def _real_extract(self, url):
if isinstance(media.get('MediaURLs'), list):
break

-ignore_no_formats = self._downloader.params.get('ignore_no_formats_error')
-allow_unplayable_formats = self._downloader.params.get('allow_unplayable_formats')
+ignore_no_formats = self.get_param('ignore_no_formats_error')
+allow_unplayable_formats = self.get_param('allow_unplayable_formats')

if not media or (not media.get('MediaURLs') and not ignore_no_formats):
raise ExtractorError(
@@ -42,7 +42,7 @@ def _set_dailymotion_cookie(self, name, value):
def _real_initialize(self):
cookies = self._get_dailymotion_cookies()
ff = self._get_cookie_value(cookies, 'ff')
-self._FAMILY_FILTER = ff == 'on' if ff else age_restricted(18, self._downloader.params.get('age_limit'))
+self._FAMILY_FILTER = ff == 'on' if ff else age_restricted(18, self.get_param('age_limit'))
self._set_dailymotion_cookie('ff', 'on' if self._FAMILY_FILTER else 'off')

def _call_api(self, object_type, xid, object_fields, note, filter_extra=None):
@@ -207,14 +207,14 @@ def _real_extract(self, url):
video_id, playlist_id = re.match(self._VALID_URL, url).groups()

if playlist_id:
-if not self._downloader.params.get('noplaylist'):
+if not self.get_param('noplaylist'):
self.to_screen('Downloading playlist %s - add --no-playlist to just download video' % playlist_id)
return self.url_result(
'http://www.dailymotion.com/playlist/' + playlist_id,
'DailymotionPlaylist', playlist_id)
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)

-password = self._downloader.params.get('videopassword')
+password = self.get_param('videopassword')
media = self._call_api(
'media', video_id, '''... on Video {
%s
@@ -232,7 +232,7 @@ def _real_extract(self, url):
audienceCount
isOnAir
}''' % (self._COMMON_MEDIA_FIELDS, self._COMMON_MEDIA_FIELDS), 'Downloading media JSON metadata',
-'password: "%s"' % self._downloader.params.get('videopassword') if password else None)
+'password: "%s"' % self.get_param('videopassword') if password else None)
xid = media['xid']

metadata = self._download_json(
@@ -158,7 +158,7 @@ def _check_clip(self, url, list_id):
query_dict = compat_parse_qs(compat_urlparse.urlparse(url).query)
if 'clipid' in query_dict:
clip_id = query_dict['clipid'][0]
-if self._downloader.params.get('noplaylist'):
+if self.get_param('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % clip_id)
return self.url_result(DaumClipIE._URL_TEMPLATE % clip_id, 'DaumClip')
else:
@@ -13,7 +13,7 @@

class DeezerBaseInfoExtractor(InfoExtractor):
def get_data(self, url):
-if not self._downloader.params.get('test'):
+if not self.get_param('test'):
self.report_warning('For now, this extractor only supports the 30 second previews. Patches welcome!')

mobj = re.match(self._VALID_URL, url)
@@ -2370,7 +2370,7 @@ def _real_extract(self, url):

parsed_url = compat_urlparse.urlparse(url)
if not parsed_url.scheme:
-default_search = self._downloader.params.get('default_search')
+default_search = self.get_param('default_search')
if default_search is None:
default_search = 'fixup_error'

@@ -2461,8 +2461,8 @@ def _real_extract(self, url):
info_dict['subtitles'] = subtitles
return info_dict

-if not self._downloader.params.get('test', False) and not is_intentional:
-force = self._downloader.params.get('force_generic_extractor', False)
+if not self.get_param('test', False) and not is_intentional:
+force = self.get_param('force_generic_extractor', False)
self.report_warning(
'%s on generic information extractor.' % ('Forcing' if force else 'Falling back'))

@@ -96,7 +96,7 @@ def _real_extract(self, url):
video = self._download_json(
'http://api.globovideos.com/videos/%s/playlist' % video_id,
video_id)['videos'][0]
-if not self._downloader.params.get('allow_unplayable_formats') and video.get('encrypted') is True:
+if not self.get_param('allow_unplayable_formats') and video.get('encrypted') is True:
raise ExtractorError('This video is DRM protected.', expected=True)

title = video['title']
@@ -141,7 +141,7 @@ def _real_extract(self, url):

title = video_data['title']

-if not self._downloader.params.get('allow_unplayable_formats') and video_data.get('drmProtected'):
+if not self.get_param('allow_unplayable_formats') and video_data.get('drmProtected'):
raise ExtractorError('This video is DRM protected.', expected=True)

headers = {'Referer': url}
@@ -65,7 +65,7 @@ def _real_extract(self, url):
domain, media_type, media_id, playlist_id = re.match(self._VALID_URL, url).groups()

if playlist_id:
-if self._downloader.params.get('noplaylist'):
+if self.get_param('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % media_id)
else:
self.to_screen('Downloading playlist %s - add --no-playlist to just download video' % playlist_id)
@@ -165,7 +165,7 @@ def _real_extract(self, url):
content_format = f.get('content_format')
if not f_url:
continue
-if (not self._downloader.params.get('allow_unplayable_formats')
+if (not self.get_param('allow_unplayable_formats')
and ('-MDRM-' in content_format or '-FPS-' in content_format)):
continue
formats.append({
@@ -309,7 +309,7 @@ def sign_url(unsigned_url):
if f.get('fileExt') == 'chun':
continue
# DRM-protected video, cannot be decrypted
-if not self._downloader.params.get('allow_unplayable_formats') and f.get('fileExt') == 'wvm':
+if not self.get_param('allow_unplayable_formats') and f.get('fileExt') == 'wvm':
continue
if not f.get('fileExt'):
# QT indicates QuickTime; some videos have broken fileExt
@@ -98,7 +98,7 @@ def _extract_info(self, pc, mobile, i, referer):
stream_url = stream.get('url')
if not stream_url or stream_url in urls:
continue
-if not self._downloader.params.get('allow_unplayable_formats') and stream.get('drmProtected'):
+if not self.get_param('allow_unplayable_formats') and stream.get('drmProtected'):
continue
urls.append(stream_url)
ext = determine_ext(stream_url)
@@ -163,7 +163,7 @@ def _extract_info(self, pc, mobile, i, referer):
if not media_url or media_url in urls:
continue
if (format_id in ('Widevine', 'SmoothStreaming')
-and not self._downloader.params.get('allow_unplayable_formats', False)):
+and not self.get_param('allow_unplayable_formats', False)):
continue
urls.append(media_url)
ext = determine_ext(media_url)
@@ -71,7 +71,7 @@ def _real_extract(self, url):

video_id = self._match_id(url)

-noplaylist = self._downloader.params.get('noplaylist')
+noplaylist = self.get_param('noplaylist')
noplaylist_prompt = True
if 'force_noplaylist' in data:
noplaylist = data['force_noplaylist']
@@ -167,7 +167,7 @@ def _real_extract(self, url):
display_id = self._match_id(url)
collection_id = compat_parse_qs(compat_urllib_parse_urlparse(url).query).get('collection', [None])[0]
if collection_id:
-if self._downloader.params.get('noplaylist'):
+if self.get_param('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % display_id)
else:
self.to_screen('Downloading playlist %s - add --no-playlist to just download video' % collection_id)
@@ -405,7 +405,7 @@ def _real_extract(self, url):
name = info['name']
description = info['description']

-if not info['songs'] or self._downloader.params.get('noplaylist'):
+if not info['songs'] or self.get_param('noplaylist'):
if info['songs']:
self.to_screen(
'Downloading just the main audio %s because of --no-playlist'
@@ -34,7 +34,7 @@ def _real_extract(self, url):
'$include': '[HasClosedCaptions]',
})

-if (not self._downloader.params.get('allow_unplayable_formats')
+if (not self.get_param('allow_unplayable_formats')
and try_get(content_package, lambda x: x['Constraints']['Security']['Type'])):
raise ExtractorError('This video is DRM protected.', expected=True)

@@ -66,7 +66,7 @@ def _real_extract(self, url):

video_data = common_data['video']

-if not self._downloader.params.get('allow_unplayable_formats') and video_data.get('drm'):
+if not self.get_param('allow_unplayable_formats') and video_data.get('drm'):
raise ExtractorError('This video is DRM protected.', expected=True)

brightcove_id = video_data.get('brightcoveId') or 'ref:' + video_data['referenceId']
@@ -246,7 +246,7 @@ def _get_info(self, url, video_id):
})

if not formats:
-if not self._downloader.params.get('allow_unplayable_formats') and drm:
+if not self.get_param('allow_unplayable_formats') and drm:
self.raise_no_formats('This video is DRM protected.', expected=True)
return

@@ -182,7 +182,7 @@ def _real_extract(self, url):
video_id = remove_start(current_clip_info['ckmId'], 'mvp:')
video_name = url_basename(current_clip_info['url'])

-if self._downloader.params.get('noplaylist'):
+if self.get_param('noplaylist'):
self.to_screen(
'Downloading just video %s because of --no-playlist' % video_name)
return self._extract_from_id(video_id, webpage)
@@ -79,7 +79,7 @@ def extract_entry(source):
formats.extend(self._extract_m3u8_formats(
m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
-if not formats and not self._downloader.params.get('ignore_no_formats'):
+if not formats and not self.get_param('ignore_no_formats'):
return
self._sort_formats(formats)
return {
@@ -337,11 +337,11 @@ def _real_extract(self, url):
# In order to minimize the number of calls to ViewClip API and reduce
# the probability of being throttled or banned by Pluralsight we will request
# only single format until formats listing was explicitly requested.
-if self._downloader.params.get('listformats', False):
+if self.get_param('listformats', False):
allowed_qualities = ALLOWED_QUALITIES
else:
def guess_allowed_qualities():
-req_format = self._downloader.params.get('format') or 'best'
+req_format = self.get_param('format') or 'best'
req_format_split = req_format.split('-', 1)
if len(req_format_split) > 1:
req_ext, req_quality = req_format_split
@@ -349,7 +349,7 @@ def guess_allowed_qualities():
for allowed_quality in ALLOWED_QUALITIES:
if req_ext == allowed_quality.ext and req_quality in allowed_quality.qualities:
return (AllowedQuality(req_ext, (req_quality, )), )
-req_ext = 'webm' if self._downloader.params.get('prefer_free_formats') else 'mp4'
+req_ext = 'webm' if self.get_param('prefer_free_formats') else 'mp4'
return (AllowedQuality(req_ext, (best_quality, )), )
allowed_qualities = guess_allowed_qualities()

@@ -109,7 +109,7 @@ def _to_ad_free_formats(self, video_id, formats, subtitles):
if ad_free_formats:
formats, subtitles = ad_free_formats, ad_free_subtitles
else:
-self._downloader.report_warning('Unable to find ad-free formats')
+self.report_warning('Unable to find ad-free formats')
return formats, subtitles

def _get_video_info(self, video_json, slug, series_name=None):
@@ -112,7 +112,7 @@ def _real_extract(self, url):
}

# API call can be avoided entirely if we are listing formats
-if self._downloader.params.get('listformats', False):
+if self.get_param('listformats', False):
return info

webpage = self._download_webpage(url, video_id)
@@ -34,7 +34,7 @@ def _extract_video_info(self, url, clip_id):
'ids': clip_id,
})[0]

-if not self._downloader.params.get('allow_unplayable_formats') and video.get('is_protected') is True:
+if not self.get_param('allow_unplayable_formats') and video.get('is_protected') is True:
raise ExtractorError('This video is DRM protected.', expected=True)

formats = []
@@ -275,7 +275,7 @@ def _real_extract(self, url):
media = self._download_json(
base + '.json', video_id, 'Downloading video JSON')

-if not self._downloader.params.get('allow_unplayable_formats'):
+if not self.get_param('allow_unplayable_formats'):
if try_get(
media,
(lambda x: x['rights_management']['rights']['drm'],
@@ -125,7 +125,7 @@ def _real_extract(self, url):
})

mpd_url = data.get('urlDash')
-if mpd_url and (self._downloader.params.get('allow_unplayable_formats') or not data.get('drm')):
+if mpd_url and (self.get_param('allow_unplayable_formats') or not data.get('drm')):
formats.extend(self._extract_mpd_formats(
mpd_url, media_id, mpd_id='dash', fatal=False))

@@ -200,7 +200,7 @@ def pv(name):
return node.get('value')

if not formats:
-if (not self._downloader.params.get('allow_unplayable_formats')
+if (not self.get_param('allow_unplayable_formats')
and xpath_text(video_xml, './Clip/DRM', default=None)):
self.raise_no_formats('This video is DRM protected.', expected=True)
ns_st_cds = pv('ns_st_cds')
@@ -114,7 +114,7 @@ def _real_extract(self, url):
playout = self._call_api(
'playout/new/url/' + video_id, video_id)['playout']

-if not self._downloader.params.get('allow_unplayable_formats') and playout.get('drm'):
+if not self.get_param('allow_unplayable_formats') and playout.get('drm'):
raise ExtractorError('This video is DRM protected.', expected=True)

formats = self._extract_m3u8_formats(re.sub(
@@ -75,7 +75,7 @@ def _real_extract(self, url):
video_id = self._match_id(url)
content = self._call_api(
'1.5', 'IN/CONTENT/VIDEOURL/VOD/' + video_id, video_id)
-if not self._downloader.params.get('allow_unplayable_formats') and content.get('isEncrypted'):
+if not self.get_param('allow_unplayable_formats') and content.get('isEncrypted'):
raise ExtractorError('This video is DRM protected.', expected=True)
dash_url = content['videoURL']
headers = {
@@ -63,7 +63,7 @@ def _real_extract(self, url):
if len(videos) > 1:
playlist_id = compat_parse_qs(compat_urllib_parse_urlparse(url).query).get('playlistId', [None])[0]
if playlist_id:
-if self._downloader.params.get('noplaylist'):
+if self.get_param('noplaylist'):
videos = [videos[int(playlist_id)]]
self.to_screen('Downloading just a single video because of --no-playlist')
else:
@@ -77,7 +77,7 @@ def entries():
continue
formats = self._extract_m3u8_formats(
video_url.replace('.smil', '.m3u8'), video_id, 'mp4', fatal=False)
-if not formats and not self._downloader.params.get('ignore_no_formats'):
+if not formats and not self.get_param('ignore_no_formats'):
continue
yield {
'id': video_id,
@@ -139,7 +139,7 @@ def _real_extract(self, url):
'format_id': ext + quality,
'url': video_url,
})
-if not formats and not self._downloader.params.get('ignore_no_formats'):
+if not formats and not self.get_param('ignore_no_formats'):
continue
entry['formats'] = formats
entries.append(entry)
@@ -153,7 +153,7 @@ def _real_extract(self, url):
})
if not formats:
for meta in (info.get('Metas') or []):
-if (not self._downloader.params.get('allow_unplayable_formats')
+if (not self.get_param('allow_unplayable_formats')
and meta.get('Key') == 'Encryption' and meta.get('Value') == '1'):
self.raise_no_formats(
'This video is DRM protected.', expected=True)
@@ -74,7 +74,7 @@ def _real_extract(self, url):
})
# IsDrm does not necessarily mean the video is DRM protected (see
# https://github.com/ytdl-org/youtube-dl/issues/13994).
-if not self._downloader.params.get('allow_unplayable_formats') and metadata.get('IsDrm'):
+if not self.get_param('allow_unplayable_formats') and metadata.get('IsDrm'):
self.report_warning('This video is probably DRM protected.', path)
video_id = metadata['IdMedia']
details = metadata['Details']
@@ -69,7 +69,7 @@ def make_urls(proto, suffix):
if formats:
break
else:
-if not self._downloader.params.get('allow_unplayable_formats') and info.get('isDrm'):
+if not self.get_param('allow_unplayable_formats') and info.get('isDrm'):
raise ExtractorError(
'Video %s is DRM protected' % video_id, expected=True)
if info.get('geoblocked'):
@@ -59,7 +59,7 @@ class TwitCastingIE(InfoExtractor):
def _real_extract(self, url):
uploader_id, video_id = re.match(self._VALID_URL, url).groups()

-video_password = self._downloader.params.get('videopassword')
+video_password = self.get_param('videopassword')
request_data = None
if video_password:
request_data = urlencode_postdata({
@@ -324,7 +324,7 @@ def add_format(format_id, format_dict, protocol='http'):
# Despite CODECS metadata in m3u8 all video-only formats
# are actually video+audio
for f in m3u8_formats:
-if not self._downloader.params.get('allow_unplayable_formats') and '_drm/index_' in f['url']:
+if not self.get_param('allow_unplayable_formats') and '_drm/index_' in f['url']:
continue
if f.get('acodec') == 'none' and f.get('vcodec') != 'none':
f['acodec'] = None
@@ -76,7 +76,7 @@ def _login(self):
raise ExtractorError('Unable to log in')

def _get_video_password(self):
-password = self._downloader.params.get('videopassword')
+password = self.get_param('videopassword')
if password is None:
raise ExtractorError(
'This video is protected by a password, use the --video-password option',
@@ -603,7 +603,7 @@ def _try_album_password(self, url):
album_id, headers={'Authorization': 'jwt ' + jwt},
query={'fields': 'description,name,privacy'})
if try_get(album, lambda x: x['privacy']['view']) == 'password':
-password = self._downloader.params.get('videopassword')
+password = self.get_param('videopassword')
if not password:
raise ExtractorError(
'This album is protected by a password, use the --video-password option',
@@ -1058,7 +1058,7 @@ def _real_extract(self, url):
query={'fields': 'description,name,privacy'})
hashed_pass = None
if try_get(album, lambda x: x['privacy']['view']) == 'password':
-password = self._downloader.params.get('videopassword')
+password = self.get_param('videopassword')
if not password:
raise ExtractorError(
'This album is protected by a password, use the --video-password option',
@@ -287,7 +287,7 @@ def _real_extract(self, url):
raise ExtractorError('This video is not available in your region.', expected=True)

series_id = video_data.get('series_id')
-if not self._downloader.params.get('noplaylist') and not idata.get('force_noplaylist'):
+if not self.get_param('noplaylist') and not idata.get('force_noplaylist'):
self.to_screen('Downloading playlist %s - add --no-playlist to just download video' % series_id)
series = product_data.get('series', {})
product = series.get('product')
@@ -308,7 +308,7 @@ def _real_extract(self, url):

return self.playlist_result(entries, series_id, series.get('name'), series.get('description'))

-if self._downloader.params.get('noplaylist'):
+if self.get_param('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)

duration_limit = False
@@ -136,7 +136,7 @@ def _real_extract(self, url):
'author{nickname},channel{channelCode,channelName},officialVideo{commentCount,exposeStatus,likeCount,playCount,playTime,status,title,type,vodId},playlist{playlistSeq,totalCount,name}')

playlist = post.get('playlist')
-if not playlist or self._downloader.params.get('noplaylist'):
+if not playlist or self.get_param('noplaylist'):
if playlist:
self.to_screen(
'Downloading just video %s because of --no-playlist'
@@ -41,7 +41,7 @@ def _real_extract(self, url):
m3u8_url = urljoin(url, self._search_regex(
r'file\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage, 'm3u8 url',
group='url'))
-if not self._downloader.params.get('allow_unplayable_formats'):
+if not self.get_param('allow_unplayable_formats'):
# https://docs.microsoft.com/en-us/azure/media-services/previous/media-services-content-protection-overview#streaming-urls
encryption = self._search_regex(
r'encryption%3D(c(?:enc|bc(?:s-aapl)?))',
@@ -160,7 +160,7 @@ def _real_extract(self, url):
'client_ts': time.time() / 1000,
}

-video_password = self._downloader.params.get('videopassword')
+video_password = self.get_param('videopassword')
if video_password:
basic_data_params['password'] = video_password

@@ -88,9 +88,9 @@ def _login(self):
username, password = self._get_login_info()
# No authentication to be performed
if username is None:
-if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None:
+if self._LOGIN_REQUIRED and self.get_param('cookiefile') is None:
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
-# if self._downloader.params.get('cookiefile'): # TODO remove 'and False' later - too many people using outdated cookies and open issues, remind them.
+# if self.get_param('cookiefile'): # TODO remove 'and False' later - too many people using outdated cookies and open issues, remind them.
# self.to_screen('[Cookies] Reminder - Make sure to always use up to date cookies!')
return True

@@ -1460,7 +1460,7 @@ def _decrypt_signature(self, s, video_id, player_url):
)
self._player_cache[player_id] = func
func = self._player_cache[player_id]
-if self._downloader.params.get('youtube_print_sig_code'):
+if self.get_param('youtube_print_sig_code'):
self._print_sig_code(func, s)
return func(s)
except Exception as e:
@@ -1690,7 +1690,7 @@ def extract_thread(parent_renderer):
if not continuation:
break
headers = self._generate_api_headers(ytcfg, identity_token, account_syncid, visitor_data)
-retries = self._downloader.params.get('extractor_retries', 3)
+retries = self.get_param('extractor_retries', 3)
count = -1
last_error = None

@@ -1948,7 +1948,7 @@ def get_text(x):
video_description = video_details.get('shortDescription')

if not smuggled_data.get('force_singlefeed', False):
-if not self._downloader.params.get('noplaylist'):
+if not self.get_param('noplaylist'):
multifeed_metadata_list = try_get(
player_response,
lambda x: x['multicamera']['playerLegacyMulticameraRenderer']['metadataList'],
@@ -2092,7 +2092,7 @@ def feed_entry(name):
f['format_id'] = itag
formats.append(f)

-if self._downloader.params.get('youtube_include_dash_manifest', True):
+if self.get_param('youtube_include_dash_manifest', True):
for sd in (streaming_data, ytm_streaming_data):
dash_manifest_url = sd.get('dashManifestUrl')
if dash_manifest_url:
@@ -2114,7 +2114,7 @@ def feed_entry(name):
formats.append(f)

if not formats:
-if not self._downloader.params.get('allow_unplayable_formats') and streaming_data.get('licenseInfos'):
+if not self.get_param('allow_unplayable_formats') and streaming_data.get('licenseInfos'):
self.raise_no_formats(
'This video is DRM protected.', expected=True)
pemr = try_get(
@@ -2473,8 +2473,8 @@ def chapter_time(mmlir):
is_unlisted=None if is_private is None else is_unlisted)

# get xsrf for annotations or comments
-get_annotations = self._downloader.params.get('writeannotations', False)
-get_comments = self._downloader.params.get('getcomments', False)
+get_annotations = self.get_param('writeannotations', False)
+get_comments = self.get_param('getcomments', False)
if get_annotations or get_comments:
xsrf_token = None
ytcfg = self._extract_ytcfg(video_id, webpage)
@@ -3475,7 +3475,7 @@ def _extract_response(self, item_id, query, note='Downloading API JSON', headers
response = None
last_error = None
count = -1
-retries = self._downloader.params.get('extractor_retries', 3)
+retries = self.get_param('extractor_retries', 3)
if check_get_keys is None:
check_get_keys = []
while count < retries:
@@ -3519,7 +3519,7 @@ def _extract_response(self, item_id, query, note='Downloading API JSON', headers
return response

def _extract_webpage(self, url, item_id):
-retries = self._downloader.params.get('extractor_retries', 3)
+retries = self.get_param('extractor_retries', 3)
count = -1
last_error = 'Incomplete yt initial data recieved'
while count < retries:
@@ -3559,7 +3559,7 @@ def __real_extract(self, url):
item_id = self._match_id(url)
url = compat_urlparse.urlunparse(
compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
-compat_opts = self._downloader.params.get('compat_opts', [])
+compat_opts = self.get_param('compat_opts', [])

# This is not matched in a channel page with a tab selected
mobj = re.match(r'(?P<pre>%s)(?P<post>/?(?![^#?]).*$)' % self._VALID_URL, url)
@@ -3584,7 +3584,7 @@ def __real_extract(self, url):
url = 'https://www.youtube.com/playlist?list=%s' % playlist_id

if video_id and playlist_id:
-if self._downloader.params.get('noplaylist'):
+if self.get_param('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
self.to_screen('Downloading playlist %s; add --no-playlist to just download video %s' % (playlist_id, video_id))
@@ -35,7 +35,7 @@ def _real_extract(self, url):
except ExtractorError:
form = None
if form:
-password = self._downloader.params.get('videopassword')
+password = self.get_param('videopassword')
if not password:
raise ExtractorError(
'This video is protected by a passcode, use the --video-password option', expected=True)