Update to ytdl v2021-04-01 (upstream youtube-dl commit 14f29f087e)

Closes #205
Author: pukkandan, 2021-04-01 13:58:33 +05:30
parent a6ae61a4c2
commit cce889b900
7 changed files with 162 additions and 104 deletions

View File: extractor/francetv.py

@@ -401,7 +401,7 @@ def _real_extract(self, url):
             (r'player\.load[^;]+src:\s*["\']([^"\']+)',
              r'id-video=([^@]+@[^"]+)',
              r'<a[^>]+href="(?:https?:)?//videos\.francetv\.fr/video/([^@]+@[^"]+)"',
-             r'data-id="([^"]+)"'),
+             r'data-id=["\']([\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'),
             webpage, 'video id')
         return self._make_url_result(video_id)
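The tightened pattern only accepts francetv's UUID-shaped video ids (8-4-4-4-12 hex groups), where the old r'data-id="([^"]+)"' captured whatever data-id attribute happened to appear first. A standalone check of the new pattern (the sample HTML is invented for illustration):

import re

NEW_PATTERN = r'data-id=["\']([\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'

html = (
    '<nav data-id="menu-main"></nav>'  # the old pattern would have captured this
    '<div data-id="9df36efc-3d49-45b0-bbca-cca4873e2e9b"></div>'
)

print(re.search(NEW_PATTERN, html).group(1))
# 9df36efc-3d49-45b0-bbca-cca4873e2e9b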

View File: extractor/instagram.py

@@ -12,6 +12,7 @@
 )
 from ..utils import (
     ExtractorError,
+    float_or_none,
     get_element_by_attribute,
     int_or_none,
     lowercase_escape,
@@ -32,6 +33,7 @@ class InstagramIE(InfoExtractor):
             'title': 'Video by naomipq',
             'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
             'thumbnail': r're:^https?://.*\.jpg',
+            'duration': 0,
             'timestamp': 1371748545,
             'upload_date': '20130620',
             'uploader_id': 'naomipq',
@@ -48,6 +50,7 @@ class InstagramIE(InfoExtractor):
             'ext': 'mp4',
             'title': 'Video by britneyspears',
             'thumbnail': r're:^https?://.*\.jpg',
+            'duration': 0,
             'timestamp': 1453760977,
             'upload_date': '20160125',
             'uploader_id': 'britneyspears',
@@ -86,6 +89,24 @@ class InstagramIE(InfoExtractor):
             'title': 'Post by instagram',
             'description': 'md5:0f9203fc6a2ce4d228da5754bcf54957',
         },
+    }, {
+        # IGTV
+        'url': 'https://www.instagram.com/tv/BkfuX9UB-eK/',
+        'info_dict': {
+            'id': 'BkfuX9UB-eK',
+            'ext': 'mp4',
+            'title': 'Fingerboarding Tricks with @cass.fb',
+            'thumbnail': r're:^https?://.*\.jpg',
+            'duration': 53.83,
+            'timestamp': 1530032919,
+            'upload_date': '20180626',
+            'uploader_id': 'instagram',
+            'uploader': 'Instagram',
+            'like_count': int,
+            'comment_count': int,
+            'comments': list,
+            'description': 'Meet Cass Hirst (@cass.fb), a fingerboarding pro who can perform tiny ollies and kickflips while blindfolded.',
+        }
     }, {
         'url': 'https://instagram.com/p/-Cmh1cukG2/',
         'only_matching': True,
@@ -159,7 +180,9 @@ def _real_extract(self, url):
         description = try_get(
             media, lambda x: x['edge_media_to_caption']['edges'][0]['node']['text'],
             compat_str) or media.get('caption')
+        title = media.get('title')
         thumbnail = media.get('display_src') or media.get('display_url')
+        duration = float_or_none(media.get('video_duration'))
         timestamp = int_or_none(media.get('taken_at_timestamp') or media.get('date'))
         uploader = media.get('owner', {}).get('full_name')
         uploader_id = media.get('owner', {}).get('username')
@@ -200,9 +223,10 @@ def get_count(keys, kind):
                         continue
                     entries.append({
                         'id': node.get('shortcode') or node['id'],
-                        'title': 'Video %d' % edge_num,
+                        'title': node.get('title') or 'Video %d' % edge_num,
                         'url': node_video_url,
                         'thumbnail': node.get('display_url'),
+                        'duration': float_or_none(node.get('video_duration')),
                         'width': int_or_none(try_get(node, lambda x: x['dimensions']['width'])),
                         'height': int_or_none(try_get(node, lambda x: x['dimensions']['height'])),
                         'view_count': int_or_none(node.get('video_view_count')),
@@ -239,8 +263,9 @@ def get_count(keys, kind):
             'id': video_id,
             'formats': formats,
             'ext': 'mp4',
-            'title': 'Video by %s' % uploader_id,
+            'title': title or 'Video by %s' % uploader_id,
             'description': description,
+            'duration': duration,
             'thumbnail': thumbnail,
             'timestamp': timestamp,
             'uploader_id': uploader_id,
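Both new duration fields go through float_or_none, so a missing or malformed video_duration becomes None instead of raising, and the title now prefers the media object's own title (set for IGTV posts) before falling back to the generated 'Video by <uploader>'. A rough sketch of that fallback chain, using a simplified stand-in for the helper (the real float_or_none in ..utils also takes scale arguments):

def float_or_none(v, default=None):
    # simplified stand-in for youtube-dl's utils.float_or_none
    try:
        return float(v)
    except (TypeError, ValueError):
        return default

# invented sample resembling the scraped media dict
media = {'video_duration': 53.83, 'owner': {'username': 'instagram'}}

duration = float_or_none(media.get('video_duration'))  # 53.83
title = media.get('title') or 'Video by %s' % media['owner']['username']
print(title, duration)  # Video by instagram 53.83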

View File: extractor/picarto.py

@@ -1,22 +1,15 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import re
-import time
-
 from .common import InfoExtractor
-from ..compat import compat_str
 from ..utils import (
     ExtractorError,
     js_to_json,
-    try_get,
-    update_url_query,
-    urlencode_postdata,
 )
 
 
 class PicartoIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www.)?picarto\.tv/(?P<id>[a-zA-Z0-9]+)(?:/(?P<token>[a-zA-Z0-9]+))?'
+    _VALID_URL = r'https?://(?:www.)?picarto\.tv/(?P<id>[a-zA-Z0-9]+)'
     _TEST = {
         'url': 'https://picarto.tv/Setz',
         'info_dict': {
@@ -34,65 +27,46 @@ def suitable(cls, url):
         return False if PicartoVodIE.suitable(url) else super(PicartoIE, cls).suitable(url)
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        channel_id = mobj.group('id')
-
-        metadata = self._download_json(
-            'https://api.picarto.tv/v1/channel/name/' + channel_id,
-            channel_id)
+        channel_id = self._match_id(url)
+        data = self._download_json(
+            'https://ptvintern.picarto.tv/ptvapi', channel_id, query={
+                'query': '''{
+  channel(name: "%s") {
+    adult
+    id
+    online
+    stream_name
+    title
+  }
+  getLoadBalancerUrl(channel_name: "%s") {
+    url
+  }
+}''' % (channel_id, channel_id),
+            })['data']
+        metadata = data['channel']
 
-        if metadata.get('online') is False:
+        if metadata.get('online') == 0:
             raise ExtractorError('Stream is offline', expected=True)
+        title = metadata['title']
 
         cdn_data = self._download_json(
-            'https://picarto.tv/process/channel', channel_id,
-            data=urlencode_postdata({'loadbalancinginfo': channel_id}),
-            note='Downloading load balancing info')
-
-        token = mobj.group('token') or 'public'
-        params = {
-            'con': int(time.time() * 1000),
-            'token': token,
-        }
-
-        prefered_edge = cdn_data.get('preferedEdge')
+            data['getLoadBalancerUrl']['url'] + '/stream/json_' + metadata['stream_name'] + '.js',
+            channel_id, 'Downloading load balancing info')
+
         formats = []
-
-        for edge in cdn_data['edges']:
-            edge_ep = edge.get('ep')
-            if not edge_ep or not isinstance(edge_ep, compat_str):
+        for source in (cdn_data.get('source') or []):
+            source_url = source.get('url')
+            if not source_url:
                 continue
-            edge_id = edge.get('id')
-            for tech in cdn_data['techs']:
-                tech_label = tech.get('label')
-                tech_type = tech.get('type')
-                preference = 0
-                if edge_id == prefered_edge:
-                    preference += 1
-                format_id = []
-                if edge_id:
-                    format_id.append(edge_id)
-                if tech_type == 'application/x-mpegurl' or tech_label == 'HLS':
-                    format_id.append('hls')
-                    formats.extend(self._extract_m3u8_formats(
-                        update_url_query(
-                            'https://%s/hls/%s/index.m3u8'
-                            % (edge_ep, channel_id), params),
-                        channel_id, 'mp4', quality=preference,
-                        m3u8_id='-'.join(format_id), fatal=False))
-                    continue
-                elif tech_type == 'video/mp4' or tech_label == 'MP4':
-                    format_id.append('mp4')
-                    formats.append({
-                        'url': update_url_query(
-                            'https://%s/mp4/%s.mp4' % (edge_ep, channel_id),
-                            params),
-                        'format_id': '-'.join(format_id),
-                        'quality': preference,
-                    })
-                else:
-                    # rtmp format does not seem to work
-                    continue
+            source_type = source.get('type')
+            if source_type == 'html5/application/vnd.apple.mpegurl':
+                formats.extend(self._extract_m3u8_formats(
+                    source_url, channel_id, 'mp4', m3u8_id='hls', fatal=False))
+            elif source_type == 'html5/video/mp4':
+                formats.append({
+                    'url': source_url,
+                })
         self._sort_formats(formats)
 
         mature = metadata.get('adult')
@@ -103,10 +77,10 @@ def _real_extract(self, url):
         return {
             'id': channel_id,
-            'title': self._live_title(metadata.get('title') or channel_id),
+            'title': self._live_title(title.strip()),
             'is_live': True,
-            'thumbnail': try_get(metadata, lambda x: x['thumbnails']['web']),
             'channel': channel_id,
+            'channel_id': metadata.get('id'),
             'channel_url': 'https://picarto.tv/%s' % channel_id,
             'age_limit': age_limit,
             'formats': formats,
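The rewrite drops the public api.picarto.tv/v1 REST endpoint and the old token/edge/tech negotiation in favour of Picarto's internal GraphQL-style endpoint, which takes the whole query document as a single query GET parameter and returns the load-balancer URL used to fetch the stream JSON. Roughly the same request done standalone (endpoint and field names are copied from the diff; an internal API like this can change without notice):

import json
import urllib.parse
import urllib.request

channel_id = 'Setz'
query = '''{
  channel(name: "%s") {
    adult
    id
    online
    stream_name
    title
  }
  getLoadBalancerUrl(channel_name: "%s") {
    url
  }
}''' % (channel_id, channel_id)

url = 'https://ptvintern.picarto.tv/ptvapi?' + urllib.parse.urlencode({'query': query})
with urllib.request.urlopen(url) as resp:
    data = json.load(resp)['data']
print(data['channel']['online'], data['getLoadBalancerUrl']['url'])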

View File: extractor/sbs.py

@@ -10,7 +10,7 @@
 class SBSIE(InfoExtractor):
     IE_DESC = 'sbs.com.au'
-    _VALID_URL = r'https?://(?:www\.)?sbs\.com\.au/(?:ondemand(?:/video/(?:single/)?|.*?\bplay=)|news/(?:embeds/)?video/)(?P<id>[0-9]+)'
+    _VALID_URL = r'https?://(?:www\.)?sbs\.com\.au/(?:ondemand(?:/video/(?:single/)?|.*?\bplay=|/watch/)|news/(?:embeds/)?video/)(?P<id>[0-9]+)'
 
     _TESTS = [{
         # Original URL is handled by the generic IE which finds the iframe:
@@ -43,6 +43,9 @@ class SBSIE(InfoExtractor):
     }, {
         'url': 'https://www.sbs.com.au/news/embeds/video/1840778819866',
         'only_matching': True,
+    }, {
+        'url': 'https://www.sbs.com.au/ondemand/watch/1698704451971',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
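The only functional change here is the new /watch/ alternative in _VALID_URL, which lets /ondemand/watch/<id> pages resolve to the same extractor. A quick check of the new pattern against the added test URL:

import re

VALID_URL = r'https?://(?:www\.)?sbs\.com\.au/(?:ondemand(?:/video/(?:single/)?|.*?\bplay=|/watch/)|news/(?:embeds/)?video/)(?P<id>[0-9]+)'

m = re.match(VALID_URL, 'https://www.sbs.com.au/ondemand/watch/1698704451971')
print(m.group('id'))  # 1698704451971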

View File: extractor/vimeo.py

@@ -24,6 +24,7 @@
     merge_dicts,
     OnDemandPagedList,
     parse_filesize,
+    parse_iso8601,
     RegexNotFoundError,
     sanitized_Request,
     smuggle_url,
@@ -74,25 +75,28 @@ def _login(self):
                 expected=True)
             raise ExtractorError('Unable to log in')
 
-    def _verify_video_password(self, url, video_id, webpage):
+    def _get_video_password(self):
         password = self._downloader.params.get('videopassword')
         if password is None:
-            raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
-        token, vuid = self._extract_xsrft_and_vuid(webpage)
-        data = urlencode_postdata({
-            'password': password,
-            'token': token,
-        })
+            raise ExtractorError(
+                'This video is protected by a password, use the --video-password option',
+                expected=True)
+        return password
+
+    def _verify_video_password(self, url, video_id, password, token, vuid):
         if url.startswith('http://'):
             # vimeo only supports https now, but the user can give an http url
             url = url.replace('http://', 'https://')
-        password_request = sanitized_Request(url + '/password', data)
-        password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
-        password_request.add_header('Referer', url)
         self._set_vimeo_cookie('vuid', vuid)
         return self._download_webpage(
-            password_request, video_id,
-            'Verifying the password', 'Wrong password')
+            url + '/password', video_id, 'Verifying the password',
+            'Wrong password', data=urlencode_postdata({
+                'password': password,
+                'token': token,
+            }), headers={
+                'Content-Type': 'application/x-www-form-urlencoded',
+                'Referer': url,
+            })
 
     def _extract_xsrft_and_vuid(self, webpage):
         xsrft = self._search_regex(
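Password retrieval is split out of _verify_video_password so the review-link code path further down can supply a token and vuid obtained from a JSON endpoint rather than scraped from the watch page. The verification POST itself is unchanged; a standalone approximation (all values are invented, and the vuid cookie handling is omitted, so this is illustrative only):

import urllib.parse
import urllib.request

url = 'https://vimeo.com/123456789'  # hypothetical password-protected video
data = urllib.parse.urlencode({
    'password': 'hunter2',     # what --video-password would supply
    'token': 'example-xsrft',  # normally from _extract_xsrft_and_vuid
}).encode()

req = urllib.request.Request(url + '/password', data=data, headers={
    'Content-Type': 'application/x-www-form-urlencoded',
    'Referer': url,
})
# urllib.request.urlopen(req) would return the unlocked page on success
# and an error page on a wrong password.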
@@ -273,7 +277,7 @@ class VimeoIE(VimeoBaseInfoExtractor):
                         )?
                         (?:videos?/)?
                         (?P<id>[0-9]+)
-                        (?:/[\da-f]+)?
+                        (?:/(?P<unlisted_hash>[\da-f]{10}))?
                         /?(?:[?&].*)?(?:[#].*)?$
                     '''
     IE_NAME = 'vimeo'
@@ -326,9 +330,9 @@ class VimeoIE(VimeoBaseInfoExtractor):
                 'id': '54469442',
                 'ext': 'mp4',
                 'title': 'Kathy Sierra: Building the minimum Badass User, Business of Software 2012',
-                'uploader': 'The BLN & Business of Software',
-                'uploader_url': r're:https?://(?:www\.)?vimeo\.com/theblnbusinessofsoftware',
-                'uploader_id': 'theblnbusinessofsoftware',
+                'uploader': 'Business of Software',
+                'uploader_url': r're:https?://(?:www\.)?vimeo\.com/businessofsoftware',
+                'uploader_id': 'businessofsoftware',
                 'duration': 3610,
                 'description': None,
             },
@@ -463,6 +467,7 @@ class VimeoIE(VimeoBaseInfoExtractor):
                 'skip_download': True,
             },
             'expected_warnings': ['Unable to download JSON metadata'],
+            'skip': 'this page is no longer available.',
         },
         {
             'url': 'http://player.vimeo.com/video/68375962',
@@ -563,9 +568,7 @@ def _extract_url(url, webpage):
         return urls[0] if urls else None
 
     def _verify_player_video_password(self, url, video_id, headers):
-        password = self._downloader.params.get('videopassword')
-        if password is None:
-            raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
+        password = self._get_video_password()
         data = urlencode_postdata({
             'password': base64.b64encode(password.encode()),
         })
@@ -628,11 +631,37 @@ def _real_extract(self, url):
         if 'Referer' not in headers:
             headers['Referer'] = url
 
-        channel_id = self._search_regex(
-            r'vimeo\.com/channels/([^/]+)', url, 'channel id', default=None)
-
         # Extract ID from URL
-        video_id = self._match_id(url)
+        video_id, unlisted_hash = re.match(self._VALID_URL, url).groups()
+        if unlisted_hash:
+            token = self._download_json(
+                'https://vimeo.com/_rv/jwt', video_id, headers={
+                    'X-Requested-With': 'XMLHttpRequest'
+                })['token']
+            video = self._download_json(
+                'https://api.vimeo.com/videos/%s:%s' % (video_id, unlisted_hash),
+                video_id, headers={
+                    'Authorization': 'jwt ' + token,
+                }, query={
+                    'fields': 'config_url,created_time,description,license,metadata.connections.comments.total,metadata.connections.likes.total,release_time,stats.plays',
+                })
+            info = self._parse_config(self._download_json(
+                video['config_url'], video_id), video_id)
+            self._vimeo_sort_formats(info['formats'])
+            get_timestamp = lambda x: parse_iso8601(video.get(x + '_time'))
+            info.update({
+                'description': video.get('description'),
+                'license': video.get('license'),
+                'release_timestamp': get_timestamp('release'),
+                'timestamp': get_timestamp('created'),
+                'view_count': int_or_none(try_get(video, lambda x: x['stats']['plays'])),
+            })
+            connections = try_get(
+                video, lambda x: x['metadata']['connections'], dict) or {}
+            for k in ('comment', 'like'):
+                info[k + '_count'] = int_or_none(try_get(connections, lambda x: x[k + 's']['total']))
+            return info
 
         orig_url = url
         is_pro = 'vimeopro.com/' in url
         is_player = '://player.vimeo.com/video/' in url
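Unlisted URLs of the form vimeo.com/<id>/<10-hex-hash> are now resolved through the official API: a JWT is fetched from vimeo.com/_rv/jwt and used to request the <id>:<hash> composite. A standalone approximation of that handshake (endpoints and header names are taken from the diff; without the extractor's cookie and session handling it may not succeed against the live site):

import json
import urllib.request

video_id, unlisted_hash = '123456789', '0123456789'  # invented example values

req = urllib.request.Request(
    'https://vimeo.com/_rv/jwt',
    headers={'X-Requested-With': 'XMLHttpRequest'})
token = json.load(urllib.request.urlopen(req))['token']

api = urllib.request.Request(
    'https://api.vimeo.com/videos/%s:%s?fields=config_url' % (video_id, unlisted_hash),
    headers={'Authorization': 'jwt ' + token})
video = json.load(urllib.request.urlopen(api))
print(video['config_url'])  # player config with the actual format URLs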
@@ -722,7 +751,10 @@ def _real_extract(self, url):
             if re.search(r'<form[^>]+?id="pw_form"', webpage) is not None:
                 if '_video_password_verified' in data:
                     raise ExtractorError('video password verification failed!')
-                self._verify_video_password(redirect_url, video_id, webpage)
+                video_password = self._get_video_password()
+                token, vuid = self._extract_xsrft_and_vuid(webpage)
+                self._verify_video_password(
+                    redirect_url, video_id, video_password, token, vuid)
                 return self._real_extract(
                     smuggle_url(redirect_url, {'_video_password_verified': 'verified'}))
             else:
@@ -808,6 +840,8 @@ def is_rented():
             r'<link[^>]+rel=["\']license["\'][^>]+href=(["\'])(?P<license>(?:(?!\1).)+)\1',
             webpage, 'license', default=None, group='license')
 
+        channel_id = self._search_regex(
+            r'vimeo\.com/channels/([^/]+)', url, 'channel id', default=None)
         channel_url = 'https://vimeo.com/channels/%s' % channel_id if channel_id else None
 
         info_dict = {
@@ -1114,10 +1148,23 @@ def _real_initialize(self):
     def _real_extract(self, url):
         page_url, video_id = re.match(self._VALID_URL, url).groups()
-        clip_data = self._download_json(
-            page_url.replace('/review/', '/review/data/'),
-            video_id)['clipData']
-        config_url = clip_data['configUrl']
+        data = self._download_json(
+            page_url.replace('/review/', '/review/data/'), video_id)
+        if data.get('isLocked') is True:
+            video_password = self._get_video_password()
+            viewer = self._download_json(
+                'https://vimeo.com/_rv/viewer', video_id)
+            webpage = self._verify_video_password(
+                'https://vimeo.com/' + video_id, video_id,
+                video_password, viewer['xsrft'], viewer['vuid'])
+            clip_page_config = self._parse_json(self._search_regex(
+                r'window\.vimeo\.clip_page_config\s*=\s*({.+?});',
+                webpage, 'clip page config'), video_id)
+            config_url = clip_page_config['player']['config_url']
+            clip_data = clip_page_config.get('clip') or {}
+        else:
+            clip_data = data['clipData']
+            config_url = clip_data['configUrl']
         config = self._download_json(config_url, video_id)
         info_dict = self._parse_config(config, video_id)
         source_format = self._extract_original_format(
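For locked review links, _verify_video_password now returns the unlocked review page, and the player config URL is read from the inline window.vimeo.clip_page_config object instead of the /review/data/ payload. Parsing that object from a fetched page boils down to the following (the HTML sample is invented):

import json
import re

webpage = ('<script>window.vimeo.clip_page_config = {"player": '
           '{"config_url": "https://player.vimeo.com/video/123/config"}};</script>')

clip_page_config = json.loads(re.search(
    r'window\.vimeo\.clip_page_config\s*=\s*({.+?});', webpage).group(1))
print(clip_page_config['player']['config_url'])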

View File: extractor/vlive.py

@@ -113,7 +113,7 @@ def is_logged_in():
             raise ExtractorError('Unable to log in', expected=True)
 
     def _call_api(self, path_template, video_id, fields=None, limit=None):
-        query = {'appId': self._APP_ID, 'gcc': 'KR'}
+        query = {'appId': self._APP_ID, 'gcc': 'KR', 'platformType': 'PC'}
         if fields:
             query['fields'] = fields
         if limit:

View File: extractor/youtube.py

@@ -261,13 +261,23 @@ def warn(message):
             return True
 
-    def _download_webpage_handle(self, *args, **kwargs):
-        query = kwargs.get('query', {}).copy()
-        kwargs['query'] = query
-        return super(YoutubeBaseInfoExtractor, self)._download_webpage_handle(
-            *args, **compat_kwargs(kwargs))
+    def _initialize_consent(self):
+        cookies = self._get_cookies('https://www.youtube.com/')
+        if cookies.get('__Secure-3PSID'):
+            return
+        consent_id = None
+        consent = cookies.get('CONSENT')
+        if consent:
+            if 'YES' in consent.value:
+                return
+            consent_id = self._search_regex(
+                r'PENDING\+(\d+)', consent.value, 'consent', default=None)
+        if not consent_id:
+            consent_id = random.randint(100, 999)
+        self._set_cookie('.youtube.com', 'CONSENT', 'YES+cb.20210328-17-p0.en+FX+%s' % consent_id)
 
     def _real_initialize(self):
+        self._initialize_consent()
         if self._downloader is None:
             return
         if not self._login():
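Without a CONSENT cookie, requests from EU IPs are redirected to the consent wall and the extractor sees no video data; pre-seeding CONSENT=YES+... avoids that, reusing the numeric id of an existing PENDING+... value when one is present. The decision logic, lifted out of the extractor into a plain function (the cookie value format is copied from the diff):

import random
import re

def build_consent_cookie(existing=None):
    # Returns a value for the CONSENT cookie, or None if consent is already given.
    consent_id = None
    if existing:
        if 'YES' in existing:
            return None  # already consented, nothing to set
        m = re.search(r'PENDING\+(\d+)', existing)
        consent_id = m.group(1) if m else None
    if not consent_id:
        consent_id = random.randint(100, 999)
    return 'YES+cb.20210328-17-p0.en+FX+%s' % consent_id

print(build_consent_cookie('PENDING+262'))  # YES+cb.20210328-17-p0.en+FX+262
print(build_consent_cookie('YES+cb.20210328-17-p0.en+FX+262'))  # None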
@@ -1760,8 +1770,7 @@ def _real_extract(self, url):
         base_url = self.http_scheme() + '//www.youtube.com/'
         webpage_url = base_url + 'watch?v=' + video_id
         webpage = self._download_webpage(
-            webpage_url + '&has_verified=1&bpctr=9999999999',
-            video_id, fatal=False)
+            webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False)
 
         player_response = None
         if webpage:
@@ -2244,7 +2253,7 @@ def chapter_time(mmlir):
             info['channel'] = get_text(try_get(
                 vsir,
                 lambda x: x['owner']['videoOwnerRenderer']['title'],
-                compat_str))
+                dict))
             rows = try_get(
                 vsir,
                 lambda x: x['metadataRowContainer']['metadataRowContainerRenderer']['rows'],
@@ -3010,9 +3019,9 @@ def extract_entries(parent_renderer):  # this needs to called again for continuation to work with feeds
             'richItemRenderer': (extract_entries, 'contents'),  # for hashtag
             'backstagePostThreadRenderer': (self._post_thread_continuation_entries, 'contents')
         }
+        on_response_received = dict_get(response, ('onResponseReceivedActions', 'onResponseReceivedEndpoints'))
         continuation_items = try_get(
-            response,
-            lambda x: dict_get(x, ('onResponseReceivedActions', 'onResponseReceivedEndpoints'))[0]['appendContinuationItemsAction']['continuationItems'], list)
+            on_response_received, lambda x: x[0]['appendContinuationItemsAction']['continuationItems'], list)
         continuation_item = try_get(continuation_items, lambda x: x[0], dict) or {}
         video_items_renderer = None
         for key, value in continuation_item.items():
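dict_get returns the value of the first listed key that is present, so hoisting it out of the lambda flattens the nested lookup without changing behaviour (a failed lookup is still swallowed by try_get). A simplified stand-in showing the semantics (the real helper in ..utils also accepts default and skip_false_values arguments):

def dict_get(d, keys):
    # simplified stand-in for youtube-dl's utils.dict_get
    for key in keys:
        value = d.get(key)
        if value is not None:
            return value
    return None

response = {'onResponseReceivedEndpoints': [
    {'appendContinuationItemsAction': {'continuationItems': ['...']}}]}
print(dict_get(response, ('onResponseReceivedActions', 'onResponseReceivedEndpoints')))
# [{'appendContinuationItemsAction': {'continuationItems': ['...']}}]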