Compare commits

...

8 Commits

Author SHA1 Message Date
dirkf
645b58b76e
Merge b663bba81d into 4d05f84325 2024-06-27 06:36:18 +08:00
dirkf
4d05f84325 [PalcoMP3] Conform to new linter rule
* no space after @ in decorator
2024-06-20 20:03:49 +01:00
dirkf
e0094e63c3 [jsinterp] Various tweaks
* treat Infinity like NaN
* cache operator list
2024-06-20 20:03:49 +01:00
dirkf
fd8242e3ef [jsinterp] Fix and improve expression parsing
* improve BODMAS (fixes https://github.com/ytdl-org/youtube-dl/issues/32815)
* support more weird expressions with multiple unary ops
2024-06-20 20:03:49 +01:00
dirkf
ad01fa6cca [jsinterp] Add Debugger from yt-dlp
* https://github.com/yt-dlp/yt-dlp/commit/8f53dc4
* thx pukkandan
2024-06-20 20:03:49 +01:00
dirkf
2eac0fa379 [utils] Save orig_msg in ExtractorError 2024-06-20 20:03:49 +01:00
dirkf
b663bba81d Back-port from yt-dlp PR #2149
Include MediasetShowIE, DRM work-around, etc; add MediasetClipIE
2022-01-01 23:39:53 +00:00
dirkf
3efdb2758d Fix OnDemandPagedList underflow on slice end
Also migrate towards yt-dlp structure
2021-12-30 21:33:51 +00:00
8 changed files with 358 additions and 80 deletions

View File

@ -577,9 +577,11 @@ class TestJSInterpreter(unittest.TestCase):
def test_unary_operators(self):
    """Evaluate runs of consecutive unary +/- operators.

    The second case used to fail and was kept commented out; the parser now
    handles it, so the dead commented-out duplicate is removed.
    """
    jsi = JSInterpreter('function f(){return 2 - - - 2;}')
    self.assertEqual(jsi.call_function('f'), 0)
    jsi = JSInterpreter('function f(){return 2 + - + - - 2;}')
    self.assertEqual(jsi.call_function('f'), 0)
    # https://github.com/ytdl-org/youtube-dl/issues/32815
    jsi = JSInterpreter('function f(){return 0 - 7 * - 6;}')
    self.assertEqual(jsi.call_function('f'), 42)
""" # fails so far
def test_packed(self):

View File

@ -158,6 +158,10 @@ _NSIG_TESTS = [
'https://www.youtube.com/s/player/b7910ca8/player_ias.vflset/en_US/base.js',
'_hXMCwMt9qE310D', 'LoZMgkkofRMCZQ',
),
(
'https://www.youtube.com/s/player/590f65a6/player_ias.vflset/en_US/base.js',
'1tm7-g_A9zsI8_Lay_', 'xI4Vem4Put_rOg',
),
]

View File

@ -3033,7 +3033,6 @@ class InfoExtractor(object):
transform_source=transform_source, default=None)
def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
# allow passing `transform_source` through to _find_jwplayer_data()
transform_source = kwargs.pop('transform_source', None)
kwfind = compat_kwargs({'transform_source': transform_source}) if transform_source else {}

View File

@ -672,7 +672,11 @@ from .massengeschmacktv import MassengeschmackTVIE
from .matchtv import MatchTVIE
from .mdr import MDRIE
from .medaltv import MedalTVIE
from .mediaset import MediasetIE
from .mediaset import (
MediasetIE,
MediasetClipIE,
MediasetShowIE,
)
from .mediasite import (
MediasiteIE,
MediasiteCatalogIE,

View File

@ -1,6 +1,7 @@
# coding: utf-8
from __future__ import unicode_literals
import functools
import re
from .theplatform import ThePlatformBaseIE
@ -10,7 +11,11 @@ from ..compat import (
)
from ..utils import (
ExtractorError,
GeoRestrictedError,
int_or_none,
OnDemandPagedList,
try_get,
urljoin,
update_url_query,
)
@ -30,37 +35,110 @@ class MediasetIE(ThePlatformBaseIE):
'''
_TESTS = [{
# full episode
'url': 'https://www.mediasetplay.mediaset.it/video/hellogoodbye/quarta-puntata_FAFU000000661824',
'md5': '9b75534d42c44ecef7bf1ffeacb7f85d',
'url': 'https://www.mediasetplay.mediaset.it/video/mrwronglezionidamore/episodio-1_F310575103000102',
'md5': 'a7e75c6384871f322adb781d3bd72c26',
'info_dict': {
'id': 'FAFU000000661824',
'id': 'F310575103000102',
'ext': 'mp4',
'title': 'Quarta puntata',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'title': 'Episodio 1',
'description': 'md5:e8017b7d7194e9bfb75299c2b8d81e02',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 1414.26,
'upload_date': '20161107',
'series': 'Hello Goodbye',
'timestamp': 1478532900,
'uploader': 'Rete 4',
'uploader_id': 'R4',
'duration': 2682.0,
'upload_date': '20210530',
'series': 'Mr Wrong - Lezioni d\'amore',
'timestamp': 1622413946,
'uploader': 'Canale 5',
'uploader_id': 'C5',
'season': 'Season 1',
'episode': 'Episode 1',
'season_number': 1,
'episode_number': 1,
'chapters': [{'start_time': 0.0, 'end_time': 439.88}, {'start_time': 439.88, 'end_time': 1685.84}, {'start_time': 1685.84, 'end_time': 2682.0}],
},
'skip': 'Geo restricted',
}, {
'url': 'https://www.mediasetplay.mediaset.it/video/matrix/puntata-del-25-maggio_F309013801000501',
'md5': '288532f0ad18307705b01e581304cd7b',
'md5': '1276f966ac423d16ba255ce867de073e',
'info_dict': {
'id': 'F309013801000501',
'ext': 'mp4',
'title': 'Puntata del 25 maggio',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'description': 'md5:ee2e456e3eb1dba5e814596655bb5296',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 6565.007,
'upload_date': '20180526',
'duration': 6565.008,
'upload_date': '20200903',
'series': 'Matrix',
'timestamp': 1527326245,
'timestamp': 1599172492,
'uploader': 'Canale 5',
'uploader_id': 'C5',
'season': 'Season 5',
'episode': 'Episode 5',
'season_number': 5,
'episode_number': 5,
'chapters': [{'start_time': 0.0, 'end_time': 3409.08}, {'start_time': 3409.08, 'end_time': 6565.008}],
},
}, {
'url': 'https://www.mediasetplay.mediaset.it/video/cameracafe5/episodio-69-pezzo-di-luna_F303843101017801',
'md5': 'd1650ac9ff944f185556126a736df148',
'info_dict': {
'id': 'F303843101017801',
'ext': 'mp4',
'title': 'Episodio 69 - Pezzo di luna',
'description': 'md5:7c32c8ec4118b72588b9412f11353f73',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 263.008,
'upload_date': '20200902',
'series': 'Camera Café 5',
'timestamp': 1599064700,
'uploader': 'Italia 1',
'uploader_id': 'I1',
'season': 'Season 5',
'episode': 'Episode 178',
'season_number': 5,
'episode_number': 178,
'chapters': [{'start_time': 0.0, 'end_time': 261.88}, {'start_time': 261.88, 'end_time': 263.008}],
},
'skip': 'Geo restricted',
}, {
'url': 'https://www.mediasetplay.mediaset.it/video/cameracafe5/episodio-51-tu-chi-sei_F303843107000601',
'md5': '567e9ad375b7a27a0e370650f572a1e3',
'info_dict': {
'id': 'F303843107000601',
'ext': 'mp4',
'title': 'Episodio 51 - Tu chi sei?',
'description': 'md5:42ef006e56824cc31787a547590923f4',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 367.021,
'upload_date': '20200902',
'series': 'Camera Café 5',
'timestamp': 1599069817,
'uploader': 'Italia 1',
'uploader_id': 'I1',
'season': 'Season 5',
'episode': 'Episode 6',
'season_number': 5,
'episode_number': 6,
'chapters': [{'start_time': 0.0, 'end_time': 358.68}, {'start_time': 358.68, 'end_time': 367.021}],
},
'skip': 'Geo restricted',
}, {
# movie
'url': 'https://www.mediasetplay.mediaset.it/movie/selvaggi/selvaggi_F006474501000101',
'md5': '720440187a2ae26af8148eb9e6b901ed',
'info_dict': {
'id': 'F006474501000101',
'ext': 'mp4',
'title': 'Selvaggi',
'description': 'md5:cfdedbbfdd12d4d0e5dcf1fa1b75284f',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 5233.01,
'upload_date': '20210729',
'timestamp': 1627594716,
'uploader': 'Cine34',
'uploader_id': 'B6',
'chapters': [{'start_time': 0.0, 'end_time': 1938.56}, {'start_time': 1938.56, 'end_time': 5233.01}],
},
'skip': 'Geo restricted',
}, {
# clip
'url': 'https://www.mediasetplay.mediaset.it/video/gogglebox/un-grande-classico-della-commedia-sexy_FAFU000000661680',
@ -131,6 +209,22 @@ class MediasetIE(ThePlatformBaseIE):
video.attrib['src'] = re.sub(r'(https?://vod05)t(-mediaset-it\.akamaized\.net/.+?.mpd)\?.+', r'\1\2', video.attrib['src'])
return super(MediasetIE, self)._parse_smil_formats(smil, smil_url, video_id, namespace, f4m_params, transform_rtmp_url)
def _check_drm_formats(self, tp_formats, video_id):
    # Flag SampleAES-encrypted (DRM) HLS formats and, when no clear format
    # was found, try to derive and fetch a DRM-free manifest variant.
    drm_manifest = ''
    has_clear = False
    for fmt in tp_formats:
        manifest = fmt.get('manifest_url') or ''
        if '_sampleaes/' in manifest:
            # remember the first DRM manifest seen; mark the format
            if not drm_manifest:
                drm_manifest = fmt['manifest_url']
            fmt['has_drm'] = True
        if manifest and not has_clear and not fmt.get('has_drm'):
            has_clear = True
    # rewrite the DRM manifest URL into its (presumed) unencrypted sibling
    nodrm_manifest = re.sub(r'_sampleaes/(\w+)_fp_', r'/\1_no_', drm_manifest)
    if has_clear or nodrm_manifest == drm_manifest:
        # either a clear format already exists, or no DRM URL to rewrite
        return
    tp_formats.extend(self._extract_m3u8_formats(
        nodrm_manifest, video_id, m3u8_id='hls', fatal=False) or [])
def _real_extract(self, url):
guid = self._match_id(url)
tp_path = 'PR1GhC/media/guid/2702976343/' + guid
@ -138,44 +232,64 @@ class MediasetIE(ThePlatformBaseIE):
formats = []
subtitles = {}
first_e = None
for asset_type in ('SD', 'HD'):
first_e = geo_e = None
asset_type = 'geoNo:HD,browser,geoIT|geoNo:HD,geoIT|geoNo:SD,browser,geoIT|geoNo:SD,geoIT|geoNo|HD|SD'
# TODO: fixup ISM+none manifest URLs
for f in ('MPEG4', 'MPEG-DASH+none', 'M3U+none'):
for f in ('MPEG4', 'M3U'):
try:
tp_formats, tp_subtitles = self._extract_theplatform_smil(
update_url_query('http://link.theplatform.%s/s/%s' % (self._TP_TLD, tp_path), {
'mbr': 'true',
'formats': f,
'assetTypes': asset_type,
}), guid, 'Downloading %s %s SMIL data' % (f.split('+')[0], asset_type))
}), guid, 'Downloading %s SMIL data' % (f.split('+')[0]))
except ExtractorError as e:
if not first_e:
first_e = e
break
for tp_f in tp_formats:
tp_f['quality'] = 1 if asset_type == 'HD' else 0
if not geo_e and isinstance(e, GeoRestrictedError):
geo_e = e
continue
self._check_drm_formats(tp_formats, guid)
formats.extend(tp_formats)
subtitles = self._merge_subtitles(subtitles, tp_subtitles)
if first_e and not formats:
# check for errors and report them
if (first_e or geo_e) and not formats:
if geo_e:
raise geo_e
if 'None of the available releases match' in first_e.message:
raise ExtractorError('No non-DRM formats available', cause=first_e)
raise first_e
self._sort_formats(formats)
fields = []
for templ, repls in (('tvSeason%sNumber', ('', 'Episode')), ('mediasetprogram$%s', ('brandTitle', 'numberOfViews', 'publishInfo'))):
fields.extend(templ % repl for repl in repls)
feed_data = self._download_json(
'https://feed.entertainment.tv.theplatform.eu/f/PR1GhC/mediaset-prod-all-programs/guid/-/' + guid,
guid, fatal=False, query={'fields': ','.join(fields)})
'https://feed.entertainment.tv.theplatform.eu/f/PR1GhC/mediaset-prod-all-programs-v2/guid/-/' + guid,
guid, fatal=False)
if feed_data:
publish_info = feed_data.get('mediasetprogram$publishInfo') or {}
thumbnails = feed_data.get('thumbnails') or {}
thumbnail = None
for key, value in thumbnails.items():
if key.startswith('image_keyframe_poster-'):
thumbnail = value.get('url')
break
info.update({
'episode_number': int_or_none(feed_data.get('tvSeasonEpisodeNumber')),
'season_number': int_or_none(feed_data.get('tvSeasonNumber')),
'series': feed_data.get('mediasetprogram$brandTitle'),
'description': info.get('description') or feed_data.get('description') or feed_data.get('longDescription'),
'uploader': publish_info.get('description'),
'uploader_id': publish_info.get('channel'),
'view_count': int_or_none(feed_data.get('mediasetprogram$numberOfViews')),
'thumbnail': thumbnail,
})
if feed_data.get('programType') == 'episode':
info.update({
'episode_number': int_or_none(
feed_data.get('tvSeasonEpisodeNumber')),
'season_number': int_or_none(
feed_data.get('tvSeasonNumber')),
'series': feed_data.get('mediasetprogram$brandTitle'),
})
info.update({
@ -184,3 +298,98 @@ class MediasetIE(ThePlatformBaseIE):
'subtitles': subtitles,
})
return info
class MediasetClipIE(MediasetIE):
    # Resolves clip pages on *.mediaset.it sites to the programme GUID,
    # then defers to MediasetIE via a mediaset: URL.
    _VALID_URL = r'https?://(?:www\.)?\w+\.mediaset\.it/video/(?:[^/]+/)*[\w-]+_(?P<id>\d+)\.s?html?'
    _TESTS = [{
        'url': 'https://www.grandefratello.mediaset.it/video/ventinovesima-puntata_27071.shtml',
        'info_dict': {
            'id': 'F310293901002901',
            'ext': 'mp4',
        },
        'skip': 'Geo restricted, DRM content',
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        page = self._download_webpage(url, video_id)
        # the GUID is embedded either as a JS variable or as plain text
        guid = self._search_regex(
            (r'''var\s*_onplay_guid\s*=\s*(?P<q>'|"|\b)(?P<guid>[\dA-Z]{16,})(?P=q)\s*;''',
             r'\bGUID\s+(?P<guid>[\dA-Z]{16,})\b', ),
            page, 'clip GUID', group='guid')
        return self.url_result('mediaset:%s' % guid, ie='Mediaset', video_id=video_id)
class MediasetShowIE(MediasetIE):
    # Playlist extractor for mediasetplay show pages. The URL may carry up to
    # three identifiers: SE<12 digits> (show), ST<12 digits> (season) and
    # sb<9 digits> (sub-brand, i.e. a specific playlist within a season).
    _VALID_URL = r'''(?x)
                    (?:
                        https?://
                        (?:(?:www|static3)\.)?mediasetplay\.mediaset\.it/
                        (?:
                            (?:fiction|programmi-tv|serie-tv)/(?:.+?/)?
                            (?:[a-z-]+)_SE(?P<id>\d{12})
                            (?:,ST(?P<st>\d{12}))?
                            (?:,sb(?P<sb>\d{9}))?$
                        )
                    )
                    '''
    _TESTS = [{
        # TV Show webpage (general webpage)
        'url': 'https://www.mediasetplay.mediaset.it/programmi-tv/leiene/leiene_SE000000000061',
        'info_dict': {
            'id': '000000000061',
            'title': 'Le Iene',
        },
        'playlist_mincount': 7,
    }, {
        # TV Show webpage (specific season)
        'url': 'https://www.mediasetplay.mediaset.it/programmi-tv/leiene/leiene_SE000000000061,ST000000002763',
        'info_dict': {
            'id': '000000002763',
            'title': 'Le Iene',
        },
        'playlist_mincount': 7,
    }, {
        # TV Show specific playlist (with multiple pages)
        'url': 'https://www.mediasetplay.mediaset.it/programmi-tv/leiene/iservizi_SE000000000061,ST000000002763,sb100013375',
        'info_dict': {
            'id': '100013375',
            'title': 'I servizi',
        },
        'playlist_mincount': 50,
    }]

    # feed query template: %s -> sub-brand id, %d-%d -> 1-based inclusive range
    _BY_SUBBRAND = 'https://feed.entertainment.tv.theplatform.eu/f/PR1GhC/mediaset-prod-all-programs-v2?byCustomValue={subBrandId}{%s}&sort=:publishInfo_lastPublished|desc,tvSeasonEpisodeNumber|desc&range=%d-%d'
    _PAGE_SIZE = 25

    # compatibility shim for yt-dlp's InfoExtractor._match_valid_url()
    _match_valid_url = lambda s, u: re.match(s._VALID_URL, u)

    def _fetch_page(self, sb, page):
        # Download one page (``page`` is 0-based) of entries for sub-brand
        # ``sb`` and yield url_result dicts pointing at mediaset:<guid>.
        lower_limit = page * self._PAGE_SIZE + 1
        upper_limit = lower_limit + self._PAGE_SIZE - 1
        content = self._download_json(
            self._BY_SUBBRAND % (sb, lower_limit, upper_limit), sb)
        for entry in content.get('entries') or []:
            res = self.url_result('mediaset:' + entry['guid'])
            if res:
                # stash the sub-brand description so _real_extract() can use
                # the first entry's value as the playlist title
                res['playlist_title'] = entry['mediasetprogram$subBrandDescription']
            yield res

    def _real_extract(self, url):
        playlist_id, st, sb = self._match_valid_url(url).group('id', 'st', 'sb')
        if not sb:
            # no sub-brand in the URL: scrape the page for links to
            # SE…,ST…,sb… playlists and return them as a playlist of playlists
            page = self._download_webpage(url, st or playlist_id)
            entries = [self.url_result(urljoin('https://www.mediasetplay.mediaset.it', url))
                       for url in re.findall(r'href="([^<>=]+SE\d{12},ST\d{12},sb\d{9})">[^<]+<', page)]
            title = (self._html_search_regex(r'(?s)<h1[^>]*>(.+?)</h1>', page, 'title', default=None)
                    or self._og_search_title(page))
            return self.playlist_result(entries, st or playlist_id, title)

        # sub-brand given: page lazily through the feed
        entries = OnDemandPagedList(
            functools.partial(self._fetch_page, sb),
            self._PAGE_SIZE)
        # slice explicitly, as no __getitem__ in OnDemandPagedList yet
        title = try_get(entries, lambda x: x.getslice(0, 1)[0]['playlist_title'])
        return self.playlist_result(entries, sb, title)

View File

@ -8,7 +8,7 @@ from ..compat import compat_str
from ..utils import (
int_or_none,
str_or_none,
try_get,
traverse_obj,
)
@ -109,7 +109,7 @@ class PalcoMP3ArtistIE(PalcoMP3BaseIE):
}
name'''
@ classmethod
@classmethod
def suitable(cls, url):
return False if re.match(PalcoMP3IE._VALID_URL, url) else super(PalcoMP3ArtistIE, cls).suitable(url)
@ -118,7 +118,8 @@ class PalcoMP3ArtistIE(PalcoMP3BaseIE):
artist = self._call_api(artist_slug, self._ARTIST_FIELDS_TMPL)['artist']
def entries():
for music in (try_get(artist, lambda x: x['musics']['nodes'], list) or []):
for music in traverse_obj(artist, (
'musics', 'nodes', lambda _, m: m['musicID'])):
yield self._parse_music(music)
return self.playlist_result(
@ -137,7 +138,7 @@ class PalcoMP3VideoIE(PalcoMP3BaseIE):
'title': 'Maiara e Maraisa - Você Faz Falta Aqui - DVD Ao Vivo Em Campo Grande',
'description': 'md5:7043342c09a224598e93546e98e49282',
'upload_date': '20161107',
'uploader_id': 'maiaramaraisaoficial',
'uploader_id': '@maiaramaraisaoficial',
'uploader': 'Maiara e Maraisa',
}
}]

View File

@ -14,6 +14,7 @@ from .utils import (
remove_quotes,
unified_timestamp,
variadic,
write_string,
)
from .compat import (
compat_basestring,
@ -53,15 +54,16 @@ def wraps_op(op):
# NB In principle NaN cannot be checked by membership.
# Here all NaN values are actually this one, so _NaN is _NaN,
# although _NaN != _NaN.
# although _NaN != _NaN. Ditto Infinity.
_NaN = float('nan')
_Infinity = float('inf')
def _js_bit_op(op):
def zeroise(x):
return 0 if x in (None, JS_Undefined, _NaN) else x
return 0 if x in (None, JS_Undefined, _NaN, _Infinity) else x
@wraps_op(op)
def wrapped(a, b):
@ -84,7 +86,7 @@ def _js_arith_op(op):
def _js_div(a, b):
    """JS-style division: NaN for an undefined operand or 0/0, Infinity for x/0.

    The stale pre-change ``float('inf')`` line left over from the diff is
    removed; the shared ``_Infinity`` singleton is used instead so that the
    identity-based Infinity checks elsewhere keep working.
    """
    if JS_Undefined in (a, b) or not (a or b):
        return _NaN
    return operator.truediv(a or 0, b) if b else _Infinity
def _js_mod(a, b):
@ -220,6 +222,42 @@ class LocalNameSpace(ChainMap):
return 'LocalNameSpace%s' % (self.maps, )
class Debugger(object):
    # Opt-in tracing of JSInterpreter statement evaluation (ported from yt-dlp).
    # Set ENABLED = True to log every interpreted statement and its result.
    ENABLED = False

    @staticmethod
    def write(*args, **kwargs):
        # `level` is the remaining recursion budget (default 100); deeper
        # calls pass a smaller value, producing a larger indent below.
        level = kwargs.get('level', 100)

        def truncate_string(s, left, right=0):
            # Keep at most `left` head and `right` tail chars, '...' between.
            if s is None or len(s) <= left + right:
                return s
            return '...'.join((s[:left - 3], s[-right:] if right else ''))

        write_string('[debug] JS: {0}{1}\n'.format(
            ' ' * (100 - level),
            ' '.join(truncate_string(compat_str(x), 50, 50) for x in args)))

    @classmethod
    def wrap_interpreter(cls, f):
        # Decorator for interpret_statement(): logs each statement before
        # evaluation and its result (or the raised exception) afterwards.
        def interpret_statement(self, stmt, local_vars, allow_recursion, *args, **kwargs):
            if cls.ENABLED and stmt.strip():
                cls.write(stmt, level=allow_recursion)
            try:
                ret, should_ret = f(self, stmt, local_vars, allow_recursion, *args, **kwargs)
            except Exception as e:
                if cls.ENABLED:
                    if isinstance(e, ExtractorError):
                        # log the un-decorated message saved by ExtractorError
                        e = e.orig_msg
                    cls.write('=> Raises:', e, '<-|', stmt, level=allow_recursion)
                raise
            if cls.ENABLED and stmt.strip():
                # skip logging trivial self-evaluating statements
                if should_ret or not repr(ret) == stmt:
                    cls.write(['->', '=>'][should_ret], repr(ret), '<-|', stmt, level=allow_recursion)
            return ret, should_ret
        return interpret_statement
class JSInterpreter(object):
__named_object_counter = 0
@ -307,8 +345,7 @@ class JSInterpreter(object):
def __op_chars(cls):
op_chars = set(';,[')
for op in cls._all_operators():
for c in op[0]:
op_chars.add(c)
op_chars.update(op[0])
return op_chars
def _named_object(self, namespace, obj):
@ -326,9 +363,8 @@ class JSInterpreter(object):
# collections.Counter() is ~10% slower in both 2.7 and 3.9
counters = dict((k, 0) for k in _MATCHING_PARENS.values())
start, splits, pos, delim_len = 0, 0, 0, len(delim) - 1
in_quote, escaping, skipping = None, False, 0
after_op, in_regex_char_group = True, False
in_quote, escaping, after_op, in_regex_char_group = None, False, True, False
skipping = 0
for idx, char in enumerate(expr):
paren_delta = 0
if not in_quote:
@ -382,10 +418,12 @@ class JSInterpreter(object):
return separated[0][1:].strip(), separated[1].strip()
@staticmethod
def _all_operators(_cached=[]):
    """Return the combined operator table, computed once.

    The mutable default argument is deliberate: it acts as a cache so the
    chain of operator tables is materialised only on the first call. The
    stale pre-change ``return itertools.chain(...)`` lines left over from
    the diff are removed.
    """
    if not _cached:
        _cached.extend(itertools.chain(
            # Ref: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Operator_Precedence
            _SC_OPERATORS, _LOG_OPERATORS, _COMP_OPERATORS, _OPERATORS))
    return _cached
def _operator(self, op, left_val, right_expr, expr, local_vars, allow_recursion):
if op in ('||', '&&'):
@ -416,7 +454,7 @@ class JSInterpreter(object):
except Exception as e:
if allow_undefined:
return JS_Undefined
raise self.Exception('Cannot get index {idx:.100}'.format(**locals()), expr=repr(obj), cause=e)
raise self.Exception('Cannot get index {idx!r:.100}'.format(**locals()), expr=repr(obj), cause=e)
def _dump(self, obj, namespace):
try:
@ -438,6 +476,7 @@ class JSInterpreter(object):
_FINALLY_RE = re.compile(r'finally\s*\{')
_SWITCH_RE = re.compile(r'switch\s*\(')
@Debugger.wrap_interpreter
def interpret_statement(self, stmt, local_vars, allow_recursion=100):
if allow_recursion < 0:
raise self.Exception('Recursion limit reached')
@ -511,7 +550,6 @@ class JSInterpreter(object):
expr = self._dump(inner, local_vars) + outer
if expr.startswith('('):
m = re.match(r'\((?P<d>[a-z])%(?P<e>[a-z])\.length\+(?P=e)\.length\)%(?P=e)\.length', expr)
if m:
# short-cut eval of frequently used `(d%e.length+e.length)%e.length`, worth ~6% on `pytest -k test_nsig`
@ -693,7 +731,7 @@ class JSInterpreter(object):
(?P<op>{_OPERATOR_RE})?
=(?!=)(?P<expr>.*)$
)|(?P<return>
(?!if|return|true|false|null|undefined)(?P<name>{_NAME_RE})$
(?!if|return|true|false|null|undefined|NaN|Infinity)(?P<name>{_NAME_RE})$
)|(?P<indexing>
(?P<in>{_NAME_RE})\[(?P<idx>.+)\]$
)|(?P<attribute>
@ -727,11 +765,12 @@ class JSInterpreter(object):
raise JS_Break()
elif expr == 'continue':
raise JS_Continue()
elif expr == 'undefined':
return JS_Undefined, should_return
elif expr == 'NaN':
return _NaN, should_return
elif expr == 'Infinity':
return _Infinity, should_return
elif md.get('return'):
return local_vars[m.group('name')], should_return
@ -760,17 +799,27 @@ class JSInterpreter(object):
right_expr = separated.pop()
# handle operators that are both unary and binary, minimal BODMAS
if op in ('+', '-'):
# simplify/adjust consecutive instances of these operators
undone = 0
while len(separated) > 1 and not separated[-1].strip():
undone += 1
separated.pop()
if op == '-' and undone % 2 != 0:
right_expr = op + right_expr
elif op == '+':
while len(separated) > 1 and separated[-1].strip() in self.OP_CHARS:
right_expr = separated.pop() + right_expr
# hanging op at end of left => unary + (strip) or - (push right)
left_val = separated[-1]
for dm_op in ('*', '%', '/', '**'):
bodmas = tuple(self._separate(left_val, dm_op, skip_delims=skip_delim))
if len(bodmas) > 1 and not bodmas[-1].strip():
expr = op.join(separated) + op + right_expr
if len(separated) > 1:
separated.pop()
right_expr = op.join((left_val, right_expr))
else:
separated = [op.join((left_val, right_expr))]
right_expr = None
break
if right_expr is None:
@ -797,6 +846,8 @@ class JSInterpreter(object):
def eval_method():
if (variable, member) == ('console', 'debug'):
if Debugger.ENABLED:
Debugger.write(self.interpret_expression('[{}]'.format(arg_str), local_vars, allow_recursion))
return
types = {
'String': compat_str,

View File

@ -2406,7 +2406,7 @@ class ExtractorError(YoutubeDLError):
""" tb, if given, is the original traceback (so that it can be printed out).
If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
"""
self.orig_msg = msg
if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
expected = True
if video_id is not None:
@ -4118,6 +4118,12 @@ class PagedList(object):
# This is only useful for tests
return len(self.getslice())
def _getslice(self, start, end):
    # Subclass hook: produce the items in [start, end); `end` may be None
    # meaning "to the end of the list".
    raise NotImplementedError('This method must be implemented by subclasses')
def getslice(self, start=0, end=None):
    # Public wrapper: materialise the (possibly lazy) _getslice() output.
    return list(self._getslice(start, end))
class OnDemandPagedList(PagedList):
def __init__(self, pagefunc, pagesize, use_cache=True):
@ -4127,11 +4133,12 @@ class OnDemandPagedList(PagedList):
if use_cache:
self._cache = {}
def getslice(self, start=0, end=None):
res = []
for pagenum in itertools.count(start // self._pagesize):
firstid = pagenum * self._pagesize
nextfirstid = pagenum * self._pagesize + self._pagesize
def _getslice(self, start=0, end=None):
firstpage = start // self._pagesize
nextfirstid = firstpage * self._pagesize
for pagenum in itertools.count(firstpage):
firstid = nextfirstid
nextfirstid += self._pagesize
if start >= nextfirstid:
continue
@ -4144,18 +4151,19 @@ class OnDemandPagedList(PagedList):
self._cache[pagenum] = page_results
startv = (
start % self._pagesize
start - firstid
if firstid <= start < nextfirstid
else 0)
endv = (
((end - 1) % self._pagesize) + 1
end - firstid
if (end is not None and firstid <= end <= nextfirstid)
else None)
if startv != 0 or endv is not None:
page_results = page_results[startv:endv]
res.extend(page_results)
for item in page_results:
yield item
# A little optimization - if current page is not "full", ie. does
# not contain page_size videos then we can assume that this page
@ -4168,7 +4176,7 @@ class OnDemandPagedList(PagedList):
# break out early as well
if end == nextfirstid:
break
return res
return
class InAdvancePagedList(PagedList):