Compare commits

...

12 Commits

Author SHA1 Message Date
dirkf
7d2ce9c6c8
Merge 7fb0a87d7c into 4d05f84325 2024-06-27 06:36:46 +08:00
dirkf
4d05f84325 [PalcoMP3] Conform to new linter rule
* no space after @ in decorator
2024-06-20 20:03:49 +01:00
dirkf
e0094e63c3 [jsinterp] Various tweaks
* treat Infinity like NaN
* cache operator list
2024-06-20 20:03:49 +01:00
dirkf
fd8242e3ef [jsinterp] Fix and improve expression parsing
* improve BODMAS (fixes https://github.com/ytdl-org/youtube-dl/issues/32815)
* support more weird expressions with multiple unary ops
2024-06-20 20:03:49 +01:00
dirkf
ad01fa6cca [jsinterp] Add Debugger from yt-dlp
* https://github.com/yt-dlp/yt-dlp/commit/8f53dc4
* thx pukkandan
2024-06-20 20:03:49 +01:00
dirkf
2eac0fa379 [utils] Save orig_msg in ExtractorError 2024-06-20 20:03:49 +01:00
dirkf
7fb0a87d7c
Merge branch 'ytdl-org:master' into df-bitchute-ovrhaul 2023-02-04 14:41:35 +00:00
dirkf
ab362c8031
Small update, force CI 2023-02-04 02:36:34 +00:00
dirkf
add5e7dced [BitChute] Extract channel_url
* resolves #28500
2022-06-10 17:03:05 +01:00
dirkf
5ed0057798 [BitChute] Handle NSFW in listings and single videos
* listing: include NSFW if --age-limit 18 (or higher)
* single video: extract 'age_limit': 18 if NSFW
Resolves #24419
2022-06-10 05:12:22 +01:00
dirkf
1f62792551 [BitChute] Added BitChutePlaylistIE
* closes #26725
2022-06-10 01:43:39 +01:00
dirkf
79819f441b [BitChute] Back-port from yt-dlp and upgrade
BitChute
* extract timestamp instead of upload date
* improve title and description extraction
BitChuteChannel
* fix listing entries
Throughout
* relax regexes
2022-06-10 01:37:03 +01:00
8 changed files with 323 additions and 106 deletions

View File

@@ -577,9 +577,11 @@ class TestJSInterpreter(unittest.TestCase):
def test_unary_operators(self):
jsi = JSInterpreter('function f(){return 2 - - - 2;}')
self.assertEqual(jsi.call_function('f'), 0)
# fails
# jsi = JSInterpreter('function f(){return 2 + - + - - 2;}')
# self.assertEqual(jsi.call_function('f'), 0)
jsi = JSInterpreter('function f(){return 2 + - + - - 2;}')
self.assertEqual(jsi.call_function('f'), 0)
# https://github.com/ytdl-org/youtube-dl/issues/32815
jsi = JSInterpreter('function f(){return 0 - 7 * - 6;}')
self.assertEqual(jsi.call_function('f'), 42)
""" # fails so far
def test_packed(self):

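The re-enabled assertions and the new issue-32815 case can also be checked interactively against the patched interpreter; a minimal sketch, assuming the patched youtube_dl.jsinterp is importable, reusing the exact expressions from the tests above:

# Minimal sketch: exercise the unary-operator and BODMAS handling directly.
from youtube_dl.jsinterp import JSInterpreter

print(JSInterpreter('function f(){return 2 + - + - - 2;}').call_function('f'))  # 0
# https://github.com/ytdl-org/youtube-dl/issues/32815: unary minus after '*'
print(JSInterpreter('function f(){return 0 - 7 * - 6;}').call_function('f'))    # 42
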
View File

@@ -158,6 +158,10 @@ _NSIG_TESTS = [
'https://www.youtube.com/s/player/b7910ca8/player_ias.vflset/en_US/base.js',
'_hXMCwMt9qE310D', 'LoZMgkkofRMCZQ',
),
(
'https://www.youtube.com/s/player/590f65a6/player_ias.vflset/en_US/base.js',
'1tm7-g_A9zsI8_Lay_', 'xI4Vem4Put_rOg',
),
]

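The added 590f65a6 entry is picked up by the generated n-sig tests; a hedged way to run that suite from a checkout (the test file is a plain unittest script, so it also runs directly; network access is required):

# Hedged sketch: run the YouTube signature/n-sig tests, which now include
# the 590f65a6 player case added above.
import subprocess
import sys

subprocess.check_call([sys.executable, 'test/test_youtube_signature.py'])
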
View File

@@ -6,25 +6,105 @@ import re
from .common import InfoExtractor
from ..utils import (
clean_html,
ExtractorError,
GeoRestrictedError,
get_element_by_class,
get_element_by_id,
int_or_none,
merge_dicts,
orderedSet,
unified_strdate,
unified_timestamp,
urlencode_postdata,
urljoin,
)
class BitChuteIE(InfoExtractor):
class BitChuteBaseIE(InfoExtractor):
_USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.57 Safari/537.36'
def _list_entries(self, list_id):
TOKEN = 'zyG6tQcGPE5swyAEFLqKUwMuMMuF6IO2DZ6ZDQjGfsL0e4dcTLwqkTTul05Jdve7'
list_url = self._API_URL + list_id
offset = 0
query = {'showall': '1'} if 18 <= (int_or_none(self._downloader.params.get('age_limit')) or 0) else None
for page_num in itertools.count(1):
data = self._download_json(
list_url + '/extend/', list_id,
'Downloading list page %d' % (page_num, ),
data=urlencode_postdata({
'csrfmiddlewaretoken': TOKEN,
'name': '',
'offset': offset,
}), headers={
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Referer': list_url,
'X-Requested-With': 'XMLHttpRequest',
'Cookie': 'csrftoken=' + TOKEN,
}, query=query)
if data.get('success') is False:
break
html = data.get('html')
if not html:
break
video_ids = re.findall(
r'''class\s*=\s*["'](?:channel-videos-)?image-container[^>]+>\s*<a\b[^>]+\bhref\s*=\s*["']/video/([^"'/]+)''',
html)
if not video_ids:
break
offset += len(video_ids)
for video_id in video_ids:
yield self.url_result(
'https://www.bitchute.com/video/' + video_id,
ie=BitChuteIE.ie_key(), video_id=video_id)
def _search_title(self, html, title_id, **kwargs):
return (
clean_html(get_element_by_id(title_id, html)) or None
or self._og_search_title(html, default=None)
or self._html_search_regex(r'(?s)<title\b[^>]*>(.*?)</title', html, 'title', **kwargs))
def _search_description(self, html, descr_id):
return (
self._og_search_description(html)
or (descr_id and clean_html(get_element_by_id(descr_id, html)))
or self._html_search_regex(
r'(?s)<div\b[^>]+\bclass=["\']full hidden[^>]+>(.+?)</div>',
html, 'description', fatal=False))
class BitChuteIE(BitChuteBaseIE):
_VALID_URL = r'https?://(?:www\.)?bitchute\.com/(?:video|embed|torrent/[^/]+)/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://www.bitchute.com/video/szoMrox2JEI/',
'md5': '66c4a70e6bfc40dcb6be3eb1d74939eb',
'url': 'https://www.bitchute.com/video/UGlrF9o9b-Q/',
'md5': '7e427d7ed7af5a75b5855705ec750e2b',
'info_dict': {
'id': 'szoMrox2JEI',
'id': 'UGlrF9o9b-Q',
'ext': 'mp4',
'title': 'Fuck bitches get money',
'description': 'md5:3f21f6fb5b1d17c3dee9cf6b5fe60b3a',
'title': 'This is the first video on #BitChute !',
'timestamp': 1483425420,
'upload_date': '20170103',
'description': 'md5:a0337e7b1fe39e32336974af8173a034',
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': 'Victoria X Rave',
'upload_date': '20170813',
'uploader': 'BitChute',
'age_limit': None,
'channel_url': 'https://www.bitchute.com/channel/bitchute/',
},
}, {
# NSFW (#24419)
'url': 'https://www.bitchute.com/video/wrTrKp7PmFZC/',
'md5': '4ef880ce8d24e322172d41a0cf6f8096',
'info_dict': {
'id': 'wrTrKp7PmFZC',
'ext': 'mp4',
'title': "You Can't Stop Progress | Episode 2",
'timestamp': 1541476920,
'upload_date': '20181106',
'description': 'md5:f191b538a2c4d8f57540141a6bfd7eb0',
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': "You Can't Stop Progress",
'age_limit': 18,
'channel_url': 'https://www.bitchute.com/channel/ycsp/',
},
}, {
'url': 'https://www.bitchute.com/embed/lbb5G1hjPhw/',
@@ -34,51 +114,90 @@ class BitChuteIE(InfoExtractor):
'only_matching': True,
}]
@staticmethod
def _extract_urls(webpage):
urls = re.finditer(
r'''<(?:script|iframe)\b[^>]+\bsrc\s*=\s*("|')(?P<url>%s)''' % (BitChuteIE._VALID_URL, ),
webpage)
return (mobj.group('url') for mobj in urls)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'https://www.bitchute.com/video/%s' % video_id, video_id, headers={
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.57 Safari/537.36',
})
def get_error_title(html):
return clean_html(get_element_by_class('page-title', html)) or None
title = self._html_search_regex(
(r'<[^>]+\bid=["\']video-title[^>]+>([^<]+)', r'<title>([^<]+)'),
webpage, 'title', default=None) or self._html_search_meta(
'description', webpage, 'title',
default=None) or self._og_search_description(webpage)
def get_error_text(html):
return clean_html(get_element_by_id('page-detail', html)) or None
format_urls = []
webpage, urlh = self._download_webpage_handle(
'https://www.bitchute.com/video/' + video_id, video_id,
headers={
'User-Agent': self._USER_AGENT,
}, expected_status=404)
if urlh.getcode() == 404:
raise ExtractorError(get_error_title(webpage) or 'Cannot find video', expected=True)
title = self._search_title(webpage, 'video-title')
for mobj in re.finditer(
r'addWebSeed\s*\(\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage):
format_urls.append(mobj.group('url'))
format_urls.extend(re.findall(r'as=(https?://[^&"\']+)', webpage))
format_urls = [
mobj.group('url')
for mobj in re.finditer(
r'''\baddWebSeed\s*\(\s*("|')(?P<url>(?:(?!\1).)+)\1''', webpage)]
format_urls.extend(re.findall(r'''as=(https?://[^&"']+)''', webpage))
formats = [
{'url': format_url}
for format_url in orderedSet(format_urls)]
if not formats:
formats = self._parse_html5_media_entries(
url, webpage, video_id)[0]['formats']
entries = self._parse_html5_media_entries(
url, webpage, video_id)
if not entries:
error = clean_html(get_element_by_id('video-title', webpage)) or None
if error == 'Video Unavailable':
raise GeoRestrictedError(error, expected=True)
error = get_error_title(webpage)
if error:
reason = get_error_text(webpage)
if reason:
self.to_screen(reason)
raise ExtractorError(error, expected=True)
raise ExtractorError('Cannot find video', )
formats = entries[0]['formats']
self._check_formats(formats, video_id)
self._sort_formats(formats)
description = self._html_search_regex(
r'(?s)<div\b[^>]+\bclass=["\']full hidden[^>]+>(.+?)</div>',
webpage, 'description', fatal=False)
thumbnail = self._og_search_thumbnail(
webpage, default=None) or self._html_search_meta(
'twitter:image:src', webpage, 'thumbnail')
description = (
self._search_description(webpage, 'video-description')
or self._html_search_regex(
r'(?s)<div\b[^>]+\bclass=["\']full hidden[^>]+>(.+?)</div>',
webpage, 'description', fatal=False))
thumbnail = self._html_search_meta(
('og:image', 'twitter:image:src'), webpage, 'thumbnail', fatal=False)
uploader = self._html_search_regex(
(r'(?s)<div class=["\']channel-banner.*?<p\b[^>]+\bclass=["\']name[^>]+>(.+?)</p>',
r'(?s)<p\b[^>]+\bclass=["\']video-author[^>]+>(.+?)</p>'),
(r'''(?s)<div\b[^>]+?\bclass\s*=\s*["']channel-banner.*?<p\b[^>]+\bclass\s*=\s*["']name\b[^>]+>(.+?)</p>''',
r'''(?s)<p\b[^>]+\bclass\s*=\s*["']video-author\b[^>]+>(.+?)</p>'''),
webpage, 'uploader', fatal=False)
upload_date = unified_strdate(self._search_regex(
r'class=["\']video-publish-date[^>]+>[^<]+ at \d+:\d+ UTC on (.+?)\.',
webpage, 'upload date', fatal=False))
def more_unified_timestamp(x):
# ... at hh:mm TZ on month nth.
y = re.split(r'\s+at\s+', x or '')[-1]
y = re.sub(r'(?:^\s+|\s+$|\.+$|(?<=\d)(?:st|nd|rd|th))', '', y)
y = ' '.join(reversed(re.split(r'\s+on\s+', y, 1)))
return unified_timestamp(y) or unified_timestamp(x)
timestamp = more_unified_timestamp(get_element_by_class('video-publish-date', webpage))
# TODO: remove this work-around for class matching bug
webpage = re.split(r'''('|")channel-banner\1''', webpage, 1)[-1]
channel_details = get_element_by_class('details', webpage)
channel_details = channel_details and get_element_by_class('name', channel_details)
channel_url = urljoin(url, self._search_regex(
r'''<a\b[^>]*?\bhref\s*=\s*('|")(?P<url>(?:(?!\1).)+)''',
channel_details or '', 'channel url', group='url', default=None))
return {
'id': video_id,
@@ -86,57 +205,97 @@ class BitChuteIE(InfoExtractor):
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
'upload_date': upload_date,
'timestamp': timestamp,
'formats': formats,
'age_limit': 18 if '>This video has been marked as Not Safe For Work' in webpage else None,
'channel_url': channel_url,
}
class BitChuteChannelIE(InfoExtractor):
class BitChuteChannelIE(BitChuteBaseIE):
_VALID_URL = r'https?://(?:www\.)?bitchute\.com/channel/(?P<id>[^/?#&]+)'
_TEST = {
'url': 'https://www.bitchute.com/channel/victoriaxrave/',
'playlist_mincount': 185,
_TESTS = [{
'url': 'https://www.bitchute.com/channel/livesonnet/',
'playlist_mincount': 135,
'info_dict': {
'id': 'victoriaxrave',
},
'id': 'livesonnet',
'title': 'livesonnet_vidz',
'description': 'md5:b0017be20656a1347eeb84f1049fc424',
}
_TOKEN = 'zyG6tQcGPE5swyAEFLqKUwMuMMuF6IO2DZ6ZDQjGfsL0e4dcTLwqkTTul05Jdve7'
def _entries(self, channel_id):
channel_url = 'https://www.bitchute.com/channel/%s/' % channel_id
offset = 0
for page_num in itertools.count(1):
data = self._download_json(
'%sextend/' % channel_url, channel_id,
'Downloading channel page %d' % page_num,
data=urlencode_postdata({
'csrfmiddlewaretoken': self._TOKEN,
'name': '',
'offset': offset,
}), headers={
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Referer': channel_url,
'X-Requested-With': 'XMLHttpRequest',
'Cookie': 'csrftoken=%s' % self._TOKEN,
})
if data.get('success') is False:
break
html = data.get('html')
if not html:
break
video_ids = re.findall(
r'class=["\']channel-videos-image-container[^>]+>\s*<a\b[^>]+\bhref=["\']/video/([^"\'/]+)',
html)
if not video_ids:
break
offset += len(video_ids)
for video_id in video_ids:
yield self.url_result(
'https://www.bitchute.com/video/%s' % video_id,
ie=BitChuteIE.ie_key(), video_id=video_id)
}, {
# channel with NSFW content, not listed
'url': 'https://www.bitchute.com/channel/hQl9oMSgUyMX/',
'playlist_maxcount': 150,
'info_dict': {
'id': 'hQl9oMSgUyMX',
'title': "You Can't Stop Progress",
'description': 'md5:a7e3fd8cf02e96ddcc73e9f13d2ce768',
},
}, {
# channel with NSFW content, listed with adult age limit
'url': 'https://www.bitchute.com/channel/hQl9oMSgUyMX/',
'playlist_mincount': 160,
'info_dict': {
'id': 'hQl9oMSgUyMX',
'title': "You Can't Stop Progress",
'description': 'md5:a7e3fd8cf02e96ddcc73e9f13d2ce768',
},
'params': {
'age_limit': 18,
}
}]
_API_URL = 'https://www.bitchute.com/channel/'
def _real_extract(self, url):
channel_id = self._match_id(url)
return self.playlist_result(
self._entries(channel_id), playlist_id=channel_id)
webpage = self._download_webpage(
'https://www.bitchute.com/channel/' + channel_id, channel_id,
headers={
'User-Agent': self._USER_AGENT,
})
title = self._search_title(webpage, 'channel-title', default=None)
result = self.playlist_result(
self._list_entries(channel_id), playlist_id=channel_id, playlist_title=title)
return merge_dicts(
result,
{'description': self._search_description(webpage, 'channel-description')})
class BitChutePlaylistIE(BitChuteBaseIE):
_VALID_URL = r'https?://(?:www\.)?bitchute\.com/playlist/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://www.bitchute.com/playlist/g4WTfWTdYEQa/',
'playlist_mincount': 1,
'info_dict': {
'id': 'g4WTfWTdYEQa',
'title': 'Podcasts',
'description': 'Podcast Playlist',
},
}]
_API_URL = 'https://www.bitchute.com/playlist/'
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(
'https://www.bitchute.com/playlist/' + playlist_id, playlist_id,
headers={
'User-Agent': self._USER_AGENT,
})
title = self._search_title(webpage, 'playlist-title', default=None)
result = self.playlist_result(
self._list_entries(playlist_id), playlist_id=playlist_id, playlist_title=title)
description = (
clean_html(get_element_by_class('description', webpage))
or self._search_description(webpage, None))
return merge_dicts(
result,
{'description': description})

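A hedged usage sketch for the reworked BitChute extractors: the channel URL comes from the test case above, the option names are the standard YoutubeDL params, and age_limit drives the new 'showall' listing query:

# Hedged sketch: enumerate a BitChute channel including NSFW entries.
# age_limit >= 18 makes _list_entries() send 'showall': '1'.
import youtube_dl

opts = {
    'quiet': True,
    'age_limit': 18,                 # include NSFW listings
    'extract_flat': 'in_playlist',   # enumerate only, don't recurse/download
}
with youtube_dl.YoutubeDL(opts) as ydl:
    info = ydl.extract_info(
        'https://www.bitchute.com/channel/hQl9oMSgUyMX/', download=False)
    print(info.get('title'), len(list(info.get('entries') or [])))
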
View File

@@ -3033,7 +3033,6 @@ class InfoExtractor(object):
transform_source=transform_source, default=None)
def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
# allow passing `transform_source` through to _find_jwplayer_data()
transform_source = kwargs.pop('transform_source', None)
kwfind = compat_kwargs({'transform_source': transform_source}) if transform_source else {}

View File

@@ -132,6 +132,7 @@ from .biobiochiletv import BioBioChileTVIE
from .bitchute import (
BitChuteIE,
BitChuteChannelIE,
BitChutePlaylistIE,
)
from .biqle import BIQLEIE
from .bleacherreport import (

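With the import added above, the new playlist extractor is resolvable by name; a small hedged check using the existing extractor registry helper and the playlist URL from the test case:

# Hedged sketch: confirm BitChutePlaylistIE is registered and claims playlist URLs.
from youtube_dl.extractor import get_info_extractor

ie = get_info_extractor('BitChutePlaylist')
print(ie.suitable('https://www.bitchute.com/playlist/g4WTfWTdYEQa/'))  # True
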
View File

@@ -8,7 +8,7 @@ from ..compat import compat_str
from ..utils import (
int_or_none,
str_or_none,
try_get,
traverse_obj,
)
@@ -118,7 +118,8 @@ class PalcoMP3ArtistIE(PalcoMP3BaseIE):
artist = self._call_api(artist_slug, self._ARTIST_FIELDS_TMPL)['artist']
def entries():
for music in (try_get(artist, lambda x: x['musics']['nodes'], list) or []):
for music in traverse_obj(artist, (
'musics', 'nodes', lambda _, m: m['musicID'])):
yield self._parse_music(music)
return self.playlist_result(
@@ -137,7 +138,7 @@ class PalcoMP3VideoIE(PalcoMP3BaseIE):
'title': 'Maiara e Maraisa - Você Faz Falta Aqui - DVD Ao Vivo Em Campo Grande',
'description': 'md5:7043342c09a224598e93546e98e49282',
'upload_date': '20161107',
'uploader_id': 'maiaramaraisaoficial',
'uploader_id': '@maiaramaraisaoficial',
'uploader': 'Maiara e Maraisa',
}
}]

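A hedged sketch of the try_get -> traverse_obj switch on a toy payload (the dict below is illustrative, not a real PalcoMP3 API response); the callable path element filters out nodes without a truthy musicID, which the old try_get call did not do:

# Hedged sketch: traverse_obj with a callable filter vs. plain try_get.
from youtube_dl.utils import traverse_obj, try_get

artist = {'musics': {'nodes': [
    {'musicID': 42, 'title': 'kept'},
    {'musicID': None, 'title': 'dropped by the filter'},
]}}

old_nodes = try_get(artist, lambda x: x['musics']['nodes'], list) or []
new_nodes = traverse_obj(artist, ('musics', 'nodes', lambda _, m: m['musicID']))
print(len(old_nodes), len(new_nodes))  # 2 1
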
View File

@@ -14,6 +14,7 @@ from .utils import (
remove_quotes,
unified_timestamp,
variadic,
write_string,
)
from .compat import (
compat_basestring,
@@ -53,15 +54,16 @@ def wraps_op(op):
# NB In principle NaN cannot be checked by membership.
# Here all NaN values are actually this one, so _NaN is _NaN,
# although _NaN != _NaN.
# although _NaN != _NaN. Ditto Infinity.
_NaN = float('nan')
_Infinity = float('inf')
def _js_bit_op(op):
def zeroise(x):
return 0 if x in (None, JS_Undefined, _NaN) else x
return 0 if x in (None, JS_Undefined, _NaN, _Infinity) else x
@wraps_op(op)
def wrapped(a, b):
@@ -84,7 +86,7 @@ def _js_arith_op(op):
def _js_div(a, b):
if JS_Undefined in (a, b) or not (a or b):
return _NaN
return operator.truediv(a or 0, b) if b else float('inf')
return operator.truediv(a or 0, b) if b else _Infinity
def _js_mod(a, b):
@@ -220,6 +222,42 @@ class LocalNameSpace(ChainMap):
return 'LocalNameSpace%s' % (self.maps, )
class Debugger(object):
ENABLED = False
@staticmethod
def write(*args, **kwargs):
level = kwargs.get('level', 100)
def truncate_string(s, left, right=0):
if s is None or len(s) <= left + right:
return s
return '...'.join((s[:left - 3], s[-right:] if right else ''))
write_string('[debug] JS: {0}{1}\n'.format(
' ' * (100 - level),
' '.join(truncate_string(compat_str(x), 50, 50) for x in args)))
@classmethod
def wrap_interpreter(cls, f):
def interpret_statement(self, stmt, local_vars, allow_recursion, *args, **kwargs):
if cls.ENABLED and stmt.strip():
cls.write(stmt, level=allow_recursion)
try:
ret, should_ret = f(self, stmt, local_vars, allow_recursion, *args, **kwargs)
except Exception as e:
if cls.ENABLED:
if isinstance(e, ExtractorError):
e = e.orig_msg
cls.write('=> Raises:', e, '<-|', stmt, level=allow_recursion)
raise
if cls.ENABLED and stmt.strip():
if should_ret or not repr(ret) == stmt:
cls.write(['->', '=>'][should_ret], repr(ret), '<-|', stmt, level=allow_recursion)
return ret, should_ret
return interpret_statement
class JSInterpreter(object):
__named_object_counter = 0
@@ -307,8 +345,7 @@ class JSInterpreter(object):
def __op_chars(cls):
op_chars = set(';,[')
for op in cls._all_operators():
for c in op[0]:
op_chars.add(c)
op_chars.update(op[0])
return op_chars
def _named_object(self, namespace, obj):
@@ -326,9 +363,8 @@ class JSInterpreter(object):
# collections.Counter() is ~10% slower in both 2.7 and 3.9
counters = dict((k, 0) for k in _MATCHING_PARENS.values())
start, splits, pos, delim_len = 0, 0, 0, len(delim) - 1
in_quote, escaping, skipping = None, False, 0
after_op, in_regex_char_group = True, False
in_quote, escaping, after_op, in_regex_char_group = None, False, True, False
skipping = 0
for idx, char in enumerate(expr):
paren_delta = 0
if not in_quote:
@@ -382,10 +418,12 @@
return separated[0][1:].strip(), separated[1].strip()
@staticmethod
def _all_operators():
return itertools.chain(
def _all_operators(_cached=[]):
if not _cached:
_cached.extend(itertools.chain(
# Ref: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Operator_Precedence
_SC_OPERATORS, _LOG_OPERATORS, _COMP_OPERATORS, _OPERATORS)
_SC_OPERATORS, _LOG_OPERATORS, _COMP_OPERATORS, _OPERATORS))
return _cached
def _operator(self, op, left_val, right_expr, expr, local_vars, allow_recursion):
if op in ('||', '&&'):
@@ -416,7 +454,7 @@
except Exception as e:
if allow_undefined:
return JS_Undefined
raise self.Exception('Cannot get index {idx:.100}'.format(**locals()), expr=repr(obj), cause=e)
raise self.Exception('Cannot get index {idx!r:.100}'.format(**locals()), expr=repr(obj), cause=e)
def _dump(self, obj, namespace):
try:
@@ -438,6 +476,7 @@
_FINALLY_RE = re.compile(r'finally\s*\{')
_SWITCH_RE = re.compile(r'switch\s*\(')
@Debugger.wrap_interpreter
def interpret_statement(self, stmt, local_vars, allow_recursion=100):
if allow_recursion < 0:
raise self.Exception('Recursion limit reached')
@@ -511,7 +550,6 @@
expr = self._dump(inner, local_vars) + outer
if expr.startswith('('):
m = re.match(r'\((?P<d>[a-z])%(?P<e>[a-z])\.length\+(?P=e)\.length\)%(?P=e)\.length', expr)
if m:
# short-cut eval of frequently used `(d%e.length+e.length)%e.length`, worth ~6% on `pytest -k test_nsig`
@@ -693,7 +731,7 @@
(?P<op>{_OPERATOR_RE})?
=(?!=)(?P<expr>.*)$
)|(?P<return>
(?!if|return|true|false|null|undefined)(?P<name>{_NAME_RE})$
(?!if|return|true|false|null|undefined|NaN|Infinity)(?P<name>{_NAME_RE})$
)|(?P<indexing>
(?P<in>{_NAME_RE})\[(?P<idx>.+)\]$
)|(?P<attribute>
@@ -727,11 +765,12 @@
raise JS_Break()
elif expr == 'continue':
raise JS_Continue()
elif expr == 'undefined':
return JS_Undefined, should_return
elif expr == 'NaN':
return _NaN, should_return
elif expr == 'Infinity':
return _Infinity, should_return
elif md.get('return'):
return local_vars[m.group('name')], should_return
@@ -760,17 +799,27 @@
right_expr = separated.pop()
# handle operators that are both unary and binary, minimal BODMAS
if op in ('+', '-'):
# simplify/adjust consecutive instances of these operators
undone = 0
while len(separated) > 1 and not separated[-1].strip():
undone += 1
separated.pop()
if op == '-' and undone % 2 != 0:
right_expr = op + right_expr
elif op == '+':
while len(separated) > 1 and separated[-1].strip() in self.OP_CHARS:
right_expr = separated.pop() + right_expr
# hanging op at end of left => unary + (strip) or - (push right)
left_val = separated[-1]
for dm_op in ('*', '%', '/', '**'):
bodmas = tuple(self._separate(left_val, dm_op, skip_delims=skip_delim))
if len(bodmas) > 1 and not bodmas[-1].strip():
expr = op.join(separated) + op + right_expr
if len(separated) > 1:
separated.pop()
right_expr = op.join((left_val, right_expr))
else:
separated = [op.join((left_val, right_expr))]
right_expr = None
break
if right_expr is None:
@@ -797,6 +846,8 @@
def eval_method():
if (variable, member) == ('console', 'debug'):
if Debugger.ENABLED:
Debugger.write(self.interpret_expression('[{}]'.format(arg_str), local_vars, allow_recursion))
return
types = {
'String': compat_str,

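The Debugger added above can be switched on to trace interpretation of the expression from the BODMAS fix; a minimal sketch, assuming the patched youtube_dl.jsinterp:

# Minimal sketch: per-statement tracing via the new Debugger.
# Debugger.ENABLED gates the '[debug] JS:' lines written by wrap_interpreter.
from youtube_dl.jsinterp import Debugger, JSInterpreter

Debugger.ENABLED = True
jsi = JSInterpreter('function f(){return 0 - 7 * - 6;}')
print(jsi.call_function('f'))  # trace lines on stderr, then 42
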
View File

@@ -2406,7 +2406,7 @@ class ExtractorError(YoutubeDLError):
""" tb, if given, is the original traceback (so that it can be printed out).
If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
"""
self.orig_msg = msg
if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
expected = True
if video_id is not None:
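With orig_msg saved, callers (such as the Debugger above) can log the bare message even after the usual suffixes have been appended; a small hedged sketch:

# Hedged sketch: orig_msg keeps the message as passed in, while str(e)
# carries the extra note appended for unexpected errors.
from youtube_dl.utils import ExtractorError

try:
    raise ExtractorError('Unable to parse example data')
except ExtractorError as e:
    print(e.orig_msg)             # 'Unable to parse example data'
    print(str(e) == e.orig_msg)   # False: the bug-report note was appended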