Compare commits

...

9 Commits

Author SHA1 Message Date
dirkf
92194bd10a
Merge 14a087ec62 into 4d05f84325 2024-06-27 06:36:05 +08:00
dirkf
4d05f84325 [PalcoMP3] Conform to new linter rule
* no space after @ in decorator
2024-06-20 20:03:49 +01:00
dirkf
e0094e63c3 [jsinterp] Various tweaks
* treat Infinity like NaN
* cache operator list
2024-06-20 20:03:49 +01:00
dirkf
fd8242e3ef [jsinterp] Fix and improve expression parsing
* improve BODMAS (fixes https://github.com/ytdl-org/youtube-dl/issues/32815)
* support more weird expressions with multiple unary ops
2024-06-20 20:03:49 +01:00
dirkf
ad01fa6cca [jsinterp] Add Debugger from yt-dlp
* https://github.com/yt-dlp/yt-dlp/commit/8f53dc4
* thx pukkandan
2024-06-20 20:03:49 +01:00
dirkf
2eac0fa379 [utils] Save orig_msg in ExtractorError 2024-06-20 20:03:49 +01:00
dirkf
14a087ec62 [SpankBang] Support category, tag, search, channel, star and profile pages 2022-06-07 19:53:31 +01:00
dirkf
836463013c [SpankBang] Rework SpankBangPlaylistIE with pagination 2022-06-06 14:47:40 +01:00
dirkf
30a954bad9 [SpankBang] Back-port changes to SpankBangIE from yt-dlp
* improve title extraction
* add uploader_id
* update test
* but don't check file md5
2022-06-06 14:18:46 +01:00
8 changed files with 261 additions and 57 deletions

View File

@ -577,9 +577,11 @@ class TestJSInterpreter(unittest.TestCase):
def test_unary_operators(self):
jsi = JSInterpreter('function f(){return 2 - - - 2;}')
self.assertEqual(jsi.call_function('f'), 0)
# fails
# jsi = JSInterpreter('function f(){return 2 + - + - - 2;}')
# self.assertEqual(jsi.call_function('f'), 0)
jsi = JSInterpreter('function f(){return 2 + - + - - 2;}')
self.assertEqual(jsi.call_function('f'), 0)
# https://github.com/ytdl-org/youtube-dl/issues/32815
jsi = JSInterpreter('function f(){return 0 - 7 * - 6;}')
self.assertEqual(jsi.call_function('f'), 42)
""" # fails so far
def test_packed(self):

View File

@ -158,6 +158,10 @@ _NSIG_TESTS = [
'https://www.youtube.com/s/player/b7910ca8/player_ias.vflset/en_US/base.js',
'_hXMCwMt9qE310D', 'LoZMgkkofRMCZQ',
),
(
'https://www.youtube.com/s/player/590f65a6/player_ias.vflset/en_US/base.js',
'1tm7-g_A9zsI8_Lay_', 'xI4Vem4Put_rOg',
),
]

View File

@ -3033,7 +3033,6 @@ class InfoExtractor(object):
transform_source=transform_source, default=None)
def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
# allow passing `transform_source` through to _find_jwplayer_data()
transform_source = kwargs.pop('transform_source', None)
kwfind = compat_kwargs({'transform_source': transform_source}) if transform_source else {}

View File

@ -1179,6 +1179,7 @@ from .southpark import (
from .spankbang import (
SpankBangIE,
SpankBangPlaylistIE,
SpankBangListIE,
)
from .spankwire import SpankwireIE
from .spiegel import SpiegelIE

View File

@ -8,7 +8,7 @@ from ..compat import compat_str
from ..utils import (
int_or_none,
str_or_none,
try_get,
traverse_obj,
)
@ -109,7 +109,7 @@ class PalcoMP3ArtistIE(PalcoMP3BaseIE):
}
name'''
@ classmethod
@classmethod
def suitable(cls, url):
return False if re.match(PalcoMP3IE._VALID_URL, url) else super(PalcoMP3ArtistIE, cls).suitable(url)
@ -118,7 +118,8 @@ class PalcoMP3ArtistIE(PalcoMP3BaseIE):
artist = self._call_api(artist_slug, self._ARTIST_FIELDS_TMPL)['artist']
def entries():
for music in (try_get(artist, lambda x: x['musics']['nodes'], list) or []):
for music in traverse_obj(artist, (
'musics', 'nodes', lambda _, m: m['musicID'])):
yield self._parse_music(music)
return self.playlist_result(
@ -137,7 +138,7 @@ class PalcoMP3VideoIE(PalcoMP3BaseIE):
'title': 'Maiara e Maraisa - Você Faz Falta Aqui - DVD Ao Vivo Em Campo Grande',
'description': 'md5:7043342c09a224598e93546e98e49282',
'upload_date': '20161107',
'uploader_id': 'maiaramaraisaoficial',
'uploader_id': '@maiaramaraisaoficial',
'uploader': 'Maiara e Maraisa',
}
}]

View File

@ -1,11 +1,17 @@
# coding: utf-8
from __future__ import unicode_literals
import itertools
import re
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
determine_ext,
extract_attributes,
ExtractorError,
get_element_by_class,
get_element_by_id,
merge_dicts,
parse_duration,
parse_resolution,
@ -26,19 +32,24 @@ class SpankBangIE(InfoExtractor):
)
'''
_TESTS = [{
'url': 'http://spankbang.com/3vvn/video/fantasy+solo',
'md5': '1cc433e1d6aa14bc376535b8679302f7',
'url': 'https://spankbang.com/56b3d/video/the+slut+maker+hmv',
'md5': '5039ba9d26f6124a7fdea6df2f21e765',
'info_dict': {
'id': '3vvn',
'id': '56b3d',
'ext': 'mp4',
'title': 'fantasy solo',
'description': 'dillion harper masturbates on a bed',
'title': 'The Slut Maker HMV',
'description': 'Girls getting converted into cock slaves.',
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': 'silly2587',
'timestamp': 1422571989,
'upload_date': '20150129',
'uploader': 'Mindself',
'uploader_id': 'mindself',
'timestamp': 1617109572,
'upload_date': '20210330',
'age_limit': 18,
}
},
'params': {
# adaptive download
'skip_download': True,
},
}, {
# 480p only
'url': 'http://spankbang.com/1vt0/video/solvane+gangbang',
@ -134,15 +145,15 @@ class SpankBangIE(InfoExtractor):
info = self._search_json_ld(webpage, video_id, default={})
title = self._html_search_regex(
r'(?s)<h1[^>]*>(.+?)</h1>', webpage, 'title', default=None)
r'(?s)<h1[^>]+\btitle=["\']([^"]+)["\']>', webpage, 'title', default=None)
description = self._search_regex(
r'<div[^>]+\bclass=["\']bottom[^>]+>\s*<p>[^<]*</p>\s*<p>([^<]+)',
webpage, 'description', default=None)
thumbnail = self._og_search_thumbnail(webpage, default=None)
uploader = self._html_search_regex(
(r'(?s)<li[^>]+class=["\']profile[^>]+>(.+?)</a>',
r'class="user"[^>]*><img[^>]+>([^<]+)'),
webpage, 'uploader', default=None)
r'<svg[^>]+\bclass="(?:[^"]*?user[^"]*?)">.*?</svg>([^<]+)', webpage, 'uploader', default=None)
uploader_id = self._html_search_regex(
r'<a[^>]+href="/profile/([^"]+)"', webpage, 'uploader_id', default=None)
duration = parse_duration(self._search_regex(
r'<div[^>]+\bclass=["\']right_side[^>]+>\s*<span>([^<]+)',
webpage, 'duration', default=None))
@ -157,6 +168,7 @@ class SpankBangIE(InfoExtractor):
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
'uploader_id': uploader_id,
'duration': duration,
'view_count': view_count,
'formats': formats,
@ -167,32 +179,166 @@ class SpankBangIE(InfoExtractor):
class SpankBangPlaylistIE(InfoExtractor):
_VALID_URL = r'https?://(?:[^/]+\.)?spankbang\.com/(?P<id>[\da-z]+)/playlist/(?P<display_id>[^/]+)'
_TEST = {
_TESTS = [{
'url': 'https://spankbang.com/ug0k/playlist/big+ass+titties',
'info_dict': {
'id': 'ug0k',
'title': 'Big Ass Titties',
'description': 'md5:65b01bb13a9276cf172a67a41304bafd',
},
'playlist_mincount': 40,
}
'playlist_mincount': 35,
}, {
# pagination required
'url': 'https://spankbang.com/51wxk/playlist/dance',
'info_dict': {
'id': '51wxk',
'title': 'Dance',
'description': 'md5:7aae6991c65d561a9319ecab31f857e2',
},
'playlist_mincount': 60,
}]
def _entries(self, url, playlist_id, webpage=None):
    """Yield video-page URLs from a SpankBang listing, following pagination.

    url -- URL of the listing page to start from
    playlist_id -- ID used in download progress messages
    webpage -- HTML of the first page, if already downloaded (avoids a refetch)
    """
    # 1-based page counter, used only for the progress note
    for ii in itertools.count(1):
        if not webpage:
            webpage = self._download_webpage(
                url, playlist_id,
                note='Downloading playlist page %d' % (ii, ),
                fatal=False)
            if not webpage:
                # fatal=False returns a falsy value on failure: stop paginating
                break
        # search <main id="container">...</main>.innerHTML
        for mobj in re.finditer(
                r'''<a\b[^>]*?\bclass\s*=\s*('|")(?:(?:(?!\1).)+?\s)?\s*thumb\b[^>]*>''',
                get_element_by_id('container', webpage) or webpage):
            item_url = extract_attributes(mobj.group(0)).get('href')
            if item_url:
                yield urljoin(url, item_url)
        # continuation link lives inside the element with class "next"
        next_url = self._search_regex(
            r'''\bhref\s*=\s*(["'])(?P<path>(?!\1).+?)/?\1''',
            get_element_by_class('next', webpage) or '',
            'continuation page', group='path', default=None)
        if next_url is None or next_url in url:
            # no further page, or "next" points back into the current URL
            break
        url, webpage = urljoin(url, next_url), None
        # normalise the continuation URL to carry a trailing slash on its path
        p_url = compat_urlparse.urlparse(url)
        url = compat_urlparse.urlunparse(p_url._replace(path=p_url.path + '/'))
def _get_title(self, list_id, webpage, url):
    """Return the playlist title from the page's <h1>, or, if that fails,
    a title-cased form of the URL slug (e.g. 'big+ass+titties' -> 'Big Ass Titties').

    NOTE(review): `url` is unused here; presumably kept so subclasses
    (e.g. SpankBangListIE) can override with the same signature.
    """
    return self._html_search_regex(
        r'<h1>([^<]+)\s+playlist\s*<', webpage, 'playlist title',
        fatal=False) or re.sub(r'(\w)\+(\w)', r'\1 \2', list_id).title()
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
display_id = mobj.group('display_id')
mobj = re.match(self._VALID_URL, url).groupdict()
playlist_id = mobj['id']
display_id = mobj.get('display_id') or playlist_id
related = mobj.get('related')
webpage = self._download_webpage(
url, playlist_id, headers={'Cookie': 'country=US; mobile=on'})
webpage = self._download_webpage(url, playlist_id)
entries = [self.url_result(
urljoin(url, mobj.group('path')),
ie=SpankBangIE.ie_key(), video_id=mobj.group('id'))
for mobj in re.finditer(
r'<a[^>]+\bhref=(["\'])(?P<path>/?[\da-z]+-(?P<id>[\da-z]+)/playlist/%s(?:(?!\1).)*)\1'
% re.escape(display_id), webpage)]
def get_title():
t = self._get_title(display_id, webpage, url)
if related:
t = '%s of %s' % (related.title(), t, )
return t
title = self._html_search_regex(
r'<h1>([^<]+)\s+playlist\s*<', webpage, 'playlist title',
fatal=False)
result = self.playlist_from_matches(self._entries(url, playlist_id, webpage), playlist_id, get_title(), ie=SpankBangIE.ie_key())
description = self._html_search_meta(('description', 'og:description'), webpage)
if description:
result['description'] = description
return result
return self.playlist_result(entries, playlist_id, title)
class SpankBangListIE(SpankBangPlaylistIE):
    """Extractor for SpankBang category, tag, search, channel, pornstar and
    profile listing pages; pagination and extraction are inherited from
    SpankBangPlaylistIE, only the title logic differs."""

    # The (?(profile)...) conditional allows a trailing /videos, /likes or
    # /playlists segment only when the "profile" group matched.
    _VALID_URL = r'''(?x)
        https?://(?:[^/]+\.)?spankbang\.com/
        (?:
            category|tag|s|
            [^/]+/channel|
            [^/]+/pornstar|
            (?P<profile>profile)
        )/(?P<id>[^/?#]+)(?(profile)/(?P<related>videos|likes|playlists)?|)'''
    _TESTS = [{
        'url': 'https://spankbang.com/category/striptease/?p=d',
        'info_dict': {
            'id': 'striptease',
            'title': 'Category: Striptease Porn (Jun 2022) - Joi & Strip (today)',
            'description': 'md5:d4580b5fc1e8fae9f627178964989959',
        },
        'playlist_mincount': 1,
    }, {
        'url': 'https://spankbang.com/tag/monty/',
        'info_dict': {
            'id': 'monty',
            'title': 'Tag: Monty Porn - Jenni Lee & British',
            'description': 'md5:93535d53455f10b934034bfb9562de35',
        },
        'playlist_mincount': 40,
    }, {
        'url': 'https://spankbang.com/s/silly/?q=fhd&d=10',
        'info_dict': {
            'id': 'silly',
            'title': 'Search: Silly HD Porn - Chubby Teens & Cum Swallow (FHD, 10+ mins)',
            'description': 'md5:a241b24847f1efd7cc3cfc4ef2b22429',
        },
        'playlist_mincount': 20,
    }, {
        'url': 'https://spankbang.com/er/channel/purexxxfilms/?o=new',
        'info_dict': {
            'id': 'purexxxfilms',
            'title': 'PureXXXFilms Channel (new)',
            'description': 'md5:bb61fbf523511f3bd15aea0360bdbdc0',
        },
        'playlist_mincount': 65,
    }, {
        'url': 'https://spankbang.com/5pj/pornstar/promise/',
        'info_dict': {
            'id': 'promise',
            'title': 'Promise Pornstar Page',
            'description': 'md5:ff08a6ac2d6cd1225f0fae74ac896d63',
        },
        'playlist_mincount': 90,
    }, {
        'url': 'https://spankbang.com/profile/migouche/likes',
        'info_dict': {
            'id': 'migouche',
            'title': 'Likes of migouche Profile',
            'description': 'md5:b97540209879a62cadf2af6b0934bbc9',
        },
        'playlist_mincount': 2,
    },
    ]

    def _get_title(self, list_id, webpage, url):
        """Build a human-readable title for the listing page.

        Combines the page title (stripped of the trailing "... SpankBang"
        boilerplate, falling back to the de-slugged list_id), any filter
        qualifiers found in the query string, and a Category/Tag/Search
        prefix derived from the URL path.
        """
        title = (
            self._og_search_title(webpage, default=None)
            or self._html_search_regex(
                r'<title\b[^>]*>([^<]+)</title', webpage, 'channel title',
                fatal=False))
        if title:
            # drop trailing "& Porn Videos @ SpankBang"-style boilerplate
            title = re.sub(r'\s*(?:&\s+(?:Porn\s+))?(?:Videos\s+)?[@-]\s*SpankBang\s*$', '', title)
        title = title or re.sub(r'(\w)\+(\w)', r'\1 \2', list_id).title()
        # collect filter qualifiers from the query string:
        # o = order, q = quality, d = minimum duration, p = period
        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
        quals = []
        for q in ('o', 'q', 'd', 'p'):
            v = qs.get(q, [None])[-1]
            if not v:
                continue
            if q == 'q':
                v = v.upper()
            elif q == 'd':
                v += '+ mins'
            elif q == 'p':
                v = {'d': 'today', 'w': 'this week', 'm': 'this month', 'y': 'this year'}.get(v, v)
            quals.append(v)
        quals = ', '.join(quals)
        if quals:
            title += ' (%s)' % (quals, )
        # prefix list-type pages: /s/ is the search endpoint
        m = re.search(r'/(category|tag|s)/', url)
        if m:
            pfx = m.group(1)
            if pfx == 's':
                pfx = 'search'
            title = '%s: %s' % (pfx.title(), title)
        return title

View File

@ -14,6 +14,7 @@ from .utils import (
remove_quotes,
unified_timestamp,
variadic,
write_string,
)
from .compat import (
compat_basestring,
@ -53,15 +54,16 @@ def wraps_op(op):
# NB In principle NaN cannot be checked by membership.
# Here all NaN values are actually this one, so _NaN is _NaN,
# although _NaN != _NaN.
# although _NaN != _NaN. Ditto Infinity.
_NaN = float('nan')
_Infinity = float('inf')
def _js_bit_op(op):
def zeroise(x):
return 0 if x in (None, JS_Undefined, _NaN) else x
return 0 if x in (None, JS_Undefined, _NaN, _Infinity) else x
@wraps_op(op)
def wrapped(a, b):
@ -84,7 +86,7 @@ def _js_arith_op(op):
def _js_div(a, b):
if JS_Undefined in (a, b) or not (a or b):
return _NaN
return operator.truediv(a or 0, b) if b else float('inf')
return operator.truediv(a or 0, b) if b else _Infinity
def _js_mod(a, b):
@ -220,6 +222,42 @@ class LocalNameSpace(ChainMap):
return 'LocalNameSpace%s' % (self.maps, )
class Debugger(object):
    """Opt-in tracing for JSInterpreter: when ENABLED, each interpreted
    statement, its result, and any exception are logged with indentation
    proportional to the remaining recursion budget."""

    # global switch; off by default so production runs stay silent
    ENABLED = False

    @staticmethod
    def write(*args, **kwargs):
        """Write one '[debug] JS:' line; kwargs may carry level=<recursion depth>."""
        level = kwargs.get('level', 100)

        def truncate_string(s, left, right=0):
            # keep at most `left` head and `right` tail characters, joined by '...'
            if s is None or len(s) <= left + right:
                return s
            return '...'.join((s[:left - 3], s[-right:] if right else ''))

        # deeper recursion (smaller `level`) => more indentation
        write_string('[debug] JS: {0}{1}\n'.format(
            ' ' * (100 - level),
            ' '.join(truncate_string(compat_str(x), 50, 50) for x in args)))

    @classmethod
    def wrap_interpreter(cls, f):
        """Decorator for JSInterpreter.interpret_statement adding trace output."""
        def interpret_statement(self, stmt, local_vars, allow_recursion, *args, **kwargs):
            if cls.ENABLED and stmt.strip():
                cls.write(stmt, level=allow_recursion)
            try:
                ret, should_ret = f(self, stmt, local_vars, allow_recursion, *args, **kwargs)
            except Exception as e:
                if cls.ENABLED:
                    if isinstance(e, ExtractorError):
                        # log the original message, not the decorated one
                        e = e.orig_msg
                    cls.write('=> Raises:', e, '<-|', stmt, level=allow_recursion)
                raise
            if cls.ENABLED and stmt.strip():
                # skip logging trivial self-evaluating statements
                if should_ret or not repr(ret) == stmt:
                    cls.write(['->', '=>'][should_ret], repr(ret), '<-|', stmt, level=allow_recursion)
            return ret, should_ret
        return interpret_statement
class JSInterpreter(object):
__named_object_counter = 0
@ -307,8 +345,7 @@ class JSInterpreter(object):
def __op_chars(cls):
op_chars = set(';,[')
for op in cls._all_operators():
for c in op[0]:
op_chars.add(c)
op_chars.update(op[0])
return op_chars
def _named_object(self, namespace, obj):
@ -326,9 +363,8 @@ class JSInterpreter(object):
# collections.Counter() is ~10% slower in both 2.7 and 3.9
counters = dict((k, 0) for k in _MATCHING_PARENS.values())
start, splits, pos, delim_len = 0, 0, 0, len(delim) - 1
in_quote, escaping, skipping = None, False, 0
after_op, in_regex_char_group = True, False
in_quote, escaping, after_op, in_regex_char_group = None, False, True, False
skipping = 0
for idx, char in enumerate(expr):
paren_delta = 0
if not in_quote:
@ -382,10 +418,12 @@ class JSInterpreter(object):
return separated[0][1:].strip(), separated[1].strip()
@staticmethod
def _all_operators():
return itertools.chain(
# Ref: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Operator_Precedence
_SC_OPERATORS, _LOG_OPERATORS, _COMP_OPERATORS, _OPERATORS)
def _all_operators(_cached=[]):
if not _cached:
_cached.extend(itertools.chain(
# Ref: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Operator_Precedence
_SC_OPERATORS, _LOG_OPERATORS, _COMP_OPERATORS, _OPERATORS))
return _cached
def _operator(self, op, left_val, right_expr, expr, local_vars, allow_recursion):
if op in ('||', '&&'):
@ -416,7 +454,7 @@ class JSInterpreter(object):
except Exception as e:
if allow_undefined:
return JS_Undefined
raise self.Exception('Cannot get index {idx:.100}'.format(**locals()), expr=repr(obj), cause=e)
raise self.Exception('Cannot get index {idx!r:.100}'.format(**locals()), expr=repr(obj), cause=e)
def _dump(self, obj, namespace):
try:
@ -438,6 +476,7 @@ class JSInterpreter(object):
_FINALLY_RE = re.compile(r'finally\s*\{')
_SWITCH_RE = re.compile(r'switch\s*\(')
@Debugger.wrap_interpreter
def interpret_statement(self, stmt, local_vars, allow_recursion=100):
if allow_recursion < 0:
raise self.Exception('Recursion limit reached')
@ -511,7 +550,6 @@ class JSInterpreter(object):
expr = self._dump(inner, local_vars) + outer
if expr.startswith('('):
m = re.match(r'\((?P<d>[a-z])%(?P<e>[a-z])\.length\+(?P=e)\.length\)%(?P=e)\.length', expr)
if m:
# short-cut eval of frequently used `(d%e.length+e.length)%e.length`, worth ~6% on `pytest -k test_nsig`
@ -693,7 +731,7 @@ class JSInterpreter(object):
(?P<op>{_OPERATOR_RE})?
=(?!=)(?P<expr>.*)$
)|(?P<return>
(?!if|return|true|false|null|undefined)(?P<name>{_NAME_RE})$
(?!if|return|true|false|null|undefined|NaN|Infinity)(?P<name>{_NAME_RE})$
)|(?P<indexing>
(?P<in>{_NAME_RE})\[(?P<idx>.+)\]$
)|(?P<attribute>
@ -727,11 +765,12 @@ class JSInterpreter(object):
raise JS_Break()
elif expr == 'continue':
raise JS_Continue()
elif expr == 'undefined':
return JS_Undefined, should_return
elif expr == 'NaN':
return _NaN, should_return
elif expr == 'Infinity':
return _Infinity, should_return
elif md.get('return'):
return local_vars[m.group('name')], should_return
@ -760,18 +799,28 @@ class JSInterpreter(object):
right_expr = separated.pop()
# handle operators that are both unary and binary, minimal BODMAS
if op in ('+', '-'):
# simplify/adjust consecutive instances of these operators
undone = 0
while len(separated) > 1 and not separated[-1].strip():
undone += 1
separated.pop()
if op == '-' and undone % 2 != 0:
right_expr = op + right_expr
elif op == '+':
while len(separated) > 1 and separated[-1].strip() in self.OP_CHARS:
right_expr = separated.pop() + right_expr
# hanging op at end of left => unary + (strip) or - (push right)
left_val = separated[-1]
for dm_op in ('*', '%', '/', '**'):
bodmas = tuple(self._separate(left_val, dm_op, skip_delims=skip_delim))
if len(bodmas) > 1 and not bodmas[-1].strip():
expr = op.join(separated) + op + right_expr
right_expr = None
if len(separated) > 1:
separated.pop()
right_expr = op.join((left_val, right_expr))
else:
separated = [op.join((left_val, right_expr))]
right_expr = None
break
if right_expr is None:
continue
@ -797,6 +846,8 @@ class JSInterpreter(object):
def eval_method():
if (variable, member) == ('console', 'debug'):
if Debugger.ENABLED:
Debugger.write(self.interpret_expression('[{}]'.format(arg_str), local_vars, allow_recursion))
return
types = {
'String': compat_str,

View File

@ -2406,7 +2406,7 @@ class ExtractorError(YoutubeDLError):
""" tb, if given, is the original traceback (so that it can be printed out).
If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
"""
self.orig_msg = msg
if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
expected = True
if video_id is not None: