Josef Bohórquez 2024-06-20 16:48:19 -04:00 committed by GitHub
commit dcde3c9f4d
3 changed files with 64 additions and 139 deletions


@@ -2371,60 +2371,38 @@ class YoutubeDL(object):
         return res

     def _format_note(self, fdict):
-        res = ''
-        if fdict.get('ext') in ['f4f', 'f4m']:
-            res += '(unsupported) '
+        note_parts = []
+        if fdict.get('ext') in ('f4f', 'f4m'):
+            note_parts.append('(unsupported)')
         if fdict.get('language'):
-            if res:
-                res += ' '
-            res += '[%s] ' % fdict['language']
-        if fdict.get('format_note') is not None:
-            res += fdict['format_note'] + ' '
+            note_parts.append(f'[{fdict["language"]}]')
+        if fdict.get('format_note'):
+            note_parts.append(fdict['format_note'])
         if fdict.get('tbr') is not None:
-            res += '%4dk ' % fdict['tbr']
+            note_parts.append('%4dk' % fdict['tbr'])
         if fdict.get('container') is not None:
-            if res:
-                res += ', '
-            res += '%s container' % fdict['container']
-        if (fdict.get('vcodec') is not None
-                and fdict.get('vcodec') != 'none'):
-            if res:
-                res += ', '
-            res += fdict['vcodec']
-            if fdict.get('vbr') is not None:
-                res += '@'
-        elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
-            res += 'video@'
+            note_parts.append('%s container' % fdict['container'])
+        if fdict.get('vcodec') not in (None, 'none'):
+            note_parts.append(fdict['vcodec'] + ('@' if fdict.get('vbr') else ''))
+        elif fdict.get('vbr') is not None:
+            note_parts.append('video@')
         if fdict.get('vbr') is not None:
-            res += '%4dk' % fdict['vbr']
+            note_parts.append('%4dk' % fdict['vbr'])
         if fdict.get('fps') is not None:
-            if res:
-                res += ', '
-            res += '%sfps' % fdict['fps']
+            note_parts.append('%sfps' % fdict['fps'])
         if fdict.get('acodec') is not None:
-            if res:
-                res += ', '
-            if fdict['acodec'] == 'none':
-                res += 'video only'
-            else:
-                res += '%-5s' % fdict['acodec']
+            note_parts.append('video only' if fdict['acodec'] == 'none' else '%-5s' % fdict['acodec'])
         elif fdict.get('abr') is not None:
-            if res:
-                res += ', '
-            res += 'audio'
+            note_parts.append('audio')
         if fdict.get('abr') is not None:
-            res += '@%3dk' % fdict['abr']
+            note_parts.append('@%3dk' % fdict['abr'])
         if fdict.get('asr') is not None:
-            res += ' (%5dHz)' % fdict['asr']
+            note_parts.append('(%5dHz)' % fdict['asr'])
         if fdict.get('filesize') is not None:
-            if res:
-                res += ', '
-            res += format_bytes(fdict['filesize'])
+            note_parts.append(format_bytes(fdict['filesize']))
         elif fdict.get('filesize_approx') is not None:
-            if res:
-                res += ', '
-            res += '~' + format_bytes(fdict['filesize_approx'])
-        return res
+            note_parts.append('~' + format_bytes(fdict['filesize_approx']))
+        return ' '.join(note_parts)

     def list_formats(self, info_dict):
         formats = info_dict.get('formats', [info_dict])
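A quick way to sanity-check the refactor is to run a trimmed, stand-alone version of the join-based logic against a sample format dict. The helper name and the fdict values below are made up for illustration, and only a few fields are reproduced. Note that because every part is now joined with a single space, the output spaces tokens that the old concatenation ran together (for example the trailing '@' after the vcodec and the vbr value) and drops the old comma separators.

# Stand-alone sketch of the join-based approach (hypothetical values):
def format_note_sketch(fdict):
    note_parts = []
    if fdict.get('vcodec') not in (None, 'none'):
        note_parts.append(fdict['vcodec'] + ('@' if fdict.get('vbr') else ''))
    if fdict.get('vbr') is not None:
        note_parts.append('%4dk' % fdict['vbr'])
    if fdict.get('acodec') is not None:
        note_parts.append('video only' if fdict['acodec'] == 'none' else '%-5s' % fdict['acodec'])
    if fdict.get('abr') is not None:
        note_parts.append('@%3dk' % fdict['abr'])
    if fdict.get('asr') is not None:
        note_parts.append('(%5dHz)' % fdict['asr'])
    return ' '.join(note_parts)

print(format_note_sketch({'vcodec': 'avc1.4d401e', 'vbr': 1500,
                          'acodec': 'mp4a.40.2', 'abr': 128, 'asr': 44100}))
# -> 'avc1.4d401e@ 1500k mp4a.40.2 @128k (44100Hz)'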


@@ -87,6 +87,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
         If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
         """
         username, password = self._get_login_info()

+        # No authentication to be performed
         if username is None:
             if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None:
@@ -129,21 +130,20 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
             self._downloader.report_warning(message)

         lookup_req = [
-            username,
-            None, [], None, 'US', None, None, 2, False, True,
-            [
-                None, None,
-                [2, 1, None, 1,
-                 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
-                 None, [], 4],
-                1, [None, None, []], None, None, None, True
-            ],
+            username, None, [], None, 'US', None, None, 2, False, True,
+            [None, None, [2, 1, None, 1,
+             'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
+             None, [], 4], 1, [None, None, []], None, None, None, True],
             username,
         ]

-        lookup_results = req(
-            self._LOOKUP_URL, lookup_req,
-            'Looking up account info', 'Unable to look up account info')
+        # --- Change 1: extract a function to improve readability ---
+        def perform_lookup(req):
+            return self._download_json(
+                self._LOOKUP_URL, req,
+                'Looking up account info', 'Unable to look up account info')
+
+        lookup_results = perform_lookup(lookup_req)

         if lookup_results is False:
             return False
@@ -154,12 +154,10 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
             return False

         challenge_req = [
-            user_hash,
-            None, 1, None, [1, None, None, None, [password, None, True]],
-            [
-                None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4],
-                1, [None, None, []], None, None, None, True
-            ]]
+            user_hash, None, 1, None, [1, None, None, None, [password, None, True]],
+            [None, None, [2, 1, None, 1,
+             'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
+             None, [], 4], 1, [None, None, []], None, None, None, True]]

         challenge_results = req(
             self._CHALLENGE_URL, challenge_req,

@@ -6002,114 +6002,63 @@ def parse_m3u8_attributes(attrib):
 def urshift(val, n):
     return val >> n if val >= 0 else (val + 0x100000000) >> n


 # Based on png2str() written by @gdkchan and improved by @yokrysty
 # Originally posted at https://github.com/ytdl-org/youtube-dl/issues/9706
 def decode_png(png_data):
     # Reference: https://www.w3.org/TR/PNG/
-    header = png_data[8:]
-
-    if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
+    if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a':
         raise IOError('Not a valid PNG file.')

-    int_map = {1: '>B', 2: '>H', 4: '>I'}
-    unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]
+    int_map = {1: 'B', 2: 'H', 4: 'I'}
+
+    def unpack_integer(data):
+        return compat_struct_unpack(f'>{int_map[len(data)]}', data)[0]

+    header = png_data[8:]
     chunks = []
-
     while header:
         length = unpack_integer(header[:4])
-        header = header[4:]
-
-        chunk_type = header[:4]
-        header = header[4:]
-
-        chunk_data = header[:length]
-        header = header[length:]
-
-        header = header[4:]  # Skip CRC
-
-        chunks.append({
-            'type': chunk_type,
-            'length': length,
-            'data': chunk_data
-        })
+        chunk_type, chunk_data, header = header[4:8], header[8:8 + length], header[8 + length + 4:]
+        chunks.append({'type': chunk_type, 'data': chunk_data})

-    ihdr = chunks[0]['data']
-
-    width = unpack_integer(ihdr[:4])
-    height = unpack_integer(ihdr[4:8])
+    if not (ihdr := next((c["data"] for c in chunks if c["type"] == b'IHDR'), None)):
+        raise IOError("Unable to read PNG header.")

-    idat = b''
-
-    for chunk in chunks:
-        if chunk['type'] == b'IDAT':
-            idat += chunk['data']
+    width, height = unpack_integer(ihdr[:4]), unpack_integer(ihdr[4:8])

+    idat = b''.join(c['data'] for c in chunks if c['type'] == b'IDAT')
     if not idat:
         raise IOError('Unable to read PNG data.')

     decompressed_data = bytearray(zlib.decompress(idat))

     stride = width * 3
-    pixels = []
+    pixels = [[] for _ in range(height)]

-    def _get_pixel(idx):
-        x = idx % stride
-        y = idx // stride
-        return pixels[y][x]
+    def _get_pixel(x, y):
+        return pixels[y][x] if x >= 0 and y >= 0 else 0

     for y in range(height):
-        basePos = y * (1 + stride)
-        filter_type = decompressed_data[basePos]
-
-        current_row = []
-        pixels.append(current_row)
-
+        filter_type = decompressed_data[y * (1 + stride)]
         for x in range(stride):
-            color = decompressed_data[1 + basePos + x]
-            basex = y * stride + x
-            left = 0
-            up = 0
-
-            if x > 2:
-                left = _get_pixel(basex - 3)
-            if y > 0:
-                up = _get_pixel(basex - stride)
+            color = decompressed_data[1 + y * (1 + stride) + x]
+            left, up = _get_pixel(x - 3, y), _get_pixel(x, y - 1)

             if filter_type == 1:  # Sub
                 color = (color + left) & 0xff
             elif filter_type == 2:  # Up
                 color = (color + up) & 0xff
             elif filter_type == 3:  # Average
                 color = (color + ((left + up) >> 1)) & 0xff
             elif filter_type == 4:  # Paeth
-                a = left
-                b = up
-                c = 0
-
-                if x > 2 and y > 0:
-                    c = _get_pixel(basex - stride - 3)
-
+                a, b, c = left, up, _get_pixel(x - 3, y - 1)
                 p = a + b - c
-
-                pa = abs(p - a)
-                pb = abs(p - b)
-                pc = abs(p - c)
-
-                if pa <= pb and pa <= pc:
-                    color = (color + a) & 0xff
-                elif pb <= pc:
-                    color = (color + b) & 0xff
-                else:
-                    color = (color + c) & 0xff
+                pa, pb, pc = abs(p - a), abs(p - b), abs(p - c)
+                color = (color + (a if pa <= pb and pa <= pc else b if pb <= pc else c)) & 0xff

-            current_row.append(color)
+            pixels[y].append(color)

     return width, height, pixels


 def write_xattr(path, key, value):
     # This mess below finds the best xattr tool for the job
     try:
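For reference, decode_png returns the image dimensions plus the filter-decoded scanlines: `pixels` holds `height` rows, each a flat list of `width * 3` byte values (an R, G, B triple per pixel), and the decoder only handles non-interlaced 8-bit RGB data. A small usage sketch, assuming youtube-dl is installed as the youtube_dl package; the PNG path and the pixel coordinates are hypothetical.

# Usage sketch (hypothetical file name, minimal error handling).
from youtube_dl.utils import decode_png

with open('thumbnail.png', 'rb') as f:
    width, height, pixels = decode_png(f.read())

# Read back the RGB triple of pixel (x, y) from the flat per-row byte list.
x, y = 0, 0
r, g, b = pixels[y][3 * x:3 * x + 3]
print(width, height, (r, g, b))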