youtube-dl/youtube_dl/extractor/viki.py

from __future__ import unicode_literals

import re
import time
import hmac
import hashlib

from ..utils import (
    ExtractorError,
    int_or_none,
    parse_age_limit,
    parse_iso8601,
)
from .common import InfoExtractor


class VikiBaseIE(InfoExtractor):
    _API_QUERY_TEMPLATE = '/v4/%sapp=%s&t=%s&site=www.viki.com'
    _API_URL_TEMPLATE = 'http://api.viki.io%s&sig=%s'

    _APP = '65535a'
    _APP_VERSION = '2.2.5.1428709186'
    _APP_SECRET = '-$iJ}@p7!G@SyU/je1bEyWg}upLu-6V6-Lg9VD(]siH,r.,m-r|ulZ,U4LC/SeR)'

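    # Every API call is signed: the query string (app id, timestamp, site) is
    # HMAC-SHA1'd with the app secret and appended as the 'sig' parameter.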
    def _prepare_call(self, path, timestamp=None):
        path += '?' if '?' not in path else '&'
        if not timestamp:
            timestamp = int(time.time())
        query = self._API_QUERY_TEMPLATE % (path, self._APP, timestamp)
        sig = hmac.new(
            self._APP_SECRET.encode('ascii'),
            query.encode('ascii'),
            hashlib.sha1
        ).hexdigest()
        return self._API_URL_TEMPLATE % (query, sig)

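    # The API rejects requests whose timestamp has drifted too far; on an
    # 'invalid timestamp' error the call is retried once using the
    # 'current_timestamp' value returned by the server.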
    def _call_api(self, path, video_id, note, timestamp=None):
        resp = self._download_json(
            self._prepare_call(path, timestamp), video_id, note)

        error = resp.get('error')
        if error:
            if error == 'invalid timestamp':
                resp = self._download_json(
                    self._prepare_call(path, int(resp['current_timestamp'])),
                    video_id, '%s (retry)' % note)
                error = resp.get('error')
            if error:
                self._raise_error(resp['error'])

        return resp

    def _raise_error(self, error):
        raise ExtractorError(
            '%s returned error: %s' % (self.IE_NAME, error),
            expected=True)


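# Single-video extractor: formats, metadata and subtitles all come from the
# signed v4 JSON API.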
class VikiIE(VikiBaseIE):
    IE_NAME = 'viki'
    _VALID_URL = r'https?://(?:www\.)?viki\.com/(?:videos|player)/(?P<id>[0-9]+v)'
    _TESTS = [{
        'url': 'http://www.viki.com/videos/1023585v-heirs-episode-14',
        'info_dict': {
            'id': '1023585v',
            'ext': 'mp4',
            'title': 'Heirs Episode 14',
            'uploader': 'SBS',
            'description': 'md5:c4b17b9626dd4b143dcc4d855ba3474e',
            'upload_date': '20131121',
            'age_limit': 13,
        },
        'skip': 'Blocked in the US',
    }, {
        # clip
        'url': 'http://www.viki.com/videos/1067139v-the-avengers-age-of-ultron-press-conference',
        'md5': '86c0b5dbd4d83a6611a79987cc7a1989',
        'info_dict': {
            'id': '1067139v',
            'ext': 'mp4',
            'title': "'The Avengers: Age of Ultron' Press Conference",
            'description': 'md5:d70b2f9428f5488321bfe1db10d612ea',
            'duration': 352,
            'timestamp': 1430380829,
            'upload_date': '20150430',
            'uploader': 'Arirang TV',
            'like_count': int,
            'age_limit': 0,
        }
    }, {
        'url': 'http://www.viki.com/videos/1048879v-ankhon-dekhi',
        'info_dict': {
            'id': '1048879v',
            'ext': 'mp4',
            'title': 'Ankhon Dekhi',
            'duration': 6512,
            'timestamp': 1408532356,
            'upload_date': '20140820',
            'uploader': 'Spuul',
            'like_count': int,
            'age_limit': 13,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        }
    }, {
        # episode
        'url': 'http://www.viki.com/videos/44699v-boys-over-flowers-episode-1',
        'md5': '190f3ef426005ba3a080a63325955bc3',
        'info_dict': {
            'id': '44699v',
            'ext': 'mp4',
            'title': 'Boys Over Flowers - Episode 1',
            'description': 'md5:52617e4f729c7d03bfd4bcbbb6e946f2',
            'duration': 4155,
            'timestamp': 1270496524,
            'upload_date': '20100405',
            'uploader': 'group8',
            'like_count': int,
            'age_limit': 13,
        }
    }, {
        'url': 'http://www.viki.com/player/44699v',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        streams = self._call_api(
            'videos/%s/streams.json' % video_id, video_id,
            'Downloading video streams JSON')

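        # The streams JSON maps a format id (a quality label such as '480p',
        # or 'm3u8') to a dict of protocols, each with a direct stream URL.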
        formats = []
        for format_id, stream_dict in streams.items():
            height = int_or_none(self._search_regex(
                r'^(\d+)[pP]$', format_id, 'height', default=None))
            for protocol, format_dict in stream_dict.items():
                if format_id == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        format_dict['url'], video_id, 'mp4', m3u8_id='m3u8-%s' % protocol))
                else:
                    formats.append({
                        'url': format_dict['url'],
                        'format_id': '%s-%s' % (format_id, protocol),
                        'height': height,
                    })
        self._sort_formats(formats)

        video = self._call_api(
            'videos/%s.json' % video_id, video_id, 'Downloading video JSON')

        title = None
        titles = video.get('titles')
        if titles:
            title = titles.get('en') or titles[list(titles.keys())[0]]
        if not title:
            title = 'Episode %d' % video.get('number') if video.get('type') == 'episode' else video.get('id') or video_id
            container_titles = video.get('container', {}).get('titles')
            if container_titles:
                container_title = container_titles.get('en') or container_titles[list(container_titles.keys())[0]]
                title = '%s - %s' % (container_title, title)

        descriptions = video.get('descriptions')
        description = descriptions.get('en') or descriptions[list(descriptions.keys())[0]] if descriptions else None

        duration = int_or_none(video.get('duration'))
        timestamp = parse_iso8601(video.get('created_at'))
        uploader = video.get('author')
        like_count = int_or_none(video.get('likes', {}).get('count'))
        age_limit = parse_age_limit(video.get('rating'))

        thumbnails = []
        for thumbnail_id, thumbnail in video.get('images', {}).items():
            thumbnails.append({
                'id': thumbnail_id,
                'url': thumbnail.get('url'),
            })

        # Each language listed in subtitle_completions gets signed URLs for
        # both the srt and vtt renditions of its subtitles.
        subtitles = {}
        for subtitle_lang, _ in video.get('subtitle_completions', {}).items():
            subtitles[subtitle_lang] = [{
                'ext': subtitles_format,
                'url': self._prepare_call(
                    'videos/%s/subtitles/%s.%s' % (video_id, subtitle_lang, subtitles_format)),
            } for subtitles_format in ('srt', 'vtt')]

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'duration': duration,
            'timestamp': timestamp,
            'uploader': uploader,
            'like_count': like_count,
            'age_limit': age_limit,
            'thumbnails': thumbnails,
            'formats': formats,
            'subtitles': subtitles,
        }


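# Show/channel pages: enumerate a container's episodes and clips and hand
# each one back to VikiIE as a playlist entry.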
class VikiChannelIE(InfoExtractor):
    IE_NAME = 'viki:channel'
    _VALID_URL = r'https?://(?:www\.)?viki\.com/tv/(?P<id>[0-9]+c)'
    _TESTS = [{
        'url': 'http://www.viki.com/tv/50c-boys-over-flowers',
        'info_dict': {
            'id': '50c',
            'title': 'Boys Over Flowers',
            'description': 'md5:ecd3cff47967fe193cff37c0bec52790',
        },
        'playlist_count': 70,
    }, {
        'url': 'http://www.viki.com/tv/1354c-poor-nastya-complete',
        'info_dict': {
            'id': '1354c',
            'title': 'Poor Nastya [COMPLETE]',
            'description': 'md5:05bf5471385aa8b21c18ad450e350525',
        },
        'playlist_count': 127,
    }]

    _API_BASE = 'http://api.viki.io/v4/containers'
    _APP = '100000a'
    _PER_PAGE = 25

    def _real_extract(self, url):
        channel_id = self._match_id(url)

        channel = self._download_json(
            '%s/%s.json?app=%s' % (self._API_BASE, channel_id, self._APP),
            channel_id, 'Downloading channel JSON')

        titles = channel['titles']
        title = titles.get('en') or titles[list(titles.keys())[0]]

        descriptions = channel['descriptions']
        description = descriptions.get('en') or descriptions[list(descriptions.keys())[0]]

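        # Episodes and clips are paged separately; each response's
        # pagination 'next' URL is followed until the API stops returning one.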
        entries = []
        for video_type in ('episodes', 'clips'):
            page_url = '%s/%s/%s.json?app=%s&per_page=%d&sort=number&direction=asc&with_paging=true&page=1' % (self._API_BASE, channel_id, video_type, self._APP, self._PER_PAGE)
            while page_url:
                page = self._download_json(
                    page_url, channel_id,
                    'Downloading %s JSON page #%s'
                    % (video_type, re.search(r'[?&]page=([0-9]+)', page_url).group(1)))
                for video in page['response']:
                    video_id = video['id']
                    entries.append(self.url_result(
                        'http://www.viki.com/videos/%s' % video_id, 'Viki', video_id))
                page_url = page['pagination']['next']

        return self.playlist_result(entries, channel_id, title, description)