youtube-dl/youtube_dl/extractor/twitter.py

# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urllib_request
from ..utils import (
    float_or_none,
    unescapeHTML,
    xpath_text,
)


class TwitterCardIE(InfoExtractor):
    IE_NAME = 'twitter:card'
    _VALID_URL = r'https?://(?:www\.)?twitter\.com/i/cards/tfw/v1/(?P<id>\d+)'
    _TESTS = [
        {
            'url': 'https://twitter.com/i/cards/tfw/v1/560070183650213889',
            'md5': '7d2f6b4d2eb841a7ccc893d479bfceb4',
            'info_dict': {
                'id': '560070183650213889',
                'ext': 'mp4',
                'title': 'TwitterCard',
                'thumbnail': 're:^https?://.*\.jpg$',
                'duration': 30.033,
            },
        },
        {
            'url': 'https://twitter.com/i/cards/tfw/v1/623160978427936768',
            'md5': '7ee2a553b63d1bccba97fbed97d9e1c8',
            'info_dict': {
                'id': '623160978427936768',
                'ext': 'mp4',
                'title': 'TwitterCard',
                'thumbnail': 're:^https?://.*\.jpg',
                'duration': 80.155,
            },
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Different formats served for different User-Agents
        USER_AGENTS = [
            'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/20.0 (Chrome)',  # mp4
            'Mozilla/5.0 (Windows NT 5.2; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0',  # webm
        ]

        config = None
        formats = []
        for user_agent in USER_AGENTS:
            request = compat_urllib_request.Request(url)
            request.add_header('User-Agent', user_agent)
            webpage = self._download_webpage(request, video_id)

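            # The player settings are embedded as HTML-escaped JSON in the
            # data-player-config attribute of the card page.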
            config = self._parse_json(
                unescapeHTML(self._search_regex(
                    r'data-player-config="([^"]+)"', webpage, 'data player config')),
                video_id)
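            # The config carries either a 'playlist' of direct media sources or a
            # 'vmapUrl' pointing at an ad-server-style VMAP/VAST XML document whose
            # MediaFile node holds the video URL; that URL is the same for every
            # User-Agent, so one pass is enough in that case.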
            if 'playlist' not in config:
                if 'vmapUrl' in config:
                    vmap_data = self._download_xml(config['vmapUrl'], video_id)
                    video_url = xpath_text(vmap_data, './/MediaFile').strip()
                    formats.append({
                        'url': video_url,
                    })
                    break  # same video regardless of UA
                continue

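            # Direct playlist entry; the source URL often encodes the frame size
            # in its path (e.g. .../640x360/...), which is parsed out below.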
            video_url = config['playlist'][0]['source']

            f = {
                'url': video_url,
            }

            m = re.search(r'/(?P<width>\d+)x(?P<height>\d+)/', video_url)
            if m:
                f.update({
                    'width': int(m.group('width')),
                    'height': int(m.group('height')),
                })
            formats.append(f)
        self._sort_formats(formats)

        thumbnail = config.get('posterImageUrl')
        duration = float_or_none(config.get('duration'))

        return {
            'id': video_id,
            'title': 'TwitterCard',
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
        }


class TwitterIE(InfoExtractor):
    IE_NAME = 'twitter'
    _VALID_URL = r'https?://(?:www|m|mobile)?\.?twitter\.com/(?P<id>[^/]+/status/\d+)'

    _TEST = {
        'url': 'https://twitter.com/freethenipple/status/643211948184596480',
        'md5': '31cd83a116fc41f99ae3d909d4caf6a0',
        'info_dict': {
            'id': '643211948184596480',
            'ext': 'mp4',
            'title': 'freethenipple - FTN supporters on Hollywood Blvd today!',
            'thumbnail': 're:^https?://.*\.jpg',
            'duration': 12.922,
            'description': 'FREE THE NIPPLE on Twitter: "FTN supporters on Hollywood Blvd today! http://t.co/c7jHH749xJ"',
            'uploader': 'FREE THE NIPPLE',
            'uploader_id': 'freethenipple',
        },
    }

    def _real_extract(self, url):
        id = self._match_id(url)
        username, twid = re.match(r'([^/]+)/status/(\d+)', id).groups()
        name = username
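        # Rewrite m./mobile. URLs to the desktop site, which presumably is the
        # page that embeds the player-card markup looked up further down.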
        url = re.sub(r'https?://(m|mobile)\.', 'https://', url)

        webpage = self._download_webpage(url, 'tweet: ' + url)
        description = self._html_search_regex(r'<title>\s*(.+?)\s*</title>', webpage, 'title')
        title = description.replace('\n', ' ')
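        # Page titles follow the pattern '<uploader> on Twitter: "<tweet text>"';
        # split that into uploader name and tweet text when it matches.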
        splitdesc = re.match(r'^(.+?)\s*on Twitter:\s* "(.+?)"$', title)
        if splitdesc:
            name, title = splitdesc.groups()
        title = re.sub(r'\s*https?://[^ ]+', '', title)  # strip 'https -_t.co_BJYgOjSeGA' junk from filenames

        card_id = self._search_regex(r'["\']/i/cards/tfw/v1/(\d+)', webpage, '/i/card/...')
        card_url = 'https://twitter.com/i/cards/tfw/v1/' + card_id
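        # Hand the actual format extraction off to TwitterCardIE via url_transparent,
        # keeping the tweet-level title, description and uploader from this page.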
        return {
            '_type': 'url_transparent',
            'ie_key': 'TwitterCard',
            'uploader_id': username,
            'uploader': name,
            'url': card_url,
            'webpage_url': url,
            'description': description,
            'title': username + ' - ' + title,
        }