diff --git a/yt_dlp/extractor/_extractors.py b/yt_dlp/extractor/_extractors.py
index 583477b98aed..d09502e5aad4 100644
--- a/yt_dlp/extractor/_extractors.py
+++ b/yt_dlp/extractor/_extractors.py
@@ -2499,6 +2499,7 @@
     Zee5SeriesIE,
 )
 from .zeenews import ZeeNewsIE
+from .zenporn import ZenPornIE
 from .zetland import ZetlandDKArticleIE
 from .zhihu import ZhihuIE
 from .zingmp3 import (
diff --git a/yt_dlp/extractor/chzzk.py b/yt_dlp/extractor/chzzk.py
index 6894baea5cb2..420fe0514bc1 100644
--- a/yt_dlp/extractor/chzzk.py
+++ b/yt_dlp/extractor/chzzk.py
@@ -2,7 +2,7 @@
 
 from .common import InfoExtractor
 from ..utils import (
-    ExtractorError,
+    UserNotLive,
     float_or_none,
     int_or_none,
     parse_iso8601,
@@ -40,7 +40,7 @@ def _real_extract(self, url):
             note='Downloading channel info', errnote='Unable to download channel info')['content']
 
         if live_detail.get('status') == 'CLOSE':
-            raise ExtractorError('The channel is not currently live', expected=True)
+            raise UserNotLive(video_id=channel_id)
 
         live_playback = self._parse_json(live_detail['livePlaybackJson'], channel_id)
 
diff --git a/yt_dlp/extractor/niconico.py b/yt_dlp/extractor/niconico.py
index b889c752ccfe..05a1a3ddb8ca 100644
--- a/yt_dlp/extractor/niconico.py
+++ b/yt_dlp/extractor/niconico.py
@@ -13,13 +13,11 @@
 from ..utils import (
     ExtractorError,
     OnDemandPagedList,
-    bug_reports_message,
     clean_html,
     float_or_none,
     int_or_none,
     join_nonempty,
     parse_duration,
-    parse_filesize,
     parse_iso8601,
     parse_resolution,
     qualities,
@@ -55,25 +53,31 @@ class NiconicoIE(InfoExtractor):
             'duration': 33,
             'view_count': int,
             'comment_count': int,
+            'genres': ['未設定'],
+            'tags': [],
+            'expected_protocol': str,
         },
-        'skip': 'Requires an account',
     }, {
         # File downloaded with and without credentials are different, so omit
         # the md5 field
         'url': 'http://www.nicovideo.jp/watch/nm14296458',
         'info_dict': {
             'id': 'nm14296458',
-            'ext': 'swf',
-            'title': '【鏡音リン】Dance on media【オリジナル】take2!',
-            'description': 'md5:689f066d74610b3b22e0f1739add0f58',
+            'ext': 'mp4',
+            'title': '【Kagamine Rin】Dance on media【Original】take2!',
+            'description': 'md5:9368f2b1f4178de64f2602c2f3d6cbf5',
             'thumbnail': r're:https?://.*',
             'uploader': 'りょうた',
             'uploader_id': '18822557',
             'upload_date': '20110429',
             'timestamp': 1304065916,
-            'duration': 209,
+            'duration': 208.0,
+            'comment_count': int,
+            'view_count': int,
+            'genres': ['音楽・サウンド'],
+            'tags': ['Translation_Request', 'Kagamine_Rin', 'Rin_Original'],
+            'expected_protocol': str,
         },
-        'skip': 'Requires an account',
     }, {
         # 'video exists but is marked as "deleted"
         # md5 is unstable
@@ -107,22 +111,24 @@ class NiconicoIE(InfoExtractor):
     }, {
         # video not available via `getflv`; "old" HTML5 video
         'url': 'http://www.nicovideo.jp/watch/sm1151009',
-        'md5': '8fa81c364eb619d4085354eab075598a',
+        'md5': 'f95a3d259172667b293530cc2e41ebda',
         'info_dict': {
             'id': 'sm1151009',
             'ext': 'mp4',
             'title': 'マスターシステム本体内蔵のスペハリのメインテーマ(PSG版)',
-            'description': 'md5:6ee077e0581ff5019773e2e714cdd0b7',
+            'description': 'md5:f95a3d259172667b293530cc2e41ebda',
             'thumbnail': r're:https?://.*',
             'duration': 184,
-            'timestamp': 1190868283,
-            'upload_date': '20070927',
+            'timestamp': 1190835883,
+            'upload_date': '20070926',
             'uploader': 'denden2',
             'uploader_id': '1392194',
             'view_count': int,
             'comment_count': int,
+            'genres': ['ゲーム'],
+            'tags': [],
+            'expected_protocol': str,
         },
-        'skip': 'Requires an account',
     }, {
         # "New" HTML5 video
         # md5 is unstable
@@ -132,16 +138,18 @@ class NiconicoIE(InfoExtractor):
             'ext': 'mp4',
             'title': '新作TVアニメ「戦姫絶唱シンフォギアAXZ」PV 最高画質',
             'description': 'md5:e52974af9a96e739196b2c1ca72b5feb',
-            'timestamp': 1498514060,
+            'timestamp': 1498481660,
             'upload_date': '20170626',
-            'uploader': 'ゲスト',
+            'uploader': 'no-namamae',
             'uploader_id': '40826363',
             'thumbnail': r're:https?://.*',
             'duration': 198,
             'view_count': int,
             'comment_count': int,
+            'genres': ['アニメ'],
+            'tags': [],
+            'expected_protocol': str,
         },
-        'skip': 'Requires an account',
     }, {
         # Video without owner
         'url': 'http://www.nicovideo.jp/watch/sm18238488',
@@ -151,7 +159,7 @@ class NiconicoIE(InfoExtractor):
             'ext': 'mp4',
             'title': '【実写版】ミュータントタートルズ',
             'description': 'md5:15df8988e47a86f9e978af2064bf6d8e',
-            'timestamp': 1341160408,
+            'timestamp': 1341128008,
             'upload_date': '20120701',
             'uploader': None,
             'uploader_id': None,
@@ -159,8 +167,10 @@ class NiconicoIE(InfoExtractor):
             'duration': 5271,
             'view_count': int,
             'comment_count': int,
+            'genres': ['エンターテイメント'],
+            'tags': [],
+            'expected_protocol': str,
         },
-        'skip': 'Requires an account',
     }, {
         'url': 'http://sp.nicovideo.jp/watch/sm28964488?ss_pos=1&cp_in=wt_tg',
         'only_matching': True,
@@ -353,15 +363,10 @@ def _extract_format_for_quality(self, video_id, audio_quality, video_quality, dm
         if not audio_quality.get('isAvailable') or not video_quality.get('isAvailable'):
             return None
 
-        def extract_video_quality(video_quality):
-            return parse_filesize('%sB' % self._search_regex(
-                r'\| ([0-9]*\.?[0-9]*[MK])', video_quality, 'vbr', default=''))
-
         format_id = '-'.join(
             [remove_start(s['id'], 'archive_') for s in (video_quality, audio_quality)] + [dmc_protocol])
 
         vid_qual_label = traverse_obj(video_quality, ('metadata', 'label'))
-        vid_quality = traverse_obj(video_quality, ('metadata', 'bitrate'))
 
         return {
             'url': 'niconico_dmc:%s/%s/%s' % (video_id, video_quality['id'], audio_quality['id']),
@@ -370,10 +375,15 @@ def extract_video_quality(video_quality):
             'ext': 'mp4',  # Session API are used in HTML5, which always serves mp4
             'acodec': 'aac',
             'vcodec': 'h264',
-            'abr': float_or_none(traverse_obj(audio_quality, ('metadata', 'bitrate')), 1000),
-            'vbr': float_or_none(vid_quality if vid_quality > 0 else extract_video_quality(vid_qual_label), 1000),
-            'height': traverse_obj(video_quality, ('metadata', 'resolution', 'height')),
-            'width': traverse_obj(video_quality, ('metadata', 'resolution', 'width')),
+            **traverse_obj(audio_quality, ('metadata', {
+                'abr': ('bitrate', {functools.partial(float_or_none, scale=1000)}),
+                'asr': ('samplingRate', {int_or_none}),
+            })),
+            **traverse_obj(video_quality, ('metadata', {
+                'vbr': ('bitrate', {functools.partial(float_or_none, scale=1000)}),
+                'height': ('resolution', 'height', {int_or_none}),
+                'width': ('resolution', 'width', {int_or_none}),
+            })),
             'quality': -2 if 'low' in video_quality['id'] else None,
             'protocol': 'niconico_dmc',
             'expected_protocol': dmc_protocol,  # XXX: This is not a documented field
@@ -383,6 +393,63 @@ def extract_video_quality(video_quality):
             }
         }
 
+    def _yield_dmc_formats(self, api_data, video_id):
+        dmc_data = traverse_obj(api_data, ('media', 'delivery', 'movie'))
+        audios = traverse_obj(dmc_data, ('audios', ..., {dict}))
+        videos = traverse_obj(dmc_data, ('videos', ..., {dict}))
+        protocols = traverse_obj(dmc_data, ('session', 'protocols', ..., {str}))
+        if not all((audios, videos, protocols)):
+            return
+
+        for audio_quality, video_quality, protocol in itertools.product(audios, videos, protocols):
+            if fmt := self._extract_format_for_quality(video_id, audio_quality, video_quality, protocol):
+                yield fmt
+
+    def _yield_dms_formats(self, api_data, video_id):
+        fmt_filter = lambda _, v: v['isAvailable'] and v['id']
+        videos = traverse_obj(api_data, ('media', 'domand', 'videos', fmt_filter))
+        audios = traverse_obj(api_data, ('media', 'domand', 'audios', fmt_filter))
+        access_key = traverse_obj(api_data, ('media', 'domand', 'accessRightKey', {str}))
+        track_id = traverse_obj(api_data, ('client', 'watchTrackId', {str}))
+        if not all((videos, audios, access_key, track_id)):
+            return
+
+        dms_m3u8_url = self._download_json(
+            f'https://nvapi.nicovideo.jp/v1/watch/{video_id}/access-rights/hls', video_id,
+            data=json.dumps({
+                'outputs': list(itertools.product((v['id'] for v in videos), (a['id'] for a in audios)))
+            }).encode(), query={'actionTrackId': track_id}, headers={
+                'x-access-right-key': access_key,
+                'x-frontend-id': 6,
+                'x-frontend-version': 0,
+                'x-request-with': 'https://www.nicovideo.jp',
+            })['data']['contentUrl']
+        # Getting all audio formats results in duplicate video formats which we filter out later
+        dms_fmts = self._extract_m3u8_formats(dms_m3u8_url, video_id)
+
+        # m3u8 extraction does not provide audio bitrates, so extract from the API data and fix
+        for audio_fmt in traverse_obj(dms_fmts, lambda _, v: v['vcodec'] == 'none'):
+            yield {
+                **audio_fmt,
+                **traverse_obj(audios, (lambda _, v: audio_fmt['format_id'].startswith(v['id']), {
+                    'format_id': ('id', {str}),
+                    'abr': ('bitRate', {functools.partial(float_or_none, scale=1000)}),
+                    'asr': ('samplingRate', {int_or_none}),
+                }), get_all=False),
+                'acodec': 'aac',
+                'ext': 'm4a',
+            }
+
+        # Sort before removing dupes to keep the format dicts with the lowest tbr
+        video_fmts = sorted((fmt for fmt in dms_fmts if fmt['vcodec'] != 'none'), key=lambda f: f['tbr'])
+        self._remove_duplicate_formats(video_fmts)
+        # Calculate the true vbr/tbr by subtracting the lowest abr
+        min_abr = min(traverse_obj(audios, (..., 'bitRate', {float_or_none})), default=0) / 1000
+        for video_fmt in video_fmts:
+            video_fmt['tbr'] -= min_abr
+            video_fmt['format_id'] = f'video-{video_fmt["tbr"]:.0f}'
+            yield video_fmt
+
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
@@ -409,19 +476,17 @@ def _real_extract(self, url):
                 webpage, 'error reason', default=None)
             if not error_msg:
                 raise
-            raise ExtractorError(re.sub(r'\s+', ' ', error_msg), expected=True)
-
-        formats = []
-
-        def get_video_info(*items, get_first=True, **kwargs):
-            return traverse_obj(api_data, ('video', *items), get_all=not get_first, **kwargs)
-
-        quality_info = api_data['media']['delivery']['movie']
-        session_api_data = quality_info['session']
-        for (audio_quality, video_quality, protocol) in itertools.product(quality_info['audios'], quality_info['videos'], session_api_data['protocols']):
-            fmt = self._extract_format_for_quality(video_id, audio_quality, video_quality, protocol)
-            if fmt:
-                formats.append(fmt)
+            raise ExtractorError(clean_html(error_msg), expected=True)
+
+        club_joined = traverse_obj(api_data, ('channel', 'viewer', 'follow', 'isFollowed', {bool}))
+        if club_joined is None:
+            fail_msg = self._html_search_regex(
+                r'<p[^>]+\bclass="fail-message"[^>]*>(?P<msg>.+?)</p>',
+                webpage, 'fail message', default=None, group='msg')
+            if fail_msg:
+                self.raise_login_required(clean_html(fail_msg), metadata_available=True)
+        elif not club_joined:
+            self.raise_login_required('This video is for members only', metadata_available=True)
 
         # Start extracting information
         tags = None
@@ -440,11 +505,15 @@ def get_video_info(*items, get_first=True, **kwargs):
 
         thumb_prefs = qualities(['url', 'middleUrl', 'largeUrl', 'player', 'ogp'])
 
+        def get_video_info(*items, get_first=True, **kwargs):
+            return traverse_obj(api_data, ('video', *items), get_all=not get_first, **kwargs)
+
         return {
             'id': video_id,
             '_api_data': api_data,
             'title': get_video_info(('originalTitle', 'title')) or self._og_search_title(webpage, default=None),
-            'formats': formats,
+            'formats': [*self._yield_dmc_formats(api_data, video_id),
+                        *self._yield_dms_formats(api_data, video_id)],
             'thumbnails': [{
                 'id': key,
                 'url': url,
@@ -472,8 +541,11 @@ def get_video_info(*items, get_first=True, **kwargs):
 
     def _get_subtitles(self, video_id, api_data):
         comments_info = traverse_obj(api_data, ('comment', 'nvComment', {dict})) or {}
+        if not comments_info.get('server'):
+            return
+
         danmaku = traverse_obj(self._download_json(
-            f'{comments_info.get("server")}/v1/threads', video_id, data=json.dumps({
+            f'{comments_info["server"]}/v1/threads', video_id, data=json.dumps({
                 'additionals': {},
                 'params': comments_info.get('params'),
                 'threadKey': comments_info.get('threadKey'),
@@ -489,10 +561,6 @@ def _get_subtitles(self, video_id, api_data):
             note='Downloading comments', errnote='Failed to download comments'),
             ('data', 'threads', ..., 'comments', ...))
 
-        if not danmaku:
-            self.report_warning(f'Failed to get comments. {bug_reports_message()}')
-            return
-
         return {
             'comments': [{
                 'ext': 'json',
diff --git a/yt_dlp/extractor/rai.py b/yt_dlp/extractor/rai.py
index f6219c2dbd9a..c1fc65c81fd9 100644
--- a/yt_dlp/extractor/rai.py
+++ b/yt_dlp/extractor/rai.py
@@ -28,6 +28,29 @@ class RaiBaseIE(InfoExtractor):
     _GEO_COUNTRIES = ['IT']
     _GEO_BYPASS = False
 
+    def _fix_m3u8_formats(self, media_url, video_id):
+        fmts = self._extract_m3u8_formats(
+            media_url, video_id, 'mp4', m3u8_id='hls', fatal=False)
+
+        # Fix malformed m3u8 manifests by setting audio-only/video-only formats
+        for f in fmts:
+            if not f.get('acodec'):
+                f['acodec'] = 'mp4a'
+            if not f.get('vcodec'):
+                f['vcodec'] = 'avc1'
+            man_url = f['url']
+            if re.search(r'chunklist(?:_b\d+)*_ao[_.]', man_url):  # audio only
+                f['vcodec'] = 'none'
+            elif re.search(r'chunklist(?:_b\d+)*_vo[_.]', man_url):  # video only
+                f['acodec'] = 'none'
+            else:  # video+audio
+                if f['acodec'] == 'none':
+                    f['acodec'] = 'mp4a'
+                if f['vcodec'] == 'none':
+                    f['vcodec'] = 'avc1'
+
+        return fmts
+
     def _extract_relinker_info(self, relinker_url, video_id, audio_only=False):
         def fix_cdata(s):
             # remove \r\n\t before and after <![CDATA[ ]]> to avoid
@@ -69,8 +92,7 @@ def fix_cdata(s):
                     'format_id': 'https-mp3',
                 })
             elif ext == 'm3u8' or 'format=m3u8' in media_url:
-                formats.extend(self._extract_m3u8_formats(
-                    media_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
+                formats.extend(self._fix_m3u8_formats(media_url, video_id))
             elif ext == 'f4m':
                 # very likely no longer needed. Cannot find any url that uses it.
                 manifest_url = update_url_query(
@@ -153,10 +175,10 @@ def get_format_info(tbr):
                 'format_id': f'https-{tbr}',
                 'width': format_copy.get('width'),
                 'height': format_copy.get('height'),
-                'tbr': format_copy.get('tbr'),
-                'vcodec': format_copy.get('vcodec'),
-                'acodec': format_copy.get('acodec'),
-                'fps': format_copy.get('fps'),
+                'tbr': format_copy.get('tbr') or tbr,
+                'vcodec': format_copy.get('vcodec') or 'avc1',
+                'acodec': format_copy.get('acodec') or 'mp4a',
+                'fps': format_copy.get('fps') or 25,
             } if format_copy else {
                 'format_id': f'https-{tbr}',
                 'width': _QUALITY[tbr][0],
@@ -245,7 +267,7 @@ class RaiPlayIE(RaiBaseIE):
             'series': 'Report',
             'season': '2013/14',
             'subtitles': {'it': 'count:4'},
-            'release_year': 2022,
+            'release_year': 2024,
             'episode': 'Espresso nel caffè - 07/04/2014',
             'timestamp': 1396919880,
             'upload_date': '20140408',
@@ -253,7 +275,7 @@ class RaiPlayIE(RaiBaseIE):
         },
         'params': {'skip_download': True},
     }, {
-        # 1080p direct mp4 url
+        # 1080p
        'url': 'https://www.raiplay.it/video/2021/11/Blanca-S1E1-Senza-occhi-b1255a4a-8e72-4a2f-b9f3-fc1308e00736.html',
         'md5': 'aeda7243115380b2dd5e881fd42d949a',
         'info_dict': {
@@ -274,7 +296,7 @@ class RaiPlayIE(RaiBaseIE):
             'episode': 'Senza occhi',
             'timestamp': 1637318940,
             'upload_date': '20211119',
-            'formats': 'count:12',
+            'formats': 'count:7',
         },
         'params': {'skip_download': True},
         'expected_warnings': ['Video not available. Likely due to geo-restriction.']
@@ -527,7 +549,7 @@ class RaiPlaySoundPlaylistIE(InfoExtractor):
         'info_dict': {
             'id': 'ilruggitodelconiglio',
             'title': 'Il Ruggito del Coniglio',
-            'description': 'md5:48cff6972435964284614d70474132e6',
+            'description': 'md5:62a627b3a2d0635d08fa8b6e0a04f27e',
         },
         'playlist_mincount': 65,
     }, {
@@ -634,19 +656,20 @@ def _real_extract(self, url):
         }
 
 
-class RaiNewsIE(RaiIE):  # XXX: Do not subclass from concrete IE
+class RaiNewsIE(RaiBaseIE):
     _VALID_URL = rf'https?://(www\.)?rainews\.it/(?!articoli)[^?#]+-(?P<id>{RaiBaseIE._UUID_RE})(?:-[^/?#]+)?\.html'
     _EMBED_REGEX = [rf'<div[^>]+data-src="(?P<url>/iframe/[^?#]+?{RaiBaseIE._UUID_RE}\.html)']
     _TESTS = [{
         # new rainews player (#3911)
-        'url': 'https://www.rainews.it/rubriche/24mm/video/2022/05/24mm-del-29052022-12cf645d-1ffd-4220-b27c-07c226dbdecf.html',
+        'url': 'https://www.rainews.it/video/2024/02/membri-della-croce-rossa-evacuano-gli-abitanti-di-un-villaggio-nella-regione-ucraina-di-kharkiv-il-filmato-dallucraina--31e8017c-845c-43f5-9c48-245b43c3a079.html',
         'info_dict': {
-            'id': '12cf645d-1ffd-4220-b27c-07c226dbdecf',
+            'id': '31e8017c-845c-43f5-9c48-245b43c3a079',
             'ext': 'mp4',
-            'title': 'Puntata del 29/05/2022',
-            'duration': 1589,
-            'upload_date': '20220529',
+            'title': 'md5:1e81364b09de4a149042bac3c7d36f0b',
+            'duration': 196,
+            'upload_date': '20240225',
             'uploader': 'rainews',
+            'formats': 'count:2',
         },
         'params': {'skip_download': True},
     }, {
@@ -659,7 +682,8 @@ class RaiNewsIE(RaiIE):  # XXX: Do not subclass from concrete IE
             'description': 'I film in uscita questa settimana.',
             'thumbnail': r're:^https?://.*\.png$',
             'duration': 833,
-            'upload_date': '20161103'
+            'upload_date': '20161103',
+            'formats': 'count:8',
         },
         'params': {'skip_download': True},
         'expected_warnings': ['unable to extract player_data'],
@@ -684,7 +708,7 @@ def _real_extract(self, url):
         if not relinker_url:
             # fallback on old implementation for some old content
             try:
-                return self._extract_from_content_id(video_id, url)
+                return RaiIE._real_extract(self, url)
             except GeoRestrictedError:
                 raise
             except ExtractorError as e:
diff --git a/yt_dlp/extractor/rozhlas.py b/yt_dlp/extractor/rozhlas.py
index 63134322dcca..411a625192d9 100644
--- a/yt_dlp/extractor/rozhlas.py
+++ b/yt_dlp/extractor/rozhlas.py
@@ -247,17 +247,17 @@ class MujRozhlasIE(RozhlasBaseIE):
         'url': 'https://www.mujrozhlas.cz/vykopavky/ach-jo-zase-teleci-rizek-je-mnohem-min-cesky-nez-jsme-si-mysleli',
         'md5': '6f8fd68663e64936623e67c152a669e0',
         'info_dict': {
-            'id': '10739193',
+            'id': '10787730',
             'ext': 'mp3',
             'title': 'Ach jo, zase to telecí! Řízek je mnohem míň český, než jsme si mysleli',
             'description': 'md5:db7141e9caaedc9041ec7cefb9a62908',
             'timestamp': 1684915200,
-            'modified_timestamp': 1684922446,
+            'modified_timestamp': 1687550432,
             'series': 'Vykopávky',
             'thumbnail': 'https://portal.rozhlas.cz/sites/default/files/images/84377046610af6ddc54d910b1dd7a22b.jpg',
             'channel_id': 'radio-wave',
             'upload_date': '20230524',
-            'modified_date': '20230524',
+            'modified_date': '20230623',
         },
     }, {
         # serial extraction
@@ -277,6 +277,26 @@ class MujRozhlasIE(RozhlasBaseIE):
             'title': 'Nespavci',
             'description': 'md5:c430adcbf9e2b9eac88b745881e814dc',
         },
+    }, {
+        # serialPart
+        'url': 'https://www.mujrozhlas.cz/povidka/gustavo-adolfo-becquer-hora-duchu',
+        'info_dict': {
+            'id': '8889035',
+            'ext': 'm4a',
+            'title': 'Gustavo Adolfo Bécquer: Hora duchů',
+            'description': 'md5:343a15257b376c276e210b78e900ffea',
+            'chapter': 'Hora duchů a Polibek – dva tajemné příběhy Gustava Adolfa Bécquera',
+            'thumbnail': 'https://portal.rozhlas.cz/sites/default/files/images/2adfe1387fb140634be725c1ccf26214.jpg',
+            'timestamp': 1708173000,
+            'episode': 'Episode 1',
+            'episode_number': 1,
+            'series': 'Povídka',
+            'modified_date': '20240217',
+            'upload_date': '20240217',
+            'modified_timestamp': 1708173198,
+            'channel_id': 'vltava',
+        },
+        'params': {'skip_download': 'dash'},
     }]
 
     def _call_api(self, path, item_id, msg='API JSON'):
@@ -322,7 +342,7 @@ def _real_extract(self, url):
 
         entity = info['siteEntityBundle']
 
-        if entity == 'episode':
+        if entity in ('episode', 'serialPart'):
             return self._extract_audio_entry(self._call_api(
                 'episodes', info['contentId'], 'episode info API JSON'))
 
diff --git a/yt_dlp/extractor/tiktok.py b/yt_dlp/extractor/tiktok.py
index f26972cff2d9..39a4219221c4 100644
--- a/yt_dlp/extractor/tiktok.py
+++ b/yt_dlp/extractor/tiktok.py
@@ -6,7 +6,7 @@
 import time
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_unquote, compat_urllib_parse_urlparse
+from ..compat import compat_urllib_parse_urlparse
 from ..networking import HEADRequest
 from ..utils import (
     ExtractorError,
@@ -15,7 +15,6 @@
     UserNotLive,
     determine_ext,
     format_field,
-    get_first,
     int_or_none,
     join_nonempty,
     merge_dicts,
@@ -219,8 +218,8 @@ def audio_meta(url):
         def extract_addr(addr, add_meta={}):
             parsed_meta, res = parse_url_key(addr.get('url_key', ''))
             if res:
-                known_resolutions.setdefault(res, {}).setdefault('height', add_meta.get('height') or addr.get('height'))
-                known_resolutions[res].setdefault('width', add_meta.get('width') or addr.get('width'))
+                known_resolutions.setdefault(res, {}).setdefault('height', int_or_none(addr.get('height')))
+                known_resolutions[res].setdefault('width', int_or_none(addr.get('width')))
                 parsed_meta.update(known_resolutions.get(res, {}))
                 add_meta.setdefault('height', int_or_none(res[:-1]))
             return [{
@@ -237,22 +236,26 @@ def extract_addr(addr, add_meta={}):
 
         # Hack: Add direct video links first to prioritize them when removing duplicate formats
         formats = []
+        width = int_or_none(video_info.get('width'))
+        height = int_or_none(video_info.get('height'))
         if video_info.get('play_addr'):
             formats.extend(extract_addr(video_info['play_addr'], {
                 'format_id': 'play_addr',
                 'format_note': 'Direct video',
                 'vcodec': 'h265' if traverse_obj(
                     video_info, 'is_bytevc1', 'is_h265') else 'h264',  # TODO: Check for "direct iOS" videos, like https://www.tiktok.com/@cookierun_dev/video/7039716639834656002
-                'width': video_info.get('width'),
-                'height': video_info.get('height'),
+                'width': width,
+                'height': height,
             }))
         if video_info.get('download_addr'):
-            formats.extend(extract_addr(video_info['download_addr'], {
+            download_addr = video_info['download_addr']
+            dl_width = int_or_none(download_addr.get('width'))
+            formats.extend(extract_addr(download_addr, {
                 'format_id': 'download_addr',
                 'format_note': 'Download video%s' % (', watermarked' if video_info.get('has_watermark') else ''),
                 'vcodec': 'h264',
-                'width': video_info.get('width'),
-                'height': video_info.get('height'),
+                'width': dl_width or width,
+                'height': try_call(lambda: int(dl_width / 0.5625)) or height,  # download_addr['height'] is wrong
                 'preference': -2 if video_info.get('has_watermark') else -1,
             }))
         if video_info.get('play_addr_h264'):
@@ -315,9 +318,6 @@ def extract_addr(addr, add_meta={}):
 
         return {
             'id': aweme_id,
-            'extractor_key': TikTokIE.ie_key(),
-            'extractor': TikTokIE.IE_NAME,
-            'webpage_url': self._create_url(author_info.get('uid'), aweme_id),
             **traverse_obj(aweme_detail, {
                 'title': ('desc', {str}),
                 'description': ('desc', {str}),
@@ -921,20 +921,23 @@ class DouyinIE(TikTokBaseIE):
     _VALID_URL = r'https?://(?:www\.)?douyin\.com/video/(?P<id>[0-9]+)'
     _TESTS = [{
         'url': 'https://www.douyin.com/video/6961737553342991651',
-        'md5': 'a97db7e3e67eb57bf40735c022ffa228',
+        'md5': '9ecce7bc5b302601018ecb2871c63a75',
         'info_dict': {
             'id': '6961737553342991651',
             'ext': 'mp4',
             'title': '#杨超越 小小水手带你去远航❤️',
             'description': '#杨超越 小小水手带你去远航❤️',
+            'uploader': '6897520xka',
             'uploader_id': '110403406559',
             'uploader_url': 'https://www.douyin.com/user/MS4wLjABAAAAEKnfa654JAJ_N5lgZDQluwsxmY0lhfmEYNQBBkwGG98',
             'channel_id': 'MS4wLjABAAAAEKnfa654JAJ_N5lgZDQluwsxmY0lhfmEYNQBBkwGG98',
             'creator': '杨超越',
-            'duration': 19782,
+            'creators': ['杨超越'],
+            'duration': 19,
             'timestamp': 1620905839,
             'upload_date': '20210513',
             'track': '@杨超越创作的原声',
+            'artists': ['杨超越'],
             'view_count': int,
             'like_count': int,
             'repost_count': int,
@@ -943,20 +946,23 @@ class DouyinIE(TikTokBaseIE):
         },
     }, {
         'url': 'https://www.douyin.com/video/6982497745948921092',
-        'md5': '34a87ebff3833357733da3fe17e37c0e',
+        'md5': '15c5e660b7048af3707304e3cc02bbb5',
         'info_dict': {
             'id': '6982497745948921092',
             'ext': 'mp4',
             'title': '这个夏日和小羊@杨超越 一起遇见白色幻想',
             'description': '这个夏日和小羊@杨超越 一起遇见白色幻想',
+            'uploader': '0731chaoyue',
             'uploader_id': '408654318141572',
             'uploader_url': 'https://www.douyin.com/user/MS4wLjABAAAAZJpnglcjW2f_CMVcnqA_6oVBXKWMpH0F8LIHuUu8-lA',
             'channel_id': 'MS4wLjABAAAAZJpnglcjW2f_CMVcnqA_6oVBXKWMpH0F8LIHuUu8-lA',
             'creator': '杨超越工作室',
-            'duration': 42479,
+            'creators': ['杨超越工作室'],
+            'duration': 42,
             'timestamp': 1625739481,
             'upload_date': '20210708',
             'track': '@杨超越工作室创作的原声',
+            'artists': ['杨超越工作室'],
             'view_count': int,
             'like_count': int,
             'repost_count': int,
@@ -965,20 +971,23 @@ class DouyinIE(TikTokBaseIE):
         },
     }, {
         'url': 'https://www.douyin.com/video/6953975910773099811',
-        'md5': 'dde3302460f19db59c47060ff013b902',
+        'md5': '0e6443758b8355db9a3c34864a4276be',
         'info_dict': {
             'id': '6953975910773099811',
             'ext': 'mp4',
             'title': '#一起看海 出现在你的夏日里',
             'description': '#一起看海 出现在你的夏日里',
+            'uploader': '6897520xka',
             'uploader_id': '110403406559',
             'uploader_url': 'https://www.douyin.com/user/MS4wLjABAAAAEKnfa654JAJ_N5lgZDQluwsxmY0lhfmEYNQBBkwGG98',
             'channel_id': 'MS4wLjABAAAAEKnfa654JAJ_N5lgZDQluwsxmY0lhfmEYNQBBkwGG98',
             'creator': '杨超越',
-            'duration': 17343,
+            'creators': ['杨超越'],
+            'duration': 17,
             'timestamp': 1619098692,
             'upload_date': '20210422',
             'track': '@杨超越创作的原声',
+            'artists': ['杨超越'],
             'view_count': int,
             'like_count': int,
             'repost_count': int,
@@ -1004,20 +1013,23 @@ class DouyinIE(TikTokBaseIE):
         'skip': 'No longer available',
     }, {
         'url': 'https://www.douyin.com/video/6963263655114722595',
-        'md5': 'cf9f11f0ec45d131445ec2f06766e122',
+        'md5': '1440bcf59d8700f8e014da073a4dfea8',
        'info_dict': {
             'id': '6963263655114722595',
             'ext': 'mp4',
             'title': '#哪个爱豆的105度最甜 换个角度看看我哈哈',
             'description': '#哪个爱豆的105度最甜 换个角度看看我哈哈',
+            'uploader': '6897520xka',
             'uploader_id': '110403406559',
             'uploader_url': 'https://www.douyin.com/user/MS4wLjABAAAAEKnfa654JAJ_N5lgZDQluwsxmY0lhfmEYNQBBkwGG98',
             'channel_id': 'MS4wLjABAAAAEKnfa654JAJ_N5lgZDQluwsxmY0lhfmEYNQBBkwGG98',
             'creator': '杨超越',
-            'duration': 15115,
+            'creators': ['杨超越'],
+            'duration': 15,
             'timestamp': 1621261163,
             'upload_date': '20210517',
             'track': '@杨超越创作的原声',
+            'artists': ['杨超越'],
             'view_count': int,
             'like_count': int,
             'repost_count': int,
@@ -1025,34 +1037,23 @@ class DouyinIE(TikTokBaseIE):
             'thumbnail': r're:https?://.+\.jpe?g',
         },
     }]
-    _APP_VERSIONS = [('23.3.0', '230300')]
-    _APP_NAME = 'aweme'
-    _AID = 1128
-    _API_HOSTNAME = 'aweme.snssdk.com'
     _UPLOADER_URL_FORMAT = 'https://www.douyin.com/user/%s'
     _WEBPAGE_HOST = 'https://www.douyin.com/'
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
-        try:
-            return self._extract_aweme_app(video_id)
-        except ExtractorError as e:
-            e.expected = True
-            self.to_screen(f'{e}; trying with webpage')
-
-        webpage = self._download_webpage(url, video_id)
-        render_data = self._search_json(
-            r'