Compare commits
24 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 8ad9d7eaf9 | |||
| 066149d81b | |||
| 2854cd4bc7 | |||
| a94215ec3a | |||
| 1450f5a801 | |||
| 7709ebbf87 | |||
| 5724373550 | |||
| 0a82a3571f | |||
| 56ed4b89dd | |||
| db1211c9bf | |||
| 6eef894fbb | |||
| 858e14683e | |||
| db76afe5b5 | |||
| 1f28a304eb | |||
| b6d0c20db1 | |||
| 758eb0ba49 | |||
| 5a33a2a3d9 | |||
| ff203974e1 | |||
| 56759f06ec | |||
| 0d1286dd41 | |||
| d6798f6eed | |||
| 1847d6a659 | |||
| 13bf29cd07 | |||
| 8034bc1556 |
514
HIDI/__init__.py
514
HIDI/__init__.py
@ -1,514 +0,0 @@
|
||||
import json
|
||||
import re
|
||||
import base64
|
||||
import hashlib
|
||||
import click
|
||||
|
||||
from http.cookiejar import CookieJar
|
||||
from typing import Optional, Iterable
|
||||
from langcodes import Language
|
||||
|
||||
from unshackle.core.constants import AnyTrack
|
||||
from unshackle.core.credential import Credential
|
||||
from unshackle.core.manifests import DASH
|
||||
from unshackle.core.service import Service
|
||||
from unshackle.core.titles import Episode, Series, Movie, Movies, Title_T, Titles_T
|
||||
from unshackle.core.tracks import Chapter, Tracks, Subtitle, Audio, Video
|
||||
from unshackle.core.utilities import import_module_by_path
|
||||
|
||||
|
||||
class HIDI(Service):
    """
    Service code for HiDive (hidive.com)
    Version: 1.3.2
    Authorization: Email + password login, with automatic token refresh.
    Security: FHD@L3

    IMPORTANT: UPDATE YOUR UNSHACKLE TO 2.3.0 TO GET THE NECESSARY FIX FOR THIS SERVICE
    Also when downloading a series, use the link from the first season of the series
    """

    # Accepts either a season URL (/season/<id>) or a playlist URL (/playlist/<id>).
    TITLE_RE = r"^https?://(?:www\.)?hidive\.com/(?:season/(?P<season_id>\d+)|playlist/(?P<playlist_id>\d+))$"
    # No regional restriction enforced by this service code.
    GEOFENCE = ()
    NO_SUBTITLES = False
    # DCE/imggaming front-office API used by the HiDive web player.
    API_BASE = "https://dce-frontoffice.imggaming.com/api/v4"
|
||||
|
||||
    @staticmethod
    @click.command(name="HIDI", short_help="https://hidive.com")
    @click.argument("title", type=str)
    @click.pass_context
    def cli(ctx, **kwargs):
        # Click entry point: instantiate the service with the CLI context.
        return HIDI(ctx, **kwargs)
|
||||
|
||||
def __init__(self, ctx, title: str):
|
||||
super().__init__(ctx)
|
||||
m = re.match(self.TITLE_RE, title)
|
||||
if not m:
|
||||
raise ValueError("Unsupported HiDive URL. Use /season/<id> or /playlist/<id>")
|
||||
|
||||
self.season_id = m.group("season_id")
|
||||
self.playlist_id = m.group("playlist_id")
|
||||
self.kind = "serie" if self.season_id else "movie"
|
||||
self.content_id = int(self.season_id or self.playlist_id)
|
||||
|
||||
if not self.config:
|
||||
raise EnvironmentError("Missing HIDI service config.")
|
||||
self.cdm = ctx.obj.cdm
|
||||
self._auth_token = None
|
||||
self._refresh_token = None
|
||||
self._drm_cache = {}
|
||||
|
||||
    def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
        """
        Log in to HiDive with email + password.

        Sets browser-like and DCE API headers on the session, performs the
        login POST, and stores the auth/refresh tokens for later use.

        Raises:
            ValueError: if no email/password credential was provided.
            PermissionError: on HTTP 401 (wrong email or password).
        """
        # Browser-imitating headers plus the imggaming API identification
        # headers (x-api-key / app / Realm / x-app-var) required by the backend.
        base_headers = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:143.0) Gecko/20100101 Firefox/143.0",
            "Accept": "application/json, text/plain, */*",
            "Accept-Language": "en-US",
            "Referer": "https://www.hidive.com/",
            "Origin": "https://www.hidive.com",
            "x-api-key": self.config["x_api_key"],
            "app": "dice",
            "Realm": "dce.hidive",
            "x-app-var": self.config["x_app_var"],
        }
        self.session.headers.update(base_headers)

        if not credential or not credential.username or not credential.password:
            raise ValueError("HiDive requires email + password")

        r_login = self.session.post(
            self.config["endpoints"]["login"],
            json={"id": credential.username, "secret": credential.password}
        )
        if r_login.status_code == 401:
            raise PermissionError("Invalid email or password.")
        r_login.raise_for_status()

        login_data = r_login.json()
        # Keep both tokens: the auth token is short-lived, the refresh token
        # lets _refresh_auth() renew it transparently.
        self._auth_token = login_data["authorisationToken"]
        self._refresh_token = login_data["refreshToken"]

        self.session.headers["Authorization"] = f"Bearer {self._auth_token}"
        self.log.info("HiDive login successful.")
|
||||
|
||||
def _refresh_auth(self):
|
||||
if not self._refresh_token:
|
||||
raise PermissionError("No refresh token available to renew session.")
|
||||
|
||||
self.log.warning("Auth token expired, refreshing...")
|
||||
r = self.session.post(
|
||||
self.config["endpoints"]["refresh"],
|
||||
json={"refreshToken": self._refresh_token}
|
||||
)
|
||||
if r.status_code == 401:
|
||||
raise PermissionError("Refresh token is invalid. Please log in again.")
|
||||
r.raise_for_status()
|
||||
|
||||
data = r.json()
|
||||
self._auth_token = data["authorisationToken"]
|
||||
self.session.headers["Authorization"] = f"Bearer {self._auth_token}"
|
||||
self.log.info("Auth token refreshed successfully.")
|
||||
|
||||
def _api_get(self, url, **kwargs):
|
||||
resp = self.session.get(url, **kwargs)
|
||||
if resp.status_code == 401:
|
||||
self._refresh_auth()
|
||||
resp = self.session.get(url, **kwargs)
|
||||
resp.raise_for_status()
|
||||
return resp
|
||||
|
||||
def _fetch_season_data(self, season_id: int) -> dict:
|
||||
"""Fetch season view data."""
|
||||
return self._api_get(
|
||||
self.config["endpoints"]["view"],
|
||||
params={
|
||||
"type": "season",
|
||||
"id": season_id,
|
||||
"timezone": "Europe/Amsterdam"
|
||||
}
|
||||
).json()
|
||||
|
||||
def _fetch_adjacent_seasons(self, series_id: int, season_id: int) -> dict:
|
||||
"""Fetch all seasons in a series using adjacentTo endpoint."""
|
||||
url = f"{self.API_BASE}/series/{series_id}/adjacentTo/{season_id}"
|
||||
return self._api_get(url, params={"size": 25}).json()
|
||||
|
||||
def _extract_series_info(self, season_data: dict) -> tuple[Optional[int], Optional[str]]:
|
||||
"""
|
||||
Extract series ID and title from season data.
|
||||
Checks multiple locations in the JSON structure.
|
||||
"""
|
||||
series_id = None
|
||||
series_title = None
|
||||
|
||||
# Method 1: Check metadata.series
|
||||
metadata = season_data.get("metadata", {})
|
||||
if metadata.get("series"):
|
||||
series_id = metadata["series"].get("seriesId")
|
||||
series_title = metadata["series"].get("title")
|
||||
if series_id:
|
||||
return series_id, series_title
|
||||
|
||||
# Method 2: Check elements for $type: "series"
|
||||
for elem in season_data.get("elements", []):
|
||||
if elem.get("$type") == "series":
|
||||
attrs = elem.get("attributes", {})
|
||||
series_id = attrs.get("id")
|
||||
series_info = attrs.get("series", {})
|
||||
series_title = series_info.get("title") or series_title
|
||||
if series_id:
|
||||
return series_id, series_title
|
||||
|
||||
# Method 3: Check bucket elements for seriesId
|
||||
for elem in season_data.get("elements", []):
|
||||
if elem.get("$type") == "bucket":
|
||||
attrs = elem.get("attributes", {})
|
||||
if attrs.get("seriesId"):
|
||||
series_id = attrs["seriesId"]
|
||||
return series_id, series_title
|
||||
|
||||
# Method 4: Check hero actions for seriesId
|
||||
for elem in season_data.get("elements", []):
|
||||
if elem.get("$type") == "hero":
|
||||
for action in elem.get("attributes", {}).get("actions", []):
|
||||
action_data = action.get("attributes", {}).get("action", {}).get("data", {})
|
||||
if action_data.get("seriesId"):
|
||||
series_id = action_data["seriesId"]
|
||||
return series_id, series_title
|
||||
|
||||
return series_id, series_title
|
||||
|
||||
def _extract_season_number(self, season_data: dict) -> int:
|
||||
"""Extract season number from season data."""
|
||||
# Check metadata.currentSeason
|
||||
metadata = season_data.get("metadata", {})
|
||||
current_season = metadata.get("currentSeason", {})
|
||||
if current_season.get("title"):
|
||||
# Parse "Season 2" -> 2
|
||||
title = current_season["title"]
|
||||
if title.lower().startswith("season "):
|
||||
try:
|
||||
return int(title.split(" ")[1])
|
||||
except (ValueError, IndexError):
|
||||
pass
|
||||
|
||||
# Check elements for series type with seasons info
|
||||
for elem in season_data.get("elements", []):
|
||||
if elem.get("$type") == "series":
|
||||
seasons_items = elem.get("attributes", {}).get("seasons", {}).get("items", [])
|
||||
for item in seasons_items:
|
||||
if item.get("seasonNumber"):
|
||||
return item["seasonNumber"]
|
||||
|
||||
# Check bucket title
|
||||
for elem in season_data.get("elements", []):
|
||||
if elem.get("$type") == "bucket" and elem.get("attributes", {}).get("type") == "season":
|
||||
bucket_title = elem.get("attributes", {}).get("bucketTitle", "")
|
||||
if bucket_title.lower().startswith("season "):
|
||||
try:
|
||||
return int(bucket_title.split(" ")[1])
|
||||
except (ValueError, IndexError):
|
||||
pass
|
||||
|
||||
return 1
|
||||
|
||||
def _parse_episodes_from_season(self, season_data: dict, series_title: str, season_number: int) -> list[Episode]:
|
||||
"""Parse episodes from season JSON data."""
|
||||
episodes = []
|
||||
|
||||
for elem in season_data.get("elements", []):
|
||||
if elem.get("$type") == "bucket" and elem.get("attributes", {}).get("type") == "season":
|
||||
items = elem.get("attributes", {}).get("items", [])
|
||||
|
||||
for idx, item in enumerate(items):
|
||||
if item.get("type") != "SEASON_VOD":
|
||||
continue
|
||||
|
||||
ep_title = item.get("title", "")
|
||||
ep_num = idx + 1
|
||||
|
||||
# Try to extract episode number from title "E1 - Title"
|
||||
if ep_title.startswith("E") and " - " in ep_title:
|
||||
try:
|
||||
ep_num = int(ep_title.split(" - ")[0][1:])
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
episodes.append(Episode(
|
||||
id_=item["id"],
|
||||
service=self.__class__,
|
||||
title=series_title,
|
||||
season=season_number,
|
||||
number=ep_num,
|
||||
name=ep_title,
|
||||
description=item.get("description", ""),
|
||||
language=Language.get("ja"),
|
||||
data=item,
|
||||
))
|
||||
break
|
||||
|
||||
return episodes
|
||||
|
||||
    def get_titles(self) -> Titles_T:
        """
        Resolve the requested URL into Movies or a Series.

        Movies (playlist URLs): pull the first VOD from the playlist bucket,
        taking title/description from the hero element when available.

        Series (season URLs): discover the series ID, then enumerate every
        season via the adjacentTo endpoint and collect all episodes. Falls
        back to the single anchor season if the series ID or the adjacent
        seasons cannot be fetched.
        """
        anchor_data = self._fetch_season_data(self.content_id)

        if self.kind == "movie":
            vod_id = None
            movie_title = None
            description = ""

            for elem in anchor_data.get("elements", []):
                # Hero element carries the display title and a textblock description.
                if elem.get("$type") == "hero":
                    hdr = (elem.get("attributes", {}).get("header", {}) or {}).get("attributes", {})
                    movie_title = hdr.get("text", movie_title)
                    for c in elem.get("attributes", {}).get("content", []):
                        if c.get("$type") == "textblock":
                            description = c.get("attributes", {}).get("text", description)

                # The playlist bucket holds the actual VOD item(s); take the first.
                if elem.get("$type") == "bucket" and elem.get("attributes", {}).get("type") == "playlist":
                    items = elem.get("attributes", {}).get("items", [])
                    if items:
                        vod_id = items[0]["id"]
                        movie_title = movie_title or items[0].get("title")
                        description = description or items[0].get("description", "")
                        break

            if not vod_id:
                raise ValueError("No VOD found in playlist data.")

            return Movies([
                Movie(
                    id_=vod_id,
                    service=self.__class__,
                    name=movie_title or "Unknown Title",
                    description=description or "",
                    year=None,
                    language=Language.get("en"),
                    data={"playlistId": self.content_id}
                )
            ])

        series_id, series_title = self._extract_series_info(anchor_data)
        series_title = series_title or "HiDive Series"
        anchor_season_num = self._extract_season_number(anchor_data)

        # Without a series ID we cannot enumerate other seasons.
        if not series_id:
            self.log.warning("Could not determine Series ID. Fetching single season only.")
            episodes = self._parse_episodes_from_season(anchor_data, series_title, anchor_season_num)
            return Series(episodes)

        try:
            adj_data = self._fetch_adjacent_seasons(series_id, self.content_id)
        except Exception as e:
            # Best-effort: degrade to the single anchor season rather than fail.
            self.log.warning(f"Failed to fetch adjacent seasons: {e}. Falling back to single season.")
            episodes = self._parse_episodes_from_season(anchor_data, series_title, anchor_season_num)
            return Series(episodes)

        # Build list of all seasons
        all_seasons = []

        # Preceding seasons (these come before current season)
        for s in adj_data.get("precedingSeasons", []):
            all_seasons.append({
                "id": s["id"],
                "seasonNumber": s.get("seasonNumber", 0),
                "title": s.get("title", "")
            })

        # Current/Anchor season
        all_seasons.append({
            "id": self.content_id,
            "seasonNumber": anchor_season_num,
            "title": f"Season {anchor_season_num}",
            "_data": anchor_data  # Cache to avoid re-fetching
        })

        # Following seasons (these come after current season)
        for s in adj_data.get("followingSeasons", []):
            all_seasons.append({
                "id": s["id"],
                "seasonNumber": s.get("seasonNumber", 0),
                "title": s.get("title", "")
            })

        # Deduplicate by ID and sort by season number
        unique_seasons = {}
        for s in all_seasons:
            s_id = s["id"]
            if s_id not in unique_seasons:
                unique_seasons[s_id] = s
            elif "_data" in s:
                # Prefer the one with cached data
                unique_seasons[s_id] = s

        sorted_seasons = sorted(unique_seasons.values(), key=lambda x: x["seasonNumber"])

        all_episodes = []

        for season_info in sorted_seasons:
            s_id = season_info["id"]
            s_num = season_info["seasonNumber"]

            if "_data" in season_info:
                # Anchor season: already fetched above.
                self.log.info(f"Processing Season {s_num} (ID: {s_id}) [cached]")
                season_data = season_info["_data"]
            else:
                self.log.info(f"Fetching Season {s_num} (ID: {s_id})")
                try:
                    season_data = self._fetch_season_data(s_id)
                except Exception as e:
                    # Skip unfetchable seasons instead of aborting the whole series.
                    self.log.error(f"Failed to fetch Season {s_num}: {e}")
                    continue

            episodes = self._parse_episodes_from_season(season_data, series_title, s_num)
            self.log.info(f" Found {len(episodes)} episodes")
            all_episodes.extend(episodes)

        if not all_episodes:
            raise ValueError("No episodes found across all seasons.")

        return Series(all_episodes)
|
||||
|
||||
def _get_audio_for_langs(self, mpd_url: str, langs: Iterable[Language]) -> list[Audio]:
|
||||
merged: list[Audio] = []
|
||||
seen = set()
|
||||
|
||||
# Use first available language as fallback, or "en" as ultimate fallback
|
||||
fallback_lang = langs[0] if langs else Language.get("en")
|
||||
|
||||
dash = DASH.from_url(mpd_url, session=self.session)
|
||||
try:
|
||||
# Parse with a valid fallback language
|
||||
base_tracks = dash.to_tracks(language=fallback_lang)
|
||||
except Exception:
|
||||
# Try with English as ultimate fallback
|
||||
base_tracks = dash.to_tracks(language=Language.get("en"))
|
||||
|
||||
all_audio = base_tracks.audio or []
|
||||
|
||||
for lang in langs:
|
||||
# Match by language prefix (e.g. en, ja)
|
||||
for audio in all_audio:
|
||||
lang_code = getattr(audio.language, "language", "en")
|
||||
if lang_code.startswith(lang.language[:2]):
|
||||
key = (lang_code, getattr(audio, "codec", None), getattr(audio, "bitrate", None))
|
||||
if key in seen:
|
||||
continue
|
||||
merged.append(audio)
|
||||
seen.add(key)
|
||||
|
||||
# If nothing matched, just return all available audio tracks
|
||||
if not merged and all_audio:
|
||||
merged = all_audio
|
||||
|
||||
return merged
|
||||
|
||||
|
||||
def get_tracks(self, title: Title_T) -> Tracks:
|
||||
vod_resp = self._api_get(
|
||||
self.config["endpoints"]["vod"].format(vod_id=title.id),
|
||||
params={"includePlaybackDetails": "URL"},
|
||||
)
|
||||
vod = vod_resp.json()
|
||||
|
||||
playback_url = vod.get("playerUrlCallback")
|
||||
if not playback_url:
|
||||
raise ValueError("No playback URL found.")
|
||||
|
||||
stream_data = self._api_get(playback_url).json()
|
||||
dash_list = stream_data.get("dash", [])
|
||||
if not dash_list:
|
||||
raise ValueError("No DASH streams available.")
|
||||
|
||||
entry = dash_list[0]
|
||||
mpd_url = entry["url"]
|
||||
|
||||
# Collect available HiDive metadata languages
|
||||
meta_audio_tracks = vod.get("onlinePlaybackMetadata", {}).get("audioTracks", [])
|
||||
available_langs = []
|
||||
for m in meta_audio_tracks:
|
||||
lang_code = (m.get("languageCode") or "").split("-")[0]
|
||||
if not lang_code:
|
||||
continue
|
||||
try:
|
||||
available_langs.append(Language.get(lang_code))
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
# Use first available language as fallback, or English as ultimate fallback
|
||||
fallback_lang = available_langs[0] if available_langs else Language.get("en")
|
||||
|
||||
# Parse DASH manifest with a valid fallback language
|
||||
base_tracks = DASH.from_url(mpd_url, session=self.session).to_tracks(language=fallback_lang)
|
||||
|
||||
audio_tracks = self._get_audio_for_langs(mpd_url, available_langs)
|
||||
|
||||
# Map metadata labels
|
||||
meta_audio_map = {m.get("languageCode", "").split("-")[0]: m.get("label") for m in meta_audio_tracks}
|
||||
for a in audio_tracks:
|
||||
lang_code = getattr(a.language, "language", "en")
|
||||
a.name = meta_audio_map.get(lang_code, lang_code)
|
||||
a.is_original_lang = (lang_code == title.language.language)
|
||||
|
||||
base_tracks.audio = audio_tracks
|
||||
|
||||
# Subtitles
|
||||
subtitles = []
|
||||
for sub in entry.get("subtitles", []):
|
||||
if sub.get("format", "").lower() != "vtt":
|
||||
continue
|
||||
lang_code = sub.get("language", "en").replace("-", "_")
|
||||
try:
|
||||
lang = Language.get(lang_code)
|
||||
except Exception:
|
||||
lang = Language.get("en")
|
||||
subtitles.append(Subtitle(
|
||||
id_=f"{lang_code}:vtt",
|
||||
url=sub.get("url"),
|
||||
language=lang,
|
||||
codec=Subtitle.Codec.WebVTT,
|
||||
name=lang.language_name(),
|
||||
))
|
||||
base_tracks.subtitles = subtitles
|
||||
|
||||
# DRM info
|
||||
drm = entry.get("drm", {}) or {}
|
||||
jwt = drm.get("jwtToken")
|
||||
lic_url = (drm.get("url") or "").strip()
|
||||
if jwt and lic_url:
|
||||
self._drm_cache[title.id] = (jwt, lic_url)
|
||||
|
||||
return base_tracks
|
||||
|
||||
def _hidive_get_drm_info(self, title: Title_T) -> tuple[str, str]:
|
||||
if title.id in self._drm_cache:
|
||||
return self._drm_cache[title.id]
|
||||
self.get_tracks(title)
|
||||
if title.id not in self._drm_cache:
|
||||
raise ValueError("DRM information not found for this title.")
|
||||
return self._drm_cache[title.id]
|
||||
|
||||
def _decode_hidive_license_payload(self, payload: bytes) -> bytes:
|
||||
text = payload.decode("utf-8", errors="ignore")
|
||||
prefix = "data:application/octet-stream;base64,"
|
||||
if text.startswith(prefix):
|
||||
b64 = text.split(",", 1)[1]
|
||||
return base64.b64decode(b64)
|
||||
return payload
|
||||
|
||||
    def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes | str | None:
        """
        POST the Widevine challenge to the title's license server.

        The JWT and license URL come from _hidive_get_drm_info(); the
        response may be a base64 data URI, which is unwrapped before return.
        """
        jwt_token, license_url = self._hidive_get_drm_info(title)
        headers = {
            "Authorization": f"Bearer {jwt_token}",
            "Content-Type": "application/octet-stream",
            "Accept": "*/*",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/142.0.0.0 Safari/537.36",
            "Origin": "https://www.hidive.com",
            "Referer": "https://www.hidive.com/",
            # Base64 of {"system":"com.widevine.alpha"} — tells the server which DRM system.
            "X-DRM-INFO": "eyJzeXN0ZW0iOiJjb20ud2lkZXZpbmUuYWxwaGEifQ==",
        }
        r = self.session.post(license_url, data=challenge, headers=headers, timeout=30)
        r.raise_for_status()
        return self._decode_hidive_license_payload(r.content)
|
||||
|
||||
    def get_chapters(self, title: Title_T) -> list[Chapter]:
        # No chapter information is fetched for HiDive titles.
        return []
|
||||
@ -1,10 +0,0 @@
|
||||
x_api_key: "857a1e5d-e35e-4fdf-805b-a87b6f8364bf"
|
||||
x_app_var: "6.59.1.e16cdfd"
|
||||
|
||||
endpoints:
|
||||
init: "https://dce-frontoffice.imggaming.com/api/v1/init/"
|
||||
login: "https://dce-frontoffice.imggaming.com/api/v2/login"
|
||||
vod: "https://dce-frontoffice.imggaming.com/api/v4/vod/{vod_id}?includePlaybackDetails=URL"
|
||||
adjacent: "https://dce-frontoffice.imggaming.com/api/v4/vod/{vod_id}/adjacent"
|
||||
view: "https://dce-frontoffice.imggaming.com/api/v1/view" # Changed from season_view
|
||||
refresh: "https://dce-frontoffice.imggaming.com/api/v2/token/refresh"
|
||||
128
KIJK/__init__.py
Normal file
128
KIJK/__init__.py
Normal file
@ -0,0 +1,128 @@
|
||||
import re
|
||||
from collections.abc import Generator
|
||||
from typing import Optional, Union
|
||||
import urllib.parse
|
||||
import json
|
||||
|
||||
import click
|
||||
from langcodes import Language
|
||||
|
||||
from unshackle.core.constants import AnyTrack
|
||||
from unshackle.core.credential import Credential
|
||||
from unshackle.core.manifests import DASH
|
||||
from unshackle.core.search_result import SearchResult
|
||||
from unshackle.core.service import Service
|
||||
from unshackle.core.titles import Movie, Movies, Title_T, Titles_T
|
||||
from unshackle.core.tracks import Tracks, Chapter
|
||||
|
||||
|
||||
class KIJK(Service):
    """
    Service code for kijk.nl
    Version: 1.0.0

    Authorization: None

    Security: FHD@L3, UHD@L3
    """

    # Group 1 captures the programme GUID from a kijk.nl programme URL.
    TITLE_RE = r"https?://(?:www\.)?kijk\.nl/programmas/[^/]+/([^/?]+)"
    # Service is restricted to the Netherlands.
    GEOFENCE = ("NL",)
|
||||
|
||||
    @staticmethod
    @click.command(name="KIJK", short_help="https://kijk.nl")
    @click.argument("title", type=str)
    @click.pass_context
    def cli(ctx, **kwargs):
        # Click entry point: instantiate the service with the CLI context.
        return KIJK(ctx, **kwargs)
|
||||
|
||||
def __init__(self, ctx, title):
|
||||
super().__init__(ctx)
|
||||
self.title = title
|
||||
if self.config is None:
|
||||
raise Exception("Config is missing!")
|
||||
|
||||
self.session.headers.update({"user-agent": self.config["client"]["default"]["user_agent"]})
|
||||
self.token = None
|
||||
self.license_url = None
|
||||
|
||||
def authenticate(self, cookies=None, credential=None):
|
||||
super().authenticate(cookies, credential)
|
||||
|
||||
self.log.info("Retrieving new token")
|
||||
query = {
|
||||
"query": "query DrmTokenQuery($provider: DrmProvider) {\n drmToken(drmProvider: $provider) {\n expiration\n token\n }\n }",
|
||||
"variables": {
|
||||
"provider": "JWP"
|
||||
}
|
||||
}
|
||||
res = self.session.post(self.config["endpoints"]["graphql"], json=query)
|
||||
res.raise_for_status()
|
||||
self.token = res.json()["data"]["drmToken"]["token"]
|
||||
|
||||
|
||||
    def search(self) -> Generator[SearchResult, None, None]:
        # kijk.nl search is not implemented; use a direct programme URL instead.
        raise NotImplementedError("Search is not supported for this service.")
|
||||
|
||||
def get_titles(self) -> Titles_T:
|
||||
guid_match = re.match(self.TITLE_RE, self.title)
|
||||
if not guid_match:
|
||||
raise ValueError("Invalid KIJK URL. Could not extract GUID.")
|
||||
|
||||
guid = guid_match.group(1)
|
||||
|
||||
query_graphql = "query GetVideoQuery($guid:[String]){programs(guid:$guid){items{guid type metadata availableRegion ...Media ...Tracks ...Sources}}}fragment Media on Program{media{type availableDate availabilityState airedDateTime expirationDate}}fragment Tracks on Program{tracks{file kind label}}fragment Sources on Program{sources{type file drm}}"
|
||||
variables_graphql = json.dumps({"guid": guid})
|
||||
|
||||
url = f"{self.config['endpoints']['graphql']}?query={urllib.parse.quote(query_graphql)}&variables={urllib.parse.quote(variables_graphql)}"
|
||||
|
||||
res = self.session.get(url)
|
||||
res.raise_for_status()
|
||||
|
||||
metadata = res.json()["data"]["programs"]["items"][0]
|
||||
|
||||
return Movies(
|
||||
[
|
||||
Movie(
|
||||
id_=metadata["guid"],
|
||||
service=self.__class__,
|
||||
name=metadata["metadata"]["media_program_name"],
|
||||
description=metadata["metadata"].get("media_description", ""),
|
||||
year=int(metadata["media"][0]["airedDateTime"].split('-')[0]),
|
||||
language=Language.get("nl"), # Hardcoded as it's a Dutch service
|
||||
data=metadata,
|
||||
)
|
||||
]
|
||||
)
|
||||
|
||||
def get_tracks(self, title: Title_T) -> Tracks:
|
||||
dash_link = None
|
||||
for source in title.data["sources"]:
|
||||
if source.get("type") == "dash" and source.get("drm") and "widevine" in source.get("drm"):
|
||||
dash_link = source["file"]
|
||||
self.license_url = source["drm"]["widevine"]["url"]
|
||||
break
|
||||
|
||||
if not dash_link:
|
||||
raise ValueError("Could not find a DASH manifest for this title.")
|
||||
|
||||
self.log.debug(f"Manifest URL: {dash_link}")
|
||||
tracks = DASH.from_url(url=dash_link, session=self.session).to_tracks(language=title.language)
|
||||
|
||||
return tracks
|
||||
|
||||
    def get_chapters(self, title: Title_T) -> list[Chapter]:
        # No chapter information is available for kijk.nl titles.
        return []
|
||||
|
||||
def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> Optional[Union[bytes, str]]:
|
||||
if not self.license_url:
|
||||
raise ValueError("Widevine license endpoint not configured")
|
||||
|
||||
headers = {'x-vudrm-token': self.token} if self.token else {}
|
||||
response = self.session.post(
|
||||
url=self.license_url,
|
||||
data=challenge,
|
||||
headers=headers
|
||||
)
|
||||
response.raise_for_status()
|
||||
return response.content
|
||||
6
KIJK/config.yaml
Normal file
6
KIJK/config.yaml
Normal file
@ -0,0 +1,6 @@
|
||||
endpoints:
|
||||
graphql: https://api.prd.video.talpa.network/graphql
|
||||
|
||||
client:
|
||||
default:
|
||||
user_agent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36"
|
||||
407
KNPY/__init__.py
407
KNPY/__init__.py
@ -1,407 +0,0 @@
|
||||
import base64
|
||||
import json
|
||||
import re
|
||||
from datetime import datetime, timezone
|
||||
from http.cookiejar import CookieJar
|
||||
from typing import List, Optional
|
||||
|
||||
import click
|
||||
import jwt
|
||||
from langcodes import Language
|
||||
|
||||
from unshackle.core.constants import AnyTrack
|
||||
from unshackle.core.credential import Credential
|
||||
from unshackle.core.manifests import DASH
|
||||
from unshackle.core.search_result import SearchResult
|
||||
from unshackle.core.service import Service
|
||||
from unshackle.core.titles import Episode, Movie, Movies, Series, Title_T, Titles_T
|
||||
from unshackle.core.tracks import Subtitle, Tracks
|
||||
|
||||
|
||||
class KNPY(Service):
    """
    Service code for Kanopy (kanopy.com).
    Version: 1.0.0

    Auth: Credential (username + password)
    Security: FHD@L3

    Handles both Movies and Series (Playlists).
    Detects and stops for movies that require tickets.
    Caching included
    """

    # Updated regex to match the new URL structure with library subdomain and path.
    # Named group "id" captures the numeric content ID at the end of the URL.
    TITLE_RE = r"^https?://(?:www\.)?kanopy\.com/.+/(?P<id>\d+)$"
    GEOFENCE = ()
    NO_SUBTITLES = False
|
||||
|
||||
    @staticmethod
    @click.command(name="KNPY", short_help="https://kanopy.com")
    @click.argument("title", type=str)
    @click.pass_context
    def cli(ctx, **kwargs):
        # Click entry point: instantiate the service with the CLI context.
        return KNPY(ctx, **kwargs)
|
||||
|
||||
def __init__(self, ctx, title: str):
|
||||
super().__init__(ctx)
|
||||
if not self.config:
|
||||
raise ValueError("KNPY configuration not found. Ensure config.yaml exists.")
|
||||
|
||||
self.cdm = ctx.obj.cdm
|
||||
|
||||
match = re.match(self.TITLE_RE, title)
|
||||
if match:
|
||||
self.content_id = match.group("id")
|
||||
else:
|
||||
self.content_id = None
|
||||
self.search_query = title
|
||||
|
||||
self.API_VERSION = self.config["client"]["api_version"]
|
||||
self.USER_AGENT = self.config["client"]["user_agent"]
|
||||
self.WIDEVINE_UA = self.config["client"]["widevine_ua"]
|
||||
|
||||
self.session.headers.update({
|
||||
"x-version": self.API_VERSION,
|
||||
"user-agent": self.USER_AGENT
|
||||
})
|
||||
|
||||
self._jwt = None
|
||||
self._visitor_id = None
|
||||
self._user_id = None
|
||||
self._domain_id = None
|
||||
self.widevine_license_url = None
|
||||
|
||||
    def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
        """
        Authenticate against Kanopy with email + password, using a cached JWT
        when one exists for the same username.

        Flow: try cache -> handshake for a visitor token -> email login ->
        store JWT + user ID -> fetch library memberships -> cache the JWT
        until its 'exp' claim (1h fallback).

        Raises:
            ValueError: if no email/password credential was provided.
        """
        if not credential or not credential.username or not credential.password:
            raise ValueError("Kanopy requires email and password for authentication.")

        cache = self.cache.get("auth_token")

        if cache and not cache.expired:
            cached_data = cache.data
            valid_token = None

            # Only reuse the token if it was issued for the same username.
            if isinstance(cached_data, dict) and "token" in cached_data:
                if cached_data.get("username") == credential.username:
                    valid_token = cached_data["token"]
                    self.log.info("Using cached authentication token")
                else:
                    self.log.info(f"Cached token belongs to '{cached_data.get('username')}', but logging in as '{credential.username}'. Re-authenticating.")

            # Older cache entries stored the bare token string without a username.
            elif isinstance(cached_data, str):
                self.log.info("Found legacy cached token format. Re-authenticating to ensure correct user.")

            if valid_token:
                self._jwt = valid_token
                self.session.headers.update({"authorization": f"Bearer {self._jwt}"})

                # Recover IDs from the JWT payload (signature not verified:
                # we only need the claims, the server validates the token).
                if not self._user_id or not self._domain_id or not self._visitor_id:
                    try:
                        decoded_jwt = jwt.decode(self._jwt, options={"verify_signature": False})
                        self._user_id = decoded_jwt["data"]["uid"]
                        self._visitor_id = decoded_jwt["data"]["visitor_id"]
                        self.log.info(f"Extracted user_id and visitor_id from cached token.")
                        self._fetch_user_details()
                        return
                    except (KeyError, jwt.DecodeError) as e:
                        # Fall through to a fresh login below.
                        self.log.error(f"Could not decode cached token: {e}. Re-authenticating.")

        # Fresh login: first a handshake for an anonymous visitor token.
        self.log.info("Performing handshake to get visitor token...")
        r = self.session.get(self.config["endpoints"]["handshake"])
        r.raise_for_status()
        handshake_data = r.json()
        self._visitor_id = handshake_data["visitorId"]
        initial_jwt = handshake_data["jwt"]

        self.log.info(f"Logging in as {credential.username}...")
        login_payload = {
            "credentialType": "email",
            "emailUser": {
                "email": credential.username,
                "password": credential.password
            }
        }
        r = self.session.post(
            self.config["endpoints"]["login"],
            json=login_payload,
            headers={"authorization": f"Bearer {initial_jwt}"}
        )
        r.raise_for_status()
        login_data = r.json()
        self._jwt = login_data["jwt"]
        self._user_id = login_data["userId"]

        self.session.headers.update({"authorization": f"Bearer {self._jwt}"})
        self.log.info(f"Successfully authenticated as {credential.username}")

        self._fetch_user_details()

        # Cache the fresh JWT, bounded by its 'exp' claim when present.
        try:
            decoded_jwt = jwt.decode(self._jwt, options={"verify_signature": False})
            exp_timestamp = decoded_jwt.get("exp")

            cache_payload = {
                "token": self._jwt,
                "username": credential.username
            }

            if exp_timestamp:
                expiration_in_seconds = int(exp_timestamp - datetime.now(timezone.utc).timestamp())
                self.log.info(f"Caching token for {expiration_in_seconds / 60:.2f} minutes.")
                cache.set(data=cache_payload, expiration=expiration_in_seconds)
            else:
                self.log.warning("JWT has no 'exp' claim, caching for 1 hour as a fallback.")
                cache.set(data=cache_payload, expiration=3600)
        except Exception as e:
            # Never fail authentication just because caching went wrong.
            self.log.error(f"Failed to decode JWT for caching: {e}. Caching for 1 hour as a fallback.")
            cache.set(
                data={"token": self._jwt, "username": credential.username},
                expiration=3600
            )
|
||||
|
||||
def _fetch_user_details(self):
    """Resolve the library domain ID for the authenticated user.

    Queries the memberships endpoint and picks the active default
    membership; falls back to the first listed membership, and raises
    if the account has no memberships at all.
    """
    self.log.info("Fetching user library memberships...")
    resp = self.session.get(
        self.config["endpoints"]["memberships"].format(user_id=self._user_id)
    )
    resp.raise_for_status()
    entries = resp.json().get("list", [])

    # Prefer the active, default membership when one exists.
    default = next(
        (m for m in entries if m.get("status") == "active" and m.get("isDefault", False)),
        None,
    )
    if default is not None:
        self._domain_id = str(default["domainId"])
        self.log.info(f"Using default library domain: {default.get('sitename', 'Unknown')} (ID: {self._domain_id})")
        return

    if entries:
        self._domain_id = str(entries[0]["domainId"])
        self.log.warning(f"No default library found. Using first active domain: {self._domain_id}")
    else:
        raise ValueError("No active library memberships found for this user.")
|
||||
|
||||
def get_titles(self) -> Titles_T:
    """Fetch metadata for the requested content ID and build title objects.

    Returns a Movies container for a single video, or a Series of
    Episodes for a playlist. Season and episode numbers are best-effort
    parsed from the playlist/video titles.

    Raises:
        ValueError: if no content ID or domain ID is set, or the content
            type is neither "video" nor "playlist".
    """
    if not self.content_id:
        raise ValueError("A content ID is required to get titles. Use a URL or run a search first.")
    if not self._domain_id:
        raise ValueError("Domain ID not set. Authentication may have failed.")

    r = self.session.get(self.config["endpoints"]["video_info"].format(video_id=self.content_id, domain_id=self._domain_id))
    r.raise_for_status()
    content_data = r.json()

    content_type = content_data.get("type")

    def parse_lang(data):
        # Best-effort: use the first entry of the "languages" list, else English.
        try:
            langs = data.get("languages", [])
            if isinstance(langs, list) and langs:
                return Language.find(langs[0])
        except Exception:  # was a bare `except:`; narrowed so Ctrl-C/SystemExit still propagate
            pass
        return Language.get("en")

    if content_type == "video":
        video_data = content_data["video"]
        movie = Movie(
            id_=str(video_data["videoId"]),
            service=self.__class__,
            name=video_data["title"],
            year=video_data.get("productionYear"),
            description=video_data.get("descriptionHtml", ""),
            language=parse_lang(video_data),
            data=video_data,
        )
        return Movies([movie])

    elif content_type == "playlist":
        playlist_data = content_data["playlist"]
        series_title = playlist_data["title"]
        series_year = playlist_data.get("productionYear")

        # Season number is parsed from the playlist title when present ("Season 2", "S2").
        season_match = re.search(r'(?:Season|S)\s*(\d+)', series_title, re.IGNORECASE)
        season_num = int(season_match.group(1)) if season_match else 1

        r = self.session.get(self.config["endpoints"]["video_items"].format(video_id=self.content_id, domain_id=self._domain_id))
        r.raise_for_status()
        items_data = r.json()

        episodes = []
        for i, item in enumerate(items_data.get("list", [])):
            if item.get("type") != "video":
                continue

            video_data = item["video"]

            # Prefer an explicit "Episode N" in the title; fall back to list order.
            ep_num = i + 1
            ep_title = video_data.get("title", "")
            ep_match = re.search(r'Ep(?:isode)?\.?\s*(\d+)', ep_title, re.IGNORECASE)
            if ep_match:
                ep_num = int(ep_match.group(1))

            episodes.append(
                Episode(
                    id_=str(video_data["videoId"]),
                    service=self.__class__,
                    title=series_title,
                    season=season_num,
                    number=ep_num,
                    name=video_data["title"],
                    description=video_data.get("descriptionHtml", ""),
                    year=video_data.get("productionYear", series_year),
                    language=parse_lang(video_data),
                    data=video_data,
                )
            )

        series = Series(episodes)
        series.name = series_title
        series.description = playlist_data.get("descriptionHtml", "")
        series.year = series_year
        return series

    else:
        raise ValueError(f"Unsupported content type: {content_type}")
|
||||
|
||||
def get_tracks(self, title: Title_T) -> Tracks:
    """Start a playback session and return the parsed DASH tracks.

    Posts to the plays endpoint, resolves the DASH manifest URL and the
    Widevine license URL (stored on self for get_widevine_license), then
    parses the manifest and attaches any WebVTT caption tracks.

    Raises:
        PermissionError: on HTTP 403 (region restriction or access denial).
        ValueError: when no DASH manifest or license URL can be resolved.
    """
    play_payload = {
        "videoId": int(title.id),
        "domainId": int(self._domain_id),
        "userId": int(self._user_id),
        "visitorId": self._visitor_id,
    }

    # Ensure auth/client headers are present without clobbering existing ones.
    self.session.headers.setdefault("authorization", f"Bearer {self._jwt}")
    self.session.headers.setdefault("x-version", self.API_VERSION)
    self.session.headers.setdefault("user-agent", self.USER_AGENT)

    resp = self.session.post(self.config["endpoints"]["plays"], json=play_payload)
    # Best-effort decode: error bodies are inspected below even on non-2xx.
    try:
        body = resp.json()
    except Exception:
        body = None

    # Handle known errors gracefully
    if resp.status_code == 403:
        if body and body.get("errorSubcode") == "playRegionRestricted":
            self.log.error("Kanopy reports: This video is not available in your country.")
            raise PermissionError(
                "Playback blocked by region restriction. Try connecting through a supported country or verify your library’s access region."
            )
        self.log.error(f"Access forbidden (HTTP 403). Response: {body}")
        raise PermissionError("Kanopy denied access to this video. It may require a different library membership or authentication.")

    # Raise for any other HTTP errors
    resp.raise_for_status()
    play_data = body or resp.json()

    manifest_url = None
    for manifest in play_data.get("manifests", []):
        if manifest["manifestType"] != "dash":
            continue
        url = manifest["url"]
        manifest_url = f"https://kanopy.com{url}" if url.startswith("/") else url
        drm_type = manifest.get("drmType")
        if drm_type == "kanopyDrm":
            play_id = play_data.get("playId")
            self.widevine_license_url = self.config["endpoints"]["widevine_license"].format(license_id=f"{play_id}-0")
        elif drm_type == "studioDrm":
            license_id = manifest.get("drmLicenseID", f"{play_data.get('playId')}-1")
            self.widevine_license_url = self.config["endpoints"]["widevine_license"].format(license_id=license_id)
        else:
            self.log.warning(f"Unknown drmType: {drm_type}")
            self.widevine_license_url = None
        break  # only the first DASH manifest is used

    if not manifest_url:
        raise ValueError("Could not find a DASH manifest for this title.")
    if not self.widevine_license_url:
        raise ValueError("Could not construct Widevine license URL.")

    self.log.info(f"Fetching DASH manifest from: {manifest_url}")
    resp = self.session.get(manifest_url)
    resp.raise_for_status()

    # Refresh headers for manifest parsing
    self.session.headers.clear()
    self.session.headers.update({
        "User-Agent": self.WIDEVINE_UA,
        "Accept": "*/*",
        "Accept-Encoding": "gzip, deflate",
        "Connection": "keep-alive",
    })

    tracks = DASH.from_text(resp.text, url=manifest_url).to_tracks(language=title.language)

    # Attach the first WebVTT file of each caption entry as a subtitle track.
    for caption_data in play_data.get("captions", []):
        lang = caption_data.get("language", "en")
        for file_info in caption_data.get("files", []):
            if file_info.get("type") == "webvtt":
                tracks.add(Subtitle(
                    id_=f"caption-{lang}",
                    url=file_info["url"],
                    codec=Subtitle.Codec.WebVTT,
                    language=Language.get(lang),
                ))
                break

    return tracks
|
||||
|
||||
|
||||
def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
    """POST the Widevine challenge to the license server and return the raw license bytes.

    Raises:
        ValueError: if get_tracks has not set the license URL yet.
    """
    if not self.widevine_license_url:
        raise ValueError("Widevine license URL was not set. Call get_tracks first.")

    resp = self.session.post(
        self.widevine_license_url,
        data=challenge,
        headers={
            "Content-Type": "application/octet-stream",
            "User-Agent": self.WIDEVINE_UA,
            "Authorization": f"Bearer {self._jwt}",
            "X-Version": self.API_VERSION,
        },
    )
    resp.raise_for_status()
    return resp.content
|
||||
|
||||
# def search(self) -> List[SearchResult]:
|
||||
# if not hasattr(self, 'search_query'):
|
||||
# self.log.error("Search query not set. Cannot search.")
|
||||
# return []
|
||||
|
||||
# self.log.info(f"Searching for '{self.search_query}'...")
|
||||
# params = {
|
||||
# "query": self.search_query,
|
||||
# "sort": "relevance",
|
||||
# "domainId": self._domain_id,
|
||||
# "page": 0,
|
||||
# "perPage": 20
|
||||
# }
|
||||
# r = self.session.get(self.config["endpoints"]["search"], params=params)
|
||||
# r.raise_for_status()
|
||||
# search_data = r.json()
|
||||
|
||||
# results = []
|
||||
# for item in search_data.get("list", []):
|
||||
# item_type = item.get("type")
|
||||
# if item_type not in ["playlist", "video"]:
|
||||
# continue
|
||||
|
||||
# video_id = item.get("videoId")
|
||||
# title = item.get("title", "No Title")
|
||||
# label = "Series" if item_type == "playlist" else "Movie"
|
||||
|
||||
# results.append(
|
||||
# SearchResult(
|
||||
# id_=str(video_id),
|
||||
# title=title,
|
||||
# description="",
|
||||
# label=label,
|
||||
# url=f"https://www.kanopy.com/watch/{video_id}"
|
||||
# )
|
||||
# )
|
||||
# return results
|
||||
|
||||
def get_chapters(self, title: Title_T) -> list:
    """No chapter data is provided for this service; always return an empty list."""
    return []
|
||||
@ -1,15 +0,0 @@
|
||||
client:
|
||||
api_version: "Android/com.kanopy/6.21.0/952 (SM-A525F; Android 15)"
|
||||
user_agent: "okhttp/5.2.1"
|
||||
widevine_ua: "KanopyApplication/6.21.0 (Linux;Android 15) AndroidXMedia3/1.8.0"
|
||||
|
||||
endpoints:
|
||||
handshake: "https://kanopy.com/kapi/handshake"
|
||||
login: "https://kanopy.com/kapi/login"
|
||||
memberships: "https://kanopy.com/kapi/memberships?userId={user_id}"
|
||||
video_info: "https://kanopy.com/kapi/videos/{video_id}?domainId={domain_id}"
|
||||
video_items: "https://kanopy.com/kapi/videos/{video_id}/items?domainId={domain_id}"
|
||||
search: "https://kanopy.com/kapi/search/videos"
|
||||
plays: "https://kanopy.com/kapi/plays"
|
||||
access_expires_in: "https://kanopy.com/kapi/users/{user_id}/history/videos/{video_id}/access_expires_in?domainId={domain_id}"
|
||||
widevine_license: "https://kanopy.com/kapi/licenses/widevine/{license_id}"
|
||||
297
KOWP/__init__.py
297
KOWP/__init__.py
@ -1,297 +0,0 @@
|
||||
import json
|
||||
import re
|
||||
from http.cookiejar import CookieJar
|
||||
from typing import Optional, List, Dict, Any
|
||||
|
||||
import click
|
||||
from langcodes import Language
|
||||
|
||||
from unshackle.core.constants import AnyTrack
|
||||
from unshackle.core.credential import Credential
|
||||
from unshackle.core.manifests import DASH
|
||||
from unshackle.core.service import Service
|
||||
from unshackle.core.search_result import SearchResult
|
||||
from unshackle.core.titles import Episode, Series, Title_T, Titles_T
|
||||
from unshackle.core.tracks import Subtitle, Tracks
|
||||
from unshackle.core.utilities import is_close_match
|
||||
|
||||
class KOWP(Service):
    """
    Service code for Kocowa Plus (kocowa.com).
    Version: 1.0.0

    Auth: Credential (username + password)
    Security: FHD@L3
    """

    # Accepts a full season URL or a bare numeric title ID.
    TITLE_RE = r"^(?:https?://(?:www\.)?kocowa\.com/[^/]+/season/)?(?P<title_id>\d+)"
    GEOFENCE = ()
    NO_SUBTITLES = False

    @staticmethod
    @click.command(name="kowp", short_help="https://www.kocowa.com")
    @click.argument("title", type=str)
    @click.option("--extras", is_flag=True, default=False, help="Include teasers/extras")
    @click.pass_context
    def cli(ctx, **kwargs):
        # click entry point: construct the service from the parsed CLI options.
        return KOWP(ctx, **kwargs)
|
||||
|
||||
def __init__(self, ctx, title: str, extras: bool = False):
    """Parse the title argument (season URL or bare ID) and initialise state."""
    super().__init__(ctx)
    matched = re.match(self.TITLE_RE, title)
    # When the argument is neither a URL nor an ID, keep the raw string so
    # it can be used as a search keyword.
    self.title_id = matched.group("title_id") if matched else title
    self.include_extras = extras
    self.brightcove_account_id = None
    self.brightcove_pk = None
    self.cdm = ctx.obj.cdm
|
||||
|
||||
def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
    """Log in with username/password, then obtain a middleware token.

    Raises:
        ValueError: when no credential is supplied.
        PermissionError: when Kocowa rejects the login.
    """
    if not credential:
        raise ValueError("KOWP requires username and password")

    login_body = {
        "username": credential.username,
        "password": credential.password,
        "device_id": f"{credential.username}_browser",
        "device_type": "browser",
        "device_model": "Firefox",
        "device_version": "firefox/143.0",
        "push_token": None,
        "app_version": "v4.0.16",
    }
    resp = self.session.post(
        self.config["endpoints"]["login"],
        json=login_body,
        headers={"Authorization": "anonymous", "Origin": "https://www.kocowa.com"},
    )
    resp.raise_for_status()
    body = resp.json()
    # Kocowa reports errors in-band; "0000" is the success code.
    if body.get("code") != "0000":
        raise PermissionError(f"Login failed: {body.get('message')}")

    self.access_token = body["object"]["access_token"]

    # Exchange the access token for a middleware token used for playback auth.
    resp = self.session.post(
        self.config["endpoints"]["middleware_auth"],
        json={"token": f"wA-Auth.{self.access_token}"},
        headers={"Origin": "https://www.kocowa.com"},
    )
    resp.raise_for_status()
    self.middleware_token = resp.json()["token"]

    self._fetch_brightcove_config()
|
||||
|
||||
def _fetch_brightcove_config(self):
    """Fetch Brightcove account_id and policy_key from Kocowa's public config endpoint.

    Sets self.brightcove_account_id and self.brightcove_pk.

    Raises:
        RuntimeError: if the request fails or required keys are missing,
            chained to the underlying exception.
    """
    try:
        r = self.session.get(
            "https://middleware.bcmw.kocowa.com/api/config",
            headers={
                "Origin": "https://www.kocowa.com",
                "Referer": "https://www.kocowa.com/",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/142.0.0.0 Safari/537.36 Edg/142.0.0.0"
            }
        )
        r.raise_for_status()
        config = r.json()

        self.brightcove_account_id = config.get("VC_ACCOUNT_ID")
        self.brightcove_pk = config.get("BCOV_POLICY_KEY")

        if not self.brightcove_account_id:
            raise ValueError("VC_ACCOUNT_ID missing in /api/config response")
        if not self.brightcove_pk:
            raise ValueError("BCOV_POLICY_KEY missing in /api/config response")

        self.log.info(f"Brightcove config loaded: account_id={self.brightcove_account_id}")

    except Exception as e:
        # Chain the original exception so the root cause survives in tracebacks
        # (the previous version dropped it with a bare `raise RuntimeError(...)`).
        raise RuntimeError(f"Failed to fetch or parse Brightcove config: {e}") from e
|
||||
|
||||
def get_titles(self) -> Titles_T:
    """Page through the season's episode list and build a Series.

    Teasers/extras are included only when the --extras flag was passed.

    Raises:
        ValueError: if the series metadata (and thus its title) could
            not be retrieved from the first page.
    """
    collected = []
    offset = 0
    page_size = 20
    series_title = None  # taken from the first page's metadata

    while True:
        url = self.config["endpoints"]["metadata"].format(title_id=self.title_id)
        joiner = "&" if "?" in url else "?"
        url = f"{url}{joiner}offset={offset}&limit={page_size}"

        resp = self.session.get(
            url,
            headers={"Authorization": self.access_token, "Origin": "https://www.kocowa.com"},
        )
        resp.raise_for_status()
        payload = resp.json()["object"]

        # The series title only needs to be read once, from the first page.
        if series_title is None and "meta" in payload:
            series_title = payload["meta"]["title"]["en"]

        page = payload.get("next_episodes", {}).get("objects", [])
        if not page:
            break

        for entry in page:
            kind = entry.get("detail_type")
            if kind == "episode" or (self.include_extras and kind in ("teaser", "extra")):
                collected.append(entry)

        offset += page_size
        total = payload.get("next_episodes", {}).get("total_count", 0)
        if len(collected) >= total or len(page) < page_size:
            break

    if series_title is None:
        raise ValueError("Could not retrieve series metadata to get the title.")

    episodes = []
    for entry in collected:
        meta = entry["meta"]
        label = "Episode" if entry["detail_type"] == "episode" else entry["detail_type"].capitalize()
        number = meta.get("episode_number", 0)
        episodes.append(
            Episode(
                id_=str(entry["id"]),
                service=self.__class__,
                title=series_title,
                season=meta.get("season_number", 1),
                number=number,
                name=meta["title"].get("en") or f"{label} {number}",
                description=meta["description"].get("en") or "",
                year=None,
                language=Language.get("en"),
                data=entry,
            )
        )

    return Series(episodes)
|
||||
|
||||
def get_tracks(self, title: Title_T) -> Tracks:
    """Authorize playback, fetch the Brightcove manifest, and parse its tracks.

    Stores the playback token and Widevine license URL on self for
    get_widevine_license, and appends the manifest's text tracks as
    WebVTT subtitles.

    Raises:
        PermissionError: when playback authorization is refused.
        ValueError: when no Widevine-protected DASH source is found.
    """
    # Authorize playback with the middleware token.
    resp = self.session.post(
        self.config["endpoints"]["authorize"].format(episode_id=title.id),
        headers={"Authorization": f"Bearer {self.middleware_token}"},
    )
    resp.raise_for_status()
    auth = resp.json()
    if not auth.get("Success"):
        raise PermissionError("Playback authorization failed")
    self.playback_token = auth["token"]

    # Fetch the Brightcove manifest; the video is looked up by reference ID.
    resp = self.session.get(
        f"https://edge.api.brightcove.com/playback/v1/accounts/{self.brightcove_account_id}/videos/ref:{title.id}",
        headers={"Accept": f"application/json;pk={self.brightcove_pk}"},
    )
    resp.raise_for_status()
    manifest = resp.json()

    # Locate the DASH source together with its Widevine license URL.
    dash_url = widevine_url = None
    for source in manifest.get("sources", []):
        if source.get("type") != "application/dash+xml":
            continue
        dash_url = source["src"]
        widevine_url = (
            source.get("key_systems", {})
            .get("com.widevine.alpha", {})
            .get("license_url")
        )
        if dash_url and widevine_url:
            break

    if not dash_url or not widevine_url:
        raise ValueError("No Widevine DASH stream found")

    self.widevine_license_url = widevine_url
    tracks = DASH.from_url(dash_url, session=self.session).to_tracks(language=title.language)

    for text_track in manifest.get("text_tracks", []):
        srclang = text_track.get("srclang")
        # "thumbnails" entries are sprite tracks, not subtitles.
        if not srclang or srclang == "thumbnails":
            continue
        tracks.add(Subtitle(
            id_=text_track["id"],
            url=text_track["src"],
            codec=Subtitle.Codec.WebVTT,
            language=Language.get(srclang),
            sdh=True,  # Kocowa subs are SDH - mark them as such
            forced=False,
        ))

    return tracks
|
||||
|
||||
def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
    """Forward the Widevine challenge to Brightcove and return the license bytes."""
    license_headers = {
        "BCOV-Auth": self.playback_token,
        "Content-Type": "application/octet-stream",
        "Origin": "https://www.kocowa.com",
        "Referer": "https://www.kocowa.com/",
    }
    resp = self.session.post(
        self.widevine_license_url,
        data=challenge,
        headers=license_headers,
    )
    resp.raise_for_status()
    return resp.content
|
||||
|
||||
def search(self) -> List[SearchResult]:
    """Query Kocowa's autocomplete endpoint and return season results.

    Uses self.title_id (the raw CLI argument when it was not a URL/ID)
    as the search input. Only items of detail_type "season" are returned.
    """
    url = "https://prod-fms.kocowa.com/api/v01/fe/gks/autocomplete"
    params = {
        "search_category": "All",
        "search_input": self.title_id,
        "include_webtoon": "true",
    }

    r = self.session.get(
        url,
        params=params,
        headers={
            "Authorization": self.access_token,
            # Fix: these values previously carried trailing spaces
            # ("https://www.kocowa.com "), which is invalid for Origin/Referer
            # header values and can be rejected by the server/CORS checks.
            "Origin": "https://www.kocowa.com",
            "Referer": "https://www.kocowa.com/",
        }
    )
    r.raise_for_status()
    response = r.json()
    contents = response.get("object", {}).get("contents", [])

    results = []
    for item in contents:
        if item.get("detail_type") != "season":
            continue

        meta = item["meta"]
        title_en = meta["title"].get("en") or "[No Title]"
        description_en = meta["description"].get("en") or ""
        show_id = str(item["id"])

        results.append(
            SearchResult(
                id_=show_id,
                title=title_en,
                description=description_en,
                label="season",
                url=f"https://www.kocowa.com/en_us/season/{show_id}/"
            )
        )
    return results
|
||||
|
||||
def get_chapters(self, title: Title_T) -> list:
    """No chapter data is provided for this service; always return an empty list."""
    return []
|
||||
|
||||
@ -1,5 +0,0 @@
|
||||
endpoints:
|
||||
login: "https://prod-sgwv3.kocowa.com/api/v01/user/signin"
|
||||
middleware_auth: "https://middleware.bcmw.kocowa.com/authenticate-user"
|
||||
metadata: "https://prod-fms.kocowa.com/api/v01/fe/content/get?id={title_id}"
|
||||
authorize: "https://middleware.bcmw.kocowa.com/api/playback/authorize/{episode_id}"
|
||||
396
MUBI/__init__.py
396
MUBI/__init__.py
@ -1,396 +0,0 @@
|
||||
import json
|
||||
import re
|
||||
import uuid
|
||||
from http.cookiejar import CookieJar
|
||||
from typing import Optional, Generator
|
||||
from langcodes import Language
|
||||
import base64
|
||||
import click
|
||||
from unshackle.core.constants import AnyTrack
|
||||
from unshackle.core.credential import Credential
|
||||
from unshackle.core.manifests import DASH
|
||||
from unshackle.core.service import Service
|
||||
from unshackle.core.titles import Episode, Movie, Movies, Title_T, Titles_T, Series
|
||||
from unshackle.core.tracks import Chapter, Tracks, Subtitle
|
||||
|
||||
|
||||
class MUBI(Service):
    """
    Service code for MUBI (mubi.com)
    Version: 1.2.0

    Authorization: Required cookies (lt token + session)
    Security: FHD @ L3 (Widevine)

    Supports:
    • Series ↦ https://mubi.com/en/nl/series/twin-peaks
    • Movies ↦ https://mubi.com/en/nl/films/the-substance

    """
    # Series URLs, optionally with an explicit /season/<slug> segment.
    SERIES_TITLE_RE = r"^https?://(?:www\.)?mubi\.com(?:/[^/]+)*?/series/(?P<series_slug>[^/]+)(?:/season/(?P<season_slug>[^/]+))?$"
    # Film URLs; the slug is the last path segment after /films/.
    TITLE_RE = r"^(?:https?://(?:www\.)?mubi\.com)(?:/[^/]+)*?/films/(?P<slug>[^/?#]+)$"
    NO_SUBTITLES = False

    @staticmethod
    @click.command(name="MUBI", short_help="https://mubi.com")
    @click.argument("title", type=str)
    @click.pass_context
    def cli(ctx, **kwargs):
        # click entry point: build the service from the CLI context.
        return MUBI(ctx, **kwargs)
|
||||
|
||||
def __init__(self, ctx, title: str):
    """Classify the URL as film or series and initialise session state."""
    super().__init__(ctx)

    film_match = re.match(self.TITLE_RE, title)
    series_match = re.match(self.SERIES_TITLE_RE, title)
    if not (film_match or series_match):
        raise ValueError(f"Invalid MUBI URL: {title}")

    self.is_series = bool(series_match)
    self.slug = film_match.group("slug") if film_match else None
    self.series_slug = series_match.group("series_slug") if series_match else None
    self.season_slug = series_match.group("season_slug") if series_match else None

    # Populated later by authenticate() / get_titles().
    self.film_id: Optional[int] = None
    self.lt_token: Optional[str] = None
    self.session_token: Optional[str] = None
    self.user_id: Optional[int] = None
    self.country_code: Optional[str] = None
    self.anonymous_user_id: Optional[str] = None
    self.default_country: Optional[str] = None
    self.reels_data: Optional[list] = None

    # Store CDM reference
    self.cdm = ctx.obj.cdm

    if self.config is None:
        raise EnvironmentError("Missing service config for MUBI.")
|
||||
|
||||
def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
    """Authenticate from browser cookies and prime the session headers.

    Requires the 'lt' (bearer) and '_mubi_session' cookies. Also derives
    an anonymous user id from '_snow_id.c006' (or generates one), fetches
    the account to learn the user's country, and binds the anonymous id
    to the account.

    Raises:
        ValueError: if IP geolocation fails.
        PermissionError: if cookies (or the required ones) are missing,
            or the account lookup fails.
    """
    super().authenticate(cookies, credential)

    # Country from IP seeds the Client-Country header until the account
    # response provides the definitive value.
    try:
        geo_resp = self.session.get(self.config["endpoints"]["ip_geolocation"], timeout=5)
        geo_resp.raise_for_status()
        geo = geo_resp.json()
        if geo.get("country"):
            self.default_country = geo["country"]
            self.log.debug(f"Detected country from IP: {self.default_country}")
        else:
            self.log.warning("IP geolocation response did not contain a country code.")
    except Exception as e:
        raise ValueError(f"Failed to fetch IP geolocation: {e}")

    if not cookies:
        raise PermissionError("MUBI requires login cookies.")

    # Extract essential tokens from the cookie jar.
    lt_cookie = next((c for c in cookies if c.name == "lt"), None)
    session_cookie = next((c for c in cookies if c.name == "_mubi_session"), None)
    snow_id_cookie = next((c for c in cookies if c.name == "_snow_id.c006"), None)

    if not lt_cookie:
        raise PermissionError("Missing 'lt' cookie (Bearer token).")
    if not session_cookie:
        raise PermissionError("Missing '_mubi_session' cookie.")

    self.lt_token = lt_cookie.value
    self.session_token = session_cookie.value

    # The _snow_id.c006 value's first dot-separated field is used as the
    # anonymous user id; otherwise a fresh UUID is generated.
    if snow_id_cookie and "." in snow_id_cookie.value:
        self.anonymous_user_id = snow_id_cookie.value.split(".")[0]
    else:
        self.anonymous_user_id = str(uuid.uuid4())
        self.log.warning(f"No _snow_id.c006 cookie found — generated new anonymous_user_id: {self.anonymous_user_id}")

    self.session.headers.update({
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) Firefox/143.0",
        "Origin": "https://mubi.com",
        "Referer": "https://mubi.com/",
        "CLIENT": "web",
        "Client-Accept-Video-Codecs": "h265,vp9,h264",
        "Client-Accept-Audio-Codecs": "aac",
        "Authorization": f"Bearer {self.lt_token}",
        "ANONYMOUS_USER_ID": self.anonymous_user_id,
        "Client-Country": self.default_country,
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-site",
        "Pragma": "no-cache",
        "Cache-Control": "no-cache",
    })

    account_resp = self.session.get(self.config["endpoints"]["account"])
    if not account_resp.ok:
        raise PermissionError(f"Failed to fetch MUBI account: {account_resp.status_code} {account_resp.text}")

    account = account_resp.json()
    self.user_id = account.get("id")
    self.country_code = (account.get("country") or {}).get("code", "NL")

    # The account's country is authoritative; also geofence to it.
    self.session.headers["Client-Country"] = self.country_code
    self.GEOFENCE = (self.country_code,)

    self._bind_anonymous_user()

    self.log.info(
        f"Authenticated as user {self.user_id}, "
        f"country: {self.country_code}, "
        f"anonymous_id: {self.anonymous_user_id}"
    )
|
||||
|
||||
def _bind_anonymous_user(self):
    """Attach the anonymous user UUID to the logged-in account (best effort).

    Failures are logged as warnings and never raised — binding is not
    required for playback to proceed.
    """
    try:
        resp = self.session.put(
            self.config["endpoints"]["current_user"],
            json={"anonymous_user_uuid": self.anonymous_user_id},
            headers={"Content-Type": "application/json"},
        )
        if resp.ok:
            self.log.debug("Anonymous user ID successfully bound to account.")
        else:
            self.log.warning(f"Failed to bind anonymous_user_uuid: {resp.status_code}")
    except Exception as e:
        self.log.warning(f"Exception while binding anonymous_user_uuid: {e}")
|
||||
|
||||
def get_titles(self) -> Titles_T:
    """Dispatch to series or film title retrieval based on the URL type."""
    return self._get_series_titles() if self.is_series else self._get_film_title()
|
||||
|
||||
def _get_film_title(self) -> Movies:
    """Fetch film metadata (caching its reels) and wrap it in a Movies container.

    Side effects: sets self.film_id and self.reels_data for later use by
    get_tracks().
    """
    url = self.config["endpoints"]["film_by_slug"].format(slug=self.slug)
    r = self.session.get(url)
    r.raise_for_status()
    data = r.json()

    self.film_id = data["id"]

    # Fetch reels to get definitive language code and cache the response
    url_reels = self.config["endpoints"]["reels"].format(film_id=self.film_id)
    r_reels = self.session.get(url_reels)
    r_reels.raise_for_status()
    self.reels_data = r_reels.json()

    # Extract original language from the first audio track of the first reel
    original_language_code = "en"  # Default fallback
    if self.reels_data and self.reels_data[0].get("audio_tracks"):
        first_audio_track = self.reels_data[0]["audio_tracks"][0]
        if "language_code" in first_audio_track:
            original_language_code = first_audio_track["language_code"]
            self.log.debug(f"Detected original language from reels: '{original_language_code}'")

    # NOTE: the previous version also built a `genres` string here that was
    # never used; it has been removed.
    # Strip the simple HTML wrappers used in the editorial description.
    description = (
        data.get("default_editorial_html", "")
        .replace("<p>", "").replace("</p>", "").replace("<em>", "").replace("</em>", "").strip()
    )
    year = data.get("year")
    name = data.get("title", "Unknown")

    movie = Movie(
        id_=self.film_id,
        service=self.__class__,
        name=name,
        year=year,
        description=description,
        language=Language.get(original_language_code),
        data=data,
    )

    return Movies([movie])
|
||||
|
||||
def _get_series_titles(self) -> Titles_T:
    """Fetch all episodes for the series (or one season, if a slug was given).

    Returns a Series sorted by (season, episode number).

    Raises:
        ValueError: if an explicitly requested season is missing, or the
            series lists no seasons at all.
    """
    # Fetch series metadata
    series_url = self.config["endpoints"]["series"].format(series_slug=self.series_slug)
    r_series = self.session.get(series_url)
    r_series.raise_for_status()
    series_data = r_series.json()

    episodes = []

    # If season is explicitly specified, only fetch that season
    if self.season_slug:
        eps_url = self.config["endpoints"]["season_episodes"].format(
            series_slug=self.series_slug,
            season_slug=self.season_slug
        )
        r_eps = self.session.get(eps_url)
        if r_eps.status_code == 404:
            raise ValueError(f"Season '{self.season_slug}' not found.")
        r_eps.raise_for_status()
        episodes_data = r_eps.json().get("episodes", [])
        self._add_episodes_to_list(episodes, episodes_data, series_data)
    else:
        # No season specified: fetch ALL seasons listed on the series.
        seasons = series_data.get("seasons", [])
        if not seasons:
            raise ValueError("No seasons found for this series.")

        for season in seasons:
            season_slug = season["slug"]
            eps_url = self.config["endpoints"]["season_episodes"].format(
                series_slug=self.series_slug,
                season_slug=season_slug
            )

            self.log.debug(f"Fetching episodes for season: {season_slug}")

            r_eps = self.session.get(eps_url)

            # Skip seasons that are not available in this region/account.
            if r_eps.status_code == 404:
                self.log.info(f"Season '{season_slug}' not available, skipping.")
                continue

            r_eps.raise_for_status()
            episodes_data = r_eps.json().get("episodes", [])

            if not episodes_data:
                self.log.info(f"No episodes found in season '{season_slug}'.")
                continue

            self._add_episodes_to_list(episodes, episodes_data, series_data)

    # Series is imported at module level; the redundant local re-import
    # that used to sit here has been removed.
    return Series(sorted(episodes, key=lambda x: (x.season, x.number)))
|
||||
|
||||
def _add_episodes_to_list(self, episodes_list: list, episodes_data: list, series_data: dict):
    """Append Episode objects built from raw episode dicts to episodes_list.

    The audio language is best-effort derived from each episode's
    consumable.playback_languages entry, defaulting to English.
    """
    for ep in episodes_data:
        playback_langs = ep.get("consumable", {}).get("playback_languages", {})
        audio_langs = playback_langs.get("audio_options", ["English"])
        # First word of e.g. "English (US)" — presumably a language name
        # that langcodes can resolve; falls back to English below if not.
        lang_code = audio_langs[0].split()[0].lower() if audio_langs else "en"

        try:
            detected_lang = Language.get(lang_code)
        except Exception:  # narrowed from a bare except: keep Ctrl-C/SystemExit working
            detected_lang = Language.get("en")

        episodes_list.append(Episode(
            id_=ep["id"],
            service=self.__class__,
            title=series_data["title"],  # Series title
            season=ep["episode"]["season_number"],
            number=ep["episode"]["number"],
            name=ep["title"],  # Episode title
            description=ep.get("short_synopsis", ""),
            language=detected_lang,
            data=ep,  # Full episode data for later use in get_tracks
        ))
|
||||
|
||||
    def get_tracks(self, title: Title_T) -> Tracks:
        """Resolve the DASH manifest and subtitle tracks for a film or episode.

        Initiates a viewing session, requests the secure stream URL, and builds
        a Tracks object from the DASH manifest plus any side-loaded WebVTT
        subtitles listed in the secure-URL response.

        :param title: Movie or Episode previously returned by get_titles().
        :raises RuntimeError: if the title has no id, or film state is missing.
        :raises ValueError: if no DASH manifest URL is present in the response.
        """
        film_id = getattr(title, "id", None)
        if not film_id:
            raise RuntimeError("Title ID not found.")

        # A viewing session must be initiated before the secure URL is granted;
        # the response also names the reel the backend selected.
        url_view = self.config["endpoints"]["initiate_viewing"].format(film_id=film_id)
        r_view = self.session.post(url_view, json={}, headers={"Content-Type": "application/json"})
        r_view.raise_for_status()
        view_data = r_view.json()
        reel_id = view_data["reel_id"]

        # Films only: resolve the reel matching the viewing session.
        if not self.is_series:
            if not self.film_id:
                raise RuntimeError("film_id not set. Call get_titles() first.")

            if not self.reels_data:
                self.log.warning("Reels data not cached, fetching now.")
                url_reels = self.config["endpoints"]["reels"].format(film_id=film_id)
                r_reels = self.session.get(url_reels)
                r_reels.raise_for_status()
                reels = r_reels.json()
            else:
                reels = self.reels_data

            # NOTE(review): `reel` is computed but never used below — it looks
            # like reel metadata was meant to drive audio/track selection.
            # Confirm intent before removing this branch.
            reel = next((r for r in reels if r["id"] == reel_id), reels[0])
        else:
            # Episodes carry no cached reel data; no reel resolution is needed.
            pass

        # Request the secure streaming URL; works for both films and episodes.
        url_secure = self.config["endpoints"]["secure_url"].format(film_id=film_id)
        r_secure = self.session.get(url_secure)
        r_secure.raise_for_status()
        secure_data = r_secure.json()

        # Pick the first DASH manifest among the offered stream URLs.
        manifest_url = None
        for entry in secure_data.get("urls", []):
            if entry.get("content_type") == "application/dash+xml":
                manifest_url = entry["src"]
                break

        if not manifest_url:
            raise ValueError("No DASH manifest URL found.")

        # Parse the DASH manifest; title.language is the fallback language.
        tracks = DASH.from_url(manifest_url, session=self.session).to_tracks(language=title.language)

        # Attach the side-loaded WebVTT subtitles listed next to the stream URLs.
        subtitles = []
        for sub in secure_data.get("text_track_urls", []):
            lang_code = sub.get("language_code", "und")
            vtt_url = sub.get("url")
            if not vtt_url:
                continue

            # Mark the subtitle whose language matches the title's own language.
            is_original = lang_code == title.language.language

            subtitles.append(
                Subtitle(
                    id_=sub["id"],
                    url=vtt_url,
                    language=Language.get(lang_code),
                    is_original_lang=is_original,
                    codec=Subtitle.Codec.WebVTT,
                    name=sub.get("display_name", lang_code.upper()),
                    forced=False,
                    sdh=False,
                )
            )
        # Replaces (rather than extends) any subtitles found in the manifest.
        tracks.subtitles = subtitles

        return tracks
|
||||
|
||||
def get_chapters(self, title: Title_T) -> list[Chapter]:
|
||||
return []
|
||||
|
||||
def get_widevine_license(self, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
|
||||
if not self.user_id:
|
||||
raise RuntimeError("user_id not set — authenticate first.")
|
||||
|
||||
dt_custom_data = {
|
||||
"userId": self.user_id,
|
||||
"sessionId": self.lt_token,
|
||||
"merchant": "mubi"
|
||||
}
|
||||
|
||||
dt_custom_data_b64 = base64.b64encode(json.dumps(dt_custom_data).encode()).decode()
|
||||
|
||||
headers = {
|
||||
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:143.0) Gecko/20100101 Firefox/143.0",
|
||||
"Accept": "*/*",
|
||||
"Origin": "https://mubi.com",
|
||||
"Referer": "https://mubi.com/",
|
||||
"dt-custom-data": dt_custom_data_b64,
|
||||
}
|
||||
|
||||
r = self.session.post(
|
||||
self.config["endpoints"]["license"],
|
||||
data=challenge,
|
||||
headers=headers,
|
||||
)
|
||||
r.raise_for_status()
|
||||
license_data = r.json()
|
||||
if license_data.get("status") != "OK":
|
||||
raise PermissionError(f"DRM license error: {license_data}")
|
||||
return base64.b64decode(license_data["license"])
|
||||
|
||||
@ -1,12 +0,0 @@
|
||||
# MUBI service configuration: REST endpoints used by the service code.
endpoints:
  # Account / user profile lookups.
  account: "https://api.mubi.com/v4/account"
  current_user: "https://api.mubi.com/v4/current_user"
  # Title metadata.
  film_by_slug: "https://api.mubi.com/v4/films/{slug}"
  playback_languages: "https://api.mubi.com/v4/films/{film_id}/playback_languages"
  # Playback session and stream resolution.
  initiate_viewing: "https://api.mubi.com/v4/films/{film_id}/viewing?parental_lock_enabled=true"
  reels: "https://api.mubi.com/v4/films/{film_id}/reels"
  secure_url: "https://api.mubi.com/v4/films/{film_id}/viewing/secure_url"
  # DRMtoday Widevine license proxy.
  license: "https://lic.drmtoday.com/license-proxy-widevine/cenc/"
  # Third-party IP geolocation helper.
  ip_geolocation: "https://directory.cookieyes.com/api/v1/ip"
  # Series browsing.
  series: "https://api.mubi.com/v4/series/{series_slug}"
  season_episodes: "https://api.mubi.com/v4/series/{series_slug}/seasons/{season_slug}/episodes/available"
|
||||
149
PTHS/__init__.py
149
PTHS/__init__.py
@ -1,149 +0,0 @@
|
||||
import json
|
||||
import re
|
||||
from typing import Optional
|
||||
from http.cookiejar import CookieJar
|
||||
from langcodes import Language
|
||||
import click
|
||||
|
||||
from unshackle.core.constants import AnyTrack
|
||||
from unshackle.core.credential import Credential
|
||||
from unshackle.core.manifests import DASH
|
||||
from unshackle.core.service import Service
|
||||
from unshackle.core.titles import Movie, Movies, Title_T, Titles_T
|
||||
from unshackle.core.tracks import Tracks
|
||||
|
||||
|
||||
class PTHS(Service):
|
||||
"""
|
||||
Service code for Pathé Thuis (pathe-thuis.nl)
|
||||
Version: 1.0.0
|
||||
|
||||
Security: SD @ L3 (Widevine)
|
||||
FHD @ L1
|
||||
Authorization: Cookies or authentication token
|
||||
|
||||
Supported:
|
||||
• Movies → https://www.pathe-thuis.nl/film/{id}
|
||||
|
||||
Note:
|
||||
Pathé Thuis does not have episodic content, only movies.
|
||||
"""
|
||||
|
||||
TITLE_RE = (
|
||||
r"^(?:https?://(?:www\.)?pathe-thuis\.nl/film/)?(?P<id>\d+)(?:/[^/]+)?$"
|
||||
)
|
||||
GEOFENCE = ("NL",)
|
||||
NO_SUBTITLES = True
|
||||
|
||||
    @staticmethod
    @click.command(name="PTHS", short_help="https://www.pathe-thuis.nl")
    @click.argument("title", type=str)
    @click.pass_context
    def cli(ctx, **kwargs):
        # Click entry point: instantiate the service with the parsed CLI args.
        return PTHS(ctx, **kwargs)
|
||||
|
||||
def __init__(self, ctx, title: str):
|
||||
super().__init__(ctx)
|
||||
|
||||
m = re.match(self.TITLE_RE, title)
|
||||
if not m:
|
||||
raise ValueError(
|
||||
f"Unsupported Pathé Thuis URL or ID: {title}\n"
|
||||
"Use e.g. https://www.pathe-thuis.nl/film/30591"
|
||||
)
|
||||
|
||||
self.movie_id = m.group("id")
|
||||
self.drm_token = None
|
||||
|
||||
if self.config is None:
|
||||
raise EnvironmentError("Missing service config for Pathé Thuis.")
|
||||
|
||||
def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
|
||||
super().authenticate(cookies, credential)
|
||||
|
||||
if not cookies:
|
||||
self.log.warning("No cookies provided, proceeding unauthenticated.")
|
||||
return
|
||||
|
||||
token = next((c.value for c in cookies if c.name == "authenticationToken"), None)
|
||||
if not token:
|
||||
self.log.info("No authenticationToken cookie found, unauthenticated mode.")
|
||||
return
|
||||
|
||||
self.session.headers.update({
|
||||
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:143.0) Gecko/20100101 Firefox/143.0",
|
||||
"X-Pathe-Device-Identifier": "web-widevine-1",
|
||||
"X-Pathe-Auth-Session-Token": token,
|
||||
})
|
||||
self.log.info("Authentication token successfully attached to session.")
|
||||
|
||||
|
||||
def get_titles(self) -> Titles_T:
|
||||
url = self.config["endpoints"]["metadata"].format(movie_id=self.movie_id)
|
||||
r = self.session.get(url)
|
||||
r.raise_for_status()
|
||||
data = r.json()
|
||||
|
||||
movie = Movie(
|
||||
id_=str(data["id"]),
|
||||
service=self.__class__,
|
||||
name=data["name"],
|
||||
description=data.get("intro", ""),
|
||||
year=data.get("year"),
|
||||
language=Language.get(data.get("language", "en")),
|
||||
data=data,
|
||||
)
|
||||
return Movies([movie])
|
||||
|
||||
|
||||
def get_tracks(self, title: Title_T) -> Tracks:
|
||||
ticket_id = self._get_ticket_id(title)
|
||||
url = self.config["endpoints"]["ticket"].format(ticket_id=ticket_id)
|
||||
|
||||
r = self.session.get(url)
|
||||
r.raise_for_status()
|
||||
data = r.json()
|
||||
stream = data["stream"]
|
||||
|
||||
manifest_url = stream.get("url") or stream.get("drmurl")
|
||||
if not manifest_url:
|
||||
raise ValueError("No stream manifest URL found.")
|
||||
|
||||
self.drm_token = stream["token"]
|
||||
self.license_url = stream["rawData"]["licenseserver"]
|
||||
|
||||
tracks = DASH.from_url(manifest_url, session=self.session).to_tracks(language=title.language)
|
||||
|
||||
return tracks
|
||||
|
||||
|
||||
def _get_ticket_id(self, title: Title_T) -> str:
|
||||
"""Fetch the user's owned ticket ID if present."""
|
||||
data = title.data
|
||||
for t in (data.get("tickets") or []):
|
||||
if t.get("playable") and str(t.get("movieId")) == str(self.movie_id):
|
||||
return str(t["id"])
|
||||
raise ValueError("No valid ticket found for this movie. Ensure purchase or login.")
|
||||
|
||||
|
||||
def get_chapters(self, title: Title_T):
|
||||
return []
|
||||
|
||||
|
||||
def get_widevine_license(self, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
|
||||
if not self.license_url or not self.drm_token:
|
||||
raise ValueError("Missing license URL or token.")
|
||||
|
||||
headers = {
|
||||
"Content-Type": "application/octet-stream",
|
||||
"Authorization": f"Bearer {self.drm_token}",
|
||||
}
|
||||
|
||||
params = {"custom_data": self.drm_token}
|
||||
|
||||
r = self.session.post(self.license_url, params=params, data=challenge, headers=headers)
|
||||
r.raise_for_status()
|
||||
|
||||
if not r.content:
|
||||
raise ValueError("Empty license response, likely invalid or expired token.")
|
||||
return r.content
|
||||
@ -1,3 +0,0 @@
|
||||
# Pathé Thuis service configuration.
endpoints:
  # Movie metadata; editions are included for quality/DRM variants.
  metadata: "https://www.pathe-thuis.nl/api/movies/{movie_id}?include=editions"
  # Ticket lookup; the response carries the stream manifest and DRM token.
  ticket: "https://www.pathe-thuis.nl/api/tickets/{ticket_id}"
|
||||
41
README.md
41
README.md
@ -1,42 +1,17 @@
|
||||
# DISCLAIMER: whatever you do with these services is entirely your responsibility! If you receive a DMCA notice, it is your fault, not mine. I am just sharing knowledge for educational purposes — aka don't sue me, npo.nl, please?
|
||||
|
||||
# These services are new and in development. Please feel free to submit pull requests or open a ticket for any mistakes or suggestions.
|
||||
This service is relatively new and in development. Please feel free to submit pull requests or issue a ticket for any mistakes or suggestions.
|
||||
|
||||
### If you have personal questions or want to request a service, DM me at discord (jerukpurut)
|
||||
|
||||
|
||||
- Roadmap:
|
||||
|
||||
1. NPO:
|
||||
NPO
|
||||
- To add search functionality
|
||||
- More accurate metadata (the year of showing is not according the year of release)
|
||||
- Have an automatic CDM recognition option instead of the user putting it manually in the config for drmType
|
||||
2. KOWP:
|
||||
- Audio mislabeled as English
|
||||
- To add Playready Support
|
||||
3. PTHS:
|
||||
- To add Playready Support (is needed since L3 is just 480p)
|
||||
- Search Functionality
|
||||
- Account login if possible
|
||||
4. HIDI:
|
||||
- Subtitles are slightly misordered when a second sentence appears: the last sentence ends up first and vice versa (needs to be fixed)
|
||||
5. MUBI:
|
||||
- Search Functionality
|
||||
6. VIKI:
|
||||
- CSRF Token is now scraped, would be from a api requests soon
|
||||
7. VIDO:
|
||||
- Subtitle has little quirk of having javanese and sundanese language labeled on the HLS one but not the DASH one
|
||||
- Search functionality not available yet
|
||||
8. KNPY:
|
||||
- Need to fix the search function
|
||||
9. VRT:
|
||||
- Search functionality
|
||||
- Fixing few hickups
|
||||
10. SKST (the hardest service I ever dealt upon now):
|
||||
- Subtitle has been fixed, hopefully no issue
|
||||
11. VLD:
|
||||
- So far no issue
|
||||
|
||||
|
||||
KIJK
|
||||
- works
|
||||
|
||||
- Acknowledgment
|
||||
|
||||
Thanks to Adef for the NPO start downloader.
|
||||
Thanks to FairTrade for turning NPO into an unshackle service
|
||||
|
||||
|
||||
1044
SKST/__init__.py
1044
SKST/__init__.py
File diff suppressed because it is too large
Load Diff
@ -1,42 +0,0 @@
|
||||
# SkyShowTime service configuration.
endpoints:
  signin: "https://rango.id.skyshowtime.com/signin/service/international"
  tokens: "https://ovp.skyshowtime.com/auth/tokens"
  personas: "https://web.clients.skyshowtime.com/bff/personas/v2"
  atom_node: "https://atom.skyshowtime.com/adapter-calypso/v3/query/node"
  atom_search: "https://atom.skyshowtime.com/adapter-calypso/v3/query/search"
  playback: "https://ovp.skyshowtime.com/video/playouts/vod"

# Client identity parameters sent with API requests.
params:
  provider: "SKYSHOWTIME"
  proposition: "SKYSHOWTIME"
  platform: "PC"
  device: "COMPUTER"
  client_version: "6.11.21-gsp"

# Request-signing material.
# NOTE(review): app_id says "SHOWMAX-ANDROID" in a SkyShowTime config — confirm
# this is intentional. The key below is a hard-coded shared secret committed to
# the repository.
signature:
  app_id: "SHOWMAX-ANDROID-v1"
  key: "kC2UFjsH6PHrc5ENGfyTgC5bPA7aBVZ4aJAyqBBP"
  version: "1.0"

# Markets where the service is available.
# NOTE(review): bare scalars like NO resolve to booleans under YAML 1.1 loaders
# (e.g. PyYAML) — confirm the loader in use treats these as strings.
territories:
  - NL
  - PL
  - ES
  - PT
  - SE
  - NO
  - DK
  - FI
  - CZ
  - SK
  - HU
  - RO
  - BG
  - HR
  - SI
  - BA
  - RS
  - ME
  - MK
  - AL
  - XK
||||
452
VIDO/__init__.py
452
VIDO/__init__.py
@ -1,452 +0,0 @@
|
||||
import re
|
||||
import uuid
|
||||
import xml.etree.ElementTree as ET
|
||||
from urllib.parse import urljoin
|
||||
from hashlib import md5
|
||||
from typing import Optional, Union
|
||||
from http.cookiejar import CookieJar
|
||||
from langcodes import Language
|
||||
|
||||
import click
|
||||
|
||||
from unshackle.core.credential import Credential
|
||||
from unshackle.core.manifests import HLS, DASH
|
||||
from unshackle.core.service import Service
|
||||
from unshackle.core.titles import Episode, Movie, Movies, Series, Title_T, Titles_T
|
||||
from unshackle.core.tracks import Chapter, Tracks, Subtitle
|
||||
from unshackle.core.constants import AnyTrack
|
||||
from datetime import datetime, timezone
|
||||
|
||||
|
||||
class VIDO(Service):
|
||||
"""
|
||||
Vidio.com service, Series and Movies, login required.
|
||||
Version: 2.3.0
|
||||
|
||||
Supports URLs like:
|
||||
• https://www.vidio.com/premier/2978/giligilis (Series)
|
||||
• https://www.vidio.com/watch/7454613-marantau-short-movie (Movie)
|
||||
|
||||
Security: HD@L3 (Widevine DRM when available)
|
||||
"""
|
||||
|
||||
TITLE_RE = r"^https?://(?:www\.)?vidio\.com/(?:premier|series|watch)/(?P<id>\d+)"
|
||||
GEOFENCE = ("ID",)
|
||||
|
||||
    @staticmethod
    @click.command(name="VIDO", short_help="https://vidio.com (login required)")
    @click.argument("title", type=str)
    @click.pass_context
    def cli(ctx, **kwargs):
        # Click entry point: instantiate the service with the parsed CLI args.
        return VIDO(ctx, **kwargs)
|
||||
|
||||
def __init__(self, ctx, title: str):
|
||||
super().__init__(ctx)
|
||||
|
||||
match = re.match(self.TITLE_RE, title)
|
||||
if not match:
|
||||
raise ValueError(f"Unsupported or invalid Vidio URL: {title}")
|
||||
self.content_id = match.group("id")
|
||||
|
||||
self.is_movie = "watch" in title
|
||||
|
||||
# Static app identifiers from Android traffic
|
||||
self.API_AUTH = "laZOmogezono5ogekaso5oz4Mezimew1"
|
||||
self.USER_AGENT = "vidioandroid/7.14.6-e4d1de87f2 (3191683)"
|
||||
self.API_APP_INFO = "android/15/7.14.6-e4d1de87f2-3191683"
|
||||
self.VISITOR_ID = str(uuid.uuid4())
|
||||
|
||||
# Auth state
|
||||
self._email = None
|
||||
self._user_token = None
|
||||
self._access_token = None
|
||||
|
||||
# DRM state
|
||||
self.license_url = None
|
||||
self.custom_data = None
|
||||
self.cdm = ctx.obj.cdm
|
||||
|
||||
def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
|
||||
if not credential or not credential.username or not credential.password:
|
||||
raise ValueError("Vidio requires email and password login.")
|
||||
|
||||
self._email = credential.username
|
||||
password = credential.password
|
||||
|
||||
cache_key = f"auth_tokens_{self._email}"
|
||||
cache = self.cache.get(cache_key)
|
||||
|
||||
# Check if valid tokens are already in the cache
|
||||
if cache and not cache.expired:
|
||||
self.log.info("Using cached authentication tokens")
|
||||
cached_data = cache.data
|
||||
self._user_token = cached_data.get("user_token")
|
||||
self._access_token = cached_data.get("access_token")
|
||||
if self._user_token and self._access_token:
|
||||
return
|
||||
|
||||
# If no valid cache, proceed with login
|
||||
self.log.info("Authenticating with username and password")
|
||||
headers = {
|
||||
"referer": "android-app://com.vidio.android",
|
||||
"x-api-platform": "app-android",
|
||||
"x-api-auth": self.API_AUTH,
|
||||
"user-agent": self.USER_AGENT,
|
||||
"x-api-app-info": self.API_APP_INFO,
|
||||
"accept-language": "en",
|
||||
"content-type": "application/x-www-form-urlencoded",
|
||||
"x-visitor-id": self.VISITOR_ID,
|
||||
}
|
||||
|
||||
data = f"login={self._email}&password={password}"
|
||||
r = self.session.post("https://api.vidio.com/api/login", headers=headers, data=data)
|
||||
r.raise_for_status()
|
||||
|
||||
auth_data = r.json()
|
||||
self._user_token = auth_data["auth"]["authentication_token"]
|
||||
self._access_token = auth_data["auth_tokens"]["access_token"]
|
||||
self.log.info(f"Authenticated as {self._email}")
|
||||
|
||||
try:
|
||||
expires_at_str = auth_data["auth_tokens"]["access_token_expires_at"]
|
||||
expires_at_dt = datetime.fromisoformat(expires_at_str)
|
||||
now_utc = datetime.now(timezone.utc)
|
||||
expiration_in_seconds = max(0, int((expires_at_dt - now_utc).total_seconds()))
|
||||
self.log.info(f"Token expires in {expiration_in_seconds / 60:.2f} minutes.")
|
||||
except (KeyError, ValueError) as e:
|
||||
self.log.warning(f"Could not parse token expiration: {e}. Defaulting to 1 hour.")
|
||||
expiration_in_seconds = 3600
|
||||
|
||||
cache.set({
|
||||
"user_token": self._user_token,
|
||||
"access_token": self._access_token
|
||||
}, expiration=expiration_in_seconds)
|
||||
|
||||
def _headers(self):
|
||||
if not self._user_token or not self._access_token:
|
||||
raise RuntimeError("Not authenticated. Call authenticate() first.")
|
||||
return {
|
||||
"referer": "android-app://com.vidio.android",
|
||||
"x-api-platform": "app-android",
|
||||
"x-api-auth": self.API_AUTH,
|
||||
"user-agent": self.USER_AGENT,
|
||||
"x-api-app-info": self.API_APP_INFO,
|
||||
"x-visitor-id": self.VISITOR_ID,
|
||||
"x-user-email": self._email,
|
||||
"x-user-token": self._user_token,
|
||||
"x-authorization": self._access_token,
|
||||
"accept-language": "en",
|
||||
"accept": "application/json",
|
||||
"accept-charset": "UTF-8",
|
||||
"content-type": "application/vnd.api+json",
|
||||
}
|
||||
|
||||
    def _extract_subtitles_from_mpd(self, mpd_url: str) -> list[Subtitle]:
        """Manually parse the MPD to extract subtitle tracks.

        Handles plain (non-segmented) WebVTT representations, the format Vidio
        uses for free content. Runs best-effort: any parsing failure is logged
        and an empty/partial list is returned rather than raising.

        :param mpd_url: absolute URL of the DASH manifest.
        :return: Subtitle objects, possibly empty.
        """
        subtitles = []

        try:
            r = self.session.get(mpd_url)
            r.raise_for_status()
            mpd_content = r.text

            # Base URL for resolving relative segment paths in the manifest.
            base_url = mpd_url.rsplit('/', 1)[0] + '/'

            # Strip the default namespace so plain tag names work in findall().
            mpd_content_clean = re.sub(r'\sxmlns="[^"]+"', '', mpd_content)
            root = ET.fromstring(mpd_content_clean)

            for adaptation_set in root.findall('.//AdaptationSet'):
                content_type = adaptation_set.get('contentType', '')

                # Only text adaptation sets carry subtitles.
                if content_type != 'text':
                    continue

                lang = adaptation_set.get('lang', 'und')

                for rep in adaptation_set.findall('Representation'):
                    mime_type = rep.get('mimeType', '')

                    # Handle plain VTT (free content).
                    if mime_type == 'text/vtt':
                        segment_list = rep.find('SegmentList')
                        if segment_list is not None:
                            for segment_url in segment_list.findall('SegmentURL'):
                                media = segment_url.get('media')
                                if media:
                                    full_url = urljoin(base_url, media)

                                    # A "-auto" suffix on the lang tag marks
                                    # auto-generated subtitles; strip it before
                                    # language parsing.
                                    is_auto = '-auto' in lang
                                    clean_lang = lang.replace('-auto', '')

                                    subtitle = Subtitle(
                                        # Stable short ID derived from the URL.
                                        id_=md5(full_url.encode()).hexdigest()[0:16],
                                        url=full_url,
                                        codec=Subtitle.Codec.WebVTT,
                                        language=Language.get(clean_lang),
                                        forced=False,
                                        sdh=False,
                                    )

                                    subtitles.append(subtitle)
                                    self.log.debug(f"Found VTT subtitle: {lang} -> {full_url}")

        except Exception as e:
            # Deliberate best-effort: subtitles are optional extras here.
            self.log.warning(f"Failed to extract subtitles from MPD: {e}")

        return subtitles
|
||||
|
||||
    def get_titles(self) -> Titles_T:
        """Return the Movie or Series (all seasons/episodes) for the parsed URL.

        Movies: a single detail request. Series: resolves season playlists via
        the content-profile metadata (falling back to name heuristics), then
        pages through each playlist's videos, numbering episodes sequentially
        per season.

        :raises ValueError: when a series yields no season playlists/episodes.
        """
        headers = self._headers()

        if self.is_movie:
            r = self.session.get(f"https://api.vidio.com/api/videos/{self.content_id}/detail", headers=headers)
            r.raise_for_status()
            video_data = r.json()["video"]
            # Best-effort year from the first 4 chars of the publish date.
            year = None
            if video_data.get("publish_date"):
                try:
                    year = int(video_data["publish_date"][:4])
                except (ValueError, TypeError):
                    pass
            return Movies([
                Movie(
                    id_=video_data["id"],
                    service=self.__class__,
                    name=video_data["title"],
                    description=video_data.get("description", ""),
                    year=year,
                    language=Language.get("id"),  # Indonesian assumed for all content
                    data=video_data,
                )
            ])
        else:
            r = self.session.get(f"https://api.vidio.com/content_profiles/{self.content_id}", headers=headers)
            r.raise_for_status()
            root = r.json()["data"]
            series_title = root["attributes"]["title"]

            r_playlists = self.session.get(
                f"https://api.vidio.com/content_profiles/{self.content_id}/playlists",
                headers=headers
            )
            r_playlists.raise_for_status()
            playlists_data = r_playlists.json()

            # Preferred path: the response metadata explicitly tags which
            # playlists are seasons.
            season_playlist_ids = set()
            if "meta" in playlists_data and "playlist_group" in playlists_data["meta"]:
                for group in playlists_data["meta"]["playlist_group"]:
                    if group.get("type") == "season":
                        season_playlist_ids.update(group.get("playlist_ids", []))

            # Collect season playlists, falling back to name heuristics when
            # the metadata gave us nothing.
            season_playlists = []
            for pl in playlists_data["data"]:
                playlist_id = int(pl["id"])
                name = pl["attributes"]["name"].lower()

                if season_playlist_ids:
                    if playlist_id in season_playlist_ids:
                        season_playlists.append(pl)
                else:
                    if ("season" in name or name == "episode" or name == "episodes") and \
                            "trailer" not in name and "extra" not in name:
                        season_playlists.append(pl)

            if not season_playlists:
                raise ValueError("No season playlists found for this series.")

            def extract_season_number(pl):
                # Parse "Season N" from the playlist name; generic names map
                # to season 1, anything else to 0 (normalised to 1 below).
                name = pl["attributes"]["name"]
                match = re.search(r"season\s*(\d+)", name, re.IGNORECASE)
                if match:
                    return int(match.group(1))
                elif name.lower() in ["season", "episodes", "episode"]:
                    return 1
                else:
                    return 0

            season_playlists.sort(key=extract_season_number)

            all_episodes = []

            for playlist in season_playlists:
                playlist_id = playlist["id"]
                season_number = extract_season_number(playlist)

                if season_number == 0:
                    season_number = 1

                self.log.debug(f"Processing playlist '{playlist['attributes']['name']}' as Season {season_number}")

                # Page through the playlist; "links.next" drives pagination.
                page = 1
                while True:
                    r_eps = self.session.get(
                        f"https://api.vidio.com/content_profiles/{self.content_id}/playlists/{playlist_id}/videos",
                        params={
                            "page[number]": page,
                            "page[size]": 20,
                            "sort": "order",
                            "included": "upcoming_videos"
                        },
                        headers=headers,
                    )
                    r_eps.raise_for_status()
                    page_data = r_eps.json()

                    for raw_ep in page_data["data"]:
                        attrs = raw_ep["attributes"]
                        # Episode number = count of episodes already collected
                        # for this season + 1 (API order is authoritative).
                        ep_number = len([e for e in all_episodes if e.season == season_number]) + 1
                        all_episodes.append(
                            Episode(
                                id_=int(raw_ep["id"]),
                                service=self.__class__,
                                title=series_title,
                                season=season_number,
                                number=ep_number,
                                name=attrs["title"],
                                description=attrs.get("description", ""),
                                language=Language.get("id"),
                                data=raw_ep,
                            )
                        )

                    if not page_data["links"].get("next"):
                        break
                    page += 1

            if not all_episodes:
                raise ValueError("No episodes found in any season.")

            return Series(all_episodes)
|
||||
|
||||
    def get_tracks(self, title: Title_T) -> Tracks:
        """Resolve the stream for a title and return its tracks.

        Requests the Android-client stream endpoint, then picks the transport:
        DASH when Widevine DRM is present (caching license state for
        get_widevine_license()), otherwise HLS with subtitles re-attached from
        the DASH manifest, with DASH as a last-resort fallback.

        :raises ValueError: when the stream payload is invalid or neither a
            DASH nor an HLS URL is available.
        """
        headers = self._headers()
        # Device fingerprint headers matching the Android client identifiers.
        headers.update({
            "x-device-brand": "samsung",
            "x-device-model": "SM-A525F",
            "x-device-form-factor": "phone",
            "x-device-soc": "Qualcomm SM7125",
            "x-device-os": "Android 15 (API 35)",
            "x-device-android-mpc": "0",
            "x-device-cpu-arch": "arm64-v8a",
            "x-device-platform": "android",
            "x-app-version": "7.14.6-e4d1de87f2-3191683",
        })

        video_id = str(title.id)
        url = f"https://api.vidio.com/api/stream/v1/video_data/{video_id}?initialize=true"

        r = self.session.get(url, headers=headers)
        r.raise_for_status()
        stream = r.json()

        if not isinstance(stream, dict):
            raise ValueError("Vidio returned invalid stream data.")

        # Extract DRM info (PallyCon custom data + license server URL).
        custom_data = stream.get("custom_data") or {}
        license_servers = stream.get("license_servers") or {}
        widevine_data = custom_data.get("widevine") if isinstance(custom_data, dict) else None
        license_url = license_servers.get("drm_license_url") if isinstance(license_servers, dict) else None

        # HLS URLs, in order of preference.
        hls_url = (
            stream.get("stream_hls_url") or
            stream.get("stream_token_hls_url") or
            stream.get("stream_token_url")  # also HLS (m3u8)
        )

        # DASH URLs.
        dash_url = stream.get("stream_dash_url") or stream.get("stream_token_dash_url")

        # DRM only applies when all three pieces are present and well-formed.
        has_drm = widevine_data and license_url and dash_url and isinstance(widevine_data, str)

        if has_drm:
            # DRM content: must use DASH.
            self.log.info("Widevine DRM detected, using DASH")
            # Cache license state for get_widevine_license().
            self.custom_data = widevine_data
            self.license_url = license_url
            tracks = DASH.from_url(dash_url, session=self.session).to_tracks(language=title.language)

        elif hls_url:
            # Non-DRM: prefer HLS (H.264, proper frame_rate metadata).
            self.log.info("No DRM detected, using HLS")
            self.custom_data = None
            self.license_url = None
            tracks = HLS.from_url(hls_url, session=self.session).to_tracks(language=title.language)

            # Drop HLS subtitles (segmented; incompatible downstream).
            if tracks.subtitles:
                self.log.debug("Clearing HLS subtitles (incompatible format)")
                tracks.subtitles.clear()

            # Re-attach subtitles from the DASH manifest (plain VTT) if present.
            if dash_url:
                self.log.debug("Extracting subtitles from DASH manifest")
                manual_subs = self._extract_subtitles_from_mpd(dash_url)
                if manual_subs:
                    for sub in manual_subs:
                        tracks.add(sub)
                    self.log.info(f"Added {len(manual_subs)} subtitle tracks from DASH")

        elif dash_url:
            # Last resort: non-DRM DASH when no HLS exists.
            self.log.warning("No HLS available, using DASH (VP9 codec - may have issues)")
            self.custom_data = None
            self.license_url = None
            tracks = DASH.from_url(dash_url, session=self.session).to_tracks(language=title.language)

            # Try manual subtitle extraction for non-DRM DASH.
            if not tracks.subtitles:
                manual_subs = self._extract_subtitles_from_mpd(dash_url)
                if manual_subs:
                    for sub in manual_subs:
                        tracks.add(sub)
        else:
            raise ValueError("No playable stream (DASH or HLS) available.")

        self.log.info(f"Found {len(tracks.videos)} video tracks, {len(tracks.audio)} audio tracks, {len(tracks.subtitles)} subtitle tracks")

        return tracks
|
||||
|
||||
def get_chapters(self, title: Title_T) -> list[Chapter]:
|
||||
return []
|
||||
|
||||
def search(self):
|
||||
raise NotImplementedError("Search not implemented for Vidio.")
|
||||
|
||||
def get_widevine_service_certificate(self, **_) -> Union[bytes, str, None]:
|
||||
return None
|
||||
|
||||
def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
|
||||
if not self.license_url or not self.custom_data:
|
||||
raise ValueError("DRM license info missing.")
|
||||
|
||||
headers = {
|
||||
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:143.0) Gecko/20100101 Firefox/143.0",
|
||||
"Referer": "https://www.vidio.com/",
|
||||
"Origin": "https://www.vidio.com",
|
||||
"pallycon-customdata-v2": self.custom_data,
|
||||
"Content-Type": "application/octet-stream",
|
||||
}
|
||||
|
||||
self.log.debug(f"Requesting Widevine license from: {self.license_url}")
|
||||
response = self.session.post(
|
||||
self.license_url,
|
||||
data=challenge,
|
||||
headers=headers
|
||||
)
|
||||
|
||||
if not response.ok:
|
||||
error_summary = response.text[:200] if response.text else "No response body"
|
||||
raise Exception(f"License request failed ({response.status_code}): {error_summary}")
|
||||
|
||||
return response.content
|
||||
|
||||
@ -1,5 +0,0 @@
|
||||
# Vidio service configuration.
# NOTE(review): the service code currently hard-codes these URLs inline via
# f-strings; confirm whether these entries are still read anywhere.
endpoints:
  content_profile: "https://api.vidio.com/content_profiles/{content_id}"
  playlists: "https://api.vidio.com/content_profiles/{content_id}/playlists"
  playlist_videos: "https://api.vidio.com/content_profiles/{content_id}/playlists/{playlist_id}/videos"
  stream: "https://api.vidio.com/api/stream/v1/video_data/{video_id}?initialize=true"
|
||||
328
VIKI/__init__.py
328
VIKI/__init__.py
@ -1,328 +0,0 @@
|
||||
import base64
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
from http.cookiejar import CookieJar
|
||||
from typing import Optional, Generator
|
||||
|
||||
import click
|
||||
from unshackle.core.search_result import SearchResult
|
||||
from unshackle.core.constants import AnyTrack
|
||||
from unshackle.core.credential import Credential
|
||||
from unshackle.core.manifests import DASH
|
||||
from unshackle.core.service import Service
|
||||
from unshackle.core.titles import Movie, Movies, Series, Episode, Title_T, Titles_T
|
||||
from unshackle.core.tracks import Chapter, Tracks, Subtitle
|
||||
from unshackle.core.drm import Widevine
|
||||
from langcodes import Language
|
||||
|
||||
|
||||
class VIKI(Service):
|
||||
"""
|
||||
Service code for Rakuten Viki (viki.com)
|
||||
Version: 1.4.0
|
||||
|
||||
Authorization: Required cookies (_viki_session, device_id).
|
||||
Security: FHD @ L3 (Widevine)
|
||||
|
||||
Supports:
|
||||
• Movies and TV Series
|
||||
"""
|
||||
|
||||
TITLE_RE = r"^(?:https?://(?:www\.)?viki\.com)?/(?:movies|tv)/(?P<id>\d+c)-.+$"
|
||||
GEOFENCE = ()
|
||||
NO_SUBTITLES = False
|
||||
|
||||
    @staticmethod
    @click.command(name="VIKI", short_help="https://viki.com")
    @click.argument("title", type=str)
    @click.pass_context
    def cli(ctx, **kwargs):
        # Click entry point: instantiate the service with the parsed CLI args.
        return VIKI(ctx, **kwargs)
|
||||
|
||||
def __init__(self, ctx, title: str):
|
||||
super().__init__(ctx)
|
||||
|
||||
m = re.match(self.TITLE_RE, title)
|
||||
if not m:
|
||||
self.search_term = title
|
||||
self.title_url = None
|
||||
return
|
||||
|
||||
self.container_id = m.group("id")
|
||||
self.title_url = title
|
||||
self.video_id: Optional[str] = None
|
||||
self.api_access_key: Optional[str] = None
|
||||
self.drm_license_url: Optional[str] = None
|
||||
|
||||
self.cdm = ctx.obj.cdm
|
||||
if self.config is None:
|
||||
raise EnvironmentError("Missing service config for VIKI.")
|
||||
|
||||
def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
|
||||
super().authenticate(cookies, credential)
|
||||
|
||||
if not cookies:
|
||||
raise PermissionError("VIKI requires a cookie file for authentication.")
|
||||
|
||||
session_cookie = next((c for c in cookies if c.name == "_viki_session"), None)
|
||||
device_cookie = next((c for c in cookies if c.name == "device_id"), None)
|
||||
|
||||
if not session_cookie or not device_cookie:
|
||||
raise PermissionError("Your cookie file is missing '_viki_session' or 'device_id'.")
|
||||
|
||||
self.session.headers.update({
|
||||
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:143.0) Gecko/20100101 Firefox/143.0",
|
||||
"X-Viki-App-Ver": "14.64.0",
|
||||
"X-Viki-Device-ID": device_cookie.value,
|
||||
"Origin": "https://www.viki.com",
|
||||
"Referer": "https://www.viki.com/",
|
||||
})
|
||||
self.log.info("VIKI authentication cookies loaded successfully.")
|
||||
|
||||
def get_titles(self) -> Titles_T:
|
||||
if not self.title_url:
|
||||
raise ValueError("No URL provided to process.")
|
||||
|
||||
self.log.debug(f"Scraping page for API access key: {self.title_url}")
|
||||
r_page = self.session.get(self.title_url)
|
||||
r_page.raise_for_status()
|
||||
|
||||
match = re.search(r'"token":"([^"]+)"', r_page.text)
|
||||
if not match:
|
||||
raise RuntimeError("Failed to extract API access key from page source.")
|
||||
|
||||
self.api_access_key = match.group(1)
|
||||
self.log.debug(f"Extracted API access key: {self.api_access_key[:10]}...")
|
||||
|
||||
url = self.config["endpoints"]["container"].format(container_id=self.container_id)
|
||||
params = {
|
||||
"app": self.config["params"]["app"],
|
||||
"token": self.api_access_key,
|
||||
}
|
||||
r = self.session.get(url, params=params)
|
||||
r.raise_for_status()
|
||||
data = r.json()
|
||||
|
||||
content_type = data.get("type")
|
||||
if content_type == "film":
|
||||
return self._parse_movie(data)
|
||||
elif content_type == "series":
|
||||
return self._parse_series(data)
|
||||
else:
|
||||
self.log.error(f"Unknown content type '{content_type}' found.")
|
||||
return Movies([])
|
||||
|
||||
def _parse_movie(self, data: dict) -> Movies:
|
||||
name = data.get("titles", {}).get("en", "Unknown Title")
|
||||
year = int(data["created_at"][:4]) if "created_at" in data else None
|
||||
description = data.get("descriptions", {}).get("en", "")
|
||||
original_lang_code = data.get("origin", {}).get("language", "en")
|
||||
self.video_id = data.get("watch_now", {}).get("id")
|
||||
|
||||
if not self.video_id:
|
||||
raise ValueError(f"Could not find a playable video ID for container {self.container_id}.")
|
||||
|
||||
return Movies([
|
||||
Movie(
|
||||
id_=self.container_id,
|
||||
service=self.__class__,
|
||||
name=name,
|
||||
year=year,
|
||||
description=description,
|
||||
language=Language.get(original_lang_code),
|
||||
data=data,
|
||||
)
|
||||
])
|
||||
|
||||
def _parse_series(self, data: dict) -> Series:
|
||||
"""Parse series metadata and fetch episodes."""
|
||||
series_name = data.get("titles", {}).get("en", "Unknown Title")
|
||||
year = int(data["created_at"][:4]) if "created_at" in data else None
|
||||
description = data.get("descriptions", {}).get("en", "")
|
||||
original_lang_code = data.get("origin", {}).get("language", "en")
|
||||
|
||||
self.log.info(f"Parsing series: {series_name}")
|
||||
|
||||
# Fetch episode list IDs
|
||||
episodes_url = self.config["endpoints"]["episodes"].format(container_id=self.container_id)
|
||||
params = {
|
||||
"app": self.config["params"]["app"],
|
||||
"token": self.api_access_key,
|
||||
"direction": "asc",
|
||||
"with_upcoming": "true",
|
||||
"sort": "number",
|
||||
"blocked": "true",
|
||||
"only_ids": "true"
|
||||
}
|
||||
|
||||
r = self.session.get(episodes_url, params=params)
|
||||
r.raise_for_status()
|
||||
episodes_data = r.json()
|
||||
|
||||
episode_ids = episodes_data.get("response", [])
|
||||
self.log.info(f"Found {len(episode_ids)} episodes")
|
||||
|
||||
episodes = []
|
||||
for idx, ep_id in enumerate(episode_ids, 1):
|
||||
# Fetch individual episode metadata
|
||||
ep_url = self.config["endpoints"]["episode_meta"].format(video_id=ep_id)
|
||||
ep_params = {
|
||||
"app": self.config["params"]["app"],
|
||||
"token": self.api_access_key,
|
||||
}
|
||||
|
||||
try:
|
||||
r_ep = self.session.get(ep_url, params=ep_params)
|
||||
r_ep.raise_for_status()
|
||||
ep_data = r_ep.json()
|
||||
|
||||
ep_number = ep_data.get("number", idx)
|
||||
ep_title = ep_data.get("titles", {}).get("en", "")
|
||||
ep_description = ep_data.get("descriptions", {}).get("en", "")
|
||||
|
||||
# If no episode title, use generic name
|
||||
if not ep_title:
|
||||
ep_title = f"Episode {ep_number}"
|
||||
|
||||
# Store the video_id in the data dict
|
||||
ep_data["video_id"] = ep_id
|
||||
|
||||
self.log.debug(f"Episode {ep_number}: {ep_title} ({ep_id})")
|
||||
|
||||
episodes.append(
|
||||
Episode(
|
||||
id_=ep_id,
|
||||
service=self.__class__,
|
||||
title=series_name, # Series title
|
||||
season=1, # VIKI typically doesn't separate seasons clearly
|
||||
number=ep_number,
|
||||
name=ep_title, # Episode title
|
||||
description=ep_description,
|
||||
language=Language.get(original_lang_code),
|
||||
data=ep_data
|
||||
)
|
||||
)
|
||||
except Exception as e:
|
||||
self.log.warning(f"Failed to fetch episode {ep_id}: {e}")
|
||||
# Create a basic episode entry even if metadata fetch fails
|
||||
episodes.append(
|
||||
Episode(
|
||||
id_=ep_id,
|
||||
service=self.__class__,
|
||||
title=series_name,
|
||||
season=1,
|
||||
number=idx,
|
||||
name=f"Episode {idx}",
|
||||
description="",
|
||||
language=Language.get(original_lang_code),
|
||||
data={"video_id": ep_id} # Store video_id in data
|
||||
)
|
||||
)
|
||||
|
||||
# Return Series with just the episodes list
|
||||
return Series(episodes)
|
||||
|
||||
def get_tracks(self, title: Title_T) -> Tracks:
|
||||
# For episodes, get the video_id from the data dict
|
||||
if isinstance(title, Episode):
|
||||
self.video_id = title.data.get("video_id")
|
||||
if not self.video_id:
|
||||
# Fallback to episode id if video_id not in data
|
||||
self.video_id = title.data.get("id")
|
||||
elif not self.video_id:
|
||||
raise RuntimeError("video_id not set. Call get_titles() first.")
|
||||
|
||||
if not self.video_id:
|
||||
raise ValueError("Could not determine video_id for this title")
|
||||
|
||||
self.log.info(f"Getting tracks for video ID: {self.video_id}")
|
||||
|
||||
url = self.config["endpoints"]["playback"].format(video_id=self.video_id)
|
||||
r = self.session.get(url)
|
||||
r.raise_for_status()
|
||||
data = r.json()
|
||||
|
||||
# Get the DRM-protected manifest from queue
|
||||
manifest_url = None
|
||||
for item in data.get("queue", []):
|
||||
if item.get("type") == "video" and item.get("format") == "mpd":
|
||||
manifest_url = item.get("url")
|
||||
break
|
||||
|
||||
if not manifest_url:
|
||||
raise ValueError("No DRM-protected manifest URL found in queue")
|
||||
|
||||
self.log.debug(f"Found DRM-protected manifest URL: {manifest_url}")
|
||||
|
||||
# Create headers for manifest download
|
||||
manifest_headers = {
|
||||
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:143.0) Gecko/20100101 Firefox/143.0",
|
||||
"Accept": "*/*",
|
||||
"Accept-Language": "en",
|
||||
"Accept-Encoding": "gzip, deflate, br, zstd",
|
||||
"X-Viki-App-Ver": "14.64.0",
|
||||
"X-Viki-Device-ID": self.session.headers.get("X-Viki-Device-ID", ""),
|
||||
"Origin": "https://www.viki.com",
|
||||
"Referer": "https://www.viki.com/",
|
||||
"Connection": "keep-alive",
|
||||
"Sec-Fetch-Dest": "empty",
|
||||
"Sec-Fetch-Mode": "cors",
|
||||
"Sec-Fetch-Site": "cross-site",
|
||||
"Pragma": "no-cache",
|
||||
"Cache-Control": "no-cache",
|
||||
}
|
||||
|
||||
# Parse tracks from the DRM-protected manifest
|
||||
tracks = DASH.from_url(manifest_url, session=self.session).to_tracks(language=title.language)
|
||||
|
||||
# Subtitles
|
||||
title_language = title.language.language
|
||||
subtitles = []
|
||||
for sub in data.get("subtitles", []):
|
||||
sub_url = sub.get("src")
|
||||
lang_code = sub.get("srclang")
|
||||
if not sub_url or not lang_code:
|
||||
continue
|
||||
|
||||
subtitles.append(
|
||||
Subtitle(
|
||||
id_=lang_code,
|
||||
url=sub_url,
|
||||
language=Language.get(lang_code),
|
||||
is_original_lang=lang_code == title_language,
|
||||
codec=Subtitle.Codec.WebVTT,
|
||||
name=sub.get("label", lang_code.upper()).split(" (")[0]
|
||||
)
|
||||
)
|
||||
tracks.subtitles = subtitles
|
||||
|
||||
# Store DRM license URL (only dt3) at service level
|
||||
drm_b64 = data.get("drm")
|
||||
if drm_b64:
|
||||
drm_data = json.loads(base64.b64decode(drm_b64))
|
||||
self.drm_license_url = drm_data.get("dt3") # Use dt3 as requested
|
||||
else:
|
||||
self.log.warning("No DRM info found, assuming unencrypted stream.")
|
||||
|
||||
return tracks
|
||||
|
||||
def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
|
||||
if not hasattr(self, 'drm_license_url') or not self.drm_license_url:
|
||||
raise ValueError("DRM license URL not available.")
|
||||
|
||||
r = self.session.post(
|
||||
self.drm_license_url,
|
||||
data=challenge,
|
||||
headers={"Content-type": "application/octet-stream"}
|
||||
)
|
||||
r.raise_for_status()
|
||||
return r.content
|
||||
|
||||
def search(self) -> Generator[SearchResult, None, None]:
|
||||
self.log.warning("Search not yet implemented for VIKI.")
|
||||
return
|
||||
yield
|
||||
|
||||
def get_chapters(self, title: Title_T) -> list[Chapter]:
|
||||
return []
|
||||
@ -1,8 +0,0 @@
|
||||
# Service configuration for the VIKI (Rakuten Viki) plugin.

params:
  # Client app identifier sent on every api.viki.io request.
  app: "100000a"
endpoints:
  # Container (movie/series) metadata by container id (e.g. "12345c").
  container: "https://api.viki.io/v4/containers/{container_id}.json"
  # Ordered episode-id listing for a series container.
  episodes: "https://api.viki.io/v4/series/{container_id}/episodes.json" # New
  # Per-episode metadata by video id.
  episode_meta: "https://api.viki.io/v4/videos/{video_id}.json" # New
  # Playback payload: manifest queue, subtitles, and base64 DRM info.
  playback: "https://www.viki.com/api/videos/{video_id}"
  # Site-wide search (currently unused by the service code).
  search: "https://api.viki.io/v4/search/all.json"
|
||||
465
VLD/__init__.py
465
VLD/__init__.py
@ -1,465 +0,0 @@
|
||||
import re
|
||||
import uuid
|
||||
from collections.abc import Generator
|
||||
from http.cookiejar import CookieJar
|
||||
from typing import Optional, Union
|
||||
|
||||
import click
|
||||
from langcodes import Language
|
||||
|
||||
from unshackle.core.constants import AnyTrack
|
||||
from unshackle.core.credential import Credential
|
||||
from unshackle.core.manifests import DASH
|
||||
from unshackle.core.search_result import SearchResult
|
||||
from unshackle.core.service import Service
|
||||
from unshackle.core.titles import Episode, Movie, Movies, Series, Title_T, Titles_T
|
||||
from unshackle.core.tracks import Chapter, Subtitle, Tracks
|
||||
|
||||
|
||||
class VLD(Service):
    """
    Service code for RTL's Dutch streaming service Videoland (https://v2.videoland.com)
    Version: 1.0.0

    Authorization: Credentials

    Security:
    - L1: >= 720p
    - L3: <= 576p

    They are using the license server of DRMToday with encoded streams from CastLabs.
    It accepts Non-Whitelisted CDMs so every unrevoked L1 CDM should work.

    Use full URL (for example - https://v2.videoland.com/title-p_12345) or title slug.
    """

    ALIASES = ("VLD", "videoland")
    TITLE_RE = r"^(?:https?://(?:www\.)?v2\.videoland\.com/)?(?P<title_id>[a-zA-Z0-9_-]+)"
    # Service is geo-locked to the Netherlands.
    GEOFENCE = ("NL",)

    @staticmethod
    @click.command(name="Videoland", short_help="https://v2.videoland.com")
    @click.argument("title", type=str)
    @click.option("-m", "--movie", is_flag=True, default=False, help="Specify if it's a movie")
    @click.pass_context
    def cli(ctx, **kwargs):
        """CLI entry point; constructs the service from the click context."""
        return VLD(ctx, **kwargs)

    def __init__(self, ctx, title: str, movie: bool):
        super().__init__(ctx)

        self.title = title
        self.movie = movie
        self.cdm = ctx.obj.cdm
        # Random, stable-for-this-run device identity sent during token exchange.
        self.device_id = str(uuid.uuid1().int)

        if self.config is None:
            raise Exception("Config is missing!")

        # Token cache is namespaced per CLI profile so accounts don't collide.
        profile_name = ctx.parent.params.get("profile")
        self.profile = profile_name if profile_name else "default"

        # Impersonate the Android TV client; its token grants the best streams.
        self.platform = self.config["platform"]["android_tv"]
        self.platform_token = "token-androidtv-3"

    def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
        """
        Authenticate with username/password, using cached tokens when possible.

        Raises:
            EnvironmentError: if no credentials were supplied.
        """
        super().authenticate(cookies, credential)
        if not credential or not credential.username or not credential.password:
            raise EnvironmentError("Service requires Credentials for Authentication.")

        self.credential = credential  # Store for potential re-auth

        self.session.headers.update({
            "origin": "https://v2.videoland.com",
            "x-client-release": self.config["sdk"]["version"],
            "x-customer-name": "rtlnl",
        })

        # Build cache key
        cache_key = f"tokens_{self.profile}"

        # Check cache first
        cache = self.cache.get(cache_key)

        if cache and not cache.expired:
            cached_data = cache.data
            # Only reuse the cache if it belongs to the same account.
            if isinstance(cached_data, dict) and cached_data.get("username") == credential.username:
                self.log.info("Using cached tokens")
                self._restore_from_cache(cached_data)
                return

        # Perform fresh login
        self.log.info("Retrieving new tokens")
        self._do_login(credential)

        # Cache the tokens
        self._cache_tokens(credential.username, cache_key)

    def _restore_from_cache(self, cached_data: dict) -> None:
        """Restore authentication state from cached data."""
        self.access_token = cached_data["access_token"]
        self.gigya_uid = cached_data["gigya_uid"]
        self.profile_id = cached_data["profile_id"]
        self.session.headers.update({"Authorization": f"Bearer {self.access_token}"})

    def _cache_tokens(self, username: str, cache_key: str) -> None:
        """Cache the current authentication tokens."""
        cache = self.cache.get(cache_key)
        cache.set(
            data={
                "username": username,
                "access_token": self.access_token,
                "gigya_uid": self.gigya_uid,
                "profile_id": self.profile_id,
            },
            expiration=3600  # 1 hour expiration, adjust as needed
        )

    def _do_login(self, credential: Credential) -> None:
        """Perform full login flow."""
        # Step 1: Authorize with Gigya
        auth_response = self.session.post(
            url=self.config["endpoints"]["authorization"],
            data={
                "loginID": credential.username,
                "password": credential.password,
                "sessionExpiration": "0",
                "targetEnv": "jssdk",
                "include": "profile,data",
                "includeUserInfo": "true",
                "lang": "nl",
                "ApiKey": self.config["sdk"]["apikey"],
                "authMode": "cookie",
                "pageURL": "https://v2.videoland.com/",
                "sdkBuild": self.config["sdk"]["build"],
                "format": "json",
            },
        ).json()

        if auth_response.get("errorMessage"):
            raise EnvironmentError(f"Could not authorize Videoland account: {auth_response['errorMessage']!r}")

        # Gigya returns a UID plus a signature pair used to vouch for it below.
        self.gigya_uid = auth_response["UID"]
        uid_signature = auth_response["UIDSignature"]
        signature_timestamp = auth_response["signatureTimestamp"]

        # Step 2: Get initial JWT token
        jwt_headers = {
            "x-auth-device-id": self.device_id,
            "x-auth-device-player-size-height": "3840",
            "x-auth-device-player-size-width": "2160",
            "X-Auth-gigya-signature": uid_signature,
            "X-Auth-gigya-signature-timestamp": signature_timestamp,
            "X-Auth-gigya-uid": self.gigya_uid,
            "X-Client-Release": self.config["sdk"]["version"],
            "X-Customer-Name": "rtlnl",
        }

        jwt_response = self.session.get(
            url=self.config["endpoints"]["jwt_tokens"].format(platform=self.platform),
            headers=jwt_headers,
        ).json()

        if jwt_response.get("error"):
            raise EnvironmentError(f"Could not get Access Token: {jwt_response['error']['message']!r}")

        initial_token = jwt_response["token"]

        # Step 3: Get profiles
        profiles_response = self.session.get(
            url=self.config["endpoints"]["profiles"].format(
                platform=self.platform,
                gigya=self.gigya_uid,
            ),
            headers={"Authorization": f"Bearer {initial_token}"},
        ).json()

        if isinstance(profiles_response, dict) and profiles_response.get("error"):
            raise EnvironmentError(f"Could not get profiles: {profiles_response['error']['message']!r}")

        # First profile on the account is used unconditionally.
        self.profile_id = profiles_response[0]["uid"]

        # Step 4: Get final JWT token with profile
        jwt_headers["X-Auth-profile-id"] = self.profile_id

        final_jwt_response = self.session.get(
            url=self.config["endpoints"]["jwt_tokens"].format(platform=self.platform),
            headers=jwt_headers,
        ).json()

        if final_jwt_response.get("error"):
            raise EnvironmentError(f"Could not get final Access Token: {final_jwt_response['error']['message']!r}")

        self.access_token = final_jwt_response["token"]
        self.session.headers.update({"Authorization": f"Bearer {self.access_token}"})

    def search(self) -> Generator[SearchResult, None, None]:
        """Search is intentionally unsupported for this service."""
        # Videoland doesn't have a documented search endpoint in the original code
        # This is a placeholder - you may need to implement based on actual API
        raise NotImplementedError("Search is not implemented for Videoland")

    def get_titles(self) -> Titles_T:
        """
        Resolve the given title slug/URL into a Movie or a Series of Episodes.

        Movie vs series is heuristically detected by the presence of the Dutch
        word "Seizoen" anywhere in the layout payload.
        """
        title_match = re.match(self.TITLE_RE, self.title)
        if not title_match:
            raise ValueError(f"Invalid title format: {self.title}")

        title_slug = title_match.group("title_id")

        # Handle folder URLs (e.g., title-f_12345)
        if re.match(r".+?-f_[0-9]+", title_slug):
            title_slug = self._get_program_title(title_slug)

        # Extract title ID from slug (e.g., "show-name-p_12345" -> "12345")
        title_id = title_slug.split("-p_")[-1] if "-p_" in title_slug else title_slug

        metadata = self.session.get(
            url=self.config["endpoints"]["layout"].format(
                platform=self.platform,
                token=self.platform_token,
                endpoint=f"program/{title_id}",
            ),
            params={"nbPages": "10"},
        ).json()

        # Check for API errors
        if isinstance(metadata, dict) and metadata.get("error"):
            raise ValueError(f"API Error: {metadata.get('message', 'Unknown error')}")

        # Determine if it's a movie based on metadata
        is_movie = "Seizoen" not in str(metadata)

        if is_movie:
            movie_info = metadata["blocks"][0]["content"]["items"][0]
            viewable_id = movie_info["itemContent"]["action"]["target"]["value_layout"]["id"]

            return Movies([
                Movie(
                    id_=movie_info["ucid"],
                    service=self.__class__,
                    name=metadata["entity"]["metadata"]["title"],
                    year=None,
                    language=Language.get("nl"),
                    data={
                        "viewable": viewable_id,
                        "metadata": metadata,
                    },
                )
            ])
        else:
            seasons = [
                block
                for block in metadata["blocks"]
                if block["featureId"] == "videos_by_season_by_program"
            ]

            # Fetch all episodes from all seasons with pagination
            for season in seasons:
                # NOTE(review): loops until the fetched item count matches
                # totalItems; if the API under-delivers or nextPage stalls this
                # could spin forever — confirm against real responses.
                while len(season["content"]["items"]) != season["content"]["pagination"]["totalItems"]:
                    season_data = self.session.get(
                        url=self.config["endpoints"]["seasoning"].format(
                            platform=self.platform,
                            token=self.platform_token,
                            program=title_id,
                            season_id=season["id"],
                        ),
                        params={
                            "nbPages": "10",
                            "page": season["content"]["pagination"]["nextPage"],
                        },
                    ).json()

                    for episode in season_data["content"]["items"]:
                        if episode not in season["content"]["items"]:
                            season["content"]["items"].append(episode)

                    season["content"]["pagination"]["nextPage"] = season_data["content"]["pagination"]["nextPage"]

            episodes = []
            for season in seasons:
                # Extract season number from title like "Seizoen 1" or "Season 1"
                season_title = season.get("title", {}).get("long", "")
                season_match = re.search(r"(\d+)", season_title)
                season_number = int(season_match.group(1)) if season_match else 1

                for idx, episode_data in enumerate(season["content"]["items"]):
                    # Get the extra title which contains episode info
                    extra_title = episode_data["itemContent"].get("extraTitle", "")

                    # Extract episode number from extraTitle like "1. Hondenadoptiedag" or "14. Een Draak Op School (Deel 1)"
                    episode_number = None
                    episode_name = extra_title

                    ep_match = re.match(r"^(\d+)\.\s*(.*)$", extra_title)
                    if ep_match:
                        episode_number = int(ep_match.group(1))
                        episode_name = ep_match.group(2)
                    else:
                        # Fallback to index + 1
                        episode_number = idx + 1

                    viewable_id = episode_data["itemContent"]["action"]["target"]["value_layout"]["id"]

                    episodes.append(
                        Episode(
                            id_=episode_data["ucid"],
                            service=self.__class__,
                            title=metadata["entity"]["metadata"]["title"],
                            season=season_number,
                            number=episode_number,
                            name=episode_name,
                            year=None,
                            language=Language.get("nl"),
                            data={
                                "viewable": viewable_id,
                                "episode_data": episode_data,
                            },
                        )
                    )

            # Sort episodes by season and episode number
            episodes = sorted(episodes, key=lambda ep: (ep.season, ep.number))

            return Series(episodes)

    def get_tracks(self, title: Title_T) -> Tracks:
        """
        Fetch the DASH manifest for *title*'s viewable and return its tracks.

        Side effects: stores the (PlayReady) PSSH found in the manifest on
        ``self.pssh_playready`` and the viewable id on ``self.current_viewable``.
        """
        viewable_id = title.data["viewable"]

        manifest_response = self.session.get(
            url=self.config["endpoints"]["layout"].format(
                platform=self.platform,
                token=self.platform_token,
                endpoint=f"video/{viewable_id}",
            ),
            params={"nbPages": "2"},
        ).json()

        player_block = next(
            (block for block in manifest_response["blocks"] if block["templateId"] == "Player"),
            None,
        )

        if not player_block:
            raise ValueError("Could not find player block in manifest")

        assets = player_block["content"]["items"][0]["itemContent"]["video"]["assets"]

        if not assets:
            raise ValueError("Failed to load content manifest - no assets found")

        # Prefer HD quality
        mpd_asset = next((asset for asset in assets if asset["quality"] == "hd"), None)
        if not mpd_asset:
            mpd_asset = next((asset for asset in assets if asset["quality"] == "sd"), None)

        if not mpd_asset:
            raise ValueError("No suitable quality stream found")

        mpd_url = mpd_asset["path"]

        # Extract PlayReady PSSH from manifest
        manifest_content = self.session.get(mpd_url).text
        pssh_matches = re.findall(r'<cenc:pssh>(.+?)</cenc:pssh>', manifest_content)

        self.pssh_playready = None
        for pssh in pssh_matches:
            # Length heuristic: PlayReady PSSH boxes are much longer than
            # Widevine ones — TODO confirm against real manifests.
            if len(pssh) > 200:
                self.pssh_playready = pssh
                break

        # Store viewable ID for license request
        self.current_viewable = viewable_id

        tracks = DASH.from_url(url=mpd_url, session=self.session).to_tracks(language=title.language)

        # Fix track URLs - replace CDN hostname
        for track in tracks:
            if hasattr(track, 'url') and track.url:
                if isinstance(track.url, list):
                    track.url = [
                        re.sub(
                            r"https://.+?\.videoland\.bedrock\.tech",
                            "https://origin.vod.videoland.bedrock.tech",
                            uri.split("?")[0],
                        )
                        for uri in track.url
                    ]
                elif isinstance(track.url, str):
                    track.url = re.sub(
                        r"https://.+?\.videoland\.bedrock\.tech",
                        "https://origin.vod.videoland.bedrock.tech",
                        track.url.split("?")[0],
                    )

        # Handle subtitles
        for subtitle in tracks.subtitles:
            if isinstance(subtitle.url, list) or (isinstance(subtitle.url, str) and "dash" in subtitle.url):
                subtitle.codec = Subtitle.Codec.SubRip
            else:
                self.log.warning("Unknown subtitle codec detected")

        return tracks

    def get_chapters(self, title: Title_T) -> list[Chapter]:
        """Videoland exposes no chapter data; always return an empty list."""
        return []

    def get_widevine_service_certificate(self, **_) -> Optional[str]:
        """Return the static DRMToday service certificate from config (if any)."""
        return self.config.get("certificate")

    def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> Optional[Union[bytes, str]]:
        """POST the Widevine challenge to DRMToday; returns the JSON "license" field."""
        license_token = self._get_license_token(title)

        response = self.session.post(
            url=self.config["endpoints"]["license_wv"],
            data=challenge,
            headers={"x-dt-auth-token": license_token},
        )

        if response.status_code != 200:
            raise ValueError(f"Failed to get Widevine license: {response.status_code}")

        return response.json().get("license")

    def get_playready_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> Optional[bytes]:
        """POST the PlayReady challenge to DRMToday; returns the raw license body."""
        license_token = self._get_license_token(title)

        response = self.session.post(
            url=self.config["endpoints"]["license_pr"],
            data=challenge,
            headers={"x-dt-auth-token": license_token},
        )

        if response.status_code != 200:
            raise ValueError(f"Failed to get PlayReady license: {response.status_code}")

        return response.content

    def _get_license_token(self, title: Title_T) -> str:
        """Fetch the per-clip upfront token required by the DRMToday license endpoints."""
        viewable_id = title.data["viewable"]

        response = self.session.get(
            url=self.config["endpoints"]["license_token"].format(
                platform=self.platform,
                gigya=self.gigya_uid,
                clip=viewable_id,
            ),
        ).json()

        return response["token"]

    def _get_program_title(self, folder_title: str) -> str:
        """Resolve a folder slug ("...-f_<id>") to its parent program slug ("<seo>-p_<id>")."""
        folder_id = folder_title.split("-f_")[1]

        response = self.session.get(
            url=self.config["endpoints"]["layout"].format(
                platform=self.platform,
                token=self.platform_token,
                endpoint=f"folder/{folder_id}",
            ),
            params={"nbPages": "2"},
        ).json()

        target = response["blocks"][0]["content"]["items"][0]["itemContent"]["action"]["target"]["value_layout"]
        parent_seo = target["parent"]["seo"]
        parent_id = target["parent"]["id"]

        return f"{parent_seo}-p_{parent_id}"
|
||||
@ -1,29 +0,0 @@
|
||||
# Service configuration for the VLD (Videoland) plugin.

# Widevine service certificate (base64) served to the CDM for privacy mode.
certificate: |
  CsECCAMSEBcFuRfMEgSGiwYzOi93KowYgrSCkgUijgIwggEKAoIBAQCZ7Vs7Mn2rXiTvw7YqlbWYUgrVvMs3UD4GRbgU2Ha430BRBEGtjOOtsRu4jE5yWl5
  KngeVKR1YWEAjp+GvDjipEnk5MAhhC28VjIeMfiG/+/7qd+EBnh5XgeikX0YmPRTmDoBYqGB63OBPrIRXsTeo1nzN6zNwXZg6IftO7L1KEMpHSQykfqpdQ4
  IY3brxyt4zkvE9b/tkQv0x4b9AsMYE0cS6TJUgpL+X7r1gkpr87vVbuvVk4tDnbNfFXHOggrmWEguDWe3OJHBwgmgNb2fG2CxKxfMTRJCnTuw3r0svAQxZ6
  ChD4lgvC2ufXbD8Xm7fZPvTCLRxG88SUAGcn1oJAgMBAAE6FGxpY2Vuc2Uud2lkZXZpbmUuY29tEoADrjRzFLWoNSl/JxOI+3u4y1J30kmCPN3R2jC5MzlR
  HrPMveoEuUS5J8EhNG79verJ1BORfm7BdqEEOEYKUDvBlSubpOTOD8S/wgqYCKqvS/zRnB3PzfV0zKwo0bQQQWz53ogEMBy9szTK/NDUCXhCOmQuVGE98K/
  PlspKkknYVeQrOnA+8XZ/apvTbWv4K+drvwy6T95Z0qvMdv62Qke4XEMfvKUiZrYZ/DaXlUP8qcu9u/r6DhpV51Wjx7zmVflkb1gquc9wqgi5efhn9joLK3
  /bNixbxOzVVdhbyqnFk8ODyFfUnaq3fkC3hR3f0kmYgI41sljnXXjqwMoW9wRzBMINk+3k6P8cbxfmJD4/Paj8FwmHDsRfuoI6Jj8M76H3CTsZCZKDJjM3B
  QQ6Kb2m+bQ0LMjfVDyxoRgvfF//M/EEkPrKWyU2C3YBXpxaBquO4C8A0ujVmGEEqsxN1HX9lu6c5OMm8huDxwWFd7OHMs3avGpr7RP7DUnTikXrh6X0

endpoints:
  # Bedrock "layout" API: program/video/folder metadata pages.
  layout: https://layout.videoland.bedrock.tech/front/v1/rtlnl/{platform}/main/{token}/{endpoint}/layout
  # Paginated per-season episode listing within a program.
  seasoning: https://layout.videoland.bedrock.tech/front/v1/rtlnl/{platform}/main/{token}/program/{program}/block/{season_id}
  # DRMToday license servers (PlayReady and Widevine).
  license_pr: https://lic.drmtoday.com/license-proxy-headerauth/drmtoday/RightsManager.asmx
  license_wv: https://lic.drmtoday.com/license-proxy-widevine/cenc/
  # Per-clip upfront token consumed via the x-dt-auth-token header.
  license_token: https://drm.videoland.bedrock.tech/v1/customers/rtlnl/platforms/{platform}/services/videoland/users/{gigya}/videos/{clip}/upfront-token
  # Gigya credential login; yields UID + signature for the JWT exchange.
  authorization: https://accounts.eu1.gigya.com/accounts.login
  # Bedrock JWT issuance and profile listing.
  jwt_tokens: https://front-auth.videoland.bedrock.tech/v2/platforms/{platform}/getJwt
  profiles: https://users.videoland.bedrock.tech/v2/platforms/{platform}/users/{gigya}/profiles

# Bedrock platform identifiers; the service code uses android_tv.
platform:
  web: m6group_web
  android_mob: m6group_android_mob
  android_tv: m6group_android_tv

# Gigya JS SDK identity used during login.
sdk:
  apikey: 3_W6BPwMz2FGQEfH4_nVRaj4Ak1F1XDp33an_8y8nXULn8nk43FHvPIpb0TLOYIaUI
  build: "13414"
  version: 5.47.2
|
||||
264
VRT/__init__.py
264
VRT/__init__.py
@ -1,264 +0,0 @@
|
||||
import json
|
||||
import re
|
||||
import time
|
||||
import base64
|
||||
import warnings # Added
|
||||
from http.cookiejar import CookieJar
|
||||
from typing import Optional, List
|
||||
from langcodes import Language
|
||||
|
||||
import click
|
||||
import jwt
|
||||
from bs4 import XMLParsedAsHTMLWarning # Added
|
||||
from collections.abc import Generator
|
||||
from unshackle.core.search_result import SearchResult
|
||||
from unshackle.core.constants import AnyTrack
|
||||
from unshackle.core.credential import Credential
|
||||
from unshackle.core.manifests import DASH
|
||||
from unshackle.core.service import Service
|
||||
from unshackle.core.titles import Episode, Movie, Movies, Series, Title_T, Titles_T
|
||||
from unshackle.core.tracks import Chapter, Tracks, Subtitle
|
||||
|
||||
# Ignore the BeautifulSoup XML warning caused by STPP subtitles
warnings.filterwarnings("ignore", category=XMLParsedAsHTMLWarning)


# GraphQL Fragments and Queries

# Shared fragment: the title and link of a tile in any VRT MAX tile list.
FRAGMENTS = """
fragment tileFragment on Tile {
  ... on ITile {
    title
    action { ... on LinkAction { link } }
  }
}
"""

# Program page query: resolves a program's tile-list ids, both at the top
# level and nested inside ContainerNavigation tabs (e.g. per-season tabs).
QUERY_PROGRAM = """
query VideoProgramPage($pageId: ID!) {
  page(id: $pageId) {
    ... on ProgramPage {
      title
      components {
        __typename
        ... on PaginatedTileList { listId title }
        ... on StaticTileList { listId title }
        ... on ContainerNavigation {
          items {
            title
            components {
              __typename
              ... on PaginatedTileList { listId }
              ... on StaticTileList { listId }
            }
          }
        }
      }
    }
  }
}
"""

# Tile-list query: pages through a PaginatedTileList 50 tiles at a time
# (cursor in $after), or returns a StaticTileList in one shot.
QUERY_PAGINATED_LIST = FRAGMENTS + """
query PaginatedTileListPage($listId: ID!, $after: ID) {
  list(listId: $listId) {
    ... on PaginatedTileList {
      paginatedItems(first: 50, after: $after) {
        edges { node { ...tileFragment } }
        pageInfo { endCursor hasNextPage }
      }
    }
    ... on StaticTileList {
      items { ...tileFragment }
    }
  }
}
"""

# Playback page query: yields the streamId(s) needed to request a stream.
QUERY_PLAYBACK = """
query EpisodePage($pageId: ID!) {
  page(id: $pageId) {
    ... on PlaybackPage {
      title
      player { modes { streamId } }
    }
  }
}
"""
|
||||
|
||||
class VRT(Service):
    """
    Service code for VRT MAX (vrt.be)
    Version: 2.1.1
    Auth: Gigya + OIDC flow
    Security: FHD @ L3 (Widevine)

    Supports:
    - Movies: https://www.vrt.be/vrtmax/a-z/rikkie-de-ooievaar-2/
    - Series: https://www.vrt.be/vrtmax/a-z/schaar-steen-papier/
    """

    TITLE_RE = r"^(?:https?://(?:www\.)?vrt\.be/vrtmax/a-z/)?(?P<slug>[^/]+)(?:/(?P<season_num>\d+)/(?P<episode_slug>[^/]+))?/?$"

    @staticmethod
    @click.command(name="VRT", short_help="https://www.vrt.be/vrtmax/")
    @click.argument("title", type=str)
    @click.pass_context
    def cli(ctx, **kwargs):
        return VRT(ctx, **kwargs)

    def __init__(self, ctx, title: str):
        super().__init__(ctx)
        self.cdm = ctx.obj.cdm

        m = re.match(self.TITLE_RE, title)
        if m:
            self.slug = m.group("slug")
            # No episode slug in the URL means we were given the series root page.
            self.is_series_root = m.group("episode_slug") is None
            if "vrtmax/a-z" in title:
                # Full URL given: reuse its path (minus any query string) as the page id.
                self.page_id = "/" + title.split("vrt.be/")[1].split("?")[0]
            else:
                # Bare slug given: rebuild the canonical a-z path.
                self.page_id = f"/vrtmax/a-z/{self.slug}/"
        else:
            self.search_term = title

        self.access_token = None
        self.video_token = None
        # Set by get_tracks() from the aggregator response; consumed by
        # get_widevine_license() as the x-vudrm-token header.
        self.drm_token = None

    def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
        """
        Log in via Gigya and exchange the Gigya session for VRT OIDC tokens.

        Uses a cached token pair when one is present and unexpired; otherwise
        performs the full Gigya -> SSO flow and caches the result until just
        before the access token's JWT `exp`.

        Raises:
            PermissionError: if Gigya rejects the credentials or the SSO
                response does not contain the expected token payload.
        """
        cache = self.cache.get("auth_data")
        if cache and not cache.expired:
            self.log.info("Using cached VRT session.")
            self.access_token = cache.data["access_token"]
            self.video_token = cache.data["video_token"]
            return

        # Anonymous playback of free content is possible, so missing
        # credentials are not an error — just skip authentication.
        if not credential or not credential.username or not credential.password:
            return

        self.log.info(f"Logging in to VRT as {credential.username}...")
        login_params = {
            "apiKey": self.config["settings"]["api_key"],
            "loginID": credential.username,
            "password": credential.password,
            "format": "json",
            "sdk": "Android_6.1.0"
        }
        r = self.session.post(self.config["endpoints"]["gigya_login"], data=login_params)
        gigya_data = r.json()
        if gigya_data.get("errorCode") != 0:
            raise PermissionError("Gigya login failed")

        sso_params = {
            "UID": gigya_data["UID"],
            "UIDSignature": gigya_data["UIDSignature"],
            "signatureTimestamp": gigya_data["signatureTimestamp"],
        }
        r = self.session.get(self.config["endpoints"]["vrt_sso"], params=sso_params)

        # The SSO endpoint returns an HTML page embedding the token JSON in a
        # JS variable. Guard the match: a layout change or rejected login would
        # otherwise crash with an opaque AttributeError on .group().
        match = re.search(r'var response = "(.*?)";', r.text)
        if not match:
            raise PermissionError("Could not extract token payload from VRT SSO response")
        token_data = json.loads(match.group(1).replace('\\"', '"'))
        self.access_token = token_data["tokens"]["access_token"]
        self.video_token = token_data["tokens"]["video_token"]

        # Cache until 5 minutes before the JWT expiry (signature not verified:
        # we only need the exp claim, not trust in the token).
        decoded = jwt.decode(self.access_token, options={"verify_signature": False})
        cache.set(
            data={"access_token": self.access_token, "video_token": self.video_token},
            expiration=int(decoded["exp"] - time.time()) - 300,
        )

    def _get_gql_headers(self):
        """Headers for the VRT GraphQL API; authorization is None when anonymous."""
        return {
            "x-vrt-client-name": self.config["settings"]["client_name"],
            "x-vrt-client-version": self.config["settings"]["client_version"],
            "x-vrt-zone": "default",
            "authorization": f"Bearer {self.access_token}" if self.access_token else None,
            "Content-Type": "application/json"
        }

    def get_titles(self) -> Titles_T:
        """
        Resolve the parsed URL to titles.

        A playback (episode/movie) page yields a single Movie; a series root
        page is walked component-by-component — including season tabs — and
        each tile list is paginated to collect Episodes.

        Raises:
            ValueError: if the program page does not exist or yields no episodes.
        """
        if not self.is_series_root:
            r = self.session.post(
                self.config["endpoints"]["graphql"],
                json={"query": QUERY_PLAYBACK, "variables": {"pageId": self.page_id}},
                headers=self._get_gql_headers(),
            )
            data = r.json()["data"]["page"]
            return Movies([Movie(
                id_=data["player"]["modes"][0]["streamId"],
                service=self.__class__,
                name=data["title"],
                language=Language.get("nl"),
                data={"page_id": self.page_id},
            )])

        r = self.session.post(
            self.config["endpoints"]["graphql"],
            json={"query": QUERY_PROGRAM, "variables": {"pageId": self.page_id}},
            headers=self._get_gql_headers(),
        )
        program_data = r.json().get("data", {}).get("page")
        if not program_data:
            raise ValueError(f"Series page not found: {self.page_id}")

        series_name = program_data["title"]
        episodes = []

        for season_title, list_id in self._collect_list_ids(program_data):
            after = None
            while True:  # follow GraphQL cursor pagination
                r_list = self.session.post(
                    self.config["endpoints"]["graphql"],
                    json={"query": QUERY_PAGINATED_LIST, "variables": {"listId": list_id, "after": after}},
                    headers=self._get_gql_headers(),
                )
                list_resp = r_list.json().get("data", {}).get("list")
                if not list_resp:
                    break

                # PaginatedTileList nests tiles in edges; StaticTileList inlines them.
                items_container = list_resp.get("paginatedItems")
                nodes = (
                    [e["node"] for e in items_container["edges"]]
                    if items_container
                    else list_resp.get("items", [])
                )

                for node in nodes:
                    if not node.get("action"):
                        continue
                    link = node["action"]["link"]
                    # Episode links end in .../<id>/...s<season>a<episode>;
                    # fall back to S1E0 when the pattern is absent.
                    s_match = re.search(r'/(\d+)/.+s(\d+)a(\d+)', link)
                    episodes.append(Episode(
                        id_=link,
                        service=self.__class__,
                        title=series_name,
                        season=int(s_match.group(2)) if s_match else 1,
                        number=int(s_match.group(3)) if s_match else 0,
                        name=node["title"],
                        language=Language.get("nl"),
                        data={"page_id": link},
                    ))

                if items_container and items_container["pageInfo"]["hasNextPage"]:
                    after = items_container["pageInfo"]["endCursor"]
                else:
                    break

        if not episodes:
            raise ValueError("No episodes found for this series.")

        return Series(episodes)

    @staticmethod
    def _collect_list_ids(program_data: dict) -> list:
        """Return ordered, de-duplicated (season_title, list_id) pairs from a ProgramPage."""
        list_ids = []
        for comp in program_data.get("components", []):
            typename = comp.get("__typename")
            if typename in ("PaginatedTileList", "StaticTileList") and "listId" in comp:
                list_ids.append((comp.get("title") or "Episodes", comp["listId"]))
            elif typename == "ContainerNavigation":
                # Season tabs: each tab item carries its own nested tile lists.
                for item in comp.get("items", []):
                    item_title = item.get("title", "Episodes")
                    for sub in item.get("components", []):
                        if "listId" in sub:
                            list_ids.append((item_title, sub["listId"]))

        # De-duplicate by list id while preserving first-seen order.
        seen_lists = set()
        unique_list_ids = []
        for title, lid in list_ids:
            if lid not in seen_lists:
                unique_list_ids.append((title, lid))
                seen_lists.add(lid)
        return unique_list_ids

    def get_tracks(self, title: Title_T) -> Tracks:
        """
        Resolve playback for *title*: look up its stream id, exchange the
        video token for a vrtPlayerToken, query the media aggregator, then
        parse the DASH manifest and attach sidecar WebVTT subtitles.

        Side effects: stores the aggregator's DRM token on self.drm_token
        for get_widevine_license() and wires license callbacks on all
        DRM-protected video/audio tracks.
        """
        page_id = title.data["page_id"]
        r_meta = self.session.post(
            self.config["endpoints"]["graphql"],
            json={"query": QUERY_PLAYBACK, "variables": {"pageId": page_id}},
            headers=self._get_gql_headers(),
        )
        stream_id = r_meta.json()["data"]["page"]["player"]["modes"][0]["streamId"]

        # Build an unsigned JWT ({"alg":"HS256"} header, empty signature)
        # carrying the player info payload; padding is stripped per JWT rules.
        p_info = base64.urlsafe_b64encode(json.dumps(self.config["player_info"]).encode()).decode().replace("=", "")
        r_tok = self.session.post(
            self.config["endpoints"]["player_token"],
            json={"identityToken": self.video_token, "playerInfo": f"eyJhbGciOiJIUzI1NiJ9.{p_info}."},
        )
        vrt_player_token = r_tok.json()["vrtPlayerToken"]

        r_agg = self.session.get(
            self.config["endpoints"]["aggregator"].format(stream_id=stream_id),
            params={"client": self.config["settings"]["client_id"], "vrtPlayerToken": vrt_player_token},
        )
        agg_data = r_agg.json()

        dash_url = next(u["url"] for u in agg_data["targetUrls"] if u["type"] == "mpeg_dash")
        tracks = DASH.from_url(dash_url, session=self.session).to_tracks(language=title.language)
        self.drm_token = agg_data["drm"]

        for sub in agg_data.get("subtitleUrls", []):
            tracks.add(Subtitle(
                id_=sub.get("label", "nl"),
                url=sub["url"],
                codec=Subtitle.Codec.WebVTT,
                language=Language.get(sub.get("language", "nl")),
            ))

        for tr in tracks.videos + tracks.audio:
            if tr.drm:
                # Bind the track as a keyword-only default argument: a plain
                # closure would late-bind `tr`, making every license callback
                # reference the LAST track of the loop.
                tr.drm.license = lambda challenge, *, _track=tr, **kw: self.get_widevine_license(challenge, title, _track)

        return tracks

    def get_widevine_license(self, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
        """POST the Widevine challenge to the vudrm proxy and return the raw license."""
        r = self.session.post(
            self.config["endpoints"]["license"],
            data=challenge,
            headers={"x-vudrm-token": self.drm_token, "Origin": "https://www.vrt.be", "Referer": "https://www.vrt.be/"},
        )
        return r.content

    def get_chapters(self, title: Title_T) -> list[Chapter]:
        """VRT MAX exposes no chapter data; always empty."""
        return []
|
||||
@ -1,18 +0,0 @@
endpoints:
  gigya_login: "https://accounts.eu1.gigya.com/accounts.login"
  vrt_sso: "https://www.vrt.be/vrtmax/sso/login"
  graphql: "https://www.vrt.be/vrtnu-api/graphql/v1"
  player_token: "https://media-services-public.vrt.be/vualto-video-aggregator-web/rest/external/v2/tokens"
  aggregator: "https://media-services-public.vrt.be/media-aggregator/v2/media-items/{stream_id}"
  license: "https://widevine-proxy.drm.technology/proxy"

settings:
  api_key: "3_qhEcPa5JGFROVwu5SWKqJ4mVOIkwlFNMSKwzPDAh8QZOtHqu6L4nD5Q7lk0eXOOG"
  client_name: "WEB"
  client_id: "vrtnu-web@PROD"
  client_version: "1.5.15"

player_info:
  drm: { widevine: "L3" }
  platform: "desktop"
  app: { type: "browser", name: "Firefox", version: "146.0" }
Loading…
x
Reference in New Issue
Block a user