Compare commits
No commits in common. "main" and "main" have entirely different histories.
566
CPY/__init__.py
566
CPY/__init__.py
@ -1,566 +0,0 @@
|
||||
import base64
|
||||
import json
|
||||
import re
|
||||
import time
|
||||
from collections.abc import Generator
|
||||
from typing import Optional, Union
|
||||
|
||||
import click
|
||||
import requests as req_lib
|
||||
from langcodes import Language
|
||||
|
||||
from unshackle.core.constants import AnyTrack
|
||||
from unshackle.core.credential import Credential
|
||||
from unshackle.core.manifests import DASH
|
||||
from unshackle.core.search_result import SearchResult
|
||||
from unshackle.core.service import Service
|
||||
from unshackle.core.titles import Episode, Movie, Movies, Series, Title_T, Titles_T
|
||||
from unshackle.core.tracks import Chapter, Tracks, Video
|
||||
|
||||
|
||||
class CPY(Service):
|
||||
"""
|
||||
Service code for catchplay.com
|
||||
Version: 1.0.0
|
||||
|
||||
Authorization: Credentials
|
||||
|
||||
Security: HD@L3
|
||||
|
||||
Use full URL (for example - https://www.catchplay.com/id/video/1b8c1ba3-9015-4f99-8131-25dd45a4b033)
|
||||
or title ID (for example - 1b8c1ba3-9015-4f99-8131-25dd45a4b033).
|
||||
|
||||
IMPORTANT:
|
||||
SET THE parental_pin VALUE IN config.yaml TO MATCH YOUR ACCOUNT'S PARENTAL PIN
|
||||
"""
|
||||
|
||||
TITLE_RE = r"^(?:https?://(?:www\.)?catchplay\.com/\w+/(?:movie|series|video)/)?(?P<title_id>[a-f0-9-]{36})"
|
||||
GEOFENCE = ("ID", "TW", "SG", "HK", "TH")
|
||||
|
||||
@staticmethod
@click.command(name="CPY", short_help="https://catchplay.com")
@click.argument("title", type=str)
@click.pass_context
def cli(ctx, **kwargs):
    """CLI entry point: construct a CPY service from the click context."""
    return CPY(ctx, **kwargs)
|
||||
|
||||
def __init__(self, ctx, title):
    """Initialise service state; auth/playback fields are filled later."""
    super().__init__(ctx)
    self.title = title
    self.cdm = ctx.obj.cdm

    # Auth state — populated by authenticate() / _apply_tokens().
    self.access_token: Optional[str] = None
    self.refresh_token: Optional[str] = None
    self.token_expiry: float = 0
    self.account_info: dict = {}

    # Playback/DRM state — populated by get_tracks().
    self.play_token: Optional[str] = None
    self.license_url: Optional[str] = None
    self.license_headers: Optional[dict] = None

    # The profile name scopes the token cache so accounts don't collide.
    self.profile = ctx.parent.params.get("profile") or "default"
|
||||
|
||||
def authenticate(self, cookies=None, credential: Optional[Credential] = None) -> None:
    # Login strategy: reuse cached tokens while valid, refresh when only the
    # access token expired, otherwise fall back to a full credential login.
    super().authenticate(cookies, credential)

    if not credential or not credential.username or not credential.password:
        raise EnvironmentError("Service requires Credentials for Authentication.")

    self.credential = credential
    # Token cache is keyed per CLI profile so multiple accounts can coexist.
    cache_key = f"tokens_{self.profile}"
    cache = self.cache.get(cache_key)

    if cache and not cache.expired:
        cached = cache.data
        # Only reuse tokens that belong to the same account.
        if isinstance(cached, dict) and cached.get("username") == credential.username:
            # Check if access token is still valid
            if cached.get("token_expiry", 0) > time.time():
                self.log.info("Using cached tokens")
                self._restore_from_cache(cached)
                return
            # Access token expired but we have a refresh token
            elif cached.get("refresh_token"):
                self.log.info("Access token expired, refreshing...")
                try:
                    self._refresh_auth(cached["refresh_token"])
                    self._cache_tokens(credential.username, cache_key)
                    return
                except Exception as e:
                    # Any refresh failure falls through to a fresh login.
                    self.log.warning(f"Refresh failed ({e}), doing fresh login...")

    # Fresh login
    self.log.info("Logging in...")
    self._do_login(credential)
    self._cache_tokens(credential.username, cache_key)
|
||||
|
||||
def _do_login(self, credential: Credential) -> None:
    """Perform full guest token + credential login flow."""
    territory = self.config.get("territory", "ID")
    device = self.config["device"]
    territory_path = territory.lower()

    # Step 1: an anonymous guest token must be obtained before logging in.
    self.log.info("Fetching guest token...")
    guest_resp = self.session.get(
        url=self.config["endpoints"]["guest_token"],
        headers={"Referer": f"https://www.catchplay.com/{territory_path}/home"},
    ).json()
    if not guest_resp.get("access_token"):
        raise Exception("Failed to get guest token")

    # Step 2: exchange the credentials for user tokens.
    login_headers = {
        "Content-Type": "application/json",
        "asiaplay-territory": territory,
        "asiaplay-device-type": device["type"],
        "asiaplay-device-version": device["version"],
        "Referer": f"https://www.catchplay.com/{territory_path}/login",
    }
    login_payload = {
        "username": credential.username,
        "password": credential.password,
        "rememberMe": False,
    }
    login_resp = self.session.post(
        url=self.config["endpoints"]["login"],
        headers=login_headers,
        json=login_payload,
    ).json()
    if not login_resp.get("access_token"):
        raise Exception(f"Login failed: {login_resp}")

    self._apply_tokens(login_resp)

    user = login_resp.get("user", {})
    self.account_info = user
    self.log.info(
        f" + Logged in as: {credential.username} "
        f"[{user.get('accountType', 'unknown')} / {user.get('accountStatus', 'unknown')}]"
    )
|
||||
|
||||
def _refresh_auth(self, refresh_token: str) -> None:
    """Refresh the access token and re-apply it to the session."""
    # NOTE(review): the refresh_token argument is not sent in this request —
    # the endpoint appears to rely on the connect.sid session cookie instead;
    # confirm against the API before removing the parameter.
    extra_cookies = {"connect.sid": self._connect_sid} if hasattr(self, "_connect_sid") else {}

    refresh_resp = self.session.get(
        url=self.config["endpoints"]["refresh"],
        headers={
            "Referer": "https://www.catchplay.com/",
        },
        cookies=extra_cookies,
    ).json()

    if not refresh_resp.get("access_token"):
        raise Exception(f"Refresh failed: {refresh_resp}")

    self._apply_tokens(refresh_resp)

    user = refresh_resp.get("user", {})
    self.account_info = user
    self.log.info(
        f" + Token refreshed "
        f"[{user.get('accountType', 'unknown')} / {user.get('accountStatus', 'unknown')}]"
    )
|
||||
|
||||
def _apply_tokens(self, token_data: dict) -> None:
|
||||
"""Apply tokens from login or refresh response to session."""
|
||||
self.access_token = token_data["access_token"]
|
||||
self.refresh_token = token_data.get("refresh_token")
|
||||
|
||||
# Calculate expiry from JWT or expires_in
|
||||
expires_in = token_data.get("expires_in", 86400)
|
||||
self.token_expiry = time.time() + expires_in - 300 # 5 min buffer
|
||||
|
||||
territory = self.config.get("territory", "ID")
|
||||
device = self.config["device"]
|
||||
|
||||
self.session.headers.update({
|
||||
"authorization": f"Bearer {self.access_token}",
|
||||
"asiaplay-territory": territory,
|
||||
"asiaplay-device-type": device["type"],
|
||||
"asiaplay-device-version": device["version"],
|
||||
"asiaplay-os-type": device["os_type"],
|
||||
"asiaplay-os-version": device["os_version"],
|
||||
"origin": "https://www.catchplay.com",
|
||||
"referer": "https://www.catchplay.com/",
|
||||
})
|
||||
|
||||
def _cache_tokens(self, username: str, cache_key: str) -> None:
|
||||
"""Cache current tokens for reuse."""
|
||||
cache = self.cache.get(cache_key)
|
||||
cache.set(
|
||||
data={
|
||||
"username": username,
|
||||
"access_token": self.access_token,
|
||||
"refresh_token": self.refresh_token,
|
||||
"token_expiry": self.token_expiry,
|
||||
"account_info": self.account_info,
|
||||
}
|
||||
)
|
||||
|
||||
def _restore_from_cache(self, cached: dict) -> None:
|
||||
"""Restore session state from cached token data."""
|
||||
self.access_token = cached["access_token"]
|
||||
self.refresh_token = cached.get("refresh_token")
|
||||
self.token_expiry = cached.get("token_expiry", 0)
|
||||
self.account_info = cached.get("account_info", {})
|
||||
|
||||
territory = self.config.get("territory", "ID")
|
||||
device = self.config["device"]
|
||||
|
||||
self.session.headers.update({
|
||||
"authorization": f"Bearer {self.access_token}",
|
||||
"asiaplay-territory": territory,
|
||||
"asiaplay-device-type": device["type"],
|
||||
"asiaplay-device-version": device["version"],
|
||||
"asiaplay-os-type": device["os_type"],
|
||||
"asiaplay-os-version": device["os_version"],
|
||||
"origin": "https://www.catchplay.com",
|
||||
"referer": "https://www.catchplay.com/",
|
||||
})
|
||||
|
||||
self.log.info(
|
||||
f" + Restored session "
|
||||
f"[{self.account_info.get('accountType', 'unknown')} / "
|
||||
f"{self.account_info.get('accountStatus', 'unknown')}]"
|
||||
)
|
||||
|
||||
def _graphql(self, key: str, variables: dict) -> dict:
|
||||
"""Execute a GraphQL query defined in config."""
|
||||
cfg = self.config["graphql"][key]
|
||||
endpoint_key = cfg["endpoint"]
|
||||
url = self.config["endpoints"][endpoint_key]
|
||||
|
||||
resp = self.session.post(
|
||||
url=url,
|
||||
headers={
|
||||
"asiaplay-api-name": cfg["api_name"],
|
||||
"content-type": "application/json",
|
||||
},
|
||||
json={
|
||||
"operationName": cfg["operation"],
|
||||
"variables": variables,
|
||||
"query": cfg["query"],
|
||||
},
|
||||
).json()
|
||||
|
||||
if resp.get("errors"):
|
||||
raise Exception(f"GraphQL error ({key}): {resp['errors']}")
|
||||
|
||||
return resp["data"]
|
||||
|
||||
def search(self) -> Generator[SearchResult, None, None]:
    """Yield title suggestions matching self.title from the search API."""
    self.log.info(f"Searching for: {self.title}")

    data = self._graphql("search", {"keyword": self.title})
    suggestions = data.get("searchKeywordSuggestions", {})

    for program in suggestions.get("programs", []):
        program_id = program["id"]
        yield SearchResult(
            id_=program_id,
            title=program["name"],
            label="TITLE",
            url=f"https://www.catchplay.com/id/video/{program_id}",
        )
|
||||
|
||||
@staticmethod
|
||||
def _title_from(obj: dict) -> str:
|
||||
return obj.get("title", {}).get("eng") or obj.get("title", {}).get("local") or "Unknown"
|
||||
|
||||
@staticmethod
|
||||
def _extract_season_number(title: str) -> int:
|
||||
match = re.search(r"S(\d+)", title)
|
||||
return int(match.group(1)) if match else 1
|
||||
|
||||
@staticmethod
|
||||
def _extract_episode_number(title: str) -> int:
|
||||
match = re.search(r"Episode\s+(\d+)", title, re.IGNORECASE)
|
||||
return int(match.group(1)) if match else 0
|
||||
|
||||
def get_titles(self) -> Titles_T:
    # Resolve the CLI input (full URL or bare UUID) to a title ID.
    title_id = re.match(self.TITLE_RE, self.title)
    if not title_id:
        raise ValueError(f"Could not parse title ID from: {self.title}")
    self.title = title_id.group("title_id")

    self.log.info(f"Fetching metadata for: {self.title}")
    main = self._graphql("get_main_program", {"id": self.title})["getMainProgram"]

    program_type = main.get("type", "MOVIE")
    series_title = self._title_from(main)
    # "selected" carries the richer metadata for the currently-selected child.
    selected = main.get("selected", {})
    release_year = selected.get("releaseYear")
    lang = Language.get(self.config.get("default_language", "en"))

    if program_type == "MOVIE":
        title_name = self._title_from(selected) if selected else series_title
        # Separate query to fetch videoIntros (used later for chapters).
        program_meta = self._graphql("get_program", {"id": self.title})["getProgram"]

        return Movies([
            Movie(
                id_=self.title,
                service=self.__class__,
                name=title_name,
                year=release_year,
                language=lang,
                data={"videoIntros": program_meta.get("videoIntros", {})},
            )
        ])

    elif program_type in ("SERIES", "SEASON"):
        episodes = []
        children = main.get("children", [])

        for season_data in children:
            if season_data.get("type") == "SEASON":
                # Season number is parsed from the short title, e.g. "S2".
                season_short = season_data.get("title", {}).get("short", "S1")
                season_num = self._extract_season_number(season_short)

                # Detailed episode data only exists under "selected" for the
                # season that is currently selected in the API response.
                selected_children = (
                    selected.get("children", [])
                    if selected.get("id") == season_data["id"]
                    else []
                )
                selected_map = {ep["id"]: ep for ep in selected_children}

                for idx, ep_data in enumerate(season_data.get("children", []), start=1):
                    ep_id = ep_data["id"]
                    ep_detail = selected_map.get(ep_id, {})

                    # Prefer the detailed title, then the basic one, then a
                    # synthetic "Episode N" from the position.
                    ep_title = (
                        self._title_from(ep_detail) if ep_detail.get("title") else
                        self._title_from(ep_data) if ep_data.get("title") else
                        f"Episode {idx}"
                    )

                    # Fall back to positional numbering when the title
                    # contains no explicit episode number.
                    ep_num = self._extract_episode_number(ep_title) or idx

                    episodes.append(Episode(
                        id_=ep_id,
                        service=self.__class__,
                        title=series_title,
                        season=season_num,
                        number=ep_num,
                        name=ep_title,
                        year=release_year,
                        language=lang,
                        data={"season_id": season_data["id"]},
                    ))

            elif season_data.get("type") == "EPISODE":
                # Flat series: episodes sit directly under the main program,
                # so assume season 1 and number sequentially.
                ep_title = self._title_from(season_data)
                ep_num = self._extract_episode_number(ep_title) or len(episodes) + 1

                episodes.append(Episode(
                    id_=season_data["id"],
                    service=self.__class__,
                    title=series_title,
                    season=1,
                    number=ep_num,
                    name=ep_title,
                    year=release_year,
                    language=lang,
                    data={},
                ))

        if not episodes:
            raise Exception(f"No episodes found for: {series_title}")

        self.log.info(f" + Found {len(episodes)} episodes across {len(children)} season(s)")
        return Series(episodes)

    else:
        raise NotImplementedError(f"Unsupported program type: {program_type}")
|
||||
|
||||
def get_tracks(self, title: Title_T) -> Tracks:
    # Playback pipeline: scenario check -> optional parental PIN ->
    # play token -> media info (manifest URL + DRM endpoint) -> DASH parse.
    is_episode = isinstance(title, Episode)

    # Play scenario
    self.log.info("Checking play scenario...")
    scenario = self._graphql(
        "get_play_scenario",
        {"input": {"programId": title.id}}
    )["getPlayScenario"]

    behavior = scenario.get("behaviorType")
    self.log.info(f" + Play scenario: {behavior}")

    if behavior != "PLAYABLE":
        reason = scenario.get("reason", {})
        raise Exception(
            f"Not playable. Behavior: {behavior}. "
            f"Reason: {reason.get('message', 'Unknown')}"
        )

    # Parental control: validate the configured PIN when the API demands it.
    parental = scenario.get("parentalControl", {})
    if parental and parental.get("behaviorType") == "PIN_CODE_REQUIRED":
        self.log.info("Validating parental PIN...")
        pin = self.config.get("parental_pin", "0000")
        pin_result = self._graphql(
            "validate_pin",
            {"input": {"pinCode": pin}}
        )["validateParentalControlPinCode"]

        if pin_result.get("status") != "SUCCESSFUL":
            raise Exception(f"PIN validation failed: {pin_result.get('status')}")
        self.log.info(" + PIN validated")

    # Play token
    self.log.info("Getting play token...")
    play_resp = self.session.post(
        url=self.config["endpoints"]["play"],
        headers={"content-type": "application/json"},
        json={
            "force": False,
            "programType": "Video",
            "videoId": title.id,
            "watchType": "episode" if is_episode else "movie",
        },
    ).json()

    # The play endpoint signals success with string code "0".
    if play_resp.get("code") != "0":
        raise Exception(f"Play token failed: {play_resp}")

    play_data = play_resp["data"]
    # Token field name varies: "vcmsAccessToken" or legacy "playToken".
    self.play_token = play_data.get("vcmsAccessToken") or play_data.get("playToken")
    video_id = play_data.get("catchplayVideoId")

    if not self.play_token or not video_id:
        raise Exception("Missing play token or video ID")

    self.log.info(f" + Play token for: {video_id}")

    # Media info: the VCMS API wants its own device headers + the play token
    # (not the account bearer token on the main session).
    self.log.info("Fetching media info...")
    vcms = self.config["vcms"]

    media_resp = self.session.get(
        url=self.config["endpoints"]["media_info"].format(video_id=video_id),
        headers={
            "authorization": f"Bearer {self.play_token}",
            "asiaplay-device-type": vcms["device_type"],
            "asiaplay-device-model": vcms["device_model"],
            "asiaplay-os-type": vcms["os_type"],
            "asiaplay-os-version": vcms["os_version"],
            "asiaplay-app-version": vcms["app_version"],
            "asiaplay-platform": vcms["platform"],
            "content-type": "application/x-www-form-urlencoded",
        },
    ).json()

    manifest_url = media_resp.get("videoUrl")
    if not manifest_url:
        raise Exception(f"No video URL: {media_resp}")

    self.log.debug(f"Manifest: {manifest_url}")

    # DRM: remember license endpoint + any extra headers for
    # get_widevine_license(); fall back to the configured endpoint.
    license_info = media_resp.get("license", {})
    self.license_url = license_info.get("url", self.config["endpoints"]["widevine_license"])
    self.license_headers = license_info.get("extraHeaders", {})

    # DASH manifest (clean CDN session) — fetched without the API session's
    # auth headers, presumably because the CDN rejects them (confirm).
    self.log.info("Parsing DASH manifest...")

    cdn_session = req_lib.Session()
    cdn_session.headers.update({
        "User-Agent": (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
            "(KHTML, like Gecko) Chrome/147.0.0.0 Safari/537.36 Edg/147.0.0.0"
        ),
        "Accept": "*/*",
        "Accept-Language": "en-US,en;q=0.9",
        "Origin": "https://www.catchplay.com",
        "Referer": "https://www.catchplay.com/",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "cross-site",
    })

    tracks = DASH.from_url(url=manifest_url, session=cdn_session).to_tracks(language=title.language)

    # Force SDR on all video tracks.
    for video in tracks.videos:
        video.range = Video.Range.SDR

    # VideoIntros for chapters
    if is_episode:
        meta = self._graphql("get_program", {"id": title.id})["getProgram"]
        title.data["videoIntros"] = meta.get("videoIntros", {})
    elif not title.data.get("videoIntros"):
        meta = self._graphql("get_program", {"id": title.id})["getProgram"]
        title.data["videoIntros"] = meta.get("videoIntros", {})

    return tracks
|
||||
|
||||
def get_chapters(self, title: Title_T) -> list[Chapter]:
    """Build chapter markers from the title's "videoIntros" metadata.

    Intro and recap segments yield a start marker plus an "After ..." marker;
    credits yield only a start marker. Returns an empty list when no intro
    metadata is available.

    Refactor: the original repeated the same extract-and-append logic three
    times (intro/recap/credits); this drives it from one segment table with
    identical output.
    """
    intros = title.data.get("videoIntros", {})
    if not intros:
        return []

    def to_ms(iso: str) -> Optional[int]:
        # Parse an ISO-8601 duration like "PT1H2M3S" into milliseconds.
        # NOTE(review): fractional seconds are not handled — assumed absent
        # from this API; confirm against real responses.
        if not iso:
            return None
        m = re.match(r"PT(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?", iso)
        if not m:
            return None
        h, mi, s = int(m.group(1) or 0), int(m.group(2) or 0), int(m.group(3) or 0)
        return (h * 3600 + mi * 60 + s) * 1000

    chapters = []
    # (segment key, start-marker name, end-marker name or None for no end)
    segments = [
        ("intro", "Intro", "After Intro"),
        ("recap", "Recap", "After Recap"),
        ("credits", "Credits", None),
    ]
    for key, start_name, end_name in segments:
        segment = intros.get(key)
        if not segment:
            continue
        start = to_ms(segment.get("startTime"))
        if start is not None:
            chapters.append(Chapter(timestamp=start, name=start_name))
        if end_name:
            end = to_ms(segment.get("endTime"))
            if end is not None:
                chapters.append(Chapter(timestamp=end, name=end_name))

    chapters.sort(key=lambda c: c.timestamp)
    return chapters
|
||||
|
||||
def get_widevine_service_certificate(self, **_) -> Optional[str]:
    """Return the optional Widevine service certificate from config."""
    return self.config.get("certificate")
|
||||
|
||||
def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> Optional[Union[bytes, str]]:
    """POST the Widevine challenge to the license URL captured by get_tracks().

    Returns the "license" field of a JSON response, or the raw body when the
    server does not answer with JSON. Raises ValueError if get_tracks() has
    not populated the license URL yet.
    """
    if not self.license_url:
        raise ValueError("No license URL. Call get_tracks() first.")

    # Dedicated session with browser-like headers; the API session's bearer
    # token must not be sent to the license endpoint.
    license_session = req_lib.Session()
    browser_headers = {
        "User-Agent": (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
            "(KHTML, like Gecko) Chrome/147.0.0.0 Safari/537.36 Edg/147.0.0.0"
        ),
        "Accept": "*/*",
        "Accept-Language": "en-US,en;q=0.9",
        "Origin": "https://www.catchplay.com",
        "Referer": "https://www.catchplay.com/",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-site",
    }
    license_session.headers.update(browser_headers)
    # Server-provided extra headers (from media info) are applied last so
    # they take precedence.
    license_session.headers.update(self.license_headers)

    response = license_session.post(url=self.license_url, data=challenge)

    if not response.ok:
        self.log.error(f"License error: {response.text}")
    response.raise_for_status()

    try:
        return response.json().get("license")
    except ValueError:
        # Non-JSON body: the response itself is the license blob.
        return response.content
|
||||
136
CPY/config.yaml
136
CPY/config.yaml
@ -1,136 +0,0 @@
|
||||
territory: ID
|
||||
default_language: en
|
||||
parental_pin: "0000"  # Change this to match your account's parental-control PIN
|
||||
|
||||
device:
|
||||
type: WEB_PC
|
||||
version: 3.0.138.4463
|
||||
os_type: Windows_Edge
|
||||
os_version: "10,146.0.0.0"
|
||||
|
||||
vcms:
|
||||
device_type: web
|
||||
device_model: windows
|
||||
os_type: chrome
|
||||
os_version: 147.0.0
|
||||
app_version: "3.0"
|
||||
platform: desktop
|
||||
|
||||
endpoints:
|
||||
guest_token: https://www.catchplay.com/api/v2/oauth
|
||||
login: https://www.catchplay.com/api/v2/oauth/login
|
||||
refresh: https://www.catchplay.com/api/v2/oauth/refresh
|
||||
graphql_program: https://sunapi.catchplay.com/program/v3/graphql
|
||||
graphql_membership: https://sunapi.catchplay.com/membership/v3/graphql
|
||||
graphql_membership_program: https://sunapi.catchplay.com/membership-program/v3/graphql
|
||||
play: https://hp2-api.catchplay.com/me/play
|
||||
media_info: "https://vcmsapi.catchplay.com/video/v3/mediaInfo/{video_id}"
|
||||
widevine_license: https://vcmsapi.catchplay.com/video-drm/widevine
|
||||
|
||||
graphql:
|
||||
search:
|
||||
operation: searchKeywordSuggestions
|
||||
api_name: searchKeywordSuggestions
|
||||
endpoint: graphql_membership_program
|
||||
query: |
|
||||
query searchKeywordSuggestions($keyword: String!) {
|
||||
searchKeywordSuggestions(keyword: $keyword) {
|
||||
programs {
|
||||
id
|
||||
name
|
||||
photoUrl
|
||||
orientation
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
get_main_program:
|
||||
operation: getMainProgram
|
||||
api_name: getMainProgram
|
||||
endpoint: graphql_program
|
||||
query: |
|
||||
query getMainProgram($id: ID!) {
|
||||
getMainProgram(id: $id) {
|
||||
id
|
||||
type
|
||||
title { local eng }
|
||||
totalChildren
|
||||
children {
|
||||
id
|
||||
type
|
||||
title { short local eng }
|
||||
children {
|
||||
id
|
||||
type
|
||||
title { local eng }
|
||||
publishedDate
|
||||
playerInfo { duration videoCode }
|
||||
}
|
||||
}
|
||||
selected {
|
||||
id
|
||||
type
|
||||
releaseYear
|
||||
synopsis
|
||||
title { local eng }
|
||||
children {
|
||||
id
|
||||
type
|
||||
title { local eng }
|
||||
synopsis
|
||||
publishedDate
|
||||
playerInfo { duration videoCode }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
get_program:
|
||||
operation: getProgram
|
||||
api_name: getProgram
|
||||
endpoint: graphql_program
|
||||
query: |
|
||||
query getProgram($id: ID!) {
|
||||
getProgram(id: $id) {
|
||||
id
|
||||
title { local eng }
|
||||
type
|
||||
videoIntros {
|
||||
intro { startTime endTime }
|
||||
recap { startTime endTime }
|
||||
credits { startTime endTime }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
get_play_scenario:
|
||||
operation: getPlayScenario
|
||||
api_name: getPlayScenario
|
||||
endpoint: graphql_membership_program
|
||||
query: |
|
||||
query getPlayScenario($input: PlayScenarioInput!) {
|
||||
getPlayScenario(input: $input) {
|
||||
behaviorType
|
||||
description
|
||||
reason { message }
|
||||
parentalControl { behaviorType title message }
|
||||
playProgram {
|
||||
id
|
||||
type
|
||||
title { local playing }
|
||||
playerInfo { videoCode }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
validate_pin:
|
||||
operation: validateParentalControlPinCode
|
||||
api_name: validateParentalControlPinCode
|
||||
endpoint: graphql_membership
|
||||
query: |
|
||||
query validateParentalControlPinCode($input: ValidateParentalControlInput!) {
|
||||
validateParentalControlPinCode(input: $input) {
|
||||
status
|
||||
description
|
||||
}
|
||||
}
|
||||
471
GLA/__init__.py
471
GLA/__init__.py
@ -1,471 +0,0 @@
|
||||
import base64
|
||||
import hashlib
|
||||
import json
|
||||
import re
|
||||
import time
|
||||
from collections.abc import Generator
|
||||
from datetime import datetime
|
||||
from http.cookiejar import CookieJar
|
||||
from typing import Optional, Union
|
||||
from urllib.parse import urljoin, parse_qs, urlparse
|
||||
|
||||
import click
|
||||
from langcodes import Language
|
||||
|
||||
from unshackle.core.constants import AnyTrack
|
||||
from unshackle.core.credential import Credential
|
||||
from unshackle.core.manifests import DASH, HLS
|
||||
from unshackle.core.search_result import SearchResult
|
||||
from unshackle.core.service import Service
|
||||
from unshackle.core.titles import Episode, Movie, Movies, Series, Title_T, Titles_T
|
||||
from unshackle.core.tracks import Chapter, Subtitle, Tracks, Video
|
||||
|
||||
|
||||
class GLA(Service):
|
||||
"""
|
||||
Service code for gagaoolala.com
|
||||
Version: 1.0.1
|
||||
|
||||
Authorization: Email/Password or Cookies (PHPSESSID)
|
||||
Security: FHD@L3 (Widevine/PlayReady DRM via ExpressPlay)
|
||||
|
||||
Use full URL: https://www.gagaoolala.com/en/videos/6184/candy-2026
|
||||
Or title ID: 6184 (slug will be fetched from page if needed)
|
||||
"""
|
||||
|
||||
# Updated regex to optionally capture slug
|
||||
TITLE_RE = r"^(?:https?://(?:www\.)?gagaoolala\.com/(?:en/)?videos/)?(?P<title_id>\d+)(?:/(?P<slug>[^/?#]+))?"
|
||||
GEOFENCE = ()
|
||||
NO_SUBTITLES = False
|
||||
|
||||
VIDEO_RANGE_MAP = {
|
||||
"SDR": "sdr",
|
||||
"HDR10": "hdr10",
|
||||
"DV": "dolby_vision",
|
||||
}
|
||||
|
||||
@staticmethod
@click.command(name="GLA", short_help="https://www.gagaoolala.com")
@click.argument("title", type=str)
@click.option("-m", "--movie", is_flag=True, default=False, help="Specify if it's a movie")
@click.option("-d", "--device", type=str, default="firefox_linux", help="Select device profile")
@click.pass_context
def cli(ctx, **kwargs):
    """CLI entry point: construct a GLA service from the click context."""
    return GLA(ctx, **kwargs)
|
||||
|
||||
def __init__(self, ctx, title, movie, device, email=None, password=None):
    """Store CLI options and apply L3-CDM track restrictions."""
    super().__init__(ctx)

    self.title, self.movie, self.device = title, movie, device
    self.email, self.password = email, password
    self.cdm = ctx.obj.cdm

    # L3 CDMs only decrypt AVC/SDR here; restrict requested tracks.
    # NOTE(review): self.track_request is assumed to be provided by the
    # Service base class — confirm.
    if self.cdm and self.cdm.security_level == 3:
        self.track_request.codecs = [Video.Codec.AVC]
        self.track_request.ranges = [Video.Range.SDR]

    if self.config is None:
        raise Exception("Config is missing!")

    self.profile = ctx.parent.params.get("profile") or "default"
    self.user_id = None       # set by authenticate()
    self.license_data = {}    # DRM metadata collected later
    self.slug = None          # URL slug, stored for API calls
|
||||
|
||||
def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
    """Authenticate with cookies (preferred) or email/password.

    Cookie auth: installs the jar and records the "gli" cookie as the user
    id. Credential auth: POSTs the login form and records "user_line_uid"
    from the JSON response.

    Fix: the original raised `AuthenticationError`, a name that is never
    imported or defined in this module (see the import block), so a failed
    login crashed with NameError instead of a readable error. A built-in
    PermissionError is raised instead.
    """
    super().authenticate(cookies, credential)

    if cookies:
        self.session.cookies.update(cookies)
        # The "gli" cookie carries the user id used by later API calls.
        for cookie in cookies:
            if cookie.name == "gli":
                self.user_id = cookie.value
                break
        return

    if not credential or not credential.username or not credential.password:
        raise EnvironmentError("Service requires Cookies or Credential (email/password) for Authentication.")

    login_url = "https://www.gagaoolala.com/en/user/login"
    login_data = {
        "email": credential.username,
        "passwd": credential.password,
    }

    headers = {
        "User-Agent": self.config["client"][self.device]["user_agent"],
        "Accept": "application/json, text/javascript, */*; q=0.01",
        "Accept-Language": "en-US,en;q=0.9",
        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
        "X-Requested-With": "XMLHttpRequest",
        "Origin": "https://www.gagaoolala.com",
        "Referer": login_url,
    }

    response = self.session.post(login_url, data=login_data, headers=headers)
    response.raise_for_status()

    result = response.json()

    if not result.get("success"):
        # The error message may live at the top level or under "data".
        error_msg = result.get("msg") or result.get("data", {}).get("msg") or "Unknown error"
        raise PermissionError(f"Login failed: {error_msg}")

    self.user_id = result.get("data", {}).get("user_line_uid")
|
||||
|
||||
def search(self) -> Generator[SearchResult, None, None]:
    # Results come back either as JSON (XHR path) or as a full HTML page
    # whose JSON-LD block lists the matches; both are handled below.
    search_url = "https://www.gagaoolala.com/en/search"
    params = {"q": self.title}

    headers = {
        "User-Agent": self.config["client"][self.device]["user_agent"],
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Accept-Language": "en-US,en;q=0.9",
        "X-Requested-With": "XMLHttpRequest",
    }

    response = self.session.get(search_url, params=params, headers=headers)
    response.raise_for_status()

    content_type = response.headers.get("Content-Type", "")
    if "application/json" in content_type:
        try:
            data = response.json()
        except json.JSONDecodeError:
            data = None
    else:
        data = None

    if not data:
        html = response.text

        # Primary fallback: the page's <script type="application/ld+json">.
        json_ld_match = re.search(
            r'<script[^>]+type=["\']application/ld\+json["\'][^>]*>\s*({.*?"@context".*?})\s*</script>',
            html,
            re.DOTALL | re.IGNORECASE
        )

        if json_ld_match:
            json_str = json_ld_match.group(1)
            # Undo escaped slashes/quotes before parsing.
            json_str = json_str.replace(r'\/', '/').replace(r'\"', '"')
            try:
                data = json.loads(json_str)
            except json.JSONDecodeError as e:
                self.log.debug(f"Failed to parse JSON-LD: {e}")
                data = None
        else:
            # Last resort: a bare itemList object embedded in the HTML.
            fallback_match = re.search(
                r'(\{[^{}]*"@context"[^{}]*"itemListElement"[^{}]*\[\s*\{[^{}]*"url"[^{}]*\][^{}]*\})',
                html,
                re.DOTALL
            )
            if fallback_match:
                try:
                    data = json.loads(fallback_match.group(1))
                except json.JSONDecodeError:
                    data = None

    if not data or "itemListElement" not in data:
        self.log.warning(f"No search results found for '{self.title}'")
        return

    for item in data["itemListElement"]:
        url = item.get("url", "")
        if not url:
            continue

        match = re.match(self.TITLE_RE, url)
        if not match:
            continue

        title_id = match.group("title_id")
        slug = match.group("slug")

        # Extract title name from slug or URL
        title_name = slug if slug else url.rstrip("/").split("/")[-1]
        if "-" in title_name:
            parts = title_name.rsplit("-", 1)
            # Remove year suffix if present (e.g., candy-2026 -> candy)
            if parts[-1].isdigit() and len(parts[-1]) == 4:
                title_name = parts[0]
        title_name = title_name.replace("-", " ").title()

        # Detect series vs movie
        # NOTE(review): heuristic — slugs containing "-e" are treated as
        # series; confirm against real catalogue slugs.
        is_series = bool(slug and ("-e" in slug or slug.endswith("-e01")))

        yield SearchResult(
            id_=title_id,
            title=title_name,
            label="SERIES" if is_series else "MOVIE",
            url=url,
        )
|
||||
|
||||
def _clean_title(self, raw_title: str, slug: Optional[str] = None) -> str:
|
||||
"""Clean up page titles by removing SEO/marketing suffixes."""
|
||||
title = re.sub(r'\s*\|\s*GagaOOLala\s*$', '', raw_title).strip()
|
||||
|
||||
seo_patterns = [
|
||||
r'\s*-\s*Watch\s+Online.*$',
|
||||
r'\s*-\s*Find\s+Your\s+Story.*$',
|
||||
r'\s*-\s*Watch\s+BL\s+Movies.*$',
|
||||
r'\s*-\s*Stream\s+Online.*$',
|
||||
r'\s*-\s*Free\s+Streaming.*$',
|
||||
r'\s*-\s*GagaOOLala.*$',
|
||||
]
|
||||
for pattern in seo_patterns:
|
||||
title = re.sub(pattern, '', title, flags=re.IGNORECASE)
|
||||
|
||||
title = re.sub(r'\s*-\s*$', '', title).strip()
|
||||
|
||||
if slug:
|
||||
slug_title = slug.replace('-', ' ').title()
|
||||
year_match = re.search(r'(\d{4})$', slug)
|
||||
if year_match:
|
||||
year = year_match.group(1)
|
||||
slug_title = re.sub(r'\s*\d{4}\s*$', '', slug_title).strip()
|
||||
candidate = f"{slug_title} ({year})"
|
||||
if len(candidate) < len(title) or title.lower().startswith(slug_title.lower()):
|
||||
return candidate
|
||||
|
||||
return title if title else f"Title {self.title}"
|
||||
|
||||
def get_titles(self) -> Titles_T:
    """Resolve self.title (full URL or bare ID) into a Series or Movies listing.

    Scrapes the public video page; series pages embed their episode list
    in a ``videoEpisodes`` JavaScript variable, otherwise the page is
    treated as a movie.
    """
    match = re.match(self.TITLE_RE, self.title)
    if not match:
        raise ValueError(f"Could not parse title ID from: {self.title}")

    title_id = match.group("title_id")
    # NOTE(review): this requires a (?P<slug>...) group in TITLE_RE, which is
    # defined outside this view — confirm the regex actually declares it.
    self.slug = match.group("slug")

    video_url = f"https://www.gagaoolala.com/en/videos/{title_id}"
    if self.slug:
        video_url += f"/{self.slug}"

    response = self.session.get(video_url)

    # Stale slugs can 404; retry once with the bare-ID URL before failing.
    if response.status_code == 404 and self.slug:
        self.log.warning(f"URL with slug returned 404, trying without slug")
        video_url = f"https://www.gagaoolala.com/en/videos/{title_id}"
        response = self.session.get(video_url)

    response.raise_for_status()

    # Series pages embed the episode list as inline JavaScript.
    episodes_match = re.search(r'var\s+videoEpisodes\s*=\s*(\[.*?\]);\s*var\s+videoSeasons', response.text, re.DOTALL)

    if episodes_match:
        episodes_data = json.loads(episodes_match.group(1))
        series_episodes = [ep for ep in episodes_data if ep.get("is_series")]

        if series_episodes:
            # Derive a series title from the first episode's name, falling
            # back to the slug, then to a generic placeholder.
            first_name = series_episodes[0].get("name", "")
            base_title = re.sub(r'\s*Episode\s*\d+.*$', '', first_name).strip()
            if not base_title and self.slug:
                base_title = self._clean_title(self.slug.replace('-', ' ').title(), None)
            if not base_title:
                base_title = f"Series {title_id}"

            episodes = []
            for ep in series_episodes:
                # Synthesize an episode slug when the API omits one, so
                # _fetch_manifest can still build the play URL.
                ep_slug = ep.get("slug", f"{self.slug}-e{ep.get('episode', 1)}" if self.slug else None)
                episodes.append(
                    Episode(
                        id_=str(ep["id"]),
                        service=self.__class__,
                        title=base_title,
                        season=ep.get("season", 1),
                        number=ep.get("episode", 1),
                        name=ep.get("name", f"Episode {ep.get('episode', 1)}"),
                        description=None,
                        year=None,
                        language=Language.get("en"),
                        data={**ep, "slug": ep_slug, "parent_slug": self.slug},
                    )
                )
            return Series(episodes)

    # Movie fallback: derive the display name from the page <title>.
    title_match = re.search(r'<title>([^<]+)</title>', response.text)
    raw_title = title_match.group(1) if title_match else (self.slug or f"Movie {title_id}")
    movie_title = self._clean_title(raw_title, self.slug)

    # Pull the release year out of a "Name (YYYY)" title or the slug suffix.
    year = None
    year_match = re.search(r'\((\d{4})\)\s*$', movie_title)
    if year_match:
        year = int(year_match.group(1))
        movie_title = re.sub(r'\s*\(\d{4}\)\s*$', '', movie_title).strip()
    elif self.slug:
        slug_year = re.search(r'(\d{4})$', self.slug)
        if slug_year:
            year = int(slug_year.group(1))

    return Movies(
        [
            Movie(
                id_=title_id,
                service=self.__class__,
                name=movie_title,
                description=None,
                year=year,
                language=Language.get("en"),
                data={"url": video_url, "slug": self.slug},
            )
        ]
    )
|
||||
|
||||
def get_tracks(self, title: Title_T) -> Tracks:
    """Return all tracks for *title*, narrowed to the requested codec/range.

    NOTE(review): this method was defined twice in the original file with
    identical behavior (the second definition silently shadowed the first);
    the duplicate has been removed. Three unused locals (vcodec_str,
    range_str, video_format) were also dropped.
    """

    def _fetch_variant(
        title: Title_T,
        codec: Optional[Video.Codec],
        range_: Video.Range,
    ) -> Tracks:
        # Fetch the manifest, then narrow the video tracks to the requested
        # codec / dynamic-range combination.
        tracks = self._fetch_manifest(title)

        if codec:
            tracks.videos = [v for v in tracks.videos if v.codec == codec]
        if range_ != Video.Range.SDR:
            tracks.videos = [v for v in tracks.videos if v.range == range_]

        if not tracks.videos:
            raise ValueError(f"No tracks available for {codec} {range_}")

        return tracks

    return self._get_tracks_for_variants(title, _fetch_variant)
|
||||
|
||||
def _fetch_manifest(self, title: Title_T) -> Tracks:
    """Request playback info for *title* and parse its DASH/HLS manifest.

    Side effect: stores the per-title DRM license URLs on
    ``self.license_data`` for the later license requests.
    """
    timestamp = int(time.time())

    # Resolve the slug needed by the play endpoint: episode slug first,
    # then the parent series slug, then whatever was parsed from the CLI.
    slug = title.data.get("slug") if isinstance(title.data, dict) else None
    if not slug:
        slug = title.data.get("parent_slug") if isinstance(title.data, dict) else self.slug

    if not slug:
        match = re.match(self.TITLE_RE, self.title)
        if match:
            slug = match.group("slug")

    if slug:
        play_url = f"https://www.gagaoolala.com/api/v1.0/en/videos/{title.id}/{slug}/play"
    else:
        play_url = f"https://www.gagaoolala.com/api/v1.0/en/videos/{title.id}/play"
        self.log.warning(f"No slug available, attempting play request without slug: {play_url}")

    # Cache-busting timestamp query parameter.
    params = {"t": timestamp}

    response = self.session.get(play_url, params=params)
    response.raise_for_status()

    playback = response.json()
    if not playback.get("success"):
        raise ValueError(f"Failed to get playback info: {playback}")

    data = playback["data"]

    # Remember the license endpoints for get_widevine/playready_license().
    drm_info = data.get("drm")
    if drm_info:
        self.license_data = {
            "widevine": drm_info.get("widevine", {}).get("LA_URL"),
            "playready": drm_info.get("playready", {}).get("LA_URL"),
        }
    else:
        self.license_data = {}

    manifest_url = data.get("dash") or data.get("m3u8")
    if not manifest_url:
        raise ValueError("No manifest URL found in playback response")

    if ".mpd" in manifest_url:
        tracks = DASH.from_url(url=manifest_url, session=self.session).to_tracks(language=title.language)
    elif ".m3u8" in manifest_url:
        # NOTE(review): HLS is not in the visible import block (only DASH
        # is imported) — confirm it is imported elsewhere, otherwise this
        # branch raises NameError at runtime.
        tracks = HLS.from_url(url=manifest_url, session=self.session).to_tracks(language=title.language)
    else:
        raise ValueError(f"Unsupported manifest format: {manifest_url}")

    # HEVC Main10 profiles are tagged HDR10; everything else SDR.
    for video in tracks.videos:
        if video.codec == Video.Codec.HEVC and video.profile and "Main10" in str(video.profile):
            video.range = Video.Range.HDR10
        else:
            video.range = Video.Range.SDR

    return tracks
|
||||
|
||||
def get_chapters(self, title: Title_T) -> list[Chapter]:
    """Chapter markers are not provided by this service code; always empty."""
    chapters: list = []
    return chapters
|
||||
|
||||
def get_widevine_service_certificate(self, **_: any) -> str:
    """Return the configured Widevine service certificate ("" when unset)."""
    certificate = self.config.get("certificate", "")
    return certificate
|
||||
|
||||
def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> Optional[Union[bytes, str]]:
    """POST the Widevine *challenge* to the license server captured by
    _fetch_manifest and return the raw license response body."""
    license_url = self.license_data.get("widevine")
    if not license_url:
        raise ValueError("Widevine license URL not available for this title")

    client_cfg = self.config["client"][self.device]
    headers = {
        "User-Agent": client_cfg.get("license_user_agent", client_cfg["user_agent"]),
        "Content-Type": "application/octet-stream",
    }

    response = self.session.post(
        url=license_url,
        data=challenge,
        headers=headers,
    )
    response.raise_for_status()

    return response.content
|
||||
|
||||
def get_playready_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> Optional[Union[bytes, str]]:
    """POST the PlayReady SOAP *challenge* to the license server captured by
    _fetch_manifest and return the raw license response body."""
    license_url = self.license_data.get("playready")
    if not license_url:
        raise ValueError("PlayReady license URL not available for this title")

    client_cfg = self.config["client"][self.device]
    headers = {
        "User-Agent": client_cfg.get("license_user_agent", client_cfg["user_agent"]),
        "Content-Type": "text/xml",
        "SOAPAction": "http://schemas.microsoft.com/DRM/2007/03/protocols/AcquireLicense",
    }

    response = self.session.post(
        url=license_url,
        data=challenge,
        headers=headers,
    )
    response.raise_for_status()

    return response.content
|
||||
@ -1,16 +0,0 @@
|
||||
# config.yaml for GLA (GagaOOLala)
|
||||
endpoints:
|
||||
login: https://www.gagaoolala.com/en/user/login
|
||||
play: https://www.gagaoolala.com/api/v1.0/en/videos/{title_id}/{slug}/play
|
||||
search: https://www.gagaoolala.com/en/search
|
||||
|
||||
client:
|
||||
firefox_linux:
|
||||
user_agent: "Mozilla/5.0 (X11; Linux x86_64; rv:149.0) Gecko/20100101 Firefox/149.0"
|
||||
license_user_agent: "Mozilla/5.0 (X11; Linux x86_64; rv:149.0) Gecko/20100101 Firefox/149.0"
|
||||
android_tv:
|
||||
user_agent: "Mozilla/5.0 (Linux; Android 10; Android TV) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.120 Safari/537.36"
|
||||
license_user_agent: "ExoPlayerLib/2.18.1"
|
||||
windows_chrome:
|
||||
user_agent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
|
||||
license_user_agent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
|
||||
514
HIDI/__init__.py
514
HIDI/__init__.py
@ -1,514 +0,0 @@
|
||||
import json
|
||||
import re
|
||||
import base64
|
||||
import hashlib
|
||||
import click
|
||||
|
||||
from http.cookiejar import CookieJar
|
||||
from typing import Optional, Iterable
|
||||
from langcodes import Language
|
||||
|
||||
from unshackle.core.constants import AnyTrack
|
||||
from unshackle.core.credential import Credential
|
||||
from unshackle.core.manifests import DASH
|
||||
from unshackle.core.service import Service
|
||||
from unshackle.core.titles import Episode, Series, Movie, Movies, Title_T, Titles_T
|
||||
from unshackle.core.tracks import Chapter, Tracks, Subtitle, Audio, Video
|
||||
from unshackle.core.utilities import import_module_by_path
|
||||
|
||||
|
||||
class HIDI(Service):
|
||||
"""
|
||||
Service code for HiDive (hidive.com)
|
||||
Version: 1.3.2
|
||||
Authorization: Email + password login, with automatic token refresh.
|
||||
Security: FHD@L3
|
||||
|
||||
IMPORTANT: UPDATE YOUR UNSHACKLE TO 2.3.0 TO GET THE NECESSARY FIX FOR THIS SERVICE
|
||||
Also when downloading a series, use the link from the first season of the series
|
||||
"""
|
||||
|
||||
TITLE_RE = r"^https?://(?:www\.)?hidive\.com/(?:season/(?P<season_id>\d+)|playlist/(?P<playlist_id>\d+))$"
|
||||
GEOFENCE = ()
|
||||
NO_SUBTITLES = False
|
||||
API_BASE = "https://dce-frontoffice.imggaming.com/api/v4"
|
||||
|
||||
@staticmethod
@click.command(name="HIDI", short_help="https://hidive.com")
@click.argument("title", type=str)
@click.pass_context
def cli(ctx, **kwargs):
    # CLI entry point: click passes the context plus the parsed arguments,
    # which are forwarded straight into the service constructor.
    return HIDI(ctx, **kwargs)
|
||||
|
||||
def __init__(self, ctx, title: str):
    """Parse the HiDive URL and set up per-run service state."""
    super().__init__(ctx)

    parsed = re.match(self.TITLE_RE, title)
    if not parsed:
        raise ValueError("Unsupported HiDive URL. Use /season/<id> or /playlist/<id>")

    self.season_id = parsed.group("season_id")
    self.playlist_id = parsed.group("playlist_id")
    # /season/<id> URLs are series; /playlist/<id> URLs are movies.
    self.kind = "serie" if self.season_id else "movie"
    self.content_id = int(self.season_id or self.playlist_id)

    if not self.config:
        raise EnvironmentError("Missing HIDI service config.")

    self.cdm = ctx.obj.cdm
    self._auth_token = None
    self._refresh_token = None
    self._drm_cache = {}  # title id -> (jwt_token, license_url)
|
||||
|
||||
def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
    """Log in with email + password and install the bearer token on the session.

    Raises ValueError when credentials are missing and PermissionError on
    a 401 from the login endpoint.
    """
    # Headers required by the IMG Gaming (DCE) front-office API.
    self.session.headers.update({
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:143.0) Gecko/20100101 Firefox/143.0",
        "Accept": "application/json, text/plain, */*",
        "Accept-Language": "en-US",
        "Referer": "https://www.hidive.com/",
        "Origin": "https://www.hidive.com",
        "x-api-key": self.config["x_api_key"],
        "app": "dice",
        "Realm": "dce.hidive",
        "x-app-var": self.config["x_app_var"],
    })

    if not (credential and credential.username and credential.password):
        raise ValueError("HiDive requires email + password")

    login_response = self.session.post(
        self.config["endpoints"]["login"],
        json={"id": credential.username, "secret": credential.password}
    )
    if login_response.status_code == 401:
        raise PermissionError("Invalid email or password.")
    login_response.raise_for_status()

    tokens = login_response.json()
    self._auth_token = tokens["authorisationToken"]
    self._refresh_token = tokens["refreshToken"]

    self.session.headers["Authorization"] = f"Bearer {self._auth_token}"
    self.log.info("HiDive login successful.")
|
||||
|
||||
def _refresh_auth(self):
|
||||
if not self._refresh_token:
|
||||
raise PermissionError("No refresh token available to renew session.")
|
||||
|
||||
self.log.warning("Auth token expired, refreshing...")
|
||||
r = self.session.post(
|
||||
self.config["endpoints"]["refresh"],
|
||||
json={"refreshToken": self._refresh_token}
|
||||
)
|
||||
if r.status_code == 401:
|
||||
raise PermissionError("Refresh token is invalid. Please log in again.")
|
||||
r.raise_for_status()
|
||||
|
||||
data = r.json()
|
||||
self._auth_token = data["authorisationToken"]
|
||||
self.session.headers["Authorization"] = f"Bearer {self._auth_token}"
|
||||
self.log.info("Auth token refreshed successfully.")
|
||||
|
||||
def _api_get(self, url, **kwargs):
|
||||
resp = self.session.get(url, **kwargs)
|
||||
if resp.status_code == 401:
|
||||
self._refresh_auth()
|
||||
resp = self.session.get(url, **kwargs)
|
||||
resp.raise_for_status()
|
||||
return resp
|
||||
|
||||
def _fetch_season_data(self, season_id: int) -> dict:
|
||||
"""Fetch season view data."""
|
||||
return self._api_get(
|
||||
self.config["endpoints"]["view"],
|
||||
params={
|
||||
"type": "season",
|
||||
"id": season_id,
|
||||
"timezone": "Europe/Amsterdam"
|
||||
}
|
||||
).json()
|
||||
|
||||
def _fetch_adjacent_seasons(self, series_id: int, season_id: int) -> dict:
|
||||
"""Fetch all seasons in a series using adjacentTo endpoint."""
|
||||
url = f"{self.API_BASE}/series/{series_id}/adjacentTo/{season_id}"
|
||||
return self._api_get(url, params={"size": 25}).json()
|
||||
|
||||
def _extract_series_info(self, season_data: dict) -> tuple[Optional[int], Optional[str]]:
|
||||
"""
|
||||
Extract series ID and title from season data.
|
||||
Checks multiple locations in the JSON structure.
|
||||
"""
|
||||
series_id = None
|
||||
series_title = None
|
||||
|
||||
# Method 1: Check metadata.series
|
||||
metadata = season_data.get("metadata", {})
|
||||
if metadata.get("series"):
|
||||
series_id = metadata["series"].get("seriesId")
|
||||
series_title = metadata["series"].get("title")
|
||||
if series_id:
|
||||
return series_id, series_title
|
||||
|
||||
# Method 2: Check elements for $type: "series"
|
||||
for elem in season_data.get("elements", []):
|
||||
if elem.get("$type") == "series":
|
||||
attrs = elem.get("attributes", {})
|
||||
series_id = attrs.get("id")
|
||||
series_info = attrs.get("series", {})
|
||||
series_title = series_info.get("title") or series_title
|
||||
if series_id:
|
||||
return series_id, series_title
|
||||
|
||||
# Method 3: Check bucket elements for seriesId
|
||||
for elem in season_data.get("elements", []):
|
||||
if elem.get("$type") == "bucket":
|
||||
attrs = elem.get("attributes", {})
|
||||
if attrs.get("seriesId"):
|
||||
series_id = attrs["seriesId"]
|
||||
return series_id, series_title
|
||||
|
||||
# Method 4: Check hero actions for seriesId
|
||||
for elem in season_data.get("elements", []):
|
||||
if elem.get("$type") == "hero":
|
||||
for action in elem.get("attributes", {}).get("actions", []):
|
||||
action_data = action.get("attributes", {}).get("action", {}).get("data", {})
|
||||
if action_data.get("seriesId"):
|
||||
series_id = action_data["seriesId"]
|
||||
return series_id, series_title
|
||||
|
||||
return series_id, series_title
|
||||
|
||||
def _extract_season_number(self, season_data: dict) -> int:
|
||||
"""Extract season number from season data."""
|
||||
# Check metadata.currentSeason
|
||||
metadata = season_data.get("metadata", {})
|
||||
current_season = metadata.get("currentSeason", {})
|
||||
if current_season.get("title"):
|
||||
# Parse "Season 2" -> 2
|
||||
title = current_season["title"]
|
||||
if title.lower().startswith("season "):
|
||||
try:
|
||||
return int(title.split(" ")[1])
|
||||
except (ValueError, IndexError):
|
||||
pass
|
||||
|
||||
# Check elements for series type with seasons info
|
||||
for elem in season_data.get("elements", []):
|
||||
if elem.get("$type") == "series":
|
||||
seasons_items = elem.get("attributes", {}).get("seasons", {}).get("items", [])
|
||||
for item in seasons_items:
|
||||
if item.get("seasonNumber"):
|
||||
return item["seasonNumber"]
|
||||
|
||||
# Check bucket title
|
||||
for elem in season_data.get("elements", []):
|
||||
if elem.get("$type") == "bucket" and elem.get("attributes", {}).get("type") == "season":
|
||||
bucket_title = elem.get("attributes", {}).get("bucketTitle", "")
|
||||
if bucket_title.lower().startswith("season "):
|
||||
try:
|
||||
return int(bucket_title.split(" ")[1])
|
||||
except (ValueError, IndexError):
|
||||
pass
|
||||
|
||||
return 1
|
||||
|
||||
def _parse_episodes_from_season(self, season_data: dict, series_title: str, season_number: int) -> list[Episode]:
    """Build Episode objects from the first season bucket in *season_data*."""
    parsed = []

    for element in season_data.get("elements", []):
        attrs = element.get("attributes", {})
        if element.get("$type") != "bucket" or attrs.get("type") != "season":
            continue

        for position, item in enumerate(attrs.get("items", [])):
            if item.get("type") != "SEASON_VOD":
                continue

            label = item.get("title", "")
            number = position + 1
            # Titles usually look like "E1 - Name"; trust that number
            # over the positional index when it parses.
            if label.startswith("E") and " - " in label:
                try:
                    number = int(label.split(" - ")[0][1:])
                except ValueError:
                    pass

            parsed.append(Episode(
                id_=item["id"],
                service=self.__class__,
                title=series_title,
                season=season_number,
                number=number,
                name=label,
                description=item.get("description", ""),
                language=Language.get("ja"),
                data=item,
            ))
        # Only the first season bucket belongs to this season view.
        break

    return parsed
|
||||
|
||||
def get_titles(self) -> Titles_T:
    """Resolve the CLI URL into a Movies (playlist) or Series (season) listing.

    For series, sibling seasons are discovered via the adjacentTo endpoint
    so the whole show is returned, not just the anchor season.
    """
    anchor_data = self._fetch_season_data(self.content_id)

    if self.kind == "movie":
        vod_id = None
        movie_title = None
        description = ""

        # Movies are playlists: title/description come from the hero
        # element, the actual VOD id from the playlist bucket.
        for elem in anchor_data.get("elements", []):
            if elem.get("$type") == "hero":
                hdr = (elem.get("attributes", {}).get("header", {}) or {}).get("attributes", {})
                movie_title = hdr.get("text", movie_title)
                for c in elem.get("attributes", {}).get("content", []):
                    if c.get("$type") == "textblock":
                        description = c.get("attributes", {}).get("text", description)

            if elem.get("$type") == "bucket" and elem.get("attributes", {}).get("type") == "playlist":
                items = elem.get("attributes", {}).get("items", [])
                if items:
                    vod_id = items[0]["id"]
                    movie_title = movie_title or items[0].get("title")
                    description = description or items[0].get("description", "")
                break

        if not vod_id:
            raise ValueError("No VOD found in playlist data.")

        return Movies([
            Movie(
                id_=vod_id,
                service=self.__class__,
                name=movie_title or "Unknown Title",
                description=description or "",
                year=None,
                language=Language.get("en"),
                data={"playlistId": self.content_id}
            )
        ])

    # Series path: identify the parent series, then walk all its seasons.
    series_id, series_title = self._extract_series_info(anchor_data)
    series_title = series_title or "HiDive Series"
    anchor_season_num = self._extract_season_number(anchor_data)

    if not series_id:
        self.log.warning("Could not determine Series ID. Fetching single season only.")
        episodes = self._parse_episodes_from_season(anchor_data, series_title, anchor_season_num)
        return Series(episodes)

    try:
        adj_data = self._fetch_adjacent_seasons(series_id, self.content_id)
    except Exception as e:
        self.log.warning(f"Failed to fetch adjacent seasons: {e}. Falling back to single season.")
        episodes = self._parse_episodes_from_season(anchor_data, series_title, anchor_season_num)
        return Series(episodes)

    # Build list of all seasons
    all_seasons = []

    # Preceding seasons (these come before current season)
    for s in adj_data.get("precedingSeasons", []):
        all_seasons.append({
            "id": s["id"],
            "seasonNumber": s.get("seasonNumber", 0),
            "title": s.get("title", "")
        })

    # Current/Anchor season
    all_seasons.append({
        "id": self.content_id,
        "seasonNumber": anchor_season_num,
        "title": f"Season {anchor_season_num}",
        "_data": anchor_data  # Cache to avoid re-fetching
    })

    # Following seasons (these come after current season)
    for s in adj_data.get("followingSeasons", []):
        all_seasons.append({
            "id": s["id"],
            "seasonNumber": s.get("seasonNumber", 0),
            "title": s.get("title", "")
        })

    # Deduplicate by ID and sort by season number
    unique_seasons = {}
    for s in all_seasons:
        s_id = s["id"]
        if s_id not in unique_seasons:
            unique_seasons[s_id] = s
        elif "_data" in s:
            # Prefer the one with cached data
            unique_seasons[s_id] = s

    sorted_seasons = sorted(unique_seasons.values(), key=lambda x: x["seasonNumber"])

    all_episodes = []

    for season_info in sorted_seasons:
        s_id = season_info["id"]
        s_num = season_info["seasonNumber"]

        if "_data" in season_info:
            self.log.info(f"Processing Season {s_num} (ID: {s_id}) [cached]")
            season_data = season_info["_data"]
        else:
            self.log.info(f"Fetching Season {s_num} (ID: {s_id})")
            try:
                season_data = self._fetch_season_data(s_id)
            except Exception as e:
                # A single bad season should not abort the whole series.
                self.log.error(f"Failed to fetch Season {s_num}: {e}")
                continue

        episodes = self._parse_episodes_from_season(season_data, series_title, s_num)
        self.log.info(f"  Found {len(episodes)} episodes")
        all_episodes.extend(episodes)

    if not all_episodes:
        raise ValueError("No episodes found across all seasons.")

    return Series(all_episodes)
|
||||
|
||||
def _get_audio_for_langs(self, mpd_url: str, langs: Iterable[Language]) -> list[Audio]:
    """Parse the DASH manifest at *mpd_url* and return the audio tracks whose
    language matches any of *langs* (deduplicated by language/codec/bitrate).

    Falls back to all available audio tracks when nothing matches.

    FIX: *langs* is typed Iterable but was previously indexed (``langs[0]``),
    truth-tested, and iterated twice — all of which break for one-shot
    iterators such as generators. It is now materialized to a list first.
    """
    langs = list(langs)

    merged: list[Audio] = []
    seen = set()

    # Use first available language as fallback, or "en" as ultimate fallback
    fallback_lang = langs[0] if langs else Language.get("en")

    dash = DASH.from_url(mpd_url, session=self.session)
    try:
        # Parse with a valid fallback language
        base_tracks = dash.to_tracks(language=fallback_lang)
    except Exception:
        # Try with English as ultimate fallback
        base_tracks = dash.to_tracks(language=Language.get("en"))

    all_audio = base_tracks.audio or []

    for lang in langs:
        # Match by language prefix (e.g. en, ja)
        for audio in all_audio:
            lang_code = getattr(audio.language, "language", "en")
            if lang_code.startswith(lang.language[:2]):
                key = (lang_code, getattr(audio, "codec", None), getattr(audio, "bitrate", None))
                if key in seen:
                    continue
                merged.append(audio)
                seen.add(key)

    # If nothing matched, just return all available audio tracks
    if not merged and all_audio:
        merged = all_audio

    return merged
|
||||
|
||||
|
||||
def get_tracks(self, title: Title_T) -> Tracks:
    """Fetch the DASH manifest for *title* and assemble video/audio/subtitle
    tracks.

    Side effect: caches (jwt_token, license_url) in ``self._drm_cache``
    for the later Widevine license request.
    """
    vod_resp = self._api_get(
        self.config["endpoints"]["vod"].format(vod_id=title.id),
        params={"includePlaybackDetails": "URL"},
    )
    vod = vod_resp.json()

    playback_url = vod.get("playerUrlCallback")
    if not playback_url:
        raise ValueError("No playback URL found.")

    stream_data = self._api_get(playback_url).json()
    dash_list = stream_data.get("dash", [])
    if not dash_list:
        raise ValueError("No DASH streams available.")

    # Only the first DASH entry is used.
    entry = dash_list[0]
    mpd_url = entry["url"]

    # Collect available HiDive metadata languages
    meta_audio_tracks = vod.get("onlinePlaybackMetadata", {}).get("audioTracks", [])
    available_langs = []
    for m in meta_audio_tracks:
        lang_code = (m.get("languageCode") or "").split("-")[0]
        if not lang_code:
            continue
        try:
            available_langs.append(Language.get(lang_code))
        except Exception:
            continue

    # Use first available language as fallback, or English as ultimate fallback
    fallback_lang = available_langs[0] if available_langs else Language.get("en")

    # Parse DASH manifest with a valid fallback language
    base_tracks = DASH.from_url(mpd_url, session=self.session).to_tracks(language=fallback_lang)

    audio_tracks = self._get_audio_for_langs(mpd_url, available_langs)

    # Map metadata labels onto the parsed audio tracks, and flag the one
    # matching the title's language as the original-language track.
    meta_audio_map = {m.get("languageCode", "").split("-")[0]: m.get("label") for m in meta_audio_tracks}
    for a in audio_tracks:
        lang_code = getattr(a.language, "language", "en")
        a.name = meta_audio_map.get(lang_code, lang_code)
        a.is_original_lang = (lang_code == title.language.language)

    base_tracks.audio = audio_tracks

    # Subtitles: only WebVTT entries are taken.
    subtitles = []
    for sub in entry.get("subtitles", []):
        if sub.get("format", "").lower() != "vtt":
            continue
        lang_code = sub.get("language", "en").replace("-", "_")
        try:
            lang = Language.get(lang_code)
        except Exception:
            lang = Language.get("en")
        subtitles.append(Subtitle(
            id_=f"{lang_code}:vtt",
            url=sub.get("url"),
            language=lang,
            codec=Subtitle.Codec.WebVTT,
            name=lang.language_name(),
        ))
    base_tracks.subtitles = subtitles

    # DRM info: cache the JWT + license URL for get_widevine_license().
    drm = entry.get("drm", {}) or {}
    jwt = drm.get("jwtToken")
    lic_url = (drm.get("url") or "").strip()
    if jwt and lic_url:
        self._drm_cache[title.id] = (jwt, lic_url)

    return base_tracks
|
||||
|
||||
def _hidive_get_drm_info(self, title: Title_T) -> tuple[str, str]:
    """Return (jwt_token, license_url) for *title*, populating the cache
    via get_tracks() when necessary."""
    cached = self._drm_cache.get(title.id)
    if cached is not None:
        return cached
    # get_tracks() stores the DRM pair in the cache as a side effect.
    self.get_tracks(title)
    if title.id not in self._drm_cache:
        raise ValueError("DRM information not found for this title.")
    return self._drm_cache[title.id]
|
||||
|
||||
def _decode_hidive_license_payload(self, payload: bytes) -> bytes:
|
||||
text = payload.decode("utf-8", errors="ignore")
|
||||
prefix = "data:application/octet-stream;base64,"
|
||||
if text.startswith(prefix):
|
||||
b64 = text.split(",", 1)[1]
|
||||
return base64.b64decode(b64)
|
||||
return payload
|
||||
|
||||
def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes | str | None:
    """POST the Widevine *challenge* to HiDive's license server and return
    the decoded license payload."""
    jwt_token, license_url = self._hidive_get_drm_info(title)

    request_headers = {
        "Authorization": f"Bearer {jwt_token}",
        "Content-Type": "application/octet-stream",
        "Accept": "*/*",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/142.0.0.0 Safari/537.36",
        "Origin": "https://www.hidive.com",
        "Referer": "https://www.hidive.com/",
        # Base64 of {"system":"com.widevine.alpha"} — selects the DRM system.
        "X-DRM-INFO": "eyJzeXN0ZW0iOiJjb20ud2lkZXZpbmUuYWxwaGEifQ==",
    }

    license_response = self.session.post(license_url, data=challenge, headers=request_headers, timeout=30)
    license_response.raise_for_status()
    return self._decode_hidive_license_payload(license_response.content)
|
||||
|
||||
def get_chapters(self, title: Title_T) -> list[Chapter]:
    """No chapter information is fetched for HiDive titles; always empty."""
    return list()
|
||||
@ -1,10 +0,0 @@
|
||||
x_api_key: "857a1e5d-e35e-4fdf-805b-a87b6f8364bf"
|
||||
x_app_var: "6.59.1.e16cdfd"
|
||||
|
||||
endpoints:
|
||||
init: "https://dce-frontoffice.imggaming.com/api/v1/init/"
|
||||
login: "https://dce-frontoffice.imggaming.com/api/v2/login"
|
||||
vod: "https://dce-frontoffice.imggaming.com/api/v4/vod/{vod_id}?includePlaybackDetails=URL"
|
||||
adjacent: "https://dce-frontoffice.imggaming.com/api/v4/vod/{vod_id}/adjacent"
|
||||
view: "https://dce-frontoffice.imggaming.com/api/v1/view" # Changed from season_view
|
||||
refresh: "https://dce-frontoffice.imggaming.com/api/v2/token/refresh"
|
||||
509
HPLA/__init__.py
509
HPLA/__init__.py
@ -1,509 +0,0 @@
|
||||
import base64
|
||||
import hashlib
|
||||
import json
|
||||
import re
|
||||
from typing import Optional, Union, Generator
|
||||
|
||||
import click
|
||||
from langcodes import Language
|
||||
from lxml import etree
|
||||
|
||||
from unshackle.core.constants import AnyTrack
|
||||
from unshackle.core.credential import Credential
|
||||
from unshackle.core.manifests import DASH
|
||||
from unshackle.core.search_result import SearchResult
|
||||
from unshackle.core.service import Service
|
||||
from unshackle.core.titles import Movie, Movies, Title_T, Titles_T, Song, Album
|
||||
from unshackle.core.tracks import Chapter, Subtitle, Tracks, Audio
|
||||
|
||||
|
||||
class HPLA(Service):
|
||||
"""
|
||||
Service code for Hoopla Digital (https://www.hoopladigital.com)
|
||||
Version: 1.0.7
|
||||
|
||||
Authorization: Credentials (Email & Password)
|
||||
|
||||
Security:
|
||||
- SL2K/SL3K/L1/L3: SD/360p
|
||||
|
||||
They are using the license server of DRMToday with encoded streams from CastLabs.
|
||||
Supports movie and music (but kinda broken) at the moment
|
||||
Television kinda sucks since you need to borrow it one by one, idk why people would want this shit quality series anyways
|
||||
|
||||
Use full URL (for example - https://www.hoopladigital.com/movie/title-name/10979706) or content ID.
|
||||
"""
|
||||
|
||||
ALIASES = ("HPLA", "hoopla")
|
||||
TITLE_RE = r"^(?:https?://(?:www\.)?hoopladigital\.com/[^/]*/[^/]*/)?(?P<title_id>\d+)"
|
||||
GEOFENCE = ("US",)
|
||||
|
||||
@staticmethod
|
||||
@click.command(name="HPLA", short_help="https://www.hoopladigital.com")
|
||||
@click.argument("title", type=str)
|
||||
@click.option("-m", "--movie", is_flag=True, default=False, help="Specify if it's a movie")
|
||||
@click.pass_context
|
||||
def cli(ctx, **kwargs):
|
||||
return HPLA(ctx, **kwargs)
|
||||
|
||||
def __init__(self, ctx, title, movie):
|
||||
super().__init__(ctx)
|
||||
self.title = title
|
||||
self.movie = movie
|
||||
|
||||
if self.config is None:
|
||||
raise Exception("Config is missing!")
|
||||
|
||||
profile_name = ctx.parent.params.get("profile")
|
||||
self.profile = profile_name if profile_name else "default"
|
||||
|
||||
self.platform = self.config["platform"]["amazon"]
|
||||
|
||||
def authenticate(self, cookies: Optional[any] = None, credential: Optional[Credential] = None) -> None:
|
||||
super().authenticate(cookies, credential)
|
||||
if not credential or not credential.username or not credential.password:
|
||||
raise EnvironmentError("Service requires Credentials for Authentication.")
|
||||
|
||||
self.credential = credential
|
||||
|
||||
self.session.headers.update(self.platform["headers"])
|
||||
|
||||
cache_key = f"tokens_{self.profile}"
|
||||
|
||||
cache = self.cache.get(cache_key)
|
||||
|
||||
if cache and not cache.expired:
|
||||
cached_data = cache.data
|
||||
if isinstance(cached_data, dict) and cached_data.get("username") == credential.username:
|
||||
self.log.info("Using cached tokens")
|
||||
self._restore_from_cache(cached_data)
|
||||
return
|
||||
|
||||
self.log.info("Logging in...")
|
||||
self._do_login(credential)
|
||||
|
||||
self._cache_tokens(credential.username, cache_key)
|
||||
|
||||
def _restore_from_cache(self, cached_data: dict) -> None:
|
||||
"""Restore authentication state from cached data."""
|
||||
self.access_token = cached_data["access_token"]
|
||||
self.patron_id = cached_data["patron_id"]
|
||||
self.session.headers.update({
|
||||
"Authorization": f"Bearer {self.access_token}",
|
||||
"patron-id": self.patron_id,
|
||||
})
|
||||
|
||||
def _cache_tokens(self, username: str, cache_key: str) -> None:
|
||||
"""Cache the current authentication tokens."""
|
||||
cache = self.cache.get(cache_key)
|
||||
cache.set(
|
||||
data={
|
||||
"username": username,
|
||||
"access_token": self.access_token,
|
||||
"patron_id": self.patron_id,
|
||||
},
|
||||
expiration=3600
|
||||
)
|
||||
|
||||
def _is_music_mpd(self, mpd: etree._Element) -> bool:
|
||||
"""
|
||||
Detect if MPD represents a single-file music asset.
|
||||
"""
|
||||
adaptation_sets = mpd.findall(".//AdaptationSet")
|
||||
|
||||
for aset in adaptation_sets:
|
||||
if aset.get("contentType") == "video":
|
||||
return False
|
||||
|
||||
audio_reps = mpd.findall(".//AdaptationSet[@contentType='audio']/Representation")
|
||||
if len(audio_reps) != 1:
|
||||
return False
|
||||
|
||||
if mpd.find(".//SegmentTemplate") is not None:
|
||||
return False
|
||||
|
||||
return mpd.find(".//BaseURL") is not None
|
||||
|
||||
def _extract_music_audio(self, mpd: etree._Element, manifest_url: str) -> str:
|
||||
base = mpd.find(".//BaseURL")
|
||||
if base is None or not base.text:
|
||||
raise ValueError("Music MPD has no BaseURL")
|
||||
|
||||
return manifest_url.rsplit("/", 1)[0] + "/" + base.text
|
||||
|
||||
|
||||
    def _do_login(self, credential: Credential) -> None:
        """Perform the full login flow: exchange credentials for a bearer
        token, then resolve the account's patron id via GraphQL.

        Side effects: sets ``self.access_token`` / ``self.patron_id`` and
        installs the matching headers on the session.

        Raises:
            EnvironmentError: If the credential exchange is rejected.
        """
        # Step 1: Get Bearer Token
        login_response = self.session.post(
            url=self.config["endpoints"]["login"],
            data={
                "username": credential.username,
                "password": credential.password,
            },
            headers={"Content-Type": "application/x-www-form-urlencoded"}
        ).json()

        if login_response.get("tokenStatus") != "SUCCESS":
            raise EnvironmentError(f"Login failed: {login_response.get('tokenStatus', 'Unknown error')}")

        self.access_token = login_response["token"]
        self.session.headers.update({"Authorization": f"Bearer {self.access_token}"})

        # Step 2: Get Patron ID — needed as a header on all subsequent API calls.
        self.log.info("Fetching Patron ID...")
        query = 'query { patron { id email } }'
        patron_data = self.session.post(
            url=self.config["endpoints"]["graphql"],
            json={"query": query},
            headers={"Content-Type": "application/json"}
        ).json()

        self.patron_id = patron_data["data"]["patron"]["id"]
        self.session.headers.update({"patron-id": self.patron_id})
        self.log.debug(f"Logged in as Patron ID: {self.patron_id}")
|
||||
    def search(self) -> Generator[SearchResult, None, None]:
        """Search the Hoopla catalogue for the CLI title string.

        Queries the GraphQL search endpoint (first page, 48 results) and
        yields one SearchResult per hit, mapping Hoopla's internal kind
        names to friendlier labels.
        """
        query = """
        query GetFilterSearchQuery($criteria: SearchCriteria!, $sort: Sort) {
            search(criteria: $criteria, sort: $sort) {
                hits {
                    id
                    title
                    kind { name }
                }
            }
        }
        """

        payload = {
            "operationName": "GetFilterSearchQuery",
            "variables": {
                "criteria": {
                    "q": self.title,
                    "availability": "ALL_TITLES",
                    "pagination": {
                        "page": 1,
                        "pageSize": 48,
                    },
                }
            },
            "query": query,
        }

        resp = self.session.post(
            self.config["endpoints"]["graphql"],
            json=payload,
            headers={"Content-Type": "application/json"},
        ).json()

        # Defensive chained .get()s: a failed search yields an empty list
        # rather than a KeyError.
        hits = (
            resp
            .get("data", {})
            .get("search", {})
            .get("hits", [])
        )

        for hit in hits:
            kind = hit["kind"]["name"]

            # Map internal kind names to user-facing labels; unknown kinds
            # pass through unchanged.
            label = {
                "MOVIE": "MOVIE",
                "TVSHOW": "SERIES",
                "MUSIC": "ALBUM",
                "AUDIOBOOK": "AUDIOBOOK",
                "EBOOK": "BOOK",
                "COMIC": "COMIC",
            }.get(kind, kind)

            yield SearchResult(
                id_=hit["id"],
                title=hit["title"],
                label=label,
                url=f"https://www.hoopladigital.com/title/{hit['id']}",
            )
|
||||
    def get_titles(self) -> Titles_T:
        """Resolve the CLI title argument into Movie or Album/Song objects.

        Looks the content up via GraphQL, verifies it is currently
        borrowed (a "circulation" record exists), then builds titles for
        the MOVIE and MUSIC kinds.

        Raises:
            ValueError: On malformed input, unknown content, unborrowed
                titles, or unsupported content kinds.
        """
        title_match = re.match(self.TITLE_RE, self.title)
        if not title_match:
            raise ValueError(f"Invalid title format: {self.title}")

        content_id = title_match.group("title_id")

        # content_id is purely numeric (enforced by TITLE_RE), so plain
        # %-interpolation into the query is safe here.
        query = """
        query {
            contents(criteria:{contentIds:[%s]}) {
                contents {
                    id
                    title
                    kind { id name }
                    mediaKey
                    circulation { id dueDate }
                    year
                    seconds
                    primaryArtist { name }
                    tracks {
                        id
                        mediaKey
                        name
                        seconds
                        segmentNumber
                    }
                }
            }
        }
        """ % content_id

        data = self.session.post(
            url=self.config["endpoints"]["graphql"],
            json={"query": query},
            headers={"Content-Type": "application/json"}
        ).json()

        contents = data.get("data", {}).get("contents", {}).get("contents", [])
        if not contents:
            raise ValueError("Content not found")

        meta = contents[0]
        kind_name = meta["kind"]["name"]

        # A circulation record means the title is currently borrowed on
        # this account; downloading requires an active loan.
        if not meta.get("circulation"):
            raise ValueError("You must borrow this title on your Hoopla account before downloading.")

        if kind_name == "MOVIE":
            return Movies([
                Movie(
                    id_=meta["id"],
                    service=self.__class__,
                    name=meta["title"],
                    year=int(meta["year"]) if meta.get("year") else None,
                    # NOTE(review): language is hard-coded to English — confirm
                    # the API exposes no language metadata for movies.
                    language=Language.get("en"),
                    data={
                        "mediaKey": meta["mediaKey"],
                        "circulationId": meta["circulation"]["id"],
                        "is_music": False,
                    },
                )
            ])

        elif kind_name == "MUSIC":
            if not meta.get("tracks"):
                # Single-track album? Use main mediaKey
                songs = [
                    Song(
                        id_=meta["id"],
                        service=self.__class__,
                        name=meta["title"],
                        artist=meta.get("primaryArtist", {}).get("name", "Unknown Artist"),
                        album=meta["title"],
                        track=1,
                        disc=1,
                        year=int(meta["year"]) if meta.get("year") else None,
                        data={
                            "mediaKey": meta["mediaKey"],
                            "circulationId": meta["circulation"]["id"],
                            "is_music": True,
                        }
                    )
                ]
            else:
                songs = []
                for idx, track in enumerate(meta["tracks"], start=1):
                    songs.append(
                        Song(
                            id_=track["id"],
                            service=self.__class__,
                            name=track["name"],
                            artist=meta.get("primaryArtist", {}).get("name", "Unknown Artist"),
                            album=meta["title"],
                            # Prefer the API's track number; fall back to
                            # the enumeration position.
                            track=track.get("segmentNumber", idx),
                            disc=1,
                            year=int(meta["year"]) if meta.get("year") else None,
                            data={
                                "mediaKey": track["mediaKey"],  # per-track mediaKey, not the album's
                                "circulationId": meta["circulation"]["id"],
                                "is_music": True,
                            }
                        )
                    )
            # NOTE(review): returns Album(...) here vs Movies(...) above —
            # confirm Album satisfies Titles_T in this unshackle version.
            return Album(songs)

        else:
            raise ValueError(f"Unsupported content type: {kind_name}. Only MOVIE and MUSIC are supported.")
|
||||
    def get_tracks(self, title: Title_T) -> Tracks:
        """Fetch DRM tokens and the DASH manifest for *title* and return
        its tracks.

        Side effects: stores ``asset_id``, ``auth_token`` and
        ``custom_data`` on the instance; the later license methods
        depend on them.
        """
        media_key = title.data["mediaKey"]
        circulation_id = title.data["circulationId"]

        # --- DRM bootstrap ---
        # CastLabs asset id for this media item (plain-text response).
        self.asset_id = self.session.get(
            self.config["endpoints"]["license_asset"].format(media_key=media_key)
        ).text.strip()

        # Upfront auth token (a JWT) tied to this patron and loan.
        self.auth_token = self.session.get(
            self.config["endpoints"]["license_token"].format(
                media_key=media_key,
                patron_id=self.patron_id,
                circulation_id=circulation_id,
            )
        ).text.strip()

        self.custom_data = self._extract_custom_data(self.auth_token)

        manifest_url = self.config["endpoints"]["manifest"].format(media_key=media_key)
        mpd_xml = self.session.get(manifest_url).text
        # unshackle's DASH parser expects un-namespaced tags.
        mpd_xml = self._strip_namespaces(mpd_xml)
        # NOTE(review): `etree` is presumably `lxml.etree` imported at the
        # top of the file — not visible in this chunk, confirm.
        mpd = etree.fromstring(mpd_xml.encode("utf-8"))

        if self._is_music_mpd(mpd):
            self.log.info("Detected Hoopla music MPD")

            audio_url = self._extract_music_audio(mpd, manifest_url)

            # Music assets are plain unencrypted AAC files; build a single
            # audio track by hand instead of going through the DASH parser.
            tracks = Tracks()
            tracks.add(
                Audio(
                    url=audio_url,
                    drm=[],
                    codec=Audio.Codec.AAC,
                    language=title.language or "en",
                    # NOTE(review): stereo is assumed — the MPD channel
                    # config is not inspected here.
                    channels=2,
                )
            )
            return tracks

        self.log.info("Detected Hoopla movie MPD")

        tracks = DASH(mpd, manifest_url).to_tracks(
            language=title.language or Language.get("en")
        )

        self._add_subtitles(tracks, manifest_url, media_key)
        return tracks
|
||||
def _strip_namespaces(self, xml_string: str) -> str:
|
||||
"""
|
||||
Strip namespace declarations and prefixes from XML string.
|
||||
This is needed because unshackle's DASH parser expects plain 'MPD' tag,
|
||||
not '{urn:mpeg:dash:schema:mpd:2011}MPD'.
|
||||
"""
|
||||
# Remove xmlns declarations (both default and prefixed)
|
||||
xml_string = re.sub(r'\s+xmlns(:\w+)?="[^"]+"', '', xml_string)
|
||||
|
||||
# Remove namespace prefixes from element tags (e.g., <cenc:pssh> -> <pssh>)
|
||||
xml_string = re.sub(r'<(/?)(\w+):', r'<\1', xml_string)
|
||||
|
||||
# Remove namespace prefixes from attributes (e.g., cenc:default_KID -> default_KID)
|
||||
xml_string = re.sub(r'\s+\w+:(\w+)=', r' \1=', xml_string)
|
||||
|
||||
# Remove urn: prefixed attributes entirely (e.g., urn:assetId="...")
|
||||
xml_string = re.sub(r'\s+urn:\w+="[^"]+"', '', xml_string)
|
||||
|
||||
return xml_string
|
||||
|
||||
def _extract_custom_data(self, jwt_token: str) -> str:
|
||||
"""Extract and encode optData from JWT for dt-custom-data header."""
|
||||
try:
|
||||
jwt_parts = jwt_token.split(".")
|
||||
padded_payload = jwt_parts[1] + "=" * (-len(jwt_parts[1]) % 4)
|
||||
payload_json = json.loads(base64.urlsafe_b64decode(padded_payload))
|
||||
|
||||
opt_data_str = payload_json.get("optData")
|
||||
if not opt_data_str:
|
||||
raise ValueError("optData not found in JWT")
|
||||
|
||||
return base64.b64encode(opt_data_str.encode("utf-8")).decode("utf-8")
|
||||
|
||||
except Exception as e:
|
||||
raise ValueError(f"Failed to process license token: {e}")
|
||||
|
||||
    def _add_subtitles(self, tracks: Tracks, manifest_url: str, media_key: str) -> None:
        """Probe for an English SDH WebVTT sidecar next to the manifest and
        add it to *tracks* if found.

        Best effort: missing files and network errors are silently ignored.
        """
        base_url = manifest_url.rsplit('/', 1)[0]

        # NOTE(review): this numeric suffix looks asset-specific, yet it is
        # hard-coded — confirm it actually matches other titles' VTT paths.
        vtt_patterns = [
            f"{base_url}/{media_key}-8784525650515056532-en/{media_key}-8784525650515056532-en.vtt",
        ]

        for vtt_url in vtt_patterns:
            try:
                # HEAD request: check existence without downloading the file.
                response = self.session.head(vtt_url)
                if response.status_code == 200:
                    tracks.add(
                        Subtitle(
                            # Short stable id derived from the URL.
                            # NOTE(review): `hashlib` is not among the imports
                            # visible in this chunk — confirm it is imported.
                            id_=hashlib.md5(vtt_url.encode()).hexdigest()[0:6],
                            url=vtt_url,
                            codec=Subtitle.Codec.WebVTT,
                            language=Language.get("en"),
                            sdh=True,
                        )
                    )
                    break
            except Exception:
                # Deliberate best-effort: missing subtitles are not fatal.
                pass
|
||||
def get_chapters(self, title: Title_T) -> list[Chapter]:
|
||||
return []
|
||||
|
||||
def get_widevine_service_certificate(self, **_) -> Optional[str]:
|
||||
return self.config.get("certificate")
|
||||
|
||||
    def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> Optional[Union[bytes, str]]:
        """POST the Widevine challenge to DRMtoday and return the license.

        Requires get_tracks() to have run first — it sets asset_id,
        auth_token and custom_data on the instance.

        Raises:
            ValueError: On a non-200 license server response.
        """
        response = self.session.post(
            url=self.config["endpoints"]["license_wv"],
            params={
                "logRequestId": "unshackle",
                "assetId": self.asset_id,
            },
            headers={
                # DRMtoday auth pair: base64-encoded optData + upfront JWT.
                "dt-custom-data": self.custom_data,
                "x-dt-auth-token": self.auth_token,
                "Content-Type": "text/xml",
            },
            data=challenge,
        )

        if response.status_code != 200:
            self.log.error(f"License Error: {response.text}")
            raise ValueError(f"Failed to get Widevine license: {response.status_code}")

        # The license proxy wraps the raw license in JSON under "license".
        return response.json().get("license")
|
||||
    def get_playready_license(self, *, challenge: bytes | str, title: Title_T, track: AnyTrack) -> bytes:
        """POST the PlayReady SOAP challenge to DRMtoday and return the raw
        license response body.

        Raises:
            RuntimeError: If called before get_tracks() populated the DRM
                auth state (auth_token / custom_data).
            ValueError: On a non-200 license server response.
        """
        if not hasattr(self, 'auth_token') or not hasattr(self, 'custom_data'):
            raise RuntimeError("Authentication tokens missing. Call get_tracks() first.")

        # The SOAP endpoint expects raw XML bytes.
        if isinstance(challenge, str):
            request_body = challenge.encode('utf-8')
        else:
            request_body = challenge

        # Browser-like headers; the DRMtoday auth pair mirrors the Widevine path.
        headers = {
            "Accept": "*/*",
            "Accept-Language": "nl",
            "Cache-Control": "no-cache",
            "Content-Type": "text/xml; charset=utf-8",
            "dt-custom-data": self.custom_data,
            "x-dt-auth-token": self.auth_token,
            "soapaction": '"http://schemas.microsoft.com/DRM/2007/03/protocols/AcquireLicense"',
            "Origin": "https://www.hoopladigital.com",
            "Referer": "https://www.hoopladigital.com/",
            "Pragma": "no-cache",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/144.0.0.0 Safari/537.36 Edg/144.0.0.0",
        }

        response = self.session.post(
            url=self.config["endpoints"]["license_pr"],
            data=request_body,
            headers=headers,
            timeout=30
        )

        if response.status_code != 200:
            self.log.error(f"PlayReady license failed: {response.status_code}")
            # Truncate potentially huge SOAP fault bodies in the log.
            self.log.error(f"Response: {response.text[:1000]}")
            raise ValueError(f"PlayReady license failed: HTTP {response.status_code}")

        return response.content
|
||||
@ -1,22 +0,0 @@
|
||||
endpoints:
|
||||
login: https://patron-api-gateway.hoopladigital.com/core/tokens
|
||||
graphql: https://patron-api-gateway.hoopladigital.com/graphql
|
||||
manifest: https://dash.hoopladigital.com/{media_key}/Manifest.mpd
|
||||
license_asset: https://patron-api-gateway.hoopladigital.com/license/castlabs/asset-id/{media_key}
|
||||
license_token: https://patron-api-gateway.hoopladigital.com/license/castlabs/upfront-auth-tokens/{media_key}/{patron_id}/{circulation_id}
|
||||
license_wv: https://lic.drmtoday.com/license-proxy-widevine/cenc/
|
||||
license_pr: https://lic.drmtoday.com/license-proxy-headerauth/drmtoday/RightsManager.asmx?persistent=false
|
||||
|
||||
platform:
|
||||
amazon:
|
||||
headers:
|
||||
app: AMAZON
|
||||
device-model: SM-A525F
|
||||
os: AMAZON
|
||||
User-Agent: Hoopla Amazon/4.84.1
|
||||
app-version: "4.84.1"
|
||||
os-version: "15"
|
||||
ws-api: "2.1"
|
||||
device-version: a52q
|
||||
hoopla-version: "4.84.1"
|
||||
Accept-Language: en-US
|
||||
663
KNPY/__init__.py
663
KNPY/__init__.py
@ -1,663 +0,0 @@
|
||||
import base64
|
||||
import json
|
||||
import re
|
||||
from datetime import datetime, timezone
|
||||
from http.cookiejar import CookieJar
|
||||
from typing import List, Optional
|
||||
|
||||
from collections.abc import Generator
|
||||
import click
|
||||
import jwt
|
||||
from langcodes import Language
|
||||
|
||||
from unshackle.core.constants import AnyTrack
|
||||
from unshackle.core.credential import Credential
|
||||
from unshackle.core.manifests import DASH
|
||||
from unshackle.core.search_result import SearchResult
|
||||
from unshackle.core.service import Service
|
||||
from unshackle.core.titles import Episode, Movie, Movies, Series, Title_T, Titles_T
|
||||
from unshackle.core.tracks import Subtitle, Tracks
|
||||
|
||||
|
||||
class KNPY(Service):
|
||||
"""
|
||||
Service code for Kanopy (kanopy.com).
|
||||
Version: 1.1.0
|
||||
|
||||
Auth: Cookies (kapi_token) or Credential (username + password)
|
||||
Security: FHD@L3
|
||||
|
||||
Handles both Movies and Series (Playlists).
|
||||
Detects and stops for movies that require tickets.
|
||||
Caching included
|
||||
"""
|
||||
|
||||
TITLE_RE = r"^https?://(?:www\.)?kanopy\.com/.+/(?P<id>\d+)$"
|
||||
GEOFENCE = ()
|
||||
NO_SUBTITLES = False
|
||||
|
||||
@staticmethod
|
||||
@click.command(name="KNPY", short_help="https://kanopy.com")
|
||||
@click.argument("title", type=str)
|
||||
@click.pass_context
|
||||
def cli(ctx, **kwargs):
|
||||
return KNPY(ctx, **kwargs)
|
||||
|
||||
def __init__(self, ctx, title: str):
|
||||
super().__init__(ctx)
|
||||
if not self.config:
|
||||
raise ValueError("KNPY configuration not found. Ensure config.yaml exists.")
|
||||
|
||||
self.cdm = ctx.obj.cdm
|
||||
|
||||
match = re.match(self.TITLE_RE, title)
|
||||
if match:
|
||||
self.content_id = match.group("id")
|
||||
else:
|
||||
self.content_id = None
|
||||
self.search_query = title
|
||||
|
||||
self.API_VERSION = self.config["client"]["api_version"]
|
||||
self.USER_AGENT = self.config["client"]["user_agent"]
|
||||
self.WIDEVINE_UA = self.config["client"]["widevine_ua"]
|
||||
|
||||
self.session.headers.update({
|
||||
"x-version": self.API_VERSION,
|
||||
"user-agent": self.USER_AGENT
|
||||
})
|
||||
|
||||
self._jwt = None
|
||||
self._visitor_id = None
|
||||
self._user_id = None
|
||||
self._domain_id = None
|
||||
self.widevine_license_url = None
|
||||
|
||||
def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
|
||||
"""
|
||||
Authenticate using either cookies or credentials.
|
||||
|
||||
Cookie-based auth: Requires 'kapi_token' cookie from browser.
|
||||
Credential-based auth: Requires email and password.
|
||||
"""
|
||||
|
||||
if cookies:
|
||||
jwt_token = None
|
||||
cookie_visitor_id = None
|
||||
cookie_uid = None
|
||||
|
||||
# Extract relevant cookies
|
||||
for cookie in cookies:
|
||||
if cookie.name == "kapi_token":
|
||||
jwt_token = cookie.value
|
||||
elif cookie.name == "visitor_id":
|
||||
cookie_visitor_id = cookie.value
|
||||
elif cookie.name == "uid":
|
||||
cookie_uid = cookie.value
|
||||
|
||||
if jwt_token:
|
||||
self.log.info("Attempting cookie-based authentication...")
|
||||
self._jwt = jwt_token
|
||||
self.session.headers.update({"authorization": f"Bearer {self._jwt}"})
|
||||
|
||||
try:
|
||||
# Decode JWT to extract user information
|
||||
decoded_jwt = jwt.decode(self._jwt, options={"verify_signature": False})
|
||||
|
||||
# Check if token is expired
|
||||
exp_timestamp = decoded_jwt.get("exp")
|
||||
if exp_timestamp and exp_timestamp < datetime.now(timezone.utc).timestamp():
|
||||
self.log.warning("Cookie token has expired.")
|
||||
if credential:
|
||||
self.log.info("Falling back to credential-based authentication...")
|
||||
else:
|
||||
raise ValueError("Cookie token expired and no credentials provided.")
|
||||
else:
|
||||
# Extract user data from JWT
|
||||
jwt_data = decoded_jwt.get("data", {})
|
||||
self._user_id = jwt_data.get("uid") or cookie_uid
|
||||
self._visitor_id = jwt_data.get("visitor_id") or cookie_visitor_id
|
||||
|
||||
if not self._user_id:
|
||||
raise ValueError("Could not extract user_id from cookie token")
|
||||
|
||||
self.log.info(f"Successfully authenticated via cookies (user_id: {self._user_id})")
|
||||
|
||||
# Fetch user library memberships to get domain_id
|
||||
self._fetch_user_details()
|
||||
return
|
||||
|
||||
except jwt.DecodeError as e:
|
||||
self.log.error(f"Failed to decode cookie token: {e}")
|
||||
if credential:
|
||||
self.log.info("Falling back to credential-based authentication...")
|
||||
else:
|
||||
raise ValueError(f"Invalid kapi_token cookie: {e}")
|
||||
except KeyError as e:
|
||||
self.log.error(f"Missing expected field in cookie token: {e}")
|
||||
if credential:
|
||||
self.log.info("Falling back to credential-based authentication...")
|
||||
else:
|
||||
raise ValueError(f"Invalid kapi_token structure: {e}")
|
||||
else:
|
||||
self.log.info("No kapi_token found in cookies.")
|
||||
if not credential:
|
||||
raise ValueError("No kapi_token cookie found and no credentials provided.")
|
||||
self.log.info("Falling back to credential-based authentication...")
|
||||
|
||||
if not self._jwt: # Only proceed if not already authenticated via cookies
|
||||
if not credential or not credential.username or not credential.password:
|
||||
raise ValueError("Kanopy requires either cookies (with kapi_token) or email/password for authentication.")
|
||||
|
||||
# Check for cached credential-based token
|
||||
cache = self.cache.get("auth_token")
|
||||
|
||||
if cache and not cache.expired:
|
||||
cached_data = cache.data
|
||||
valid_token = None
|
||||
|
||||
if isinstance(cached_data, dict) and "token" in cached_data:
|
||||
if cached_data.get("username") == credential.username:
|
||||
valid_token = cached_data["token"]
|
||||
self.log.info("Using cached authentication token")
|
||||
else:
|
||||
self.log.info(f"Cached token belongs to '{cached_data.get('username')}', but logging in as '{credential.username}'. Re-authenticating.")
|
||||
|
||||
elif isinstance(cached_data, str):
|
||||
self.log.info("Found legacy cached token format. Re-authenticating to ensure correct user.")
|
||||
|
||||
if valid_token:
|
||||
self._jwt = valid_token
|
||||
self.session.headers.update({"authorization": f"Bearer {self._jwt}"})
|
||||
|
||||
if not self._user_id or not self._domain_id or not self._visitor_id:
|
||||
try:
|
||||
decoded_jwt = jwt.decode(self._jwt, options={"verify_signature": False})
|
||||
self._user_id = decoded_jwt["data"]["uid"]
|
||||
self._visitor_id = decoded_jwt["data"]["visitor_id"]
|
||||
self.log.info(f"Extracted user_id and visitor_id from cached token.")
|
||||
self._fetch_user_details()
|
||||
return
|
||||
except (KeyError, jwt.DecodeError) as e:
|
||||
self.log.error(f"Could not decode cached token: {e}. Re-authenticating.")
|
||||
|
||||
# Perform fresh login with credentials
|
||||
self.log.info("Performing handshake to get visitor token...")
|
||||
r = self.session.get(self.config["endpoints"]["handshake"])
|
||||
r.raise_for_status()
|
||||
handshake_data = r.json()
|
||||
self._visitor_id = handshake_data["visitorId"]
|
||||
initial_jwt = handshake_data["jwt"]
|
||||
|
||||
self.log.info(f"Logging in as {credential.username}...")
|
||||
login_payload = {
|
||||
"credentialType": "email",
|
||||
"emailUser": {
|
||||
"email": credential.username,
|
||||
"password": credential.password
|
||||
}
|
||||
}
|
||||
r = self.session.post(
|
||||
self.config["endpoints"]["login"],
|
||||
json=login_payload,
|
||||
headers={"authorization": f"Bearer {initial_jwt}"}
|
||||
)
|
||||
r.raise_for_status()
|
||||
login_data = r.json()
|
||||
self._jwt = login_data["jwt"]
|
||||
self._user_id = login_data["userId"]
|
||||
|
||||
self.session.headers.update({"authorization": f"Bearer {self._jwt}"})
|
||||
self.log.info(f"Successfully authenticated as {credential.username}")
|
||||
|
||||
self._fetch_user_details()
|
||||
|
||||
# Cache the token
|
||||
try:
|
||||
decoded_jwt = jwt.decode(self._jwt, options={"verify_signature": False})
|
||||
exp_timestamp = decoded_jwt.get("exp")
|
||||
|
||||
cache_payload = {
|
||||
"token": self._jwt,
|
||||
"username": credential.username
|
||||
}
|
||||
|
||||
if exp_timestamp:
|
||||
expiration_in_seconds = int(exp_timestamp - datetime.now(timezone.utc).timestamp())
|
||||
self.log.info(f"Caching token for {expiration_in_seconds / 60:.2f} minutes.")
|
||||
cache.set(data=cache_payload, expiration=expiration_in_seconds)
|
||||
else:
|
||||
self.log.warning("JWT has no 'exp' claim, caching for 1 hour as a fallback.")
|
||||
cache.set(data=cache_payload, expiration=3600)
|
||||
except Exception as e:
|
||||
self.log.error(f"Failed to decode JWT for caching: {e}. Caching for 1 hour as a fallback.")
|
||||
cache.set(
|
||||
data={"token": self._jwt, "username": credential.username},
|
||||
expiration=3600
|
||||
)
|
||||
|
||||
def _fetch_user_details(self):
|
||||
"""Fetch user library memberships to determine the active domain_id."""
|
||||
self.log.info("Fetching user library memberships...")
|
||||
r = self.session.get(self.config["endpoints"]["memberships"].format(user_id=self._user_id))
|
||||
r.raise_for_status()
|
||||
memberships = r.json()
|
||||
|
||||
# Look for the default active membership
|
||||
for membership in memberships.get("list", []):
|
||||
if membership.get("status") == "active" and membership.get("isDefault", False):
|
||||
self._domain_id = str(membership["domainId"])
|
||||
self.log.info(f"Using default library domain: {membership.get('sitename', 'Unknown')} (ID: {self._domain_id})")
|
||||
return
|
||||
|
||||
# Fallback to first active membership
|
||||
for membership in memberships.get("list", []):
|
||||
if membership.get("status") == "active":
|
||||
self._domain_id = str(membership["domainId"])
|
||||
self.log.warning(f"No default library found. Using first active domain: {self._domain_id}")
|
||||
return
|
||||
|
||||
if memberships.get("list"):
|
||||
self._domain_id = str(memberships["list"][0]["domainId"])
|
||||
self.log.warning(f"No active library found. Using first available domain: {self._domain_id}")
|
||||
else:
|
||||
raise ValueError("No library memberships found for this user.")
|
||||
|
||||
def get_titles(self) -> Titles_T:
|
||||
if not self.content_id:
|
||||
raise ValueError("A content ID is required to get titles. Use a URL or run a search first.")
|
||||
if not self._domain_id:
|
||||
raise ValueError("Domain ID not set. Authentication may have failed.")
|
||||
|
||||
r = self.session.get(self.config["endpoints"]["video_info"].format(video_id=self.content_id, domain_id=self._domain_id))
|
||||
r.raise_for_status()
|
||||
content_data = r.json()
|
||||
|
||||
content_type = content_data.get("type")
|
||||
|
||||
def parse_lang(taxonomies_data: dict) -> Language:
|
||||
"""Parses language from the taxonomies dictionary."""
|
||||
try:
|
||||
langs = taxonomies_data.get("languages", [])
|
||||
if langs:
|
||||
lang_name = langs[0].get("name")
|
||||
if lang_name:
|
||||
return Language.find(lang_name)
|
||||
except (IndexError, AttributeError, TypeError):
|
||||
pass
|
||||
return Language.get("en") # Default to English
|
||||
|
||||
if content_type == "video":
|
||||
video_data = content_data["video"]
|
||||
movie = Movie(
|
||||
id_=str(video_data["videoId"]),
|
||||
service=self.__class__,
|
||||
name=video_data["title"],
|
||||
year=video_data.get("productionYear"),
|
||||
description=video_data.get("descriptionHtml", ""),
|
||||
language=parse_lang(video_data.get("taxonomies", {})),
|
||||
data=video_data,
|
||||
)
|
||||
return Movies([movie])
|
||||
|
||||
elif content_type == "playlist":
|
||||
playlist_data = content_data.get("playlist")
|
||||
if not playlist_data:
|
||||
raise ValueError("Could not find 'playlist' data dictionary.")
|
||||
|
||||
series_title = playlist_data["title"]
|
||||
series_year = playlist_data.get("productionYear")
|
||||
|
||||
season_match = re.search(r'(?:Season|S)\s*(\d+)', series_title, re.IGNORECASE)
|
||||
season_num = int(season_match.group(1)) if season_match else 1
|
||||
|
||||
r_items = self.session.get(self.config["endpoints"]["video_items"].format(video_id=self.content_id, domain_id=self._domain_id))
|
||||
r_items.raise_for_status()
|
||||
items_data = r_items.json()
|
||||
|
||||
episodes = []
|
||||
for i, item in enumerate(items_data.get("list", [])):
|
||||
if item.get("type") != "video":
|
||||
continue
|
||||
|
||||
video_data = item["video"]
|
||||
ep_num = i + 1
|
||||
|
||||
ep_title_str = video_data.get("title", "")
|
||||
ep_match = re.search(r'Ep(?:isode)?\.?\s*(\d+)', ep_title_str, re.IGNORECASE)
|
||||
if ep_match:
|
||||
ep_num = int(ep_match.group(1))
|
||||
|
||||
episodes.append(
|
||||
Episode(
|
||||
id_=str(video_data["videoId"]),
|
||||
service=self.__class__,
|
||||
title=series_title,
|
||||
season=season_num,
|
||||
number=ep_num,
|
||||
name=video_data["title"],
|
||||
description=video_data.get("descriptionHtml", ""),
|
||||
year=video_data.get("productionYear", series_year),
|
||||
language=parse_lang(video_data.get("taxonomies", {})),
|
||||
data=video_data,
|
||||
)
|
||||
)
|
||||
|
||||
series = Series(episodes)
|
||||
series.name = series_title
|
||||
series.description = playlist_data.get("descriptionHtml", "")
|
||||
series.year = series_year
|
||||
return series
|
||||
|
||||
elif content_type == "collection":
|
||||
collection_data = content_data.get("collection")
|
||||
if not collection_data:
|
||||
raise ValueError("Could not find 'collection' data dictionary.")
|
||||
|
||||
series_title_main = collection_data["title"]
|
||||
series_description_main = collection_data.get("descriptionHtml", "")
|
||||
series_year_main = collection_data.get("productionYear")
|
||||
|
||||
r_seasons = self.session.get(self.config["endpoints"]["video_items"].format(video_id=self.content_id, domain_id=self._domain_id))
|
||||
r_seasons.raise_for_status()
|
||||
seasons_data = r_seasons.json()
|
||||
|
||||
all_episodes = []
|
||||
self.log.info(f"Processing collection '{series_title_main}', found {len(seasons_data.get('list', []))} seasons.")
|
||||
|
||||
season_counter = 1
|
||||
for season_item in seasons_data.get("list", []):
|
||||
if season_item.get("type") != "playlist":
|
||||
self.log.warning(f"Skipping unexpected item of type '{season_item.get('type')}' in collection.")
|
||||
continue
|
||||
|
||||
season_playlist_data = season_item["playlist"]
|
||||
season_id = season_playlist_data["videoId"]
|
||||
season_title = season_playlist_data["title"]
|
||||
|
||||
self.log.info(f"Fetching episodes for season: {season_title}")
|
||||
|
||||
season_match = re.search(r'(?:Season|S)\s*(\d+)', season_title, re.IGNORECASE)
|
||||
if season_match:
|
||||
season_num = int(season_match.group(1))
|
||||
else:
|
||||
self.log.warning(f"Could not parse season number from '{season_title}'. Using sequential number {season_counter}.")
|
||||
season_num = season_counter
|
||||
season_counter += 1
|
||||
|
||||
r_episodes = self.session.get(self.config["endpoints"]["video_items"].format(video_id=season_id, domain_id=self._domain_id))
|
||||
r_episodes.raise_for_status()
|
||||
episodes_data = r_episodes.json()
|
||||
|
||||
for i, episode_item in enumerate(episodes_data.get("list", [])):
|
||||
if episode_item.get("type") != "video":
|
||||
continue
|
||||
|
||||
video_data = episode_item["video"]
|
||||
ep_num = i + 1
|
||||
|
||||
ep_title_str = video_data.get("title", "")
|
||||
ep_match = re.search(r'Ep(?:isode)?\.?\s*(\d+)', ep_title_str, re.IGNORECASE)
|
||||
if ep_match:
|
||||
ep_num = int(ep_match.group(1))
|
||||
|
||||
all_episodes.append(
|
||||
Episode(
|
||||
id_=str(video_data["videoId"]),
|
||||
service=self.__class__,
|
||||
title=series_title_main,
|
||||
season=season_num,
|
||||
number=ep_num,
|
||||
name=video_data["title"],
|
||||
description=video_data.get("descriptionHtml", ""),
|
||||
year=video_data.get("productionYear", series_year_main),
|
||||
language=parse_lang(video_data.get("taxonomies", {})),
|
||||
data=video_data,
|
||||
)
|
||||
)
|
||||
|
||||
if not all_episodes:
|
||||
self.log.error(f"Collection '{series_title_main}' did not yield any episodes. The structure may have changed.")
|
||||
return Series([])
|
||||
|
||||
series = Series(all_episodes)
|
||||
series.name = series_title_main
|
||||
series.description = series_description_main
|
||||
series.year = series_year_main
|
||||
return series
|
||||
|
||||
else:
|
||||
raise ValueError(f"Unsupported content type: {content_type}")
|
||||
|
||||
def get_tracks(self, title: Title_T) -> Tracks:
    """
    Start a playback session for *title* and return its media tracks.

    Flow:
      1. POST to the "plays" endpoint to open a playback session.
      2. Pick a DASH manifest (preferred) or fall back to HLS.
      3. For DASH, strip PlayReady ContentProtection from the MPD and
         derive the Widevine license URL from the play session.
      4. Swap session headers from API-style to CDN-style for segment
         downloads, then attach WebVTT subtitle tracks from the play data.

    Side effects: mutates self.session.headers and sets
    self.widevine_license_url (consumed later by get_widevine_license).

    Raises:
        PermissionError: on HTTP 403 (region restriction or access denial).
        ValueError: if no usable manifest or license URL can be determined.
    """
    play_payload = {
        "videoId": int(title.id),
        "domainId": int(self._domain_id),
        "userId": int(self._user_id),
        "visitorId": self._visitor_id
    }

    # setdefault: do not clobber headers if a previous call already set them.
    self.session.headers.setdefault("authorization", f"Bearer {self._jwt}")
    self.session.headers.setdefault("x-version", self.API_VERSION)
    self.session.headers.setdefault("user-agent", self.USER_AGENT)

    r = self.session.post(self.config["endpoints"]["plays"], json=play_payload)
    # Body may not be JSON on error responses; parse defensively so the
    # 403 branch below can still inspect whatever came back.
    response_json = None
    try:
        response_json = r.json()
    except Exception:
        pass

    if r.status_code == 403:
        if response_json and response_json.get("errorSubcode") == "playRegionRestricted":
            self.log.error("Kanopy reports: This video is not available in your country.")
            raise PermissionError(
                "Playback blocked by region restriction. Try connecting through a supported country or verify your library's access region."
            )
        else:
            self.log.error(f"Access forbidden (HTTP 403). Response: {response_json}")
            raise PermissionError("Kanopy denied access to this video. It may require a different library membership or authentication.")

    r.raise_for_status()
    play_data = response_json or r.json()

    manifest_url: Optional[str] = None
    manifest_type: Optional[str] = None
    drm_info: dict = {}

    # Prefer DASH; only take HLS if no DASH manifest was seen first.
    for manifest in play_data.get("manifests", []):
        manifest_type_raw = manifest["manifestType"]
        url = manifest["url"].strip()

        # Some manifest URLs are host-relative; anchor them to kanopy.com.
        if url.startswith("/"):
            url = f"https://kanopy.com{url}"

        drm_type = manifest.get("drmType")

        if manifest_type_raw == "dash":
            manifest_url = url
            manifest_type = "dash"

            if drm_type == "kanopyDrm":
                # License id convention appears to be "<playId>-0" for
                # Kanopy's own DRM — TODO confirm against API docs.
                play_id = play_data.get("playId")
                self.widevine_license_url = self.config["endpoints"]["widevine_license"].format(
                    license_id=f"{play_id}-0"
                )
            elif drm_type == "studioDrm":
                # Studio DRM carries an explicit license id; "<playId>-1"
                # is the fallback convention when absent.
                license_id = manifest.get("drmLicenseID", f"{play_data.get('playId')}-1")
                self.widevine_license_url = self.config["endpoints"]["widevine_license"].format(
                    license_id=license_id
                )
            else:
                self.log.warning(f"Unknown DASH drmType: {drm_type}")
                self.widevine_license_url = None
            # DASH found — stop scanning; it always wins over HLS.
            break

        elif manifest_type_raw == "hls" and not manifest_url:
            manifest_url = url
            manifest_type = "hls"

            if drm_type == "fairplay":
                self.log.warning("HLS with FairPlay DRM detected - not currently supported by this service")
                self.widevine_license_url = None
                drm_info["fairplay"] = True
            else:
                self.widevine_license_url = None
                drm_info["clear"] = True

    if not manifest_url:
        raise ValueError("Could not find a DASH or HLS manifest for this title.")
    if manifest_type == "dash" and not self.widevine_license_url:
        raise ValueError("Could not construct Widevine license URL for DASH manifest.")

    self.log.info(f"Fetching {manifest_type.upper()} manifest from: {manifest_url}")
    r = self.session.get(manifest_url)
    r.raise_for_status()

    if manifest_type == "dash":
        import xml.etree.ElementTree as ET

        # Parse and clean the MPD to remove PlayReady ContentProtection.
        # Namespaces are registered so re-serialization keeps the
        # original prefixes instead of auto-generated ns0/ns1.
        ET.register_namespace('', 'urn:mpeg:dash:schema:mpd:2011')
        ET.register_namespace('cenc', 'urn:mpeg:cenc:2013')
        ET.register_namespace('mspr', 'urn:microsoft:playready')

        root = ET.fromstring(r.text)

        # Remove PlayReady ContentProtection elements.
        # NOTE(review): only AdaptationSet-level elements are scanned;
        # Representation-level ContentProtection (if any) is untouched.
        for adaptation_set in root.findall('.//{urn:mpeg:dash:schema:mpd:2011}AdaptationSet'):
            # list() so we can remove children while iterating.
            for cp in list(adaptation_set.findall('{urn:mpeg:dash:schema:mpd:2011}ContentProtection')):
                scheme_id = cp.get('schemeIdUri', '')
                # Remove PlayReady (its scheme UUID) but keep Widevine and CENC.
                if '9a04f079-9840-4286-ab92-e65be0885f95' in scheme_id:
                    adaptation_set.remove(cp)
                    self.log.debug("Removed PlayReady ContentProtection element")

        cleaned_mpd = ET.tostring(root, encoding='unicode')
        tracks = DASH.from_text(cleaned_mpd, url=manifest_url).to_tracks(language=title.language)

    elif manifest_type == "hls":
        try:
            # Imported lazily: HLS support may be absent in some
            # unshackle builds; fail with a clear message if so.
            from unshackle.core.manifests import HLS
            tracks = HLS.from_text(r.text, url=manifest_url).to_tracks(language=title.language)
            self.log.info("Successfully parsed HLS manifest")
        except ImportError:
            self.log.error(
                "HLS manifest parser not available in unshackle.core.manifests. "
                "Ensure your unshackle installation supports HLS parsing."
            )
            raise
        except Exception as e:
            self.log.error(f"Failed to parse HLS manifest: {e}")
            raise
    else:
        raise ValueError(f"Unsupported manifest type: {manifest_type}")

    # Update session headers for CDN segment downloads — the CDN expects
    # browser-like headers rather than the app API headers used above.
    self.session.headers.update({
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
        "Accept": "*/*",
        "Accept-Language": "en-US,en;q=0.9",
        "Origin": "https://www.kanopy.com",
        "Referer": "https://www.kanopy.com/",
    })
    # Remove API-specific headers that CDN doesn't need.
    # NOTE(review): this also strips them for the later license request,
    # which re-supplies its own Authorization header explicitly.
    self.session.headers.pop("x-version", None)
    self.session.headers.pop("authorization", None)

    # START: SUBTITLE FIX
    for caption_data in play_data.get("captions", []):
        lang = caption_data.get("language", "en")
        # Use the descriptive label for uniqueness, fallback to the language code
        label = caption_data.get("label", lang)

        # Create a clean, repeatable "slug" from the label for the track ID
        slug = label.lower()
        slug = re.sub(r'[\s\[\]\(\)]+', '-', slug)  # Replace spaces and brackets with hyphens
        slug = re.sub(r'[^a-z0-9-]', '', slug)  # Remove other non-alphanumeric chars
        slug = slug.strip('-')

        # Combine with lang code for a robust, unique ID
        track_id = f"caption-{lang}-{slug}"

        for file_info in caption_data.get("files", []):
            if file_info.get("type") == "webvtt":
                # NOTE(review): Subtitle must be importable at module level
                # (from unshackle.core.tracks) — verify the file's imports.
                tracks.add(Subtitle(
                    id_=track_id,
                    name=label,  # Use the original label for display
                    url=file_info["url"].strip(),
                    codec=Subtitle.Codec.WebVTT,
                    language=Language.get(lang)
                ))
                # Found the file for this caption entry, move to the next one
                break
    # END: SUBTITLE FIX

    return tracks
|
||||
|
||||
def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
    """
    POST the CDM challenge to the license server captured by get_tracks
    and return the raw Widevine license bytes.

    Raises:
        ValueError: if get_tracks has not populated the license URL yet.
    """
    if not self.widevine_license_url:
        raise ValueError("Widevine license URL was not set. Call get_tracks first.")

    response = self.session.post(
        self.widevine_license_url,
        data=challenge,
        headers={
            "Content-Type": "application/octet-stream",
            "User-Agent": self.WIDEVINE_UA,
            "Authorization": f"Bearer {self._jwt}",
            "X-Version": self.API_VERSION,
        },
    )
    response.raise_for_status()
    return response.content
|
||||
|
||||
def search(self) -> Generator[SearchResult, None, None]:
    """
    Yield SearchResult objects for the query stored on self.search_query.

    Ensures a library/domain ID is resolved first, then queries the
    search endpoint and yields one result per item carrying a videoId.
    """
    query = getattr(self, "search_query", None)
    if not query:
        self.log.error("Search query not set. Cannot search.")
        return

    self.log.info(f"Searching for '{query}'...")

    # The search endpoint requires a domain ID (Library ID).
    if not self._domain_id:
        self._fetch_user_details()

    response = self.session.get(
        self.config["endpoints"]["search"],
        params={
            "query": query,
            "sort": "relevance",
            "domainId": self._domain_id,
            "isKids": "false",
            "page": 0,
            "perPage": 40,
        },
    )
    response.raise_for_status()

    # Results live under the "list" key of the API payload.
    hits = response.json().get("list", [])
    if not hits:
        self.log.warning(f"No results found for '{query}'")
        return

    for hit in hits:
        # 'videoId' is the unique identifier in Kanopy search results.
        video_id = hit.get("videoId")
        if not video_id:
            continue

        yield SearchResult(
            id_=str(video_id),
            title=hit.get("title", "Unknown Title"),
            label="VIDEO/SERIES",
            url=f"https://www.kanopy.com/video/{video_id}",
        )
|
||||
|
||||
def get_chapters(self, title: Title_T) -> list:
    """This service exposes no chapter markers; always return an empty list."""
    return list()
|
||||
@ -1,15 +0,0 @@
|
||||
client:
|
||||
api_version: "Android/com.kanopy/6.21.0/952 (SM-A525F; Android 15)"
|
||||
user_agent: "okhttp/5.2.1"
|
||||
widevine_ua: "KanopyApplication/6.21.0 (Linux;Android 15) AndroidXMedia3/1.8.0"
|
||||
|
||||
endpoints:
|
||||
handshake: "https://kanopy.com/kapi/handshake"
|
||||
login: "https://kanopy.com/kapi/login"
|
||||
memberships: "https://kanopy.com/kapi/memberships?userId={user_id}"
|
||||
video_info: "https://kanopy.com/kapi/videos/{video_id}?domainId={domain_id}"
|
||||
video_items: "https://kanopy.com/kapi/videos/{video_id}/items?domainId={domain_id}"
|
||||
search: "https://kanopy.com/kapi/search/videos"
|
||||
plays: "https://kanopy.com/kapi/plays"
|
||||
access_expires_in: "https://kanopy.com/kapi/users/{user_id}/history/videos/{video_id}/access_expires_in?domainId={domain_id}"
|
||||
widevine_license: "https://kanopy.com/kapi/licenses/widevine/{license_id}"
|
||||
309
KOCW/__init__.py
309
KOCW/__init__.py
@ -1,309 +0,0 @@
|
||||
import json
|
||||
import re
|
||||
from http.cookiejar import CookieJar
|
||||
from typing import Optional, List, Dict, Any
|
||||
|
||||
import click
|
||||
from langcodes import Language
|
||||
|
||||
from unshackle.core.constants import AnyTrack
|
||||
from unshackle.core.credential import Credential
|
||||
from unshackle.core.manifests import DASH
|
||||
from unshackle.core.service import Service
|
||||
from unshackle.core.search_result import SearchResult
|
||||
from unshackle.core.titles import Episode, Series, Title_T, Titles_T
|
||||
from unshackle.core.tracks import Subtitle, Tracks
|
||||
from unshackle.core.utilities import is_close_match
|
||||
import uuid
|
||||
import hashlib
|
||||
|
||||
class KOCW(Service):
    """
    Service code for Kocowa Plus (kocowa.com).
    Version: 1.0.0

    Auth: Credential (username + password)
    Security: FHD@L3
    """

    TITLE_RE = r"^(?:https?://(?:www\.)?kocowa\.com/[^/]+/season/)?(?P<title_id>\d+)"
    GEOFENCE = ()
    NO_SUBTITLES = False

    @staticmethod
    @click.command(name="kocw", short_help="https://www.kocowa.com")
    @click.argument("title", type=str)
    @click.option("--extras", is_flag=True, default=False, help="Include teasers/extras")
    @click.pass_context
    def cli(ctx, **kwargs):
        return KOCW(ctx, **kwargs)

    def __init__(self, ctx, title: str, extras: bool = False):
        """
        Accept either a season URL/numeric ID (title download mode) or any
        other string, which is kept verbatim as a search keyword.
        """
        super().__init__(ctx)
        match = re.match(self.TITLE_RE, title)
        if match:
            self.title_id = match.group("title_id")
        else:
            self.title_id = title  # fallback to use as search keyword
        self.include_extras = extras
        # Brightcove playback config, filled in during authenticate().
        self.brightcove_account_id = None
        self.brightcove_pk = None
        self.cdm = ctx.obj.cdm

    def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
        """
        Log in with username/password, then exchange the Kocowa access
        token for a Brightcove middleware token and load playback config.

        Raises:
            ValueError: if no credential was supplied.
            PermissionError: if Kocowa rejects the login.
        """
        if not credential:
            # Fixed: error message previously referred to "KOWP".
            raise ValueError("KOCW requires username and password")

        # Derive a stable, account-specific device id: the UUID is seeded
        # from the e-mail so repeated logins present the same "device".
        email = credential.username.lower().strip()
        uuid_seed = hashlib.md5(email.encode()).digest()
        fake_uuid = str(uuid.UUID(bytes=uuid_seed[:16]))

        device_id = f"a_{fake_uuid}_{email}"

        payload = {
            "username": credential.username,
            "password": credential.password,
            "device_id": device_id,
            "device_type": "mobile",
            "device_model": "SM-A525F",
            "device_version": "Android 15",
            "push_token": None,  # push notifications are not needed
            "app_version": "v4.0.11",
        }

        self.log.debug(f"Authenticating with device_id: {device_id}")

        r = self.session.post(
            self.config["endpoints"]["login"],
            json=payload,
            headers={"Authorization": "anonymous", "Origin": "https://www.kocowa.com"}
        )
        r.raise_for_status()
        res = r.json()
        # Kocowa signals success with application-level code "0000".
        if res.get("code") != "0000":
            raise PermissionError(f"Login failed: {res.get('message')}")

        self.access_token = res["object"]["access_token"]

        # Exchange the Kocowa token for a Brightcove middleware token.
        r = self.session.post(
            self.config["endpoints"]["middleware_auth"],
            json={"token": f"wA-Auth.{self.access_token}"},
            headers={"Origin": "https://www.kocowa.com"}
        )
        r.raise_for_status()
        self.middleware_token = r.json()["token"]

        self._fetch_brightcove_config()

    def _fetch_brightcove_config(self):
        """Fetch Brightcove account_id and policy_key from Kocowa's public config endpoint."""
        try:
            r = self.session.get(
                "https://middleware.bcmw.kocowa.com/api/config",
                headers={
                    "Origin": "https://www.kocowa.com",
                    "Referer": "https://www.kocowa.com/",
                    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/142.0.0.0 Safari/537.36 Edg/142.0.0.0"
                }
            )
            r.raise_for_status()
            config = r.json()

            self.brightcove_account_id = config.get("VC_ACCOUNT_ID")
            self.brightcove_pk = config.get("BCOV_POLICY_KEY")

            if not self.brightcove_account_id:
                raise ValueError("VC_ACCOUNT_ID missing in /api/config response")
            if not self.brightcove_pk:
                raise ValueError("BCOV_POLICY_KEY missing in /api/config response")

            self.log.info(f"Brightcove config loaded: account_id={self.brightcove_account_id}")

        except Exception as e:
            # Chain the cause so the original failure stays visible.
            raise RuntimeError(f"Failed to fetch or parse Brightcove config: {e}") from e

    def get_titles(self) -> Titles_T:
        """
        Page through the season's episode list and build a Series.

        Teasers/extras are included only when --extras was passed.

        Raises:
            ValueError: if no metadata page yielded a series title.
        """
        all_episodes = []
        offset = 0
        limit = 20
        series_title = None  # Store the title from the first request

        while True:
            url = self.config["endpoints"]["metadata"].format(title_id=self.title_id)
            # The endpoint template may or may not already carry a query string.
            sep = "&" if "?" in url else "?"
            url += f"{sep}offset={offset}&limit={limit}"

            r = self.session.get(
                url,
                headers={"Authorization": self.access_token, "Origin": "https://www.kocowa.com"}
            )
            r.raise_for_status()
            data = r.json()["object"]

            # Extract the series title only from the very first page.
            if series_title is None and "meta" in data:
                series_title = data["meta"]["title"]["en"]

            page_objects = data.get("next_episodes", {}).get("objects", [])
            if not page_objects:
                break

            for ep in page_objects:
                is_episode = ep.get("detail_type") == "episode"
                is_extra = ep.get("detail_type") in ("teaser", "extra")
                if is_episode or (self.include_extras and is_extra):
                    all_episodes.append(ep)

            offset += limit
            total = data.get("next_episodes", {}).get("total_count", 0)
            # Stop once everything is collected or the server returned a short page.
            if len(all_episodes) >= total or len(page_objects) < limit:
                break

        # If we never got the series title, exit with an error.
        if series_title is None:
            raise ValueError("Could not retrieve series metadata to get the title.")

        episodes = []
        for ep in all_episodes:
            meta = ep["meta"]
            ep_type = "Episode" if ep["detail_type"] == "episode" else ep["detail_type"].capitalize()
            ep_num = meta.get("episode_number", 0)
            # Fall back to a synthesized "<Type> <n>" name when no English title exists.
            title = meta["title"].get("en") or f"{ep_type} {ep_num}"
            desc = meta["description"].get("en") or ""

            episodes.append(
                Episode(
                    id_=str(ep["id"]),
                    service=self.__class__,
                    title=series_title,
                    season=meta.get("season_number", 1),
                    number=ep_num,
                    name=title,
                    description=desc,
                    year=None,
                    language=Language.get("en"),
                    data=ep,
                )
            )

        return Series(episodes)

    def get_tracks(self, title: Title_T) -> Tracks:
        """
        Authorize playback, fetch the Brightcove manifest, and return
        DASH media tracks plus any non-thumbnail WebVTT subtitles.

        Side effects: stores self.playback_token and self.widevine_license_url
        for the later license request.

        Raises:
            PermissionError: if playback authorization fails.
            ValueError: if no Widevine-protected DASH source is present.
        """
        # Authorize playback against the Kocowa middleware.
        r = self.session.post(
            self.config["endpoints"]["authorize"].format(episode_id=title.id),
            headers={"Authorization": f"Bearer {self.middleware_token}"}
        )
        r.raise_for_status()
        auth_data = r.json()
        if not auth_data.get("Success"):
            raise PermissionError("Playback authorization failed")
        self.playback_token = auth_data["token"]

        # Fetch Brightcove manifest (ref: lookup by Kocowa episode id).
        manifest_url = (
            f"https://edge.api.brightcove.com/playback/v1/accounts/{self.brightcove_account_id}/videos/ref:{title.id}"
        )
        r = self.session.get(
            manifest_url,
            headers={"Accept": f"application/json;pk={self.brightcove_pk}"}
        )
        r.raise_for_status()
        manifest = r.json()

        # Get DASH URL + Widevine license URL from the sources list.
        dash_url = widevine_url = None
        for src in manifest.get("sources", []):
            if src.get("type") == "application/dash+xml":
                dash_url = src["src"]
                widevine_url = (
                    src.get("key_systems", {})
                    .get("com.widevine.alpha", {})
                    .get("license_url")
                )
                if dash_url and widevine_url:
                    break

        if not dash_url or not widevine_url:
            raise ValueError("No Widevine DASH stream found")

        self.widevine_license_url = widevine_url
        tracks = DASH.from_url(dash_url, session=self.session).to_tracks(language=title.language)

        for sub in manifest.get("text_tracks", []):
            srclang = sub.get("srclang")
            # "thumbnails" text tracks are sprite sheets, not subtitles.
            if not srclang or srclang == "thumbnails":
                continue

            subtitle_track = Subtitle(
                id_=sub["id"],
                url=sub["src"],
                codec=Subtitle.Codec.WebVTT,
                language=Language.get(srclang),
                sdh=True,  # Kocowa subs are SDH - mark them as such
                forced=False,
            )
            tracks.add(subtitle_track)

        return tracks

    def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
        """POST the Widevine challenge to Brightcove and return the raw license."""
        r = self.session.post(
            self.widevine_license_url,
            data=challenge,
            headers={
                # Brightcove validates the playback session via BCOV-Auth.
                "BCOV-Auth": self.playback_token,
                "Content-Type": "application/octet-stream",
                "Origin": "https://www.kocowa.com",
                "Referer": "https://www.kocowa.com/",
            }
        )
        r.raise_for_status()
        return r.content

    def search(self) -> List[SearchResult]:
        """
        Query the autocomplete endpoint with the stored keyword and return
        one SearchResult per season-type hit.
        """
        url = "https://prod-fms.kocowa.com/api/v01/fe/gks/autocomplete"
        params = {
            "search_category": "All",
            "search_input": self.title_id,
            "include_webtoon": "true",
        }

        r = self.session.get(
            url,
            params=params,
            headers={
                "Authorization": self.access_token,
                # Fixed: header values previously carried trailing spaces.
                "Origin": "https://www.kocowa.com",
                "Referer": "https://www.kocowa.com/",
            }
        )
        r.raise_for_status()
        response = r.json()
        contents = response.get("object", {}).get("contents", [])

        results = []
        for item in contents:
            # Only seasons are downloadable entry points for this service.
            if item.get("detail_type") != "season":
                continue

            meta = item["meta"]
            title_en = meta["title"].get("en") or "[No Title]"
            description_en = meta["description"].get("en") or ""
            show_id = str(item["id"])

            results.append(
                SearchResult(
                    id_=show_id,
                    title=title_en,
                    description=description_en,
                    label="season",
                    url=f"https://www.kocowa.com/en_us/season/{show_id}/"
                )
            )
        return results

    def get_chapters(self, title: Title_T) -> list:
        """Kocowa provides no chapter data; always return an empty list."""
        return []
|
||||
@ -1,5 +0,0 @@
|
||||
endpoints:
|
||||
login: "https://prod-sgwv3.kocowa.com/api/v01/user/signin"
|
||||
middleware_auth: "https://middleware.bcmw.kocowa.com/authenticate-user"
|
||||
metadata: "https://prod-fms.kocowa.com/api/v01/fe/content/get?id={title_id}"
|
||||
authorize: "https://middleware.bcmw.kocowa.com/api/playback/authorize/{episode_id}"
|
||||
452
MUBI/__init__.py
452
MUBI/__init__.py
@ -1,452 +0,0 @@
|
||||
import json
|
||||
import re
|
||||
import uuid
|
||||
from http.cookiejar import CookieJar
|
||||
from typing import Optional, Generator
|
||||
from langcodes import Language
|
||||
import base64
|
||||
import click
|
||||
from unshackle.core.constants import AnyTrack
|
||||
from unshackle.core.manifests import DASH
|
||||
from unshackle.core.service import Service
|
||||
from unshackle.core.credential import Credential
|
||||
from unshackle.core.titles import Episode, Movie, Movies, Title_T, Titles_T, Series
|
||||
from unshackle.core.tracks import Chapter, Tracks, Subtitle
|
||||
from unshackle.core.search_result import SearchResult
|
||||
|
||||
class MUBI(Service):
|
||||
"""
|
||||
Service code for MUBI (mubi.com)
|
||||
Version: 1.2.1 (Cookie-only + Auto-UHD + Search)
|
||||
Authorization: Cookies ONLY (lt token + _mubi_session)
|
||||
Security: UHD @ L3/SL2K (Widevine/PlayReady)
|
||||
Supports:
|
||||
• Series ↦ https://mubi.com/en/nl/series/twin-peaks
|
||||
• Movies ↦ https://mubi.com/en/nl/films/the-substance
|
||||
"""
|
||||
SERIES_TITLE_RE = r"^https?://(?:www\.)?mubi\.com(?:/[^/]+)*?/series/(?P<series_slug>[^/]+)(?:/season/(?P<season_slug>[^/]+))?$"
|
||||
TITLE_RE = r"^(?:https?://(?:www\.)?mubi\.com)(?:/[^/]+)*?/films/(?P<slug>[^/?#]+)$"
|
||||
NO_SUBTITLES = False
|
||||
|
||||
@staticmethod
@click.command(name="MUBI", short_help="https://mubi.com")
@click.argument("title", type=str)
@click.option("-c", "--country", default=None, type=str,
              help="With VPN set country code other than the one assigned to the account.")
@click.pass_context
def cli(ctx, **kwargs):
    # Click entry point: constructs the MUBI service from parsed CLI args.
    # Fixed: short_help previously had a trailing space ("https://mubi.com "),
    # which leaked into the rendered --help output.
    return MUBI(ctx, **kwargs)
|
||||
|
||||
def __init__(self, ctx, title: str, country: str):
    """
    Parse *title* as either a MUBI film URL, a MUBI series URL, or a
    free-text search query, and initialize all playback/auth state.

    Args:
        ctx: CLI context; ctx.obj.cdm supplies the content decryption module.
        title: MUBI URL or search keyword.
        country: optional country-code override (used with a VPN).

    Raises:
        EnvironmentError: if the MUBI service config is missing.
    """
    super().__init__(ctx)
    self.raw_title = title  # Store raw input for search mode
    self.country = country

    # Only parse as URL if it matches MUBI patterns
    m_film = re.match(self.TITLE_RE, title)
    m_series = re.match(self.SERIES_TITLE_RE, title)

    self.is_series = bool(m_series)
    self.slug = m_film.group("slug") if m_film else None
    self.series_slug = m_series.group("series_slug") if m_series else None
    self.season_slug = m_series.group("season_slug") if m_series else None

    # Core state — populated later by authenticate() / get_titles().
    self.film_id: Optional[int] = None
    self.lt_token: Optional[str] = None
    self.session_token: Optional[str] = None
    self.user_id: Optional[int] = None
    self.country_code: Optional[str] = None
    self.set_country_code: Optional[str] = country
    self.anonymous_user_id: Optional[str] = None
    self.default_country: Optional[str] = None
    self.reels_data: Optional[list] = None

    # ALWAYS enable UHD/HEVC path - no user flag required
    self.uhd = True
    self.cdm = ctx.obj.cdm

    if self.config is None:
        raise EnvironmentError("Missing service config for MUBI.")
|
||||
|
||||
def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
    """
    Authenticate against MUBI using browser cookies only.

    Steps: geolocate the current IP for a default country; pull the 'lt'
    (Bearer) and '_mubi_session' cookies; extract or generate an anonymous
    user id; install UHD-capable session headers; fetch the account to get
    user id and country; then bind the anonymous id to the account.

    Raises:
        PermissionError: if cookies are missing/incomplete or the account
            fetch fails.
        ValueError: if IP geolocation fails.
    """
    super().authenticate(cookies, credential)

    if not cookies:
        raise PermissionError("MUBI requires login cookies (lt + _mubi_session). Credentials login is not supported.")

    # IP geolocation for country detection
    try:
        r_ip = self.session.get(self.config["endpoints"]["ip_geolocation"], timeout=5)
        r_ip.raise_for_status()
        ip_data = r_ip.json()
        if ip_data.get("country"):
            self.default_country = ip_data["country"]
            self.log.debug(f"Detected country from IP: {self.default_country}")
        else:
            self.log.warning("IP geolocation response did not contain a country code.")
    except Exception as e:
        raise ValueError(f"Failed to fetch IP geolocation: {e}")

    # Extract essential tokens from cookies
    lt_cookie = next((c for c in cookies if c.name == "lt"), None)
    session_cookie = next((c for c in cookies if c.name == "_mubi_session"), None)
    snow_id_cookie = next((c for c in cookies if c.name == "_snow_id.c006"), None)

    if not lt_cookie:
        raise PermissionError("Missing 'lt' cookie (Bearer token).")
    if not session_cookie:
        raise PermissionError("Missing '_mubi_session' cookie.")

    self.lt_token = lt_cookie.value
    self.session_token = session_cookie.value

    # Extract or generate anonymous_user_id
    # (_snow_id cookie format appears to be "<uuid>.<timestamp>...";
    # the id is the first dot-separated component).
    if snow_id_cookie and "." in snow_id_cookie.value:
        self.anonymous_user_id = snow_id_cookie.value.split(".")[0]
    else:
        self.anonymous_user_id = str(uuid.uuid4())
        self.log.warning(f"No _snow_id.c006 cookie found — generated new anonymous_user_id: {self.anonymous_user_id}")

    # Configure session headers for UHD access — advertising h265/eac3
    # codec support is what unlocks the UHD/HEVC manifests.
    base_headers = {
        "User-Agent": "Mozilla/5.0 (Linux; Android 13; SM-G975F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Mobile Safari/537.36",
        "Origin": "https://mubi.com",
        "Referer": "https://mubi.com/",
        "CLIENT": "web",
        "Client-Accept-Video-Codecs": "h265,vp9,h264",
        "Client-Accept-Audio-Codecs": "eac3,ac3,aac",
        "Authorization": f"Bearer {self.lt_token}",
        "ANONYMOUS_USER_ID": self.anonymous_user_id,
        "Client-Country": self.default_country,
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-site",
        "Pragma": "no-cache",
        "Cache-Control": "no-cache",
    }
    self.session.headers.update(base_headers)

    # Fetch account info
    r_account = self.session.get(self.config["endpoints"]["account"])
    if not r_account.ok:
        raise PermissionError(f"Failed to fetch MUBI account: {r_account.status_code} {r_account.text}")

    account_data = r_account.json()
    self.user_id = account_data.get("id")
    self.country_code = (account_data.get("country") or {}).get("code", "NL")

    # An explicit -c/--country flag overrides the account's country.
    if self.set_country_code is not None:
        self.country_code = self.set_country_code.upper()

    self.session.headers["Client-Country"] = self.country_code
    self.GEOFENCE = (self.country_code,)
    self._bind_anonymous_user()

    self.log.info(
        f"Authenticated as user {self.user_id}, "
        f"country: {self.country_code}, "
        f"anonymous_id: {self.anonymous_user_id}"
    )
|
||||
|
||||
def _bind_anonymous_user(self):
    """
    Best-effort: attach the anonymous user UUID to the logged-in account.

    Failures are logged and swallowed — playback does not depend on the
    binding succeeding.
    """
    try:
        resp = self.session.put(
            self.config["endpoints"]["current_user"],
            json={"anonymous_user_uuid": self.anonymous_user_id},
            headers={"Content-Type": "application/json"},
        )
    except Exception as e:
        self.log.warning(f"Exception while binding anonymous_user_uuid: {e}")
        return

    if resp.ok:
        self.log.debug("Anonymous user ID successfully bound to account.")
    else:
        self.log.warning(f"Failed to bind anonymous_user_uuid: {resp.status_code}")
|
||||
|
||||
def get_titles(self) -> Titles_T:
    """Dispatch to the series or film metadata fetcher based on the parsed URL."""
    fetch = self._get_series_titles if self.is_series else self._get_film_title
    return fetch()
|
||||
|
||||
def _get_film_title(self) -> Movies:
    """
    Fetch film metadata by slug and wrap it in a single-item Movies list.

    Also fetches and caches the film's "reels" (self.reels_data), which
    carry the audio-track list used here for original-language detection
    and reused later by get_tracks.
    """
    url = self.config["endpoints"]["film_by_slug"].format(slug=self.slug)
    r = self.session.get(url)
    r.raise_for_status()
    data = r.json()
    self.film_id = data["id"]

    # Fetch reels for language detection and subtitle names
    url_reels = self.config["endpoints"]["reels"].format(film_id=self.film_id)
    r_reels = self.session.get(url_reels)
    r_reels.raise_for_status()
    self.reels_data = r_reels.json()

    # Detect original language from first audio track; default to English.
    original_language_code = "en"
    if self.reels_data and self.reels_data[0].get("audio_tracks"):
        first_audio_track = self.reels_data[0]["audio_tracks"][0]
        if "language_code" in first_audio_track:
            original_language_code = first_audio_track["language_code"]
            self.log.debug(f"Detected original language from reels: '{original_language_code}'")

    # Strip the simple HTML markup MUBI uses in editorial descriptions.
    description = (
        data.get("default_editorial_html", "")
        .replace("<p>", "").replace("</p>", "").replace("<em>", "").replace("</em>", "").strip()
    )
    year = data.get("year")
    name = data.get("title", "Unknown")

    movie = Movie(
        id_=self.film_id,
        service=self.__class__,
        name=name,
        year=year,
        description=description,
        language=Language.get(original_language_code),
        data=data,
    )
    return Movies([movie])
|
||||
|
||||
def _get_series_titles(self) -> Titles_T:
    """
    Build a Series of episodes for the parsed series slug.

    If a season slug was given, only that season is fetched (404 is a
    hard error); otherwise all listed seasons are fetched, with missing
    or empty seasons skipped. Episodes are sorted by (season, number).

    Raises:
        ValueError: if the requested season is missing, or the series
            lists no seasons at all.
    """
    series_url = self.config["endpoints"]["series"].format(series_slug=self.series_slug)
    r_series = self.session.get(series_url)
    r_series.raise_for_status()
    series_data = r_series.json()
    episodes = []

    if self.season_slug:
        # A specific season was requested — fetch just that one.
        eps_url = self.config["endpoints"]["season_episodes"].format(
            series_slug=self.series_slug,
            season_slug=self.season_slug
        )
        r_eps = self.session.get(eps_url)
        if r_eps.status_code == 404:
            raise ValueError(f"Season '{self.season_slug}' not found.")
        r_eps.raise_for_status()
        episodes_data = r_eps.json().get("episodes", [])
        self._add_episodes_to_list(episodes, episodes_data, series_data)
    else:
        # No season specified — walk every season of the series.
        seasons = series_data.get("seasons", [])
        if not seasons:
            raise ValueError("No seasons found for this series.")
        for season in seasons:
            season_slug = season["slug"]
            eps_url = self.config["endpoints"]["season_episodes"].format(
                series_slug=self.series_slug,
                season_slug=season_slug
            )
            self.log.debug(f"Fetching episodes for season: {season_slug}")
            r_eps = self.session.get(eps_url)
            # In bulk mode an unavailable season is skipped, not fatal.
            if r_eps.status_code == 404:
                self.log.info(f"Season '{season_slug}' not available, skipping.")
                continue
            r_eps.raise_for_status()
            episodes_data = r_eps.json().get("episodes", [])
            if not episodes_data:
                self.log.info(f"No episodes found in season '{season_slug}'.")
                continue
            self._add_episodes_to_list(episodes, episodes_data, series_data)

    return Series(sorted(episodes, key=lambda x: (x.season, x.number)))
|
||||
|
||||
def _add_episodes_to_list(self, episodes_list: list, episodes_data: list, series_data: dict):
    """
    Convert raw MUBI episode payloads to Episode objects and append them
    to *episodes_list* (mutated in place).

    Args:
        episodes_list: accumulator list of Episode objects.
        episodes_data: raw episode dicts from the season endpoint.
        series_data: series payload (supplies the series title).
    """
    for ep in episodes_data:
        playback_langs = ep.get("consumable", {}).get("playback_languages", {})
        audio_langs = playback_langs.get("audio_options", ["English"])
        # Audio options look like display names (e.g. "English (5.1)");
        # take the first word as a best-effort language code.
        lang_code = audio_langs[0].split()[0].lower() if audio_langs else "en"
        try:
            detected_lang = Language.get(lang_code)
        except Exception:
            # Fixed: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Unrecognized names fall
            # back to English.
            detected_lang = Language.get("en")
        episodes_list.append(Episode(
            id_=ep["id"],
            service=self.__class__,
            title=series_data["title"],
            season=ep["episode"]["season_number"],
            number=ep["episode"]["number"],
            name=ep["title"],
            description=ep.get("short_synopsis", ""),
            language=detected_lang,
            data=ep,
        ))
|
||||
|
||||
def get_tracks(self, title: Title_T) -> Tracks:
|
||||
film_id = getattr(title, "id", None)
|
||||
if not film_id:
|
||||
raise RuntimeError("Title ID not found.")
|
||||
|
||||
# Initiate viewing session
|
||||
url_view = self.config["endpoints"]["initiate_viewing"].format(film_id=film_id)
|
||||
r_view = self.session.post(url_view, json={}, headers={"Content-Type": "application/json"})
|
||||
r_view.raise_for_status()
|
||||
view_data = r_view.json()
|
||||
reel_id = view_data["reel_id"]
|
||||
|
||||
# Fetch reels data if not cached
|
||||
if not self.film_id:
|
||||
self.film_id = film_id
|
||||
if not self.reels_data:
|
||||
url_reels = self.config["endpoints"]["reels"].format(film_id=film_id)
|
||||
r_reels = self.session.get(url_reels)
|
||||
r_reels.raise_for_status()
|
||||
self.reels_data = r_reels.json()
|
||||
reels = self.reels_data
|
||||
text_tracks_reel = reels[0]["text_tracks"]
|
||||
reel = next((r for r in reels if r["id"] == reel_id), reels[0])
|
||||
|
||||
# Get secure streaming URL
|
||||
url_secure = self.config["endpoints"]["secure_url"].format(film_id=film_id)
|
||||
r_secure = self.session.get(url_secure)
|
||||
r_secure.raise_for_status()
|
||||
secure_data = r_secure.json()
|
||||
|
||||
# Find DASH manifest URL
|
||||
manifest_url = None
|
||||
for entry in secure_data.get("urls", []):
|
||||
if entry.get("content_type") == "application/dash+xml":
|
||||
manifest_url = entry["src"]
|
||||
break
|
||||
if not manifest_url:
|
||||
raise ValueError("No DASH manifest URL found.")
|
||||
|
||||
manifest_url = re.sub(
|
||||
r'/default/ver1\.AVC1\.[^/]*\.mpd',
|
||||
'/default/ver1.hevc.ex-vtt.mpd',
|
||||
manifest_url
|
||||
)
|
||||
# Fallback for non-AVC URLs
|
||||
if '/default/ver1.hevc.ex-vtt.mpd' not in manifest_url:
|
||||
manifest_url = re.sub(
|
||||
r'/default/[^/]*\.mpd',
|
||||
'/default/ver1.hevc.ex-vtt.mpd',
|
||||
manifest_url
|
||||
)
|
||||
|
||||
# Parse DASH manifest
|
||||
tracks = DASH.from_url(manifest_url, session=self.session).to_tracks(language=title.language)
|
||||
|
||||
# Add enhanced subtitles (forced/SDH detection)
|
||||
subtitles = []
|
||||
for sub in secure_data.get("text_track_urls", []):
|
||||
lang_code = sub.get("language_code", "und")
|
||||
vtt_url = sub.get("url")
|
||||
role = sub.get("role")
|
||||
forced = False
|
||||
sdh = False
|
||||
if not vtt_url:
|
||||
continue
|
||||
try:
|
||||
disp_name = (next(filter(lambda x: x['id'] == sub["id"], text_tracks_reel), None))["display_name"]
|
||||
except:
|
||||
disp_name = sub.get("role", "") + " " + lang_code.upper()
|
||||
if role == "forced-subtitle":
|
||||
forced = True
|
||||
if role == "caption":
|
||||
sdh = True
|
||||
if "(SDH)" in disp_name:
|
||||
disp_name = disp_name.replace("(SDH)", "").strip()
|
||||
is_original = lang_code == title.language.language
|
||||
subtitles.append(
|
||||
Subtitle(
|
||||
id_=sub["id"],
|
||||
url=vtt_url,
|
||||
language=Language.get(lang_code),
|
||||
is_original_lang=is_original,
|
||||
codec=Subtitle.Codec.WebVTT,
|
||||
name=disp_name,
|
||||
forced=forced,
|
||||
sdh=sdh,
|
||||
)
|
||||
)
|
||||
tracks.subtitles = subtitles
|
||||
return tracks
|
||||
|
||||
def search(self) -> Generator[SearchResult, None, None]:
|
||||
"""
|
||||
Search MUBI films using official API endpoint.
|
||||
Returns only playable films with proper metadata formatting.
|
||||
"""
|
||||
params = {
|
||||
"query": self.raw_title,
|
||||
"page": 1,
|
||||
"per_page": 24,
|
||||
"playable": "true",
|
||||
"all_films_on_zero_hits": "true"
|
||||
}
|
||||
|
||||
response = self.session.get(
|
||||
url=self.config["endpoints"]["search"],
|
||||
params=params
|
||||
)
|
||||
response.raise_for_status()
|
||||
results = response.json()
|
||||
|
||||
for film in results.get("films", []):
|
||||
display_title = f"{film['title']} ({film['year']})"
|
||||
yield SearchResult(
|
||||
id_=film["id"],
|
||||
title=display_title,
|
||||
label="MOVIE",
|
||||
url=film["web_url"].rstrip() # Clean trailing spaces
|
||||
)
|
||||
|
||||
def get_chapters(self, title: Title_T) -> list[Chapter]:
|
||||
return []
|
||||
|
||||
def get_widevine_license(self, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
|
||||
if not self.user_id:
|
||||
raise RuntimeError("user_id not set — authenticate first.")
|
||||
|
||||
# Cookie-based license request (NO dtinfo - credentials removed)
|
||||
dt_custom_data = {
|
||||
"userId": self.user_id,
|
||||
"sessionId": self.lt_token,
|
||||
"merchant": "mubi"
|
||||
}
|
||||
dt_custom_data_b64 = base64.b64encode(json.dumps(dt_custom_data).encode()).decode()
|
||||
headers = {
|
||||
"User-Agent": "Mozilla/5.0 (Linux; Android 13; SM-G975F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Mobile Safari/537.36",
|
||||
"Accept": "*/*",
|
||||
"Origin": "https://mubi.com",
|
||||
"Referer": "https://mubi.com/",
|
||||
"dt-custom-data": dt_custom_data_b64,
|
||||
}
|
||||
r = self.session.post(
|
||||
self.config["endpoints"]["license"],
|
||||
data=challenge,
|
||||
headers=headers,
|
||||
)
|
||||
r.raise_for_status()
|
||||
license_data = r.json()
|
||||
if license_data.get("status") != "OK":
|
||||
raise PermissionError(f"DRM license error: {license_data}")
|
||||
return base64.b64decode(license_data["license"])
|
||||
|
||||
def get_playready_license(self, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
|
||||
if not self.user_id:
|
||||
raise RuntimeError("user_id not set — authenticate first.")
|
||||
|
||||
# Cookie-based PlayReady license request (NO dtinfo - credentials removed)
|
||||
dt_custom_data = {
|
||||
"userId": self.user_id,
|
||||
"sessionId": self.lt_token,
|
||||
"merchant": "mubi"
|
||||
}
|
||||
dt_custom_data_b64 = base64.b64encode(json.dumps(dt_custom_data).encode()).decode()
|
||||
headers = {
|
||||
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/144.0.0.0 Safari/537.36 Edg/144.0.0.0",
|
||||
"Accept": "*/*",
|
||||
"Origin": "https://mubi.com",
|
||||
"Referer": "https://mubi.com/",
|
||||
"dt-custom-data": dt_custom_data_b64,
|
||||
}
|
||||
r = self.session.post(
|
||||
self.config["endpoints"]["license_pr"],
|
||||
data=challenge,
|
||||
headers=headers,
|
||||
)
|
||||
r.raise_for_status()
|
||||
if r.status_code != 200:
|
||||
raise PermissionError(f"DRM license error")
|
||||
return r.content
|
||||
@ -1,14 +0,0 @@
|
||||
endpoints:
|
||||
account: "https://api.mubi.com/v4/account"
|
||||
current_user: "https://api.mubi.com/v4/current_user"
|
||||
film_by_slug: "https://api.mubi.com/v4/films/{slug}"
|
||||
playback_languages: "https://api.mubi.com/v4/films/{film_id}/playback_languages"
|
||||
initiate_viewing: "https://api.mubi.com/v4/films/{film_id}/viewing?parental_lock_enabled=true"
|
||||
reels: "https://api.mubi.com/v4/films/{film_id}/reels"
|
||||
secure_url: "https://api.mubi.com/v4/films/{film_id}/viewing/secure_url"
|
||||
license: "https://lic.drmtoday.com/license-proxy-widevine/cenc/"
|
||||
ip_geolocation: "https://directory.cookieyes.com/api/v1/ip"
|
||||
series: "https://api.mubi.com/v4/series/{series_slug}"
|
||||
season_episodes: "https://api.mubi.com/v4/series/{series_slug}/seasons/{season_slug}/episodes/available"
|
||||
license_pr: "https://lic.drmtoday.com/license-proxy-headerauth/drmtoday/RightsManager.asmx?persistent=false"
|
||||
search: "https://api.mubi.com/v4/search/films"
|
||||
185
NPO/__init__.py
185
NPO/__init__.py
@ -5,8 +5,7 @@ from typing import Optional
|
||||
from langcodes import Language
|
||||
|
||||
import click
|
||||
from collections.abc import Generator
|
||||
from unshackle.core.search_result import SearchResult
|
||||
|
||||
from unshackle.core.constants import AnyTrack
|
||||
from unshackle.core.credential import Credential
|
||||
from unshackle.core.manifests import DASH
|
||||
@ -18,21 +17,21 @@ from unshackle.core.tracks import Chapter, Tracks, Subtitle
|
||||
class NPO(Service):
|
||||
"""
|
||||
Service code for NPO Start (npo.nl)
|
||||
Version: 1.1.0
|
||||
Version: 1.0.0
|
||||
|
||||
Authorization: optional cookies (free/paid content supported)
|
||||
Security: FHD @ L3
|
||||
FHD @ SL3000
|
||||
(Widevine and PlayReady support)
|
||||
Security: FHD @ L3 (Widevine)
|
||||
|
||||
Supports:
|
||||
• Series ↦ https://npo.nl/start/serie/{slug}
|
||||
• Movies ↦ https://npo.nl/start/video/{slug}
|
||||
|
||||
Only supports widevine at the moment
|
||||
|
||||
Note: Movie inside a series can be downloaded as movie by converting URL to:
|
||||
https://npo.nl/start/video/slug
|
||||
|
||||
To change between Widevine and Playready, you need to change the DrmType in config.yaml to either widevine or playready
|
||||
Note: Movie that is inside in a series (e.g.
|
||||
https://npo.nl/start/serie/zappbios/.../zappbios-captain-nova/afspelen)
|
||||
can be downloaded as movies by converting the URL to:
|
||||
https://npo.nl/start/video/zappbios-captain-nova
|
||||
"""
|
||||
|
||||
TITLE_RE = (
|
||||
@ -56,8 +55,10 @@ class NPO(Service):
|
||||
|
||||
m = re.match(self.TITLE_RE, title)
|
||||
if not m:
|
||||
self.search_term = title
|
||||
return
|
||||
raise ValueError(
|
||||
f"Unsupported NPO URL: {title}\n"
|
||||
"Use /video/slug for movies or /serie/slug for series."
|
||||
)
|
||||
|
||||
self.slug = m.group("slug")
|
||||
self.kind = m.group("type") or "video"
|
||||
@ -67,9 +68,6 @@ class NPO(Service):
|
||||
if self.config is None:
|
||||
raise EnvironmentError("Missing service config.")
|
||||
|
||||
# Store CDM reference
|
||||
self.cdm = ctx.obj.cdm
|
||||
|
||||
def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
|
||||
super().authenticate(cookies, credential)
|
||||
if not cookies:
|
||||
@ -93,79 +91,59 @@ class NPO(Service):
|
||||
else:
|
||||
self.log.warning("NPO auth check failed.")
|
||||
|
||||
def _fetch_next_data(self, slug: str) -> dict:
|
||||
"""Fetch and parse __NEXT_DATA__ from video/series page."""
|
||||
def _get_build_id(self, slug: str) -> str:
|
||||
"""Fetch buildId from the actual video/series page."""
|
||||
url = f"https://npo.nl/start/{'video' if self.kind == 'video' else 'serie'}/{slug}"
|
||||
r = self.session.get(url)
|
||||
r.raise_for_status()
|
||||
match = re.search(r'<script id="__NEXT_DATA__" type="application/json">({.*?})</script>', r.text, re.DOTALL)
|
||||
if not match:
|
||||
raise RuntimeError("Failed to extract __NEXT_DATA__")
|
||||
return json.loads(match.group(1))
|
||||
data = json.loads(match.group(1))
|
||||
return data["buildId"]
|
||||
|
||||
def get_titles(self) -> Titles_T:
|
||||
next_data = self._fetch_next_data(self.slug)
|
||||
page_props = next_data["props"]["pageProps"]
|
||||
queries = page_props["dehydratedState"]["queries"]
|
||||
build_id = self._get_build_id(self.slug)
|
||||
|
||||
def get_query_data(fragment: str):
|
||||
if self.kind == "serie":
|
||||
url = self.config["endpoints"]["metadata_series"].format(build_id=build_id, slug=self.slug)
|
||||
else:
|
||||
url = self.config["endpoints"]["metadata"].format(build_id=build_id, slug=self.slug)
|
||||
|
||||
resp = self.session.get(url)
|
||||
resp.raise_for_status()
|
||||
queries = resp.json()["pageProps"]["dehydratedState"]["queries"]
|
||||
|
||||
def get_data(fragment: str):
|
||||
return next((q["state"]["data"] for q in queries if fragment in str(q.get("queryKey", ""))), None)
|
||||
|
||||
if self.kind == "serie":
|
||||
series_data = get_query_data("series:detail-")
|
||||
series_data = get_data("series:detail-")
|
||||
if not series_data:
|
||||
raise ValueError("Series metadata not found")
|
||||
|
||||
# Get list of all available seasons
|
||||
seasons_list = get_query_data("series:seasons-") or []
|
||||
if not seasons_list:
|
||||
self.log.warning("No seasons found for this series.")
|
||||
|
||||
all_episodes = []
|
||||
series_type = series_data.get("type", "timeless_series")
|
||||
|
||||
for season in seasons_list:
|
||||
season_guid = season["guid"]
|
||||
season_number = int(season.get("seasonKey", 0))
|
||||
|
||||
# Try to find episode data in the initial page data first
|
||||
eps_data = get_query_data(f"programs:season-{season_guid}")
|
||||
|
||||
# If not in initial data, fetch from the API
|
||||
if not eps_data:
|
||||
r = self.session.get(
|
||||
self.config["endpoints"]["series_episodes"],
|
||||
params={
|
||||
"guid": season_guid,
|
||||
"type": series_type,
|
||||
"includePremiumContent": "true"
|
||||
}
|
||||
)
|
||||
if r.ok:
|
||||
eps_data = r.json()
|
||||
|
||||
if not eps_data:
|
||||
continue
|
||||
|
||||
for e in eps_data:
|
||||
all_episodes.append(
|
||||
episodes = []
|
||||
seasons = get_data("series:seasons-") or []
|
||||
for season in seasons:
|
||||
eps = get_data(f"programs:season-{season['guid']}") or []
|
||||
for e in eps:
|
||||
episodes.append(
|
||||
Episode(
|
||||
id_=e["guid"],
|
||||
service=self.__class__,
|
||||
title=series_data["title"],
|
||||
season=season_number,
|
||||
number=int(e.get("programKey") or 0),
|
||||
name=e.get("title"),
|
||||
season=int(season["seasonKey"]),
|
||||
number=int(e["programKey"]),
|
||||
name=e["title"],
|
||||
description=(e.get("synopsis", {}) or {}).get("long", ""),
|
||||
language=Language.get("nl"),
|
||||
data=e,
|
||||
)
|
||||
)
|
||||
|
||||
return Series(all_episodes)
|
||||
return Series(episodes)
|
||||
|
||||
# Movie Logic
|
||||
item = get_query_data("program:detail-") or queries[0]["state"]["data"]
|
||||
# Movie
|
||||
item = get_data("program:detail-") or queries[0]["state"]["data"]
|
||||
synopsis = item.get("synopsis", {})
|
||||
desc = synopsis.get("long") or synopsis.get("short", "") if isinstance(synopsis, dict) else str(synopsis)
|
||||
year = (int(item["firstBroadcastDate"]) // 31536000 + 1970) if item.get("firstBroadcastDate") else None
|
||||
@ -187,6 +165,7 @@ class NPO(Service):
|
||||
if not product_id:
|
||||
raise ValueError("no productId detected.")
|
||||
|
||||
# Get JWT
|
||||
token_url = self.config["endpoints"]["player_token"].format(product_id=product_id)
|
||||
r_tok = self.session.get(token_url, headers={"Referer": f"https://npo.nl/start/video/{self.slug}"})
|
||||
r_tok.raise_for_status()
|
||||
@ -197,7 +176,7 @@ class NPO(Service):
|
||||
self.config["endpoints"]["streams"],
|
||||
json={
|
||||
"profileName": "dash",
|
||||
"drmType": self.config["DrmType"],
|
||||
"drmType": "widevine",
|
||||
"referrerUrl": f"https://npo.nl/start/video/{self.slug}",
|
||||
"ster": {"identifier": "npo-app-desktop", "deviceType": 4, "player": "web"},
|
||||
},
|
||||
@ -226,17 +205,12 @@ class NPO(Service):
|
||||
|
||||
# Subtitles
|
||||
subtitles = []
|
||||
for sub in (data.get("assets", {}) or {}).get("subtitles", []) or []:
|
||||
if not isinstance(sub, dict):
|
||||
continue
|
||||
for sub in data.get("assets", {}).get("subtitles", []):
|
||||
lang = sub.get("iso", "und")
|
||||
location = sub.get("location")
|
||||
if not location:
|
||||
continue # skip if no URL provided
|
||||
subtitles.append(
|
||||
Subtitle(
|
||||
id_=sub.get("name", lang),
|
||||
url=location.strip(),
|
||||
url=sub["location"].strip(),
|
||||
language=Language.get(lang),
|
||||
is_original_lang=lang == "nl",
|
||||
codec=Subtitle.Codec.WebVTT,
|
||||
@ -259,14 +233,9 @@ class NPO(Service):
|
||||
|
||||
for tr in tracks.videos + tracks.audio:
|
||||
if getattr(tr, "drm", None):
|
||||
if drm_type == "playready":
|
||||
tr.drm.license = lambda challenge, **kw: self.get_playready_license(
|
||||
challenge=challenge, title=title, track=tr
|
||||
)
|
||||
else:
|
||||
tr.drm.license = lambda challenge, **kw: self.get_widevine_license(
|
||||
challenge=challenge, title=title, track=tr
|
||||
)
|
||||
tr.drm.license = lambda challenge, **kw: self.get_widevine_license(
|
||||
challenge=challenge, title=title, track=tr
|
||||
)
|
||||
|
||||
return tracks
|
||||
|
||||
@ -275,63 +244,11 @@ class NPO(Service):
|
||||
|
||||
def get_widevine_license(self, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
|
||||
if not self.drm_token:
|
||||
raise ValueError("DRM token not set, login or paid content may be required.")
|
||||
raise ValueError("DRM token not set – login or paid content may be required.")
|
||||
r = self.session.post(
|
||||
self.config["endpoints"]["license"],
|
||||
self.config["endpoints"]["widevine_license"],
|
||||
params={"custom_data": self.drm_token},
|
||||
data=challenge,
|
||||
)
|
||||
r.raise_for_status()
|
||||
return r.content
|
||||
|
||||
def get_playready_license(self, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
|
||||
if not self.drm_token:
|
||||
raise ValueError("DRM token not set, login or paid content may be required.")
|
||||
headers = {
|
||||
"Content-Type": "text/xml; charset=utf-8",
|
||||
"SOAPAction": "http://schemas.microsoft.com/DRM/2007/03/protocols/AcquireLicense",
|
||||
"Origin": "https://npo.nl",
|
||||
"Referer": "https://npo.nl/",
|
||||
"User-Agent": (
|
||||
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
|
||||
"AppleWebKit/537.36 (KHTML, like Gecko) "
|
||||
"Chrome/141.0.0.0 Safari/537.36 Edg/141.0.0.0"
|
||||
),
|
||||
}
|
||||
r = self.session.post(
|
||||
self.config["endpoints"]["license"],
|
||||
params={"custom_data": self.drm_token},
|
||||
data=challenge,
|
||||
headers=headers,
|
||||
)
|
||||
r.raise_for_status()
|
||||
return r.content
|
||||
|
||||
def search(self) -> Generator[SearchResult, None, None]:
|
||||
query = getattr(self, "search_term", None) or getattr(self, "title", None)
|
||||
search = self.session.get(
|
||||
url=self.config["endpoints"]["search"],
|
||||
params={
|
||||
"searchQuery": query, # always use the correct attribute
|
||||
"searchType": "series",
|
||||
"subscriptionType": "premium",
|
||||
"includePremiumContent": "true",
|
||||
},
|
||||
headers={
|
||||
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:143.0) Gecko/20100101 Firefox/143.0",
|
||||
"Accept": "application/json, text/plain, */*",
|
||||
"Origin": "https://npo.nl",
|
||||
"Referer": f"https://npo.nl/start/zoeken?zoekTerm={query}",
|
||||
}
|
||||
).json()
|
||||
for result in search.get("items", []):
|
||||
yield SearchResult(
|
||||
id_=result.get("guid"),
|
||||
title=result.get("title"),
|
||||
label=result.get("type", "SERIES").upper() if result.get("type") else "SERIES",
|
||||
url=f"https://npo.nl/start/serie/{result.get('slug')}" if result.get("type") == "timeless_series" else
|
||||
f"https://npo.nl/start/video/{result.get('slug')}"
|
||||
)
|
||||
|
||||
|
||||
|
||||
return r.content
|
||||
@ -1,11 +1,8 @@
|
||||
endpoints:
|
||||
metadata: "https://npo.nl/start/_next/data/{build_id}/video/{slug}.json"
|
||||
metadata_series: "https://npo.nl/start/_next/data/{build_id}/serie/{slug}/afleveringen.json"
|
||||
metadata_series: "https://npo.nl/start/_next/data/{build_id}/serie/{slug}.json"
|
||||
metadata_episode: "https://npo.nl/start/_next/data/{build_id}/serie/{series_slug}/seizoen-{season_slug}/{episode_slug}.json"
|
||||
series_episodes: "https://npo.nl/start/api/domain/programs-by-season"
|
||||
streams: "https://prod.npoplayer.nl/stream-link"
|
||||
player_token: "https://npo.nl/start/api/domain/player-token?productId={product_id}"
|
||||
license: "https://npo-drm-gateway.samgcloud.nepworldwide.nl/authentication"
|
||||
widevine_license: "https://npo-drm-gateway.samgcloud.nepworldwide.nl/authentication"
|
||||
homepage: "https://npo.nl/start"
|
||||
search: "https://npo.nl/start/api/domain/search-collection-items"
|
||||
DrmType: "widevine"
|
||||
|
||||
@ -16,26 +16,24 @@ from unshackle.core.tracks import Tracks
|
||||
class PTHS(Service):
|
||||
"""
|
||||
Service code for Pathé Thuis (pathe-thuis.nl)
|
||||
Version: 1.1.0 (PlayReady Support Added)
|
||||
Version: 1.0.0
|
||||
|
||||
Security: SD/FHD @ L1/L3 (Widevine)
|
||||
SD/FHD @ SL2K/SL3K (Playready)
|
||||
Authorization: Cookies with authenticationToken + XSRF-TOKEN
|
||||
Security: SD @ L3 (Widevine)
|
||||
FHD @ L1
|
||||
Authorization: Cookies or authentication token
|
||||
|
||||
Supported:
|
||||
• Movies → https://www.pathe-thuis.nl/film/{id}
|
||||
|
||||
Note:
|
||||
Pathé Thuis does not have episodic content, only movies.
|
||||
Subtitles are hardcoded here so yeah I can't do anything about it
|
||||
The quality is depend on what you rented for, is it SD or HD?
|
||||
"""
|
||||
|
||||
TITLE_RE = (
|
||||
r"^(?:https?://(?:www\.)?pathe-thuis\.nl/film/)?(?P<id>\d+)(?:/[^/]+)?$"
|
||||
)
|
||||
GEOFENCE = ("NL",)
|
||||
NO_SUBTITLES = True
|
||||
NO_SUBTITLES = True
|
||||
|
||||
@staticmethod
|
||||
@click.command(name="PTHS", short_help="https://www.pathe-thuis.nl")
|
||||
@ -46,15 +44,17 @@ class PTHS(Service):
|
||||
|
||||
def __init__(self, ctx, title: str):
|
||||
super().__init__(ctx)
|
||||
|
||||
m = re.match(self.TITLE_RE, title)
|
||||
if not m:
|
||||
raise ValueError(
|
||||
f"Unsupported Pathé Thuis URL or ID: {title}\n"
|
||||
"Use e.g. https://www.pathe-thuis.nl/film/30591"
|
||||
)
|
||||
|
||||
self.movie_id = m.group("id")
|
||||
self.drm_token = None
|
||||
self.license_url = None
|
||||
|
||||
if self.config is None:
|
||||
raise EnvironmentError("Missing service config for Pathé Thuis.")
|
||||
|
||||
@ -65,27 +65,18 @@ class PTHS(Service):
|
||||
self.log.warning("No cookies provided, proceeding unauthenticated.")
|
||||
return
|
||||
|
||||
# Extract critical cookies
|
||||
auth_token = next((c.value for c in cookies if c.name == "authenticationToken"), None)
|
||||
xsrf_token = next((c.value for c in cookies if c.name == "XSRF-TOKEN"), None)
|
||||
|
||||
if not auth_token:
|
||||
token = next((c.value for c in cookies if c.name == "authenticationToken"), None)
|
||||
if not token:
|
||||
self.log.info("No authenticationToken cookie found, unauthenticated mode.")
|
||||
return
|
||||
|
||||
headers = {
|
||||
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/144.0.0.0 Safari/537.36 Edg/144.0.0.0",
|
||||
"X-Pathe-Device-Identifier": "web-1",
|
||||
"X-Pathe-Auth-Session-Token": auth_token,
|
||||
}
|
||||
|
||||
if xsrf_token:
|
||||
headers["X-XSRF-TOKEN"] = xsrf_token
|
||||
self.log.debug(f"XSRF-TOKEN header set: {xsrf_token[:10]}...")
|
||||
|
||||
self.session.headers.update(headers)
|
||||
auth_status = "with XSRF" if xsrf_token else "without XSRF"
|
||||
self.log.info(f"Authentication token attached ({auth_status}).")
|
||||
self.session.headers.update({
|
||||
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:143.0) Gecko/20100101 Firefox/143.0",
|
||||
"X-Pathe-Device-Identifier": "web-widevine-1",
|
||||
"X-Pathe-Auth-Session-Token": token,
|
||||
})
|
||||
self.log.info("Authentication token successfully attached to session.")
|
||||
|
||||
|
||||
def get_titles(self) -> Titles_T:
|
||||
url = self.config["endpoints"]["metadata"].format(movie_id=self.movie_id)
|
||||
@ -99,16 +90,16 @@ class PTHS(Service):
|
||||
name=data["name"],
|
||||
description=data.get("intro", ""),
|
||||
year=data.get("year"),
|
||||
language=Language.get(data.get("language", "nl")), # Default to Dutch
|
||||
language=Language.get(data.get("language", "en")),
|
||||
data=data,
|
||||
)
|
||||
return Movies([movie])
|
||||
|
||||
|
||||
def get_tracks(self, title: Title_T) -> Tracks:
|
||||
ticket_id = self._get_ticket_id(title)
|
||||
base_url = self.config["endpoints"]["ticket"].format(ticket_id=ticket_id)
|
||||
url = f"{base_url}?drmType=dash-widevine"
|
||||
|
||||
url = self.config["endpoints"]["ticket"].format(ticket_id=ticket_id)
|
||||
|
||||
r = self.session.get(url)
|
||||
r.raise_for_status()
|
||||
data = r.json()
|
||||
@ -116,17 +107,16 @@ class PTHS(Service):
|
||||
|
||||
manifest_url = stream.get("url") or stream.get("drmurl")
|
||||
if not manifest_url:
|
||||
raise ValueError("No stream manifest URL found in ticket response.")
|
||||
raise ValueError("No stream manifest URL found.")
|
||||
|
||||
# Store DRM context for license acquisition
|
||||
self.drm_token = stream["token"]
|
||||
self.license_url = stream["rawData"]["licenseserver"]
|
||||
drm_type = stream["rawData"].get("type", "unknown")
|
||||
self.log.info(f"Acquired {drm_type.upper()} stream manifest. License URL set.")
|
||||
|
||||
tracks = DASH.from_url(manifest_url, session=self.session).to_tracks(language=title.language)
|
||||
|
||||
return tracks
|
||||
|
||||
|
||||
def _get_ticket_id(self, title: Title_T) -> str:
|
||||
"""Fetch the user's owned ticket ID if present."""
|
||||
data = title.data
|
||||
@ -135,45 +125,12 @@ class PTHS(Service):
|
||||
return str(t["id"])
|
||||
raise ValueError("No valid ticket found for this movie. Ensure purchase or login.")
|
||||
|
||||
|
||||
def get_chapters(self, title: Title_T):
|
||||
return []
|
||||
|
||||
def get_playready_license(self, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
|
||||
"""
|
||||
Acquire PlayReady license using the authentication token.
|
||||
Matches the license request pattern observed in browser traffic.
|
||||
"""
|
||||
if not self.license_url or not self.drm_token:
|
||||
raise ValueError("Missing license URL or DRM token. Call get_tracks() first.")
|
||||
|
||||
headers = {
|
||||
"Content-Type": "application/octet-stream",
|
||||
"Authorization": f"Bearer {self.drm_token}",
|
||||
|
||||
}
|
||||
params = {"custom_data": self.drm_token}
|
||||
|
||||
self.log.debug(f"Requesting PlayReady license from {self.license_url}")
|
||||
r = self.session.post(
|
||||
self.license_url,
|
||||
params=params,
|
||||
data=challenge,
|
||||
headers=headers,
|
||||
timeout=10
|
||||
)
|
||||
r.raise_for_status()
|
||||
|
||||
if not r.content or len(r.content) < 10:
|
||||
raise ValueError(
|
||||
"Invalid PlayReady license response. "
|
||||
"Check: 1) Valid session 2) XSRF token 3) Active rental/purchase"
|
||||
)
|
||||
|
||||
self.log.info(f"Successfully acquired PlayReady license ({len(r.content)} bytes)")
|
||||
return r.content
|
||||
|
||||
def get_widevine_license(self, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
|
||||
"""Widevine license acquisition . """
|
||||
if not self.license_url or not self.drm_token:
|
||||
raise ValueError("Missing license URL or token.")
|
||||
|
||||
@ -181,6 +138,7 @@ class PTHS(Service):
|
||||
"Content-Type": "application/octet-stream",
|
||||
"Authorization": f"Bearer {self.drm_token}",
|
||||
}
|
||||
|
||||
params = {"custom_data": self.drm_token}
|
||||
|
||||
r = self.session.post(self.license_url, params=params, data=challenge, headers=headers)
|
||||
@ -188,4 +146,4 @@ class PTHS(Service):
|
||||
|
||||
if not r.content:
|
||||
raise ValueError("Empty license response, likely invalid or expired token.")
|
||||
return r.content
|
||||
return r.content
|
||||
53
README.md
53
README.md
@ -1,51 +1,4 @@
|
||||
These services is new and in development. Please feel free to submit pull requests for any mistakes or suggestions.
|
||||
Acknowledgment
|
||||
|
||||
# These services is new and in development. Please feel free to submit pull requests or issue a ticket for any mistakes or suggestions.
|
||||
|
||||
### If you have personal questions or want to request a service, DM me at discord (jerukpurut)
|
||||
|
||||
|
||||
- Roadmap:
|
||||
|
||||
1. NPO:
|
||||
- To add search functionality
|
||||
- More accurate metadata (the year of showing is not according the year of release)
|
||||
- Have a automatic CDM recognition option instead of the user puts it manually in the config for drmType
|
||||
2. KOWP:
|
||||
- Audio mislabel as English
|
||||
- To add Playready Support
|
||||
3. PTHS:
|
||||
- Search Functionality
|
||||
- Account login if possible
|
||||
4. HIDI:
|
||||
- Subtitle is a bit misplace if second sentences came up making the last sentence on the first order and vice versa (needs to be fixed)
|
||||
5. MUBI:
|
||||
- Creds login
|
||||
6. VIKI:
|
||||
- CSRF Token is now scraped, would be from a api requests soon
|
||||
7. VIDO:
|
||||
- Subtitle has little quirk of having javanese and sundanese language labeled on the HLS one but not the DASH one
|
||||
- Search functionality not available yet
|
||||
8. KNPY:
|
||||
- HLS downloading is not working
|
||||
9. VRT:
|
||||
- Search functionality
|
||||
- Fixing few hickups
|
||||
10. SKST (the hardest service I ever dealt upon now):
|
||||
- Subtitle has been fixed, hopefully no issue
|
||||
11. VLD:
|
||||
- All seems fine working for now
|
||||
12. HPLA:
|
||||
- No support for Television yet
|
||||
- Music needs to be fixed since the output is a mp4 instead of m4a
|
||||
13. SHUD:
|
||||
- PlayReady needed
|
||||
14. GLA:
|
||||
- Subs sometimes broken (it's on there side)
|
||||
15. CPY:
|
||||
- Currently it supports only 720p because there is no TV parameter, needed that
|
||||
|
||||
- Acknowledgment
|
||||
|
||||
Thanks to Adef for the NPO start downloader.
|
||||
Thanks to UPS0 for fixing MUBI script
|
||||
|
||||
Thanks to Adef for the NPO start downloader.
|
||||
|
||||
718
SHUD/__init__.py
718
SHUD/__init__.py
@ -1,718 +0,0 @@
|
||||
import base64
|
||||
import hashlib
|
||||
import json
|
||||
import re
|
||||
from collections.abc import Generator
|
||||
from datetime import datetime, timedelta
|
||||
from http.cookiejar import CookieJar
|
||||
from typing import Optional, Union
|
||||
|
||||
import click
|
||||
from langcodes import Language
|
||||
from urllib.parse import parse_qs, urlparse
|
||||
from unshackle.core.constants import AnyTrack
|
||||
from unshackle.core.credential import Credential
|
||||
from unshackle.core.manifests import DASH, HLS
|
||||
from unshackle.core.search_result import SearchResult
|
||||
from unshackle.core.service import Service
|
||||
from unshackle.core.titles import Episode, Movie, Movies, Series, Title_T, Titles_T
|
||||
from unshackle.core.tracks import Chapter, Subtitle, Tracks, Video
|
||||
|
||||
|
||||
class SHUD(Service):
|
||||
"""
|
||||
Service code for watch.shudder.com
|
||||
Version: 1.0.0
|
||||
|
||||
Authorization: Bearer JWT Token
|
||||
|
||||
Security: FHD@L3
|
||||
|
||||
Use full URL (for example - https://watch.shudder.com/watch/927436) or title ID (for example - 927436).
|
||||
"""
|
||||
|
||||
TITLE_RE = r"^(?:https?://watch\.shudder\.com/[^/]+/)?(?P<title_id>\d+)"
|
||||
GEOFENCE = ("US", "CA", "GB", "AU", "IE", "NZ")
|
||||
NO_SUBTITLES = False
|
||||
|
||||
VIDEO_RANGE_MAP = {
|
||||
"SDR": "sdr",
|
||||
"HDR10": "hdr10",
|
||||
"DV": "dolby_vision",
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
@click.command(name="SHUD", short_help="https://watch.shudder.com")
|
||||
@click.argument("title", type=str)
|
||||
@click.option("-m", "--movie", is_flag=True, default=False, help="Specify if it's a movie")
|
||||
@click.option("-d", "--device", type=str, default="web", help="Select device from the config file")
|
||||
@click.pass_context
|
||||
def cli(ctx, **kwargs):
|
||||
return SHUD(ctx, **kwargs)
|
||||
|
||||
def __init__(self, ctx, title, movie, device):
|
||||
super().__init__(ctx)
|
||||
|
||||
self.title = title
|
||||
self.movie = movie
|
||||
self.device = device
|
||||
self.cdm = ctx.obj.cdm
|
||||
|
||||
# Track request overrides based on device/CDM capabilities
|
||||
if any(r != Video.Range.SDR for r in self.track_request.ranges):
|
||||
self.track_request.codecs = [Video.Codec.HEVC]
|
||||
|
||||
if self.cdm and self.cdm.security_level == 3:
|
||||
self.track_request.codecs = [Video.Codec.AVC]
|
||||
self.track_request.ranges = [Video.Range.SDR]
|
||||
|
||||
if self.config is None:
|
||||
raise Exception("Config is missing!")
|
||||
|
||||
profile_name = ctx.parent.params.get("profile")
|
||||
self.profile = profile_name or "default"
|
||||
self.license_data = {}
|
||||
self.realm = "dce.shudder"
|
||||
self.api_key = self.config["api_key"]
|
||||
|
||||
def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
    """Authenticate against Shudder.

    With a credential: bootstrap a guest token from the init endpoint, then
    exchange email/password for a bearer token. With cookies: reuse an
    existing ``auth_token`` cookie. Raises when neither method is available.
    """
    super().authenticate(cookies, credential)

    # Baseline headers attached to every subsequent API request.
    self.session.headers.update({
        "User-Agent": self.config["client"][self.device]["user_agent"],
        "Accept": "application/json, text/plain, */*",
        "Accept-Language": "en-US",
        "Accept-Encoding": "gzip, deflate, br, zstd",
        "Referer": "https://watch.shudder.com/",
        "Content-Type": "application/json",
        "x-api-key": self.api_key,
        "app": "dice",
        "x-app-var": self.config["client"][self.device]["app_version"],
        "Origin": "https://watch.shudder.com",
        "Connection": "keep-alive",
    })

    # Handle credential-based auth (email/password)
    if credential:
        self.log.info("Authenticating with credentials")

        # The init call yields a guest authorisation token required by /login.
        bootstrap = self.session.get(
            url=self.config["endpoints"]["init"],
            params={
                "lk": "language",
                "pk": "subTitleLanguage,subtitlePreferenceMode,subtitlePreferenceMap,audioLanguage,autoAdvance,pluginAccessTokens,videoBackgroundAutoPlay",
                "readLicences": "true",
                "countEvents": "LIVE",
                "menuTargetPlatform": "WEB",
                "readIconStore": "ENABLED",
                "readUserProfiles": "true",
            },
        )
        bootstrap.raise_for_status()
        init_data = bootstrap.json()

        # Exchange the guest token plus credentials for a user token.
        login = self.session.post(
            url=self.config["endpoints"]["login"],
            headers={
                "Authorization": f"Bearer {init_data.get('authentication', {}).get('authorisationToken', '')}",
                "Realm": self.realm,
            },
            json={
                "id": credential.username,
                "secret": credential.password,
            },
        )
        login.raise_for_status()
        login_payload = login.json()

        self.auth_token = login_payload.get("authorisationToken")
        self.refresh_token = login_payload.get("refreshToken")

        if not self.auth_token:
            raise ValueError("Authentication failed - no token received")

        self.session.headers.update({
            "Authorization": f"Bearer {self.auth_token}",
            "Realm": self.realm,
        })

        self.log.info("Authentication successful")
        return

    if cookies:
        self.log.info("Authenticating with cookies")
        for cookie in cookies:
            if cookie.name != "auth_token":
                continue
            self.auth_token = cookie.value
            self.session.headers.update({
                "Authorization": f"Bearer {self.auth_token}",
                "Realm": self.realm,
            })
            return
        raise ValueError("No valid auth_token cookie found")

    raise EnvironmentError("Service requires Credentials or Cookies for Authentication.")
|
||||
|
||||
def search(self) -> Generator[SearchResult, None, None]:
    """Search for titles on Shudder"""
    response = self.session.get(
        url=self.config["endpoints"]["search"],
        params={
            "query": self.title,
            "timezone": self.config.get("timezone", "UTC"),
        },
    )
    response.raise_for_status()
    payload = response.json()

    # Flatten every "cardList" element into one list of result cards.
    cards = [
        card
        for element in payload.get("elements", [])
        if element.get("$type") == "cardList"
        for card in element.get("attributes", {}).get("cards", [])
    ]

    for card in cards:
        attributes = card.get("attributes", {})
        action = attributes.get("action", {})
        route = action.get("data", {}) if action.get("type") == "route" else {}
        if not route:
            continue

        content_type = str(route.get("type", "")).upper()
        if content_type not in ("VOD", "SERIES"):
            continue

        raw_id = str(route.get("id", ""))
        if not raw_id:
            continue

        # "VOD#877410" -> "877410"
        # "SERIES#3311" -> "3311"
        title_id = raw_id.split("#", 1)[-1].strip()
        if not title_id:
            continue

        is_series = content_type == "SERIES"

        yield SearchResult(
            id_=title_id,
            title=route.get("title", ""),
            label="SERIES" if is_series else "MOVIE",
            url=f"https://watch.shudder.com/{'series' if is_series else 'watch'}/{title_id}",
        )
|
||||
|
||||
def _parse_title_input(self) -> tuple[str, Optional[str], Optional[str]]:
|
||||
"""
|
||||
Returns:
|
||||
(title_id, kind, season_id)
|
||||
|
||||
kind:
|
||||
- "watch" for movie/episode URLs like /watch/927436
|
||||
- "series" for series URLs like /series/3713?seasonId=33510
|
||||
- None for raw numeric ids
|
||||
"""
|
||||
raw = str(self.title).strip()
|
||||
|
||||
if raw.isdigit():
|
||||
return raw, None, None
|
||||
|
||||
parsed = urlparse(raw)
|
||||
if parsed.scheme and parsed.netloc:
|
||||
parts = [p for p in parsed.path.split("/") if p]
|
||||
kind = parts[0].lower() if parts else None
|
||||
title_id = parts[1] if len(parts) > 1 else None
|
||||
season_id = parse_qs(parsed.query).get("seasonId", [None])[0]
|
||||
|
||||
if title_id and title_id.isdigit():
|
||||
return title_id, kind, season_id
|
||||
|
||||
match = re.match(self.TITLE_RE, raw)
|
||||
if not match:
|
||||
raise ValueError(f"Invalid Shudder title: {raw}")
|
||||
|
||||
return match.group("title_id"), None, None
|
||||
|
||||
|
||||
def _build_manifest_payload(self, video_id: Union[str, int]) -> dict:
|
||||
return {
|
||||
"mediaCapabilities": [
|
||||
{
|
||||
"protocols": ["HLS", "DASH"],
|
||||
"audioCodecs": ["aac"],
|
||||
"videoCodecs": ["h264", "hevc"],
|
||||
},
|
||||
{
|
||||
"keySystem": "WIDEVINE",
|
||||
"robustness": "software",
|
||||
"protocols": ["HLS", "DASH"],
|
||||
"audioCodecs": ["aac"],
|
||||
"encryptionMode": ["CBC", "CTR"],
|
||||
"videoCodecs": ["h264"],
|
||||
},
|
||||
],
|
||||
"macros": {
|
||||
"CM-APP-NAME": "Website",
|
||||
"CM-APP-VERSION": self.config["client"][self.device]["app_version"],
|
||||
"CM-DVC-DNT": "0",
|
||||
"CM-DVC-H": "1200",
|
||||
"CM-DVC-W": "1920",
|
||||
"CM-DVC-LANG": "en-US",
|
||||
"CM-DVC-OS": "14",
|
||||
"CM-DVC-TYPE": "2",
|
||||
"CM-WEB-MBL": "0",
|
||||
"CM-WEB-PAGE": f"/video/{video_id}",
|
||||
"CM-CST-TCF": "",
|
||||
"CM-CST-USP": "",
|
||||
"CM-DVC-ATS": "",
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def _get_video_metadata(self, video_id: str) -> Optional[dict]:
    """POST the video manifest endpoint; return None when the id is unknown (404/405)."""
    response = self.session.post(
        url=self.config["endpoints"]["video"].format(video_id=video_id),
        params={"includePlaybackDetails": "URL", "displayGeoblocked": "HIDE"},
        json=self._build_manifest_payload(video_id),
    )

    # 404/405 means "not a video id" — caller falls back to series handling.
    if response.status_code in (404, 405):
        return None

    response.raise_for_status()
    return response.json()
|
||||
|
||||
|
||||
def _get_series_view(self, series_id: str, season_id: Optional[str] = None) -> dict:
    """
    Supports both config styles:
    1. full templated URL:
        https://.../api/v1/view?type=series&id={series_id}&timezone=UTC
    2. base URL:
        https://.../api/v1/view
    """
    template = self.config["endpoints"]["series_view"]
    query: dict = {}

    if "{series_id}" in template:
        # Templated style: the id is substituted directly into the URL.
        url = template.format(series_id=series_id)
    else:
        # Base-URL style: pass everything as query parameters.
        url = template
        query.update({
            "type": "series",
            "id": series_id,
            "timezone": self.config.get("timezone", "UTC"),
        })

    if season_id:
        query["seasonId"] = season_id

    response = self.session.get(url=url, params=query or None)
    response.raise_for_status()
    return response.json()
|
||||
|
||||
|
||||
def _get_series_element(self, data: dict) -> dict:
|
||||
for element in data.get("elements", []):
|
||||
if element.get("$type") == "series":
|
||||
return element.get("attributes", {})
|
||||
return {}
|
||||
|
||||
|
||||
def _get_season_bucket(self, data: dict) -> dict:
|
||||
for element in data.get("elements", []):
|
||||
if element.get("$type") != "bucket":
|
||||
continue
|
||||
attrs = element.get("attributes", {})
|
||||
if attrs.get("tab") == "season" or attrs.get("type") == "season":
|
||||
return attrs
|
||||
return {}
|
||||
|
||||
|
||||
def _extract_series_description(self, data: dict) -> str:
|
||||
for element in data.get("elements", []):
|
||||
if element.get("$type") != "hero":
|
||||
continue
|
||||
for item in element.get("attributes", {}).get("content", []):
|
||||
if item.get("$type") == "textblock":
|
||||
text = item.get("attributes", {}).get("text")
|
||||
if text:
|
||||
return text
|
||||
return ""
|
||||
|
||||
|
||||
def _extract_series_year(self, data: dict) -> Optional[int]:
|
||||
for element in data.get("elements", []):
|
||||
if element.get("$type") != "hero":
|
||||
continue
|
||||
for item in element.get("attributes", {}).get("content", []):
|
||||
if item.get("$type") != "tagList":
|
||||
continue
|
||||
for tag in item.get("attributes", {}).get("tags", []):
|
||||
text = str(tag.get("attributes", {}).get("text", "")).strip()
|
||||
if re.fullmatch(r"\d{4}", text):
|
||||
return int(text)
|
||||
return None
|
||||
|
||||
|
||||
@staticmethod
|
||||
def _parse_episode_label(label: str, fallback_number: int) -> tuple[int, str]:
|
||||
label = (label or "").strip()
|
||||
if not label:
|
||||
return fallback_number, f"Episode {fallback_number}"
|
||||
|
||||
m = re.match(r"^E(?P<number>\d+)\s*[-:]\s*(?P<name>.+)$", label, re.I)
|
||||
if m:
|
||||
return int(m.group("number")), m.group("name").strip()
|
||||
|
||||
m = re.match(r"^Episode\s+(?P<number>\d+)\s*[-:]\s*(?P<name>.+)$", label, re.I)
|
||||
if m:
|
||||
return int(m.group("number")), m.group("name").strip()
|
||||
|
||||
return fallback_number, label
|
||||
|
||||
|
||||
def _get_series_titles(self, series_id: str, preferred_season_id: Optional[str] = None) -> Series:
    """
    Important:
        The /view response usually contains episode items only for the selected season.
        So we fetch the initial page, then request each season explicitly with seasonId=...
    """
    first_page = self._get_series_view(series_id, preferred_season_id)

    series_attrs = self._get_series_element(first_page)
    bucket = self._get_season_bucket(first_page)
    meta = first_page.get("metadata", {})

    show_title = (
        meta.get("pageTitle")
        or series_attrs.get("series", {}).get("title")
        or ""
    )
    show_description = self._extract_series_description(first_page)
    show_year = self._extract_series_year(first_page)

    season_list = series_attrs.get("seasons", {}).get("items", [])
    if not season_list:
        raise ValueError(f"No seasons found for series {series_id}")

    # Identify which season the first page already contains items for.
    first_season_id = str(
        bucket.get("seasonId")
        or bucket.get("id")
        or series_attrs.get("seasonId")
        or meta.get("currentSeason", {}).get("seasonId")
        or ""
    )

    # Episode items already fetched, keyed by season id.
    prefetched: dict = {}
    if first_season_id:
        prefetched[first_season_id] = bucket.get("items", [])

    collected = []
    seen_episode_ids = set()

    for ordinal, season in enumerate(season_list, start=1):
        season_id = str(season.get("id"))

        number = season.get("seasonNumber")
        if number is None:
            # Fall back to a digit in the season title, then the list position.
            digits = re.search(r"(\d+)", str(season.get("title", "")))
            number = int(digits.group(1)) if digits else ordinal
        else:
            number = int(number)

        items = prefetched.get(season_id)
        if items is None:
            season_page = self._get_series_view(series_id, season_id)
            bucket = self._get_season_bucket(season_page)
            items = bucket.get("items", [])

        if not items:
            self.log.warning(f"No episode items returned for series {series_id}, season {number}")
            continue

        for position, item in enumerate(items, start=1):
            episode_id = str(item["id"])
            if episode_id in seen_episode_ids:
                continue
            seen_episode_ids.add(episode_id)

            episode_number, episode_name = self._parse_episode_label(
                item.get("title", ""),
                position,
            )

            collected.append((
                number,
                episode_number,
                Episode(
                    id_=episode_id,
                    service=self.__class__,
                    title=show_title,
                    season=number,
                    number=episode_number,
                    name=episode_name,
                    year=show_year,
                    language=Language.get("en"),
                    data={
                        **item,
                        "series_id": int(series_id),
                        "series_title": show_title,
                        "series_description": show_description,
                        "season_id": season.get("id"),
                        "season_title": season.get("title"),
                        "season_number": number,
                        "episode_number": episode_number,
                    },
                ),
            ))

    if not collected:
        raise ValueError(f"No episodes found for series {series_id}")

    ordered = sorted(collected, key=lambda entry: (entry[0], entry[1]))
    return Series([episode for _, _, episode in ordered])
|
||||
|
||||
def get_titles(self) -> Titles_T:
    """Get movie or series metadata"""
    title_id, kind, season_id = self._parse_title_input()
    self.title = title_id

    # Explicit /series/... URL -> go straight to series handling
    if kind == "series" and not self.movie:
        return self._get_series_titles(title_id, season_id)

    # Try movie/video manifest first
    metadata = self._get_video_metadata(title_id)

    # If manifest lookup fails, try series view
    if metadata is None:
        if self.movie:
            raise ValueError(f"Title {title_id} not found")
        self.log.info(f"Manifest lookup failed for {title_id}, trying series view")
        return self._get_series_titles(title_id, season_id)

    if metadata.get("contentDownload", {}).get("permission") == "DISALLOWED":
        self.log.warning(f"Download not permitted for title {title_id}")

    content_type = str(metadata.get("type", "")).upper()

    # Movie path
    if self.movie or content_type in ("VOD", "MOVIE"):
        year_raw = metadata.get("productionYear")
        return Movies([
            Movie(
                id_=metadata["id"],
                service=self.__class__,
                name=metadata.get("title", ""),
                description=metadata.get("description", metadata.get("longDescription", "")),
                year=int(year_raw) if year_raw else None,
                language=Language.get("en"),
                data=metadata,
            )
        ])

    # Direct episode ids are not ideal without the parent series context
    if "SEASON" in content_type or "EPISODE" in content_type:
        raise ValueError(
            "Direct episode IDs are not supported yet. "
            "Use the series URL or series id instead."
        )

    # Fallback to series handling
    return self._get_series_titles(title_id, season_id)
|
||||
|
||||
def get_tracks(self, title: Title_T) -> Tracks:
    """Fetch and parse manifest tracks.

    One manifest is requested per codec/range variant via
    ``self._get_tracks_for_variants``; each manifest is parsed (HLS or DASH)
    into Tracks, tagged with the video range, and its audio tracks cleaned up.
    Side effect: ``self.license_data`` is populated from the stream's DRM
    block for later use by get_widevine_license().
    """
    def _fetch_variant(
        title: Title_T,
        codec: Optional[Video.Codec],
        range_: Video.Range,
    ) -> Tracks:
        vcodec_str = "hevc" if codec == Video.Codec.HEVC else "h264"
        range_str = range_.name
        video_format = self.VIDEO_RANGE_MAP.get(range_str, "sdr")

        self.log.info(f" + Fetching {vcodec_str.upper()} {range_str} manifest")

        # Build media capabilities payload
        media_capabilities = [
            {
                "protocols": ["HLS", "DASH"],
                "audioCodecs": ["aac"],
                "videoCodecs": [vcodec_str],
            }
        ]

        # Add DRM capabilities for encrypted streams
        if codec:
            media_capabilities.append({
                "keySystem": "WIDEVINE",
                "robustness": "software",
                "protocols": ["HLS", "DASH"],
                "audioCodecs": ["aac"],
                "encryptionMode": ["CBC", "CTR"],
                "videoCodecs": [vcodec_str],
            })

        # Client fingerprint macros for the request.
        macros = {
            "CM-APP-NAME": "Website",
            "CM-APP-VERSION": self.config["client"][self.device]["app_version"],
            "CM-DVC-DNT": "0",
            "CM-DVC-H": "1080",
            "CM-DVC-W": "1920",
            "CM-DVC-LANG": "en-US",
            "CM-DVC-OS": "14",
            "CM-DVC-TYPE": "2",
            "CM-WEB-MBL": "0",
            # Fixed: the key was written as a placeholder-free f-string (f"CM-WEB-PAGE").
            "CM-WEB-PAGE": f"/video/{title.id}",
        }

        manifest_resp = self.session.post(
            url=self.config["endpoints"]["manifest"].format(video_id=title.id),
            params={"includePlaybackDetails": "URL", "displayGeoblocked": "HIDE"},
            json={
                "mediaCapabilities": media_capabilities,
                "macros": macros,
            }
        )
        manifest_resp.raise_for_status()
        manifest_data = manifest_resp.json()

        # Extract stream URL and DRM info
        streams = manifest_data.get("streams", [])
        if not streams:
            raise ValueError("No streams available for this title")

        stream = streams[0]  # Take first available stream
        stream_url = stream.get("url")
        if not stream_url:
            raise ValueError("No stream URL found in manifest")

        # Store DRM/license data for later use by get_widevine_license()
        drm = stream.get("drm", {})
        if drm:
            self.license_data = {
                "url": drm.get("url", self.config["endpoints"]["widevine_license"]),
                "jwtToken": drm.get("jwtToken", ""),
                "encryptionMode": drm.get("encryptionMode", "CBC"),
                "keySystems": drm.get("keySystems", []),
            }

        # Parse manifest based on protocol.
        # NOTE(review): HLS must be imported alongside DASH from
        # unshackle.core.manifests at the top of this file — confirm.
        if "m3u8" in stream_url.lower():
            tracks = HLS.from_url(url=stream_url, session=self.session).to_tracks(language=title.language)
        else:
            tracks = DASH.from_url(url=stream_url, session=self.session).to_tracks(language=title.language)

        # Apply video range to tracks
        range_enum = {
            "hdr10": Video.Range.HDR10,
            "dolby_vision": Video.Range.DV,
        }.get(video_format, Video.Range.SDR)
        for video in tracks.videos:
            video.range = range_enum

        # Filter audio tracks (remove clear/unencrypted if DRM present)
        if drm:
            tracks.audio = [
                track for track in tracks.audio
                if "clear" not in str(track.data).lower()
            ]

        for track in tracks.audio:
            # Normalize six-channel audio to the conventional 5.1 label.
            if track.channels == 6.0:
                track.channels = 5.1
            # Check for descriptive audio
            label = track.data.get("label", "").lower() if isinstance(track.data, dict) else ""
            if "audio description" in label or "descriptive" in label:
                track.descriptive = True

        return tracks

    return self._get_tracks_for_variants(title, _fetch_variant)
|
||||
|
||||
def get_chapters(self, title: Title_T) -> list[Chapter]:
    """Extract chapter markers if available"""
    chapters: list[Chapter] = []

    # Skip markers (intro/credits) may be present in the title data.
    for marker in title.data.get("skipMarkers", []):
        marker_kind = marker.get("type", "").lower()
        begin = marker.get("start", marker.get("offset"))
        finish = marker.get("end")

        if marker_kind == "intro" and begin is not None:
            chapters.append(Chapter(timestamp=int(begin), name="Opening"))
            # The end of the intro becomes an unnamed resume point.
            if finish:
                chapters.append(Chapter(timestamp=int(finish)))
        elif marker_kind == "credits" and begin is not None:
            chapters.append(Chapter(timestamp=int(begin), name="Credits"))

    return chapters
|
||||
|
||||
def get_widevine_service_certificate(self, **_: any) -> str:
|
||||
"""Return Widevine service certificate if configured"""
|
||||
return self.config.get("certificate", "")
|
||||
|
||||
def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> Optional[Union[bytes, str]]:
    """Request Widevine license from Shudder's DRM server"""
    license_url = self.license_data.get("url") or self.config["endpoints"]["widevine_license"]
    if not license_url:
        raise ValueError("Widevine license endpoint not configured")

    # Build license request headers
    request_headers = {
        "User-Agent": self.config["client"][self.device]["user_agent"],
        "Accept": "*/*",
        "Accept-Language": "en-US,en;q=0.9",
        "Accept-Encoding": "gzip, deflate, br, zstd",
        "Origin": "https://watch.shudder.com",
        "Referer": "https://watch.shudder.com/",
        "Connection": "keep-alive",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "cross-site",
    }

    # Identify the DRM system to the license server.
    request_headers["X-DRM-INFO"] = base64.b64encode(
        json.dumps({"system": "com.widevine.alpha"}).encode()
    ).decode()

    # Prefer the per-stream JWT; fall back to the account bearer token.
    jwt_token = self.license_data.get("jwtToken")
    if jwt_token:
        request_headers["Authorization"] = f"Bearer {jwt_token}"
    elif hasattr(self, "auth_token"):
        request_headers["Authorization"] = f"Bearer {self.auth_token}"

    response = self.session.post(
        url=license_url,
        data=challenge,
        headers=request_headers,
    )
    response.raise_for_status()

    # Handle JSON or binary license response
    try:
        payload = response.json()
        # Shudder may return license in different fields
        return payload.get("license") or payload.get("data") or response.content
    except ValueError:
        return response.content
|
||||
|
||||
@ -1,49 +0,0 @@
|
||||
# Shudder (SHUD) Configuration
|
||||
|
||||
api_key: "857a1e5d-e35e-4fdf-805b-a87b6f8364bf"
|
||||
|
||||
endpoints:
|
||||
# Initialization
|
||||
init: "https://dce-frontoffice.imggaming.com/api/v1/init/"
|
||||
|
||||
# Authentication (with caching support)
|
||||
login: "https://dce-frontoffice.imggaming.com/api/v2/login"
|
||||
refresh: "https://dce-frontoffice.imggaming.com/api/v2/token/refresh"
|
||||
|
||||
# Content Discovery
|
||||
search: "https://search.dce-prod.dicelaboratory.com/search"
|
||||
|
||||
# Video/Episode Manifest (POST)
|
||||
video: "https://dce-frontoffice.imggaming.com/api/v5/manifest/video/{video_id}"
|
||||
manifest: "https://dce-frontoffice.imggaming.com/api/v5/manifest/video/{video_id}"
|
||||
|
||||
# Series Metadata
|
||||
series_view: "https://dce-frontoffice.imggaming.com/api/v1/view?type=series&id={series_id}&timezone=UTC"
|
||||
|
||||
# DRM License Servers
|
||||
widevine_license: "https://shield-drm.imggaming.com/api/v2/license"
|
||||
playready_license: "https://shield-drm.imggaming.com/api/v2/license"
|
||||
|
||||
client:
|
||||
web:
|
||||
user_agent: "Mozilla/5.0 (X11; Linux x86_64; rv:149.0) Gecko/20100101 Firefox/149.0"
|
||||
license_user_agent: "Mozilla/5.0 (X11; Linux x86_64; rv:149.0) Gecko/20100101 Firefox/149.0"
|
||||
app_version: "6.60.0.7cf91e1"
|
||||
type: "BROWSER"
|
||||
|
||||
android_tv:
|
||||
user_agent: "okhttp/4.12.0"
|
||||
license_user_agent: "okhttp/4.12.0"
|
||||
app_version: "6.60.0"
|
||||
type: "ANDROID_TV"
|
||||
|
||||
# Auth token cache duration (seconds)
|
||||
auth_cache_duration: 3600
|
||||
|
||||
# Optional: Widevine certificate
|
||||
# certificate: "CAUSxwE..."
|
||||
|
||||
realm: "dce.shudder"
|
||||
language: "en_US"
|
||||
rate_limit: 2
|
||||
session_timeout: 300
|
||||
1044
SKST/__init__.py
1044
SKST/__init__.py
File diff suppressed because it is too large
Load Diff
@ -1,42 +0,0 @@
|
||||
endpoints:
|
||||
signin: "https://rango.id.skyshowtime.com/signin/service/international"
|
||||
tokens: "https://ovp.skyshowtime.com/auth/tokens"
|
||||
personas: "https://web.clients.skyshowtime.com/bff/personas/v2"
|
||||
atom_node: "https://atom.skyshowtime.com/adapter-calypso/v3/query/node"
|
||||
atom_search: "https://atom.skyshowtime.com/adapter-calypso/v3/query/search"
|
||||
playback: "https://ovp.skyshowtime.com/video/playouts/vod"
|
||||
|
||||
params:
|
||||
provider: "SKYSHOWTIME"
|
||||
proposition: "SKYSHOWTIME"
|
||||
platform: "PC"
|
||||
device: "COMPUTER"
|
||||
client_version: "6.11.21-gsp"
|
||||
|
||||
signature:
|
||||
app_id: "SHOWMAX-ANDROID-v1"
|
||||
key: "kC2UFjsH6PHrc5ENGfyTgC5bPA7aBVZ4aJAyqBBP"
|
||||
version: "1.0"
|
||||
|
||||
territories:
|
||||
- NL
|
||||
- PL
|
||||
- ES
|
||||
- PT
|
||||
- SE
|
||||
- NO
|
||||
- DK
|
||||
- FI
|
||||
- CZ
|
||||
- SK
|
||||
- HU
|
||||
- RO
|
||||
- BG
|
||||
- HR
|
||||
- SI
|
||||
- BA
|
||||
- RS
|
||||
- ME
|
||||
- MK
|
||||
- AL
|
||||
- XK
|
||||
452
VIDO/__init__.py
452
VIDO/__init__.py
@ -1,452 +0,0 @@
|
||||
import re
|
||||
import uuid
|
||||
import xml.etree.ElementTree as ET
|
||||
from urllib.parse import urljoin
|
||||
from hashlib import md5
|
||||
from typing import Optional, Union
|
||||
from http.cookiejar import CookieJar
|
||||
from langcodes import Language
|
||||
|
||||
import click
|
||||
|
||||
from unshackle.core.credential import Credential
|
||||
from unshackle.core.manifests import HLS, DASH
|
||||
from unshackle.core.service import Service
|
||||
from unshackle.core.titles import Episode, Movie, Movies, Series, Title_T, Titles_T
|
||||
from unshackle.core.tracks import Chapter, Tracks, Subtitle
|
||||
from unshackle.core.constants import AnyTrack
|
||||
from datetime import datetime, timezone
|
||||
|
||||
|
||||
class VIDO(Service):
    """
    Vidio.com service, Series and Movies, login required.
    Version: 2.3.0

    Supports URLs like:
    • https://www.vidio.com/premier/2978/giligilis (Series)
    • https://www.vidio.com/watch/7454613-marantau-short-movie (Movie)

    Security: HD@L3 (Widevine DRM when available)
    """

    # Accepts premier/series (shows) and watch (standalone videos) URLs.
    TITLE_RE = r"^https?://(?:www\.)?vidio\.com/(?:premier|series|watch)/(?P<id>\d+)"
    # Service is only reachable from Indonesia.
    GEOFENCE = ("ID",)
|
||||
|
||||
@staticmethod
@click.command(name="VIDO", short_help="https://vidio.com (login required)")
@click.argument("title", type=str)
@click.pass_context
def cli(ctx, **kwargs):
    """Click entry point: forward the parsed CLI options into a VIDO instance."""
    return VIDO(ctx, **kwargs)
|
||||
|
||||
def __init__(self, ctx, title: str):
    """Validate the URL, extract the content id, and set up app identifiers."""
    super().__init__(ctx)

    matched = re.match(self.TITLE_RE, title)
    if not matched:
        raise ValueError(f"Unsupported or invalid Vidio URL: {title}")
    self.content_id = matched.group("id")

    # /watch/ URLs are standalone videos (movies); premier/series are shows.
    self.is_movie = "watch" in title

    # Static app identifiers from Android traffic
    self.API_AUTH = "laZOmogezono5ogekaso5oz4Mezimew1"
    self.USER_AGENT = "vidioandroid/7.14.6-e4d1de87f2 (3191683)"
    self.API_APP_INFO = "android/15/7.14.6-e4d1de87f2-3191683"
    self.VISITOR_ID = str(uuid.uuid4())

    # Auth state (populated by authenticate()).
    self._email = None
    self._user_token = None
    self._access_token = None

    # DRM state — presumably populated during playback setup; confirm against get_tracks.
    self.license_url = None
    self.custom_data = None
    self.cdm = ctx.obj.cdm
|
||||
|
||||
def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
    """Log in to Vidio with email/password and cache the resulting tokens.

    Args:
        cookies: unused; Vidio auth is credential-only.
        credential: must carry a username (email) and password.

    Raises:
        ValueError: when no usable credential is provided.
    """
    if not credential or not credential.username or not credential.password:
        raise ValueError("Vidio requires email and password login.")

    self._email = credential.username
    password = credential.password

    cache_key = f"auth_tokens_{self._email}"
    cache = self.cache.get(cache_key)

    # Check if valid tokens are already in the cache
    if cache and not cache.expired:
        self.log.info("Using cached authentication tokens")
        cached_data = cache.data
        self._user_token = cached_data.get("user_token")
        self._access_token = cached_data.get("access_token")
        if self._user_token and self._access_token:
            return

    # If no valid cache, proceed with login
    self.log.info("Authenticating with username and password")
    headers = {
        "referer": "android-app://com.vidio.android",
        "x-api-platform": "app-android",
        "x-api-auth": self.API_AUTH,
        "user-agent": self.USER_AGENT,
        "x-api-app-info": self.API_APP_INFO,
        "accept-language": "en",
        "content-type": "application/x-www-form-urlencoded",
        "x-visitor-id": self.VISITOR_ID,
    }

    # Fixed: credentials are passed as a dict so requests URL-encodes them.
    # The previous raw f-string body broke for emails/passwords containing
    # '&', '=' or '%'.
    data = {"login": self._email, "password": password}
    r = self.session.post("https://api.vidio.com/api/login", headers=headers, data=data)
    r.raise_for_status()

    auth_data = r.json()
    self._user_token = auth_data["auth"]["authentication_token"]
    self._access_token = auth_data["auth_tokens"]["access_token"]
    self.log.info(f"Authenticated as {self._email}")

    # Derive the cache TTL from the token's expiry timestamp, defaulting
    # to one hour when the field is missing or unparsable.
    try:
        expires_at_str = auth_data["auth_tokens"]["access_token_expires_at"]
        expires_at_dt = datetime.fromisoformat(expires_at_str)
        now_utc = datetime.now(timezone.utc)
        expiration_in_seconds = max(0, int((expires_at_dt - now_utc).total_seconds()))
        self.log.info(f"Token expires in {expiration_in_seconds / 60:.2f} minutes.")
    except (KeyError, ValueError) as e:
        self.log.warning(f"Could not parse token expiration: {e}. Defaulting to 1 hour.")
        expiration_in_seconds = 3600

    cache.set({
        "user_token": self._user_token,
        "access_token": self._access_token
    }, expiration=expiration_in_seconds)
|
||||
|
||||
def _headers(self):
|
||||
if not self._user_token or not self._access_token:
|
||||
raise RuntimeError("Not authenticated. Call authenticate() first.")
|
||||
return {
|
||||
"referer": "android-app://com.vidio.android",
|
||||
"x-api-platform": "app-android",
|
||||
"x-api-auth": self.API_AUTH,
|
||||
"user-agent": self.USER_AGENT,
|
||||
"x-api-app-info": self.API_APP_INFO,
|
||||
"x-visitor-id": self.VISITOR_ID,
|
||||
"x-user-email": self._email,
|
||||
"x-user-token": self._user_token,
|
||||
"x-authorization": self._access_token,
|
||||
"accept-language": "en",
|
||||
"accept": "application/json",
|
||||
"accept-charset": "UTF-8",
|
||||
"content-type": "application/vnd.api+json",
|
||||
}
|
||||
|
||||
def _extract_subtitles_from_mpd(self, mpd_url: str) -> list[Subtitle]:
    """
    Manually parse the MPD to extract subtitle tracks.
    Handles plain VTT format (for free content).
    """
    found: list[Subtitle] = []

    try:
        response = self.session.get(mpd_url)
        response.raise_for_status()

        # Base URL for resolving relative segment paths.
        manifest_base = mpd_url.rsplit('/', 1)[0] + '/'

        # Strip the default namespace so plain tag names work with findall.
        stripped = re.sub(r'\sxmlns="[^"]+"', '', response.text)
        document = ET.fromstring(stripped)

        for adaptation in document.findall('.//AdaptationSet'):
            if adaptation.get('contentType', '') != 'text':
                continue

            lang = adaptation.get('lang', 'und')

            for representation in adaptation.findall('Representation'):
                # Only plain VTT (free content) is handled here.
                if representation.get('mimeType', '') != 'text/vtt':
                    continue

                segment_list = representation.find('SegmentList')
                if segment_list is None:
                    continue

                for segment in segment_list.findall('SegmentURL'):
                    media = segment.get('media')
                    if not media:
                        continue
                    full_url = urljoin(manifest_base, media)

                    # A "-auto" suffix marks machine-generated subtitles.
                    clean_lang = lang.replace('-auto', '')

                    found.append(Subtitle(
                        id_=md5(full_url.encode()).hexdigest()[:16],
                        url=full_url,
                        codec=Subtitle.Codec.WebVTT,
                        language=Language.get(clean_lang),
                        forced=False,
                        sdh=False,
                    ))
                    self.log.debug(f"Found VTT subtitle: {lang} -> {full_url}")

    except Exception as e:
        self.log.warning(f"Failed to extract subtitles from MPD: {e}")

    return found
|
||||
|
||||
def get_titles(self) -> Titles_T:
    """
    Fetch title metadata from the Vidio API.

    Movies: a single /videos/{id}/detail lookup.
    Series: resolves the content profile, finds its season playlists
    (preferring the API's own playlist_group metadata over name matching),
    then pages through every playlist to build the episode list.
    """
    headers = self._headers()

    if self.is_movie:
        r = self.session.get(f"https://api.vidio.com/api/videos/{self.content_id}/detail", headers=headers)
        r.raise_for_status()
        video_data = r.json()["video"]
        year = None
        if video_data.get("publish_date"):
            try:
                # publish_date starts with "YYYY"; tolerate malformed values.
                year = int(video_data["publish_date"][:4])
            except (ValueError, TypeError):
                pass
        return Movies([
            Movie(
                id_=video_data["id"],
                service=self.__class__,
                name=video_data["title"],
                description=video_data.get("description", ""),
                year=year,
                language=Language.get("id"),  # service content is tagged Indonesian
                data=video_data,
            )
        ])
    else:
        r = self.session.get(f"https://api.vidio.com/content_profiles/{self.content_id}", headers=headers)
        r.raise_for_status()
        root = r.json()["data"]
        series_title = root["attributes"]["title"]

        r_playlists = self.session.get(
            f"https://api.vidio.com/content_profiles/{self.content_id}/playlists",
            headers=headers
        )
        r_playlists.raise_for_status()
        playlists_data = r_playlists.json()

        # Use metadata to identify season playlists
        season_playlist_ids = set()
        if "meta" in playlists_data and "playlist_group" in playlists_data["meta"]:
            for group in playlists_data["meta"]["playlist_group"]:
                if group.get("type") == "season":
                    season_playlist_ids.update(group.get("playlist_ids", []))

        season_playlists = []
        for pl in playlists_data["data"]:
            playlist_id = int(pl["id"])
            name = pl["attributes"]["name"].lower()

            if season_playlist_ids:
                # Trust the API's own grouping when it exists.
                if playlist_id in season_playlist_ids:
                    season_playlists.append(pl)
            else:
                # Fallback heuristic: name-match seasons, excluding trailers/extras.
                if ("season" in name or name == "episode" or name == "episodes") and \
                        "trailer" not in name and "extra" not in name:
                    season_playlists.append(pl)

        if not season_playlists:
            raise ValueError("No season playlists found for this series.")

        def extract_season_number(pl):
            # "Season N" -> N; bare "Season"/"Episode(s)" -> 1; anything else -> 0.
            name = pl["attributes"]["name"]
            match = re.search(r"season\s*(\d+)", name, re.IGNORECASE)
            if match:
                return int(match.group(1))
            elif name.lower() in ["season", "episodes", "episode"]:
                return 1
            else:
                return 0

        season_playlists.sort(key=extract_season_number)

        all_episodes = []

        for playlist in season_playlists:
            playlist_id = playlist["id"]
            season_number = extract_season_number(playlist)

            if season_number == 0:
                season_number = 1

            self.log.debug(f"Processing playlist '{playlist['attributes']['name']}' as Season {season_number}")

            # Page through the playlist until the API stops offering a "next" link.
            page = 1
            while True:
                r_eps = self.session.get(
                    f"https://api.vidio.com/content_profiles/{self.content_id}/playlists/{playlist_id}/videos",
                    params={
                        "page[number]": page,
                        "page[size]": 20,
                        "sort": "order",
                        "included": "upcoming_videos"
                    },
                    headers=headers,
                )
                r_eps.raise_for_status()
                page_data = r_eps.json()

                for raw_ep in page_data["data"]:
                    attrs = raw_ep["attributes"]
                    # Number episodes by arrival order within their season.
                    ep_number = len([e for e in all_episodes if e.season == season_number]) + 1
                    all_episodes.append(
                        Episode(
                            id_=int(raw_ep["id"]),
                            service=self.__class__,
                            title=series_title,
                            season=season_number,
                            number=ep_number,
                            name=attrs["title"],
                            description=attrs.get("description", ""),
                            language=Language.get("id"),
                            data=raw_ep,
                        )
                    )

                if not page_data["links"].get("next"):
                    break
                page += 1

        if not all_episodes:
            raise ValueError("No episodes found in any season.")

        return Series(all_episodes)
|
||||
|
||||
def get_tracks(self, title: Title_T) -> Tracks:
    """
    Resolve playable tracks for a title via the Vidio stream API.

    Widevine content must use DASH; otherwise HLS is preferred, with
    sidecar VTT subtitles pulled from the DASH manifest when one exists.
    Side effect: stores self.custom_data / self.license_url for the
    later get_widevine_license() call.
    """
    headers = self._headers()
    # Spoofed Android device identity expected by the stream endpoint.
    headers.update({
        "x-device-brand": "samsung",
        "x-device-model": "SM-A525F",
        "x-device-form-factor": "phone",
        "x-device-soc": "Qualcomm SM7125",
        "x-device-os": "Android 15 (API 35)",
        "x-device-android-mpc": "0",
        "x-device-cpu-arch": "arm64-v8a",
        "x-device-platform": "android",
        "x-app-version": "7.14.6-e4d1de87f2-3191683",
    })

    video_id = str(title.id)
    url = f"https://api.vidio.com/api/stream/v1/video_data/{video_id}?initialize=true"

    r = self.session.get(url, headers=headers)
    r.raise_for_status()
    stream = r.json()

    if not isinstance(stream, dict):
        raise ValueError("Vidio returned invalid stream data.")

    # Extract DRM info
    custom_data = stream.get("custom_data") or {}
    license_servers = stream.get("license_servers") or {}
    widevine_data = custom_data.get("widevine") if isinstance(custom_data, dict) else None
    license_url = license_servers.get("drm_license_url") if isinstance(license_servers, dict) else None

    # Get stream URLs, check all possible HLS and DASH fields
    # HLS URLs (prefer in this order)
    hls_url = (
        stream.get("stream_hls_url") or
        stream.get("stream_token_hls_url") or
        stream.get("stream_token_url")  # This is also HLS (m3u8)
    )

    # DASH URLs
    dash_url = stream.get("stream_dash_url") or stream.get("stream_token_dash_url")

    # DRM only counts when we have everything needed to decrypt: the
    # customdata string, a license URL and a DASH manifest.
    has_drm = widevine_data and license_url and dash_url and isinstance(widevine_data, str)

    if has_drm:
        # DRM content: must use DASH
        self.log.info("Widevine DRM detected, using DASH")
        self.custom_data = widevine_data
        self.license_url = license_url
        tracks = DASH.from_url(dash_url, session=self.session).to_tracks(language=title.language)

    elif hls_url:
        # Non-DRM: prefer HLS (H.264, proper frame_rate metadata)
        self.log.info("No DRM detected, using HLS")
        self.custom_data = None
        self.license_url = None
        tracks = HLS.from_url(hls_url, session=self.session).to_tracks(language=title.language)

        # Clear HLS subtitles (they're segmented and incompatible)
        if tracks.subtitles:
            self.log.debug("Clearing HLS subtitles (incompatible format)")
            tracks.subtitles.clear()

        # Get subtitles from DASH manifest (plain VTT) if available
        if dash_url:
            self.log.debug("Extracting subtitles from DASH manifest")
            manual_subs = self._extract_subtitles_from_mpd(dash_url)
            if manual_subs:
                for sub in manual_subs:
                    tracks.add(sub)
                self.log.info(f"Added {len(manual_subs)} subtitle tracks from DASH")

    elif dash_url:
        # Fallback to DASH only if no HLS available
        self.log.warning("No HLS available, using DASH (VP9 codec - may have issues)")
        self.custom_data = None
        self.license_url = None
        tracks = DASH.from_url(dash_url, session=self.session).to_tracks(language=title.language)

        # Try manual subtitle extraction for non-DRM DASH
        if not tracks.subtitles:
            manual_subs = self._extract_subtitles_from_mpd(dash_url)
            if manual_subs:
                for sub in manual_subs:
                    tracks.add(sub)
    else:
        raise ValueError("No playable stream (DASH or HLS) available.")

    self.log.info(f"Found {len(tracks.videos)} video tracks, {len(tracks.audio)} audio tracks, {len(tracks.subtitles)} subtitle tracks")

    return tracks
|
||||
|
||||
def get_chapters(self, title: Title_T) -> list[Chapter]:
    """This service exposes no chapter markers; always an empty list."""
    chapters: list[Chapter] = []
    return chapters
|
||||
|
||||
def search(self):
    """Searching is not supported by this service code."""
    message = "Search not implemented for Vidio."
    raise NotImplementedError(message)
|
||||
|
||||
def get_widevine_service_certificate(self, **_) -> Union[bytes, str, None]:
    """No privacy/service certificate is used for license requests."""
    certificate = None
    return certificate
|
||||
|
||||
def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
    """
    POST the Widevine challenge to the PallyCon license server.

    Requires self.license_url and self.custom_data to have been set by
    get_tracks(); raises when they are missing or the server refuses.
    """
    if not (self.license_url and self.custom_data):
        raise ValueError("DRM license info missing.")

    request_headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:143.0) Gecko/20100101 Firefox/143.0",
        "Referer": "https://www.vidio.com/",
        "Origin": "https://www.vidio.com",
        # PallyCon's per-session DRM token.
        "pallycon-customdata-v2": self.custom_data,
        "Content-Type": "application/octet-stream",
    }

    self.log.debug(f"Requesting Widevine license from: {self.license_url}")
    license_response = self.session.post(self.license_url, data=challenge, headers=request_headers)

    if not license_response.ok:
        error_summary = license_response.text[:200] if license_response.text else "No response body"
        raise Exception(f"License request failed ({license_response.status_code}): {error_summary}")

    return license_response.content
|
||||
|
||||
@ -1,5 +0,0 @@
|
||||
endpoints:
|
||||
content_profile: "https://api.vidio.com/content_profiles/{content_id}"
|
||||
playlists: "https://api.vidio.com/content_profiles/{content_id}/playlists"
|
||||
playlist_videos: "https://api.vidio.com/content_profiles/{content_id}/playlists/{playlist_id}/videos"
|
||||
stream: "https://api.vidio.com/api/stream/v1/video_data/{video_id}?initialize=true"
|
||||
328
VIKI/__init__.py
328
VIKI/__init__.py
@ -1,328 +0,0 @@
|
||||
import base64
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
from http.cookiejar import CookieJar
|
||||
from typing import Optional, Generator
|
||||
|
||||
import click
|
||||
from unshackle.core.search_result import SearchResult
|
||||
from unshackle.core.constants import AnyTrack
|
||||
from unshackle.core.credential import Credential
|
||||
from unshackle.core.manifests import DASH
|
||||
from unshackle.core.service import Service
|
||||
from unshackle.core.titles import Movie, Movies, Series, Episode, Title_T, Titles_T
|
||||
from unshackle.core.tracks import Chapter, Tracks, Subtitle
|
||||
from unshackle.core.drm import Widevine
|
||||
from langcodes import Language
|
||||
|
||||
|
||||
class VIKI(Service):
|
||||
"""
|
||||
Service code for Rakuten Viki (viki.com)
|
||||
Version: 1.4.0
|
||||
|
||||
Authorization: Required cookies (_viki_session, device_id).
|
||||
Security: FHD @ L3 (Widevine)
|
||||
|
||||
Supports:
|
||||
• Movies and TV Series
|
||||
"""
|
||||
|
||||
TITLE_RE = r"^(?:https?://(?:www\.)?viki\.com)?/(?:movies|tv)/(?P<id>\d+c)-.+$"
|
||||
GEOFENCE = ()
|
||||
NO_SUBTITLES = False
|
||||
|
||||
@staticmethod
@click.command(name="VIKI", short_help="https://viki.com")
@click.argument("title", type=str)
@click.pass_context
def cli(ctx, **kwargs):
    # Click entry point: constructs the VIKI service with the parsed CLI args.
    # (No docstring on purpose — click would surface it as command help text.)
    return VIKI(ctx, **kwargs)
|
||||
|
||||
def __init__(self, ctx, title: str):
    """Accept a full title URL (parsed for the container id) or treat the input as a search term."""
    super().__init__(ctx)

    matched = re.match(self.TITLE_RE, title)
    if not matched:
        # Not a recognized URL: remember it as a free-text search query.
        self.search_term = title
        self.title_url = None
        return

    self.title_url = title
    self.container_id = matched.group("id")
    self.video_id: Optional[str] = None
    self.api_access_key: Optional[str] = None
    self.drm_license_url: Optional[str] = None
    self.cdm = ctx.obj.cdm

    if self.config is None:
        raise EnvironmentError("Missing service config for VIKI.")
|
||||
|
||||
def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
    """Validate the required VIKI cookies and prime the session headers."""
    super().authenticate(cookies, credential)

    if not cookies:
        raise PermissionError("VIKI requires a cookie file for authentication.")

    # Locate the first occurrence of each required cookie.
    session_cookie = None
    device_cookie = None
    for cookie in cookies:
        if cookie.name == "_viki_session" and session_cookie is None:
            session_cookie = cookie
        elif cookie.name == "device_id" and device_cookie is None:
            device_cookie = cookie

    if not session_cookie or not device_cookie:
        raise PermissionError("Your cookie file is missing '_viki_session' or 'device_id'.")

    self.session.headers.update({
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:143.0) Gecko/20100101 Firefox/143.0",
        "X-Viki-App-Ver": "14.64.0",
        "X-Viki-Device-ID": device_cookie.value,
        "Origin": "https://www.viki.com",
        "Referer": "https://www.viki.com/",
    })
    self.log.info("VIKI authentication cookies loaded successfully.")
|
||||
|
||||
def get_titles(self) -> Titles_T:
    """
    Resolve the container into Movies or a Series.

    Scrapes the watch page for the embedded API token first, then queries
    the v4 containers endpoint and dispatches on its "type" field.
    """
    if not self.title_url:
        raise ValueError("No URL provided to process.")

    self.log.debug(f"Scraping page for API access key: {self.title_url}")
    r_page = self.session.get(self.title_url)
    r_page.raise_for_status()

    # The page embeds the API token as "token":"..." inside inline JSON.
    match = re.search(r'"token":"([^"]+)"', r_page.text)
    if not match:
        raise RuntimeError("Failed to extract API access key from page source.")

    self.api_access_key = match.group(1)
    self.log.debug(f"Extracted API access key: {self.api_access_key[:10]}...")

    url = self.config["endpoints"]["container"].format(container_id=self.container_id)
    params = {
        "app": self.config["params"]["app"],
        "token": self.api_access_key,
    }
    r = self.session.get(url, params=params)
    r.raise_for_status()
    data = r.json()

    # "film" -> single movie; "series" -> episode list; anything else is a no-op.
    content_type = data.get("type")
    if content_type == "film":
        return self._parse_movie(data)
    elif content_type == "series":
        return self._parse_series(data)
    else:
        self.log.error(f"Unknown content type '{content_type}' found.")
        return Movies([])
|
||||
|
||||
def _parse_movie(self, data: dict) -> Movies:
    """Build a single-Movie result from a film container payload."""
    titles = data.get("titles", {})
    descriptions = data.get("descriptions", {})
    origin = data.get("origin", {})

    name = titles.get("en", "Unknown Title")
    description = descriptions.get("en", "")
    original_lang_code = origin.get("language", "en")
    # Release year comes from the container creation timestamp prefix.
    year = int(data["created_at"][:4]) if "created_at" in data else None

    self.video_id = data.get("watch_now", {}).get("id")
    if not self.video_id:
        raise ValueError(f"Could not find a playable video ID for container {self.container_id}.")

    movie = Movie(
        id_=self.container_id,
        service=self.__class__,
        name=name,
        year=year,
        description=description,
        language=Language.get(original_lang_code),
        data=data,
    )
    return Movies([movie])
|
||||
|
||||
def _parse_series(self, data: dict) -> Series:
    """
    Parse series metadata and fetch episodes.

    Fetches the full episode-id list, then one metadata request per
    episode; a failed metadata fetch still yields a placeholder episode
    so the series stays complete.
    """
    series_name = data.get("titles", {}).get("en", "Unknown Title")
    year = int(data["created_at"][:4]) if "created_at" in data else None
    description = data.get("descriptions", {}).get("en", "")
    original_lang_code = data.get("origin", {}).get("language", "en")

    self.log.info(f"Parsing series: {series_name}")

    # Fetch episode list IDs
    episodes_url = self.config["endpoints"]["episodes"].format(container_id=self.container_id)
    params = {
        "app": self.config["params"]["app"],
        "token": self.api_access_key,
        "direction": "asc",
        "with_upcoming": "true",
        "sort": "number",
        "blocked": "true",
        "only_ids": "true"
    }

    r = self.session.get(episodes_url, params=params)
    r.raise_for_status()
    episodes_data = r.json()

    episode_ids = episodes_data.get("response", [])
    self.log.info(f"Found {len(episode_ids)} episodes")

    episodes = []
    for idx, ep_id in enumerate(episode_ids, 1):
        # Fetch individual episode metadata
        ep_url = self.config["endpoints"]["episode_meta"].format(video_id=ep_id)
        ep_params = {
            "app": self.config["params"]["app"],
            "token": self.api_access_key,
        }

        try:
            r_ep = self.session.get(ep_url, params=ep_params)
            r_ep.raise_for_status()
            ep_data = r_ep.json()

            ep_number = ep_data.get("number", idx)
            ep_title = ep_data.get("titles", {}).get("en", "")
            ep_description = ep_data.get("descriptions", {}).get("en", "")

            # If no episode title, use generic name
            if not ep_title:
                ep_title = f"Episode {ep_number}"

            # Store the video_id in the data dict
            ep_data["video_id"] = ep_id

            self.log.debug(f"Episode {ep_number}: {ep_title} ({ep_id})")

            episodes.append(
                Episode(
                    id_=ep_id,
                    service=self.__class__,
                    title=series_name,  # Series title
                    season=1,  # VIKI typically doesn't separate seasons clearly
                    number=ep_number,
                    name=ep_title,  # Episode title
                    description=ep_description,
                    language=Language.get(original_lang_code),
                    data=ep_data
                )
            )
        except Exception as e:
            self.log.warning(f"Failed to fetch episode {ep_id}: {e}")
            # Create a basic episode entry even if metadata fetch fails
            episodes.append(
                Episode(
                    id_=ep_id,
                    service=self.__class__,
                    title=series_name,
                    season=1,
                    number=idx,
                    name=f"Episode {idx}",
                    description="",
                    language=Language.get(original_lang_code),
                    data={"video_id": ep_id}  # Store video_id in data
                )
            )

    # Return Series with just the episodes list
    return Series(episodes)
|
||||
|
||||
def get_tracks(self, title: Title_T) -> Tracks:
    """
    Fetch playable tracks for the title from VIKI's playback API.

    Pulls the Widevine-protected MPD out of the playback "queue", loads
    the sidecar WebVTT subtitles listed in the payload, and stores the
    "dt3" DRM license URL for use by get_widevine_license().
    """
    # For episodes, get the video_id from the data dict
    if isinstance(title, Episode):
        self.video_id = title.data.get("video_id")
        if not self.video_id:
            # Fallback to episode id if video_id not in data
            self.video_id = title.data.get("id")
    elif not self.video_id:
        raise RuntimeError("video_id not set. Call get_titles() first.")

    if not self.video_id:
        raise ValueError("Could not determine video_id for this title")

    self.log.info(f"Getting tracks for video ID: {self.video_id}")

    url = self.config["endpoints"]["playback"].format(video_id=self.video_id)
    r = self.session.get(url)
    r.raise_for_status()
    data = r.json()

    # Get the DRM-protected manifest from queue
    manifest_url = None
    for item in data.get("queue", []):
        if item.get("type") == "video" and item.get("format") == "mpd":
            manifest_url = item.get("url")
            break

    if not manifest_url:
        raise ValueError("No DRM-protected manifest URL found in queue")

    self.log.debug(f"Found DRM-protected manifest URL: {manifest_url}")

    # Parse tracks from the DRM-protected manifest.
    # NOTE: a previous revision built a `manifest_headers` dict here that
    # was never passed anywhere; the session's own headers are what the
    # request actually sends, so the dead dict has been removed.
    tracks = DASH.from_url(manifest_url, session=self.session).to_tracks(language=title.language)

    # Subtitles are sidecar WebVTT files listed in the playback payload.
    title_language = title.language.language
    subtitles = []
    for sub in data.get("subtitles", []):
        sub_url = sub.get("src")
        lang_code = sub.get("srclang")
        if not sub_url or not lang_code:
            continue

        subtitles.append(
            Subtitle(
                id_=lang_code,
                url=sub_url,
                language=Language.get(lang_code),
                is_original_lang=lang_code == title_language,
                codec=Subtitle.Codec.WebVTT,
                name=sub.get("label", lang_code.upper()).split(" (")[0]
            )
        )
    tracks.subtitles = subtitles

    # Store DRM license URL (only dt3) at service level
    drm_b64 = data.get("drm")
    if drm_b64:
        drm_data = json.loads(base64.b64decode(drm_b64))
        self.drm_license_url = drm_data.get("dt3")  # Use dt3 as requested
    else:
        self.log.warning("No DRM info found, assuming unencrypted stream.")

    return tracks
|
||||
|
||||
def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
    """POST the challenge to the stored DRM license URL and return the raw license."""
    license_url = getattr(self, 'drm_license_url', None)
    if not license_url:
        raise ValueError("DRM license URL not available.")

    response = self.session.post(
        license_url,
        data=challenge,
        headers={"Content-type": "application/octet-stream"}
    )
    response.raise_for_status()
    return response.content
|
||||
|
||||
def search(self) -> Generator[SearchResult, None, None]:
    """Search is not implemented for VIKI; logs a warning and yields nothing."""
    self.log.warning("Search not yet implemented for VIKI.")
    # Empty generator: equivalent to `return` followed by an unreachable yield.
    yield from ()
|
||||
|
||||
def get_chapters(self, title: Title_T) -> list[Chapter]:
    """VIKI exposes no chapter data; always returns an empty list."""
    return list()
|
||||
@ -1,8 +0,0 @@
|
||||
params:
|
||||
app: "100000a"
|
||||
endpoints:
|
||||
container: "https://api.viki.io/v4/containers/{container_id}.json"
|
||||
episodes: "https://api.viki.io/v4/series/{container_id}/episodes.json" # New
|
||||
episode_meta: "https://api.viki.io/v4/videos/{video_id}.json" # New
|
||||
playback: "https://www.viki.com/api/videos/{video_id}"
|
||||
search: "https://api.viki.io/v4/search/all.json"
|
||||
653
VLD/__init__.py
653
VLD/__init__.py
@ -1,653 +0,0 @@
|
||||
import re
|
||||
import uuid
|
||||
from collections.abc import Generator
|
||||
from http.cookiejar import CookieJar
|
||||
from typing import Optional, Union
|
||||
|
||||
import click
|
||||
from langcodes import Language
|
||||
|
||||
from unshackle.core.constants import AnyTrack
|
||||
from unshackle.core.credential import Credential
|
||||
from unshackle.core.manifests import DASH
|
||||
from unshackle.core.search_result import SearchResult
|
||||
from unshackle.core.service import Service
|
||||
from unshackle.core.titles import Episode, Movie, Movies, Series, Title_T, Titles_T
|
||||
from unshackle.core.tracks import Chapter, Subtitle, Tracks
|
||||
|
||||
|
||||
class VLD(Service):
|
||||
"""
|
||||
Service code for RTL's Dutch streaming service Videoland (https://v2.videoland.com)
|
||||
Version: 1.1.0
|
||||
|
||||
Authorization: Credentials
|
||||
|
||||
Security:
|
||||
- L1: >= 720p
|
||||
- L3: <= 576p
|
||||
|
||||
They are using the license server of DRMToday with encoded streams from CastLabs.
|
||||
It accepts Non-Whitelisted CDMs so every unrevoked L1 CDM should work.
|
||||
|
||||
Use full URL (for example - https://v2.videoland.com/title-p_12345) or title slug.
|
||||
"""
|
||||
|
||||
ALIASES = ("VLD", "videoland")
|
||||
TITLE_RE = r"^(?:https?://(?:www\.)?v2\.videoland\.com/)?(?P<title_id>[a-zA-Z0-9_-]+)"
|
||||
GEOFENCE = ("NL",)
|
||||
|
||||
@staticmethod
@click.command(name="Videoland", short_help="https://v2.videoland.com")
@click.argument("title", type=str)
@click.option("-m", "--movie", is_flag=True, default=False, help="Specify if it's a movie")
@click.pass_context
def cli(ctx, **kwargs):
    # Click entry point: constructs the VLD service with the parsed CLI args.
    # (No docstring on purpose — click would surface it as command help text.)
    return VLD(ctx, **kwargs)
|
||||
|
||||
def __init__(self, ctx, title, movie):
    """Set up per-run state; network authentication happens later in authenticate()."""
    super().__init__(ctx)

    self.title = title
    self.movie = movie
    self.cdm = ctx.obj.cdm
    # Pseudo-unique device id for this run, derived from a v1 UUID.
    self.device_id = str(uuid.uuid1().int)

    if self.config is None:
        raise Exception("Config is missing!")

    profile_name = ctx.parent.params.get("profile")
    self.profile = profile_name or "default"

    self.platform = self.config["platform"]["android_tv"]
    self.platform_token = "token-androidtv-3"

    # Auth state - initialized to None, populated by authenticate()
    self.access_token = None
    self.gigya_uid = None
    self.profile_id = None
||||
|
||||
def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
    """
    Authenticate against Videoland with username/password credentials.

    Tokens are cached per-username; a fresh cache entry that still holds
    every required field skips the full Gigya login flow entirely.

    :raises EnvironmentError: when no usable credentials are supplied.
    """
    super().authenticate(cookies, credential)
    if not credential or not credential.username or not credential.password:
        raise EnvironmentError("Service requires Credentials for Authentication.")

    self.credential = credential

    self.session.headers.update({
        "origin": "https://v2.videoland.com",
        "x-client-release": self.config["sdk"]["version"],
        "x-customer-name": "rtlnl",
    })

    cache_key = f"tokens_{credential.username}"
    cache = self.cache.get(cache_key)

    if cache and not cache.expired:
        cached_data = cache.data
        # Only trust the cache when it belongs to this exact account and
        # every field needed by _restore_from_cache() is present.
        if (
            isinstance(cached_data, dict)
            and cached_data.get("username") == credential.username
            and cached_data.get("access_token")
            and cached_data.get("gigya_uid")
            and cached_data.get("profile_id")
        ):
            self.log.info("Using cached Videoland tokens")
            self._restore_from_cache(cached_data)
            return
        else:
            self.log.warning("Cached token data is incomplete or mismatched, re-authenticating")

    self.log.info("Retrieving new Videoland tokens")
    self._do_login(credential)
    # Persist into the same cache object we checked above.
    self._cache_tokens(credential.username, cache)
|
||||
|
||||
def _invalidate_cache(self) -> None:
|
||||
"""Wipe the cached tokens for the current credential so the next
|
||||
call to authenticate() is forced to perform a fresh login."""
|
||||
if not self.credential:
|
||||
return
|
||||
cache_key = f"tokens_{self.credential.username}"
|
||||
cache = self.cache.get(cache_key)
|
||||
# Writing an empty dict with a TTL of 0 effectively expires it
|
||||
# immediately so the next cache.expired check returns True.
|
||||
try:
|
||||
cache.set(data={}, expiration=0)
|
||||
self.log.debug("Token cache invalidated")
|
||||
except Exception:
|
||||
pass # If the cache backend refuses, just continue
|
||||
|
||||
def _reauthenticate(self) -> None:
|
||||
"""Invalidate the cache and perform a completely fresh login.
|
||||
|
||||
Call this whenever the API returns a token-expired error so that
|
||||
the rest of the current run continues with valid credentials.
|
||||
"""
|
||||
self.log.warning("Access token has expired — invalidating cache and re-authenticating")
|
||||
self._invalidate_cache()
|
||||
self._do_login(self.credential)
|
||||
# Re-persist the brand-new tokens
|
||||
cache_key = f"tokens_{self.credential.username}"
|
||||
cache = self.cache.get(cache_key)
|
||||
self._cache_tokens(self.credential.username, cache)
|
||||
|
||||
def _restore_from_cache(self, cached_data: dict) -> None:
|
||||
"""Restore authentication state from cached data."""
|
||||
self.access_token = cached_data["access_token"]
|
||||
self.gigya_uid = cached_data["gigya_uid"]
|
||||
self.profile_id = cached_data["profile_id"]
|
||||
self.session.headers.update({"Authorization": f"Bearer {self.access_token}"})
|
||||
|
||||
def _cache_tokens(self, username: str, cache: object) -> None:
|
||||
"""Persist the current tokens into the cache object.
|
||||
|
||||
Accepts the cache object directly instead of re-fetching it by key,
|
||||
so we always write to the exact same object we checked during the
|
||||
cache-hit test in authenticate().
|
||||
"""
|
||||
cache.set(
|
||||
data={
|
||||
"username": username,
|
||||
"access_token": self.access_token,
|
||||
"gigya_uid": self.gigya_uid,
|
||||
"profile_id": self.profile_id,
|
||||
},
|
||||
# 3500 seconds gives a 100-second safety margin below the
|
||||
# typical 1-hour JWT lifetime so we never use a nearly-expired token.
|
||||
expiration=3500,
|
||||
)
|
||||
self.log.info("Videoland tokens cached successfully")
|
||||
|
||||
def _do_login(self, credential: Credential) -> None:
    """Perform the full four-step Videoland / Gigya login flow.

    Side effects: sets self.gigya_uid, self.profile_id and
    self.access_token, and installs the bearer token on the session.

    :raises EnvironmentError: when any step of the exchange is refused.
    """

    # ── Step 1: Gigya account login ──────────────────────────────
    auth_response = self.session.post(
        url=self.config["endpoints"]["authorization"],
        data={
            "loginID": credential.username,
            "password": credential.password,
            "sessionExpiration": "0",
            "targetEnv": "jssdk",
            "include": "profile,data",
            "includeUserInfo": "true",
            "lang": "nl",
            "ApiKey": self.config["sdk"]["apikey"],
            "authMode": "cookie",
            "pageURL": "https://v2.videoland.com/",
            "sdkBuild": self.config["sdk"]["build"],
            "format": "json",
        },
    ).json()

    if auth_response.get("errorMessage"):
        raise EnvironmentError(
            f"Could not authorize Videoland account: {auth_response['errorMessage']!r}"
        )

    self.gigya_uid = auth_response["UID"]
    uid_signature = auth_response["UIDSignature"]
    signature_timestamp = auth_response["signatureTimestamp"]

    # ── Step 2: Exchange Gigya credentials for an initial JWT ─────
    jwt_headers = {
        "x-auth-device-id": self.device_id,
        "x-auth-device-player-size-height": "3840",
        "x-auth-device-player-size-width": "2160",
        "X-Auth-gigya-signature": uid_signature,
        "X-Auth-gigya-signature-timestamp": signature_timestamp,
        "X-Auth-gigya-uid": self.gigya_uid,
        "X-Client-Release": self.config["sdk"]["version"],
        "X-Customer-Name": "rtlnl",
    }

    jwt_response = self.session.get(
        url=self.config["endpoints"]["jwt_tokens"].format(platform=self.platform),
        headers=jwt_headers,
    ).json()

    if jwt_response.get("error"):
        raise EnvironmentError(
            f"Could not get Access Token: {jwt_response['error']['message']!r}"
        )

    initial_token = jwt_response["token"]

    # ── Step 3: Fetch profiles and pick the first one ─────────────
    profiles_response = self.session.get(
        url=self.config["endpoints"]["profiles"].format(
            platform=self.platform,
            gigya=self.gigya_uid,
        ),
        headers={"Authorization": f"Bearer {initial_token}"},
    ).json()

    if isinstance(profiles_response, dict) and profiles_response.get("error"):
        raise EnvironmentError(
            f"Could not get profiles: {profiles_response['error']['message']!r}"
        )

    self.profile_id = profiles_response[0]["uid"]

    # ── Step 4: Obtain a profile-scoped JWT (the final token) ─────
    jwt_headers["X-Auth-profile-id"] = self.profile_id

    final_jwt_response = self.session.get(
        url=self.config["endpoints"]["jwt_tokens"].format(platform=self.platform),
        headers=jwt_headers,
    ).json()

    if final_jwt_response.get("error"):
        raise EnvironmentError(
            f"Could not get final Access Token: {final_jwt_response['error']['message']!r}"
        )

    self.access_token = final_jwt_response["token"]
    self.session.headers.update({"Authorization": f"Bearer {self.access_token}"})
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Title discovery
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def search(self) -> Generator[SearchResult, None, None]:
    """
    Search the Videoland Algolia index for programmes matching ``self.title``.

    Sends a single multi-query request to the Algolia REST endpoint and
    yields one SearchResult per unique programme hit, de-duplicated on
    content id. The MOVIE/SERIES label is derived from the hit's tags.
    """
    query = self.title.strip()
    if not query:
        # Nothing to search for — yield no results.
        return

    response = self.session.post(
        url=self.config["endpoints"]["search"],
        # Algolia authenticates via query-string parameters, not headers.
        params={
            "x-algolia-agent": self.config["algolia"]["agent"],
            "x-algolia-api-key": self.config["algolia"]["api_key"],
            "x-algolia-application-id": self.config["algolia"]["app_id"],
        },
        headers={
            "Accept": "application/json",
            "Content-Type": "text/plain",
            "Referer": "https://v2.videoland.com/",
            "Origin": "https://v2.videoland.com",
        },
        json={
            "requests": [
                {
                    "indexName": self.config["algolia"]["index"],
                    "query": query,
                    "clickAnalytics": True,
                    "hitsPerPage": 50,
                    # Restrict to programmes available on the web platform.
                    "facetFilters": [
                        ["metadata.item_type:program"],
                        [f"metadata.platforms_assets:{self.config['platform']['web']}"],
                    ],
                }
            ]
        },
    )
    response.raise_for_status()

    data = response.json()
    # Algolia returns one result set per request in "requests" above.
    results = data.get("results", [])
    if not results:
        return

    # Track ids already yielded so duplicates are skipped.
    seen = set()

    for hit in results[0].get("hits", []):
        # Defensive "or {}": fields can be present but null in the payload.
        metadata = hit.get("metadata", {}) or {}
        item = hit.get("item", {}) or {}
        item_content = item.get("itemContent", {}) or {}

        # The click action's layout target carries the canonical id/seo slug.
        target = (
            item_content.get("action", {})
            .get("target", {})
            .get("value_layout", {})
        )

        content = hit.get("content", {}) or {}
        content_id = str(target.get("id") or content.get("id") or "").strip()
        seo = target.get("seo")
        title = item_content.get("title") or metadata.get("title")

        if not content_id or not title:
            continue

        if content_id in seen:
            continue
        seen.add(content_id)

        edito_tags = metadata.get("tags", {}).get("edito", []) or []
        program_nature = metadata.get("tags", {}).get("program_nature", []) or []

        # Classify: explicit CONTENTTYPE tags win; "Unitary" programmes are
        # treated as movies; everything else defaults to SERIES.
        if "CONTENTTYPE:Film" in edito_tags:
            label = "MOVIE"
        elif "CONTENTTYPE:Series" in edito_tags:
            label = "SERIES"
        elif "Unitary" in program_nature:
            label = "MOVIE"
        else:
            label = "SERIES"

        # "-p_<id>" is the programme-page URL suffix used by the website.
        url = f"https://v2.videoland.com/{seo}-p_{content_id}" if seo else None

        yield SearchResult(
            id_=content_id,
            title=title,
            label=label,
            url=url,
        )
|
||||
|
||||
def get_titles(self) -> Titles_T:
    """
    Resolve ``self.title`` to a Movies or Series object.

    Folder slugs (``…-f_123``) are first resolved to their programme slug.
    When the API reports an expired/invalid token, re-authenticates once
    and retries the layout request. For series, every season is paged
    through until all episodes are collected.

    Raises:
        ValueError: on an invalid title format or an unrecoverable API error.
    """
    title_match = re.match(self.TITLE_RE, self.title)
    if not title_match:
        raise ValueError(f"Invalid title format: {self.title}")

    title_slug = title_match.group("title_id")

    # Folder URLs (…-f_123) point at a collection page; resolve them to
    # the underlying programme slug first.
    if re.match(r".+?-f_[0-9]+", title_slug):
        title_slug = self._get_program_title(title_slug)

    title_id = title_slug.split("-p_")[-1] if "-p_" in title_slug else title_slug

    metadata = self.session.get(
        url=self.config["endpoints"]["layout"].format(
            platform=self.platform,
            token=self.platform_token,
            endpoint=f"program/{title_id}",
        ),
        params={"nbPages": "10"},
    ).json()

    # ── Token expiry detection and automatic recovery ─────────────────
    if isinstance(metadata, dict) and metadata.get("error"):
        message = metadata.get("message", "Unknown error")
        # The API returns "Token expired/invalid" when the JWT has lapsed.
        # Re-authenticate once and retry the same request rather than
        # crashing with a ValueError.
        if "token" in message.lower() and (
            "expired" in message.lower() or "invalid" in message.lower()
        ):
            self._reauthenticate()
            # Retry the metadata request with the fresh token.
            metadata = self.session.get(
                url=self.config["endpoints"]["layout"].format(
                    platform=self.platform,
                    token=self.platform_token,
                    endpoint=f"program/{title_id}",
                ),
                params={"nbPages": "10"},
            ).json()
            # If it still fails after re-auth, raise normally.
            if isinstance(metadata, dict) and metadata.get("error"):
                raise ValueError(
                    f"API Error after re-authentication: {metadata.get('message', 'Unknown error')}"
                )
        else:
            raise ValueError(f"API Error: {message}")

    # Heuristic: series layouts contain a "Seizoen" (Dutch: season) block.
    is_movie = "Seizoen" not in str(metadata)

    if is_movie:
        movie_info = metadata["blocks"][0]["content"]["items"][0]
        viewable_id = movie_info["itemContent"]["action"]["target"]["value_layout"]["id"]

        return Movies([
            Movie(
                id_=movie_info["ucid"],
                service=self.__class__,
                name=metadata["entity"]["metadata"]["title"],
                year=None,
                language=Language.get("nl"),
                data={
                    "viewable": viewable_id,
                    "metadata": metadata,
                },
            )
        ])

    seasons = [
        block
        for block in metadata["blocks"]
        if block["featureId"] == "videos_by_season_by_program"
    ]

    for season in seasons:
        # Page through the season until every episode is collected.
        while (
            len(season["content"]["items"])
            != season["content"]["pagination"]["totalItems"]
        ):
            next_page = season["content"]["pagination"].get("nextPage")
            if not next_page:
                # No further page available. totalItems can overstate the
                # reachable list (e.g. hidden episodes); bail out instead
                # of requesting page=None forever.
                break

            season_data = self.session.get(
                url=self.config["endpoints"]["seasoning"].format(
                    platform=self.platform,
                    token=self.platform_token,
                    program=title_id,
                    season_id=season["id"],
                ),
                params={
                    "nbPages": "10",
                    "page": next_page,
                },
            ).json()

            appended_any = False
            for episode in season_data["content"]["items"]:
                if episode not in season["content"]["items"]:
                    season["content"]["items"].append(episode)
                    appended_any = True

            season["content"]["pagination"]["nextPage"] = (
                season_data["content"]["pagination"]["nextPage"]
            )

            if not appended_any:
                # The API returned only items we already have — no progress
                # is possible, so stop rather than loop indefinitely.
                break

    episodes = []
    for season in seasons:
        season_title = season.get("title", {}).get("long", "")
        season_match = re.search(r"(\d+)", season_title)
        season_number = int(season_match.group(1)) if season_match else 1

        for idx, episode_data in enumerate(season["content"]["items"]):
            extra_title = episode_data["itemContent"].get("extraTitle", "")

            # Episode titles usually look like "12. Name"; fall back to the
            # positional index when the numeric prefix is missing.
            ep_match = re.match(r"^(\d+)\.\s*(.*)$", extra_title)
            if ep_match:
                episode_number = int(ep_match.group(1))
                episode_name = ep_match.group(2)
            else:
                episode_number = idx + 1
                episode_name = extra_title

            viewable_id = (
                episode_data["itemContent"]["action"]["target"]["value_layout"]["id"]
            )

            episodes.append(
                Episode(
                    id_=episode_data["ucid"],
                    service=self.__class__,
                    title=metadata["entity"]["metadata"]["title"],
                    season=season_number,
                    number=episode_number,
                    name=episode_name,
                    year=None,
                    language=Language.get("nl"),
                    data={
                        "viewable": viewable_id,
                        "episode_data": episode_data,
                    },
                )
            )

    episodes = sorted(episodes, key=lambda ep: (ep.season, ep.number))
    return Series(episodes)
|
||||
|
||||
def get_tracks(self, title: Title_T) -> Tracks:
    """
    Fetch the DASH manifest for a title and return its tracks.

    Looks up the video layout, picks the HD asset (SD as fallback),
    parses the MPD into tracks, rewrites segment URLs to the origin
    CDN host, and flags SDH/forced subtitles from their URLs.
    Re-authenticates once if the API reports an expired/invalid token.
    """
    viewable_id = title.data["viewable"]

    manifest_response = self.session.get(
        url=self.config["endpoints"]["layout"].format(
            platform=self.platform,
            token=self.platform_token,
            endpoint=f"video/{viewable_id}",
        ),
        params={"nbPages": "2"},
    ).json()

    # ── Token expiry detection in get_tracks ──────────────────────────
    if isinstance(manifest_response, dict) and manifest_response.get("error"):
        message = manifest_response.get("message", "Unknown error")
        if "token" in message.lower() and (
            "expired" in message.lower() or "invalid" in message.lower()
        ):
            # Token lapsed — refresh once and retry the identical request.
            self._reauthenticate()
            manifest_response = self.session.get(
                url=self.config["endpoints"]["layout"].format(
                    platform=self.platform,
                    token=self.platform_token,
                    endpoint=f"video/{viewable_id}",
                ),
                params={"nbPages": "2"},
            ).json()
            if isinstance(manifest_response, dict) and manifest_response.get("error"):
                raise ValueError(
                    f"API Error after re-authentication: {manifest_response.get('message', 'Unknown error')}"
                )
        else:
            raise ValueError(f"API Error: {message}")

    # The layout consists of blocks; the one templated as "Player" holds
    # the playable assets.
    player_block = next(
        (
            block
            for block in manifest_response["blocks"]
            if block["templateId"] == "Player"
        ),
        None,
    )

    if not player_block:
        raise ValueError("Could not find player block in manifest")

    assets = player_block["content"]["items"][0]["itemContent"]["video"]["assets"]
    if not assets:
        raise ValueError("Failed to load content manifest - no assets found")

    # Prefer the HD asset; fall back to SD when HD is unavailable.
    mpd_asset = next((a for a in assets if a["quality"] == "hd"), None) or \
        next((a for a in assets if a["quality"] == "sd"), None)

    if not mpd_asset:
        raise ValueError("No suitable quality stream found")

    mpd_url = mpd_asset["path"]

    tracks = DASH.from_url(url=mpd_url, session=self.session).to_tracks(
        language=title.language
    )

    # Rewrite every segment URL to the origin CDN host and strip query
    # strings (presumably signed/expiring parameters — TODO confirm).
    for track in tracks:
        if not hasattr(track, "url") or not track.url:
            continue
        if isinstance(track.url, list):
            track.url = [
                re.sub(
                    r"https://.+?\.videoland\.bedrock\.tech",
                    "https://origin.vod.videoland.bedrock.tech",
                    uri.split("?")[0],
                )
                for uri in track.url
            ]
        elif isinstance(track.url, str):
            track.url = re.sub(
                r"https://.+?\.videoland\.bedrock\.tech",
                "https://origin.vod.videoland.bedrock.tech",
                track.url.split("?")[0],
            )

    # Subtitle variants are only distinguishable by their URL contents.
    for subtitle in tracks.subtitles:
        url_str = str(subtitle.url) if subtitle.url else ""
        if "sdh" in url_str.lower():
            subtitle.sdh = True
        if "forced" in url_str.lower() or "opencaption" in url_str.lower():
            subtitle.forced = True

    self.log.info(
        f"Tracks: {len(tracks.videos)} video, "
        f"{len(tracks.audio)} audio, "
        f"{len(tracks.subtitles)} subtitle"
    )

    # Remembered for later license-token requests.
    self.current_viewable = viewable_id
    return tracks
|
||||
|
||||
def get_chapters(self, title: Title_T) -> list[Chapter]:
    """This service exposes no chapter markers, so none are returned."""
    no_chapters: list[Chapter] = []
    return no_chapters
|
||||
|
||||
def get_widevine_service_certificate(self, **_) -> Optional[str]:
    """Return the Widevine service certificate from the service config, if set."""
    certificate = self.config.get("certificate")
    return certificate
|
||||
|
||||
def get_widevine_license(
    self, *, challenge: bytes, title: Title_T, track: AnyTrack
) -> Optional[Union[bytes, str]]:
    """
    Exchange a Widevine challenge for a license via the drmtoday proxy.

    Raises:
        ValueError: when the license server does not answer 200 OK.
    """
    auth_token = self._get_license_token(title)

    license_response = self.session.post(
        url=self.config["endpoints"]["license_wv"],
        data=challenge,
        headers={"x-dt-auth-token": auth_token},
    )

    if license_response.status_code != 200:
        raise ValueError(f"Failed to get Widevine license: {license_response.status_code}")

    return license_response.json().get("license")
|
||||
|
||||
def get_playready_license(
    self, *, challenge: bytes, title: Title_T, track: AnyTrack
) -> Optional[bytes]:
    """
    Exchange a PlayReady challenge for a license via the drmtoday proxy.

    Raises:
        ValueError: when the license server does not answer 200 OK.
    """
    auth_token = self._get_license_token(title)

    license_response = self.session.post(
        url=self.config["endpoints"]["license_pr"],
        data=challenge,
        headers={"x-dt-auth-token": auth_token},
    )

    if license_response.status_code != 200:
        raise ValueError(f"Failed to get PlayReady license: {license_response.status_code}")

    return license_response.content
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _get_license_token(self, title: Title_T) -> str:
    """Fetch a per-clip DRM upfront token from the Videoland token endpoint."""
    token_url = self.config["endpoints"]["license_token"].format(
        platform=self.platform,
        gigya=self.gigya_uid,
        clip=title.data["viewable"],
    )
    payload = self.session.get(url=token_url).json()
    return payload["token"]
|
||||
|
||||
def _get_program_title(self, folder_title: str) -> str:
    """Resolve a folder slug (title-f_12345) to its programme slug (title-p_12345)."""
    folder_id = folder_title.split("-f_")[1]

    layout_url = self.config["endpoints"]["layout"].format(
        platform=self.platform,
        token=self.platform_token,
        endpoint=f"folder/{folder_id}",
    )
    layout = self.session.get(url=layout_url, params={"nbPages": "2"}).json()

    # The first item of the first block links to the parent programme.
    first_item = layout["blocks"][0]["content"]["items"][0]
    parent = first_item["itemContent"]["action"]["target"]["value_layout"]["parent"]

    return f"{parent['seo']}-p_{parent['id']}"
|
||||
@ -1,36 +0,0 @@
|
||||
certificate: |
|
||||
CsECCAMSEBcFuRfMEgSGiwYzOi93KowYgrSCkgUijgIwggEKAoIBAQCZ7Vs7Mn2rXiTvw7YqlbWYUgrVvMs3UD4GRbgU2Ha430BRBEGtjOOtsRu4jE5yWl5
|
||||
KngeVKR1YWEAjp+GvDjipEnk5MAhhC28VjIeMfiG/+/7qd+EBnh5XgeikX0YmPRTmDoBYqGB63OBPrIRXsTeo1nzN6zNwXZg6IftO7L1KEMpHSQykfqpdQ4
|
||||
IY3brxyt4zkvE9b/tkQv0x4b9AsMYE0cS6TJUgpL+X7r1gkpr87vVbuvVk4tDnbNfFXHOggrmWEguDWe3OJHBwgmgNb2fG2CxKxfMTRJCnTuw3r0svAQxZ6
|
||||
ChD4lgvC2ufXbD8Xm7fZPvTCLRxG88SUAGcn1oJAgMBAAE6FGxpY2Vuc2Uud2lkZXZpbmUuY29tEoADrjRzFLWoNSl/JxOI+3u4y1J30kmCPN3R2jC5MzlR
|
||||
HrPMveoEuUS5J8EhNG79verJ1BORfm7BdqEEOEYKUDvBlSubpOTOD8S/wgqYCKqvS/zRnB3PzfV0zKwo0bQQQWz53ogEMBy9szTK/NDUCXhCOmQuVGE98K/
|
||||
PlspKkknYVeQrOnA+8XZ/apvTbWv4K+drvwy6T95Z0qvMdv62Qke4XEMfvKUiZrYZ/DaXlUP8qcu9u/r6DhpV51Wjx7zmVflkb1gquc9wqgi5efhn9joLK3
|
||||
/bNixbxOzVVdhbyqnFk8ODyFfUnaq3fkC3hR3f0kmYgI41sljnXXjqwMoW9wRzBMINk+3k6P8cbxfmJD4/Paj8FwmHDsRfuoI6Jj8M76H3CTsZCZKDJjM3B
|
||||
QQ6Kb2m+bQ0LMjfVDyxoRgvfF//M/EEkPrKWyU2C3YBXpxaBquO4C8A0ujVmGEEqsxN1HX9lu6c5OMm8huDxwWFd7OHMs3avGpr7RP7DUnTikXrh6X0
|
||||
|
||||
endpoints:
|
||||
layout: https://layout.videoland.bedrock.tech/front/v1/rtlnl/{platform}/main/{token}/{endpoint}/layout
|
||||
seasoning: https://layout.videoland.bedrock.tech/front/v1/rtlnl/{platform}/main/{token}/program/{program}/block/{season_id}
|
||||
license_pr: https://lic.drmtoday.com/license-proxy-headerauth/drmtoday/RightsManager.asmx
|
||||
license_wv: https://lic.drmtoday.com/license-proxy-widevine/cenc/
|
||||
license_token: https://drm.videoland.bedrock.tech/v1/customers/rtlnl/platforms/{platform}/services/videoland/users/{gigya}/videos/{clip}/upfront-token
|
||||
authorization: https://accounts.eu1.gigya.com/accounts.login
|
||||
jwt_tokens: https://front-auth.videoland.bedrock.tech/v2/platforms/{platform}/getJwt
|
||||
profiles: https://users.videoland.bedrock.tech/v2/platforms/{platform}/users/{gigya}/profiles
|
||||
search: https://nhacvivxxk-dsn.algolia.net/1/indexes/*/queries
|
||||
|
||||
platform:
|
||||
web: m6group_web
|
||||
android_mob: m6group_android_mob
|
||||
android_tv: m6group_android_tv
|
||||
|
||||
algolia:
|
||||
app_id: NHACVIVXXK
|
||||
api_key: 6ef59fc6d78ac129339ab9c35edd41fa
|
||||
agent: Algolia for JavaScript (5.49.1); Search (5.49.1); Browser
|
||||
index: videoland_prod_bedrock_layout_items_v2_rtlnl_main
|
||||
|
||||
sdk:
|
||||
apikey: 3_W6BPwMz2FGQEfH4_nVRaj4Ak1F1XDp33an_8y8nXULn8nk43FHvPIpb0TLOYIaUI
|
||||
build: "13414"
|
||||
version: 5.47.2
|
||||
264
VRT/__init__.py
264
VRT/__init__.py
@ -1,264 +0,0 @@
|
||||
import json
|
||||
import re
|
||||
import time
|
||||
import base64
|
||||
import warnings # Added
|
||||
from http.cookiejar import CookieJar
|
||||
from typing import Optional, List
|
||||
from langcodes import Language
|
||||
|
||||
import click
|
||||
import jwt
|
||||
from bs4 import XMLParsedAsHTMLWarning # Added
|
||||
from collections.abc import Generator
|
||||
from unshackle.core.search_result import SearchResult
|
||||
from unshackle.core.constants import AnyTrack
|
||||
from unshackle.core.credential import Credential
|
||||
from unshackle.core.manifests import DASH
|
||||
from unshackle.core.service import Service
|
||||
from unshackle.core.titles import Episode, Movie, Movies, Series, Title_T, Titles_T
|
||||
from unshackle.core.tracks import Chapter, Tracks, Subtitle
|
||||
|
||||
# Ignore the BeautifulSoup XML warning caused by STPP subtitles
|
||||
warnings.filterwarnings("ignore", category=XMLParsedAsHTMLWarning)
|
||||
|
||||
# GraphQL Fragments and Queries

# Shared tile fields: each tile exposes a title and, when clickable,
# a LinkAction holding the target page link.
FRAGMENTS = """
fragment tileFragment on Tile {
  ... on ITile {
    title
    action { ... on LinkAction { link } }
  }
}
"""

# Program page: collects the tile lists (flat, or nested inside a season
# navigation container) that hold a programme's episodes.
QUERY_PROGRAM = """
query VideoProgramPage($pageId: ID!) {
  page(id: $pageId) {
    ... on ProgramPage {
      title
      components {
        __typename
        ... on PaginatedTileList { listId title }
        ... on StaticTileList { listId title }
        ... on ContainerNavigation {
          items {
            title
            components {
              __typename
              ... on PaginatedTileList { listId }
              ... on StaticTileList { listId }
            }
          }
        }
      }
    }
  }
}
"""

# One tile list, fetched 50 tiles at a time via cursor pagination
# (a StaticTileList returns all of its items at once).
QUERY_PAGINATED_LIST = FRAGMENTS + """
query PaginatedTileListPage($listId: ID!, $after: ID) {
  list(listId: $listId) {
    ... on PaginatedTileList {
      paginatedItems(first: 50, after: $after) {
        edges { node { ...tileFragment } }
        pageInfo { endCursor hasNextPage }
      }
    }
    ... on StaticTileList {
      items { ...tileFragment }
    }
  }
}
"""

# Playback page: resolves a concrete episode/movie page to its streamId.
QUERY_PLAYBACK = """
query EpisodePage($pageId: ID!) {
  page(id: $pageId) {
    ... on PlaybackPage {
      title
      player { modes { streamId } }
    }
  }
}
"""
|
||||
|
||||
class VRT(Service):
    """
    Service code for VRT MAX (vrt.be)
    Version: 2.1.2

    Auth: Gigya + OIDC flow
    Security: FHD @ L3 (Widevine)

    Supports:
    - Movies: https://www.vrt.be/vrtmax/a-z/rikkie-de-ooievaar-2/
    - Series: https://www.vrt.be/vrtmax/a-z/schaar-steen-papier/
    """

    TITLE_RE = r"^(?:https?://(?:www\.)?vrt\.be/vrtmax/a-z/)?(?P<slug>[^/]+)(?:/(?P<season_num>\d+)/(?P<episode_slug>[^/]+))?/?$"

    @staticmethod
    @click.command(name="VRT", short_help="https://www.vrt.be/vrtmax/")
    @click.argument("title", type=str)
    @click.pass_context
    def cli(ctx, **kwargs):
        return VRT(ctx, **kwargs)

    def __init__(self, ctx, title: str):
        super().__init__(ctx)
        self.cdm = ctx.obj.cdm

        m = re.match(self.TITLE_RE, title)
        if m:
            self.slug = m.group("slug")
            # A URL without an episode segment points at the series root.
            self.is_series_root = m.group("episode_slug") is None
            if "vrtmax/a-z" in title:
                # Full URL given: keep its path (minus query string) as page id.
                self.page_id = "/" + title.split("vrt.be/")[1].split("?")[0]
            else:
                self.page_id = f"/vrtmax/a-z/{self.slug}/"
        else:
            # Anything that doesn't look like a title is a search term.
            self.search_term = title

        self.access_token = None
        self.video_token = None

    def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
        """Log in via Gigya + the VRT SSO page; cache the resulting tokens."""
        cache = self.cache.get("auth_data")
        if cache and not cache.expired:
            self.log.info("Using cached VRT session.")
            self.access_token = cache.data["access_token"]
            self.video_token = cache.data["video_token"]
            return

        # Anonymous playback is possible for some content; only log in
        # when full credentials were provided.
        if not credential or not credential.username or not credential.password:
            return

        self.log.info(f"Logging in to VRT as {credential.username}...")
        login_params = {
            "apiKey": self.config["settings"]["api_key"],
            "loginID": credential.username,
            "password": credential.password,
            "format": "json",
            "sdk": "Android_6.1.0"
        }
        r = self.session.post(self.config["endpoints"]["gigya_login"], data=login_params)
        gigya_data = r.json()
        if gigya_data.get("errorCode") != 0:
            raise PermissionError("Gigya login failed")

        # Exchange the Gigya identity for VRT tokens via the SSO page.
        sso_params = {"UID": gigya_data["UID"], "UIDSignature": gigya_data["UIDSignature"], "signatureTimestamp": gigya_data["signatureTimestamp"]}
        r = self.session.get(self.config["endpoints"]["vrt_sso"], params=sso_params)

        # The SSO page embeds the token payload in a JS variable; guard
        # against page changes instead of crashing with AttributeError.
        match = re.search(r'var response = "(.*?)";', r.text)
        if not match:
            raise PermissionError("Could not extract token response from the VRT SSO page")
        token_data = json.loads(match.group(1).replace('\\"', '"'))
        self.access_token = token_data["tokens"]["access_token"]
        self.video_token = token_data["tokens"]["video_token"]

        # Cache until 5 minutes before the JWT's own expiry.
        decoded = jwt.decode(self.access_token, options={"verify_signature": False})
        cache.set(data={"access_token": self.access_token, "video_token": self.video_token}, expiration=int(decoded["exp"] - time.time()) - 300)

    def _get_gql_headers(self):
        """Headers required by the VRT GraphQL endpoint (plus auth when logged in)."""
        return {
            "x-vrt-client-name": self.config["settings"]["client_name"],
            "x-vrt-client-version": self.config["settings"]["client_version"],
            "x-vrt-zone": "default",
            "authorization": f"Bearer {self.access_token}" if self.access_token else None,
            "Content-Type": "application/json"
        }

    def get_titles(self) -> Titles_T:
        """Resolve the requested page to a Movies or Series object via GraphQL."""
        if not self.is_series_root:
            # Direct episode/movie URL: resolve it as a single playback page.
            r = self.session.post(self.config["endpoints"]["graphql"], json={"query": QUERY_PLAYBACK, "variables": {"pageId": self.page_id}}, headers=self._get_gql_headers())
            data = r.json()["data"]["page"]
            return Movies([Movie(id_=data["player"]["modes"][0]["streamId"], service=self.__class__, name=data["title"], language=Language.get("nl"), data={"page_id": self.page_id})])

        r = self.session.post(self.config["endpoints"]["graphql"], json={"query": QUERY_PROGRAM, "variables": {"pageId": self.page_id}}, headers=self._get_gql_headers())
        program_data = r.json().get("data", {}).get("page")
        if not program_data:
            raise ValueError(f"Series page not found: {self.page_id}")

        series_name = program_data["title"]
        episodes = []
        list_ids = []

        # Episode lists appear either as flat tile lists or nested inside a
        # ContainerNavigation (one entry per season).
        for comp in program_data.get("components", []):
            typename = comp.get("__typename")
            if typename in ("PaginatedTileList", "StaticTileList") and "listId" in comp:
                list_ids.append((comp.get("title") or "Episodes", comp["listId"]))
            elif typename == "ContainerNavigation":
                for item in comp.get("items", []):
                    item_title = item.get("title", "Episodes")
                    for sub in item.get("components", []):
                        if "listId" in sub:
                            list_ids.append((item_title, sub["listId"]))

        # De-duplicate list ids while preserving discovery order.
        seen_lists = set()
        unique_list_ids = []
        for title, lid in list_ids:
            if lid not in seen_lists:
                unique_list_ids.append((title, lid))
                seen_lists.add(lid)

        for season_title, list_id in unique_list_ids:
            after = None
            while True:
                r_list = self.session.post(self.config["endpoints"]["graphql"], json={"query": QUERY_PAGINATED_LIST, "variables": {"listId": list_id, "after": after}}, headers=self._get_gql_headers())
                list_resp = r_list.json().get("data", {}).get("list")
                if not list_resp:
                    break

                # Paginated lists carry edges/nodes; static lists carry items.
                items_container = list_resp.get("paginatedItems")
                nodes = [e["node"] for e in items_container["edges"]] if items_container else list_resp.get("items", [])

                for node in nodes:
                    if not node.get("action"):
                        continue
                    link = node["action"]["link"]
                    # Episode links encode season/episode as ".../sXaY".
                    s_match = re.search(r'/(\d+)/.+s(\d+)a(\d+)', link)
                    episodes.append(Episode(
                        id_=link,
                        service=self.__class__,
                        title=series_name,
                        season=int(s_match.group(2)) if s_match else 1,
                        number=int(s_match.group(3)) if s_match else 0,
                        name=node["title"],
                        language=Language.get("nl"),
                        data={"page_id": link}
                    ))

                if items_container and items_container["pageInfo"]["hasNextPage"]:
                    after = items_container["pageInfo"]["endCursor"]
                else:
                    break

        if not episodes:
            raise ValueError("No episodes found for this series.")

        return Series(episodes)

    def get_tracks(self, title: Title_T) -> Tracks:
        """Resolve the playback page to a DASH manifest and return its tracks."""
        page_id = title.data["page_id"]
        r_meta = self.session.post(self.config["endpoints"]["graphql"], json={"query": QUERY_PLAYBACK, "variables": {"pageId": page_id}}, headers=self._get_gql_headers())
        stream_id = r_meta.json()["data"]["page"]["player"]["modes"][0]["streamId"]

        # The player token endpoint expects an unsigned JWT-shaped playerInfo
        # (header "eyJhbGciOiJIUzI1NiJ9" + base64url body, empty signature).
        p_info = base64.urlsafe_b64encode(json.dumps(self.config["player_info"]).encode()).decode().replace("=", "")
        r_tok = self.session.post(self.config["endpoints"]["player_token"], json={"identityToken": self.video_token, "playerInfo": f"eyJhbGciOiJIUzI1NiJ9.{p_info}."})
        vrt_player_token = r_tok.json()["vrtPlayerToken"]

        r_agg = self.session.get(self.config["endpoints"]["aggregator"].format(stream_id=stream_id), params={"client": self.config["settings"]["client_id"], "vrtPlayerToken": vrt_player_token})
        agg_data = r_agg.json()

        dash_url = next(u["url"] for u in agg_data["targetUrls"] if u["type"] == "mpeg_dash")
        tracks = DASH.from_url(dash_url, session=self.session).to_tracks(language=title.language)
        self.drm_token = agg_data["drm"]

        for sub in agg_data.get("subtitleUrls", []):
            tracks.add(Subtitle(id_=sub.get("label", "nl"), url=sub["url"], codec=Subtitle.Codec.WebVTT, language=Language.get(sub.get("language", "nl"))))

        for tr in tracks.videos + tracks.audio:
            if tr.drm:
                # Bind tr as a default argument: a plain closure would
                # late-bind the loop variable and every track would request
                # a license for the *last* track in the list.
                tr.drm.license = lambda challenge, _track=tr, **kw: self.get_widevine_license(challenge, title, _track)

        return tracks

    def get_widevine_license(self, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
        """POST the Widevine challenge to the vudrm proxy and return the raw license."""
        r = self.session.post(self.config["endpoints"]["license"], data=challenge, headers={"x-vudrm-token": self.drm_token, "Origin": "https://www.vrt.be", "Referer": "https://www.vrt.be/"})
        return r.content

    def get_chapters(self, title: Title_T) -> list[Chapter]:
        """VRT MAX exposes no chapter markers."""
        return []
|
||||
@ -1,18 +0,0 @@
|
||||
endpoints:
|
||||
gigya_login: "https://accounts.eu1.gigya.com/accounts.login"
|
||||
vrt_sso: "https://www.vrt.be/vrtmax/sso/login"
|
||||
graphql: "https://www.vrt.be/vrtnu-api/graphql/v1"
|
||||
player_token: "https://media-services-public.vrt.be/vualto-video-aggregator-web/rest/external/v2/tokens"
|
||||
aggregator: "https://media-services-public.vrt.be/media-aggregator/v2/media-items/{stream_id}"
|
||||
license: "https://widevine-proxy.drm.technology/proxy"
|
||||
|
||||
settings:
|
||||
api_key: "3_qhEcPa5JGFROVwu5SWKqJ4mVOIkwlFNMSKwzPDAh8QZOtHqu6L4nD5Q7lk0eXOOG"
|
||||
client_name: "WEB"
|
||||
client_id: "vrtnu-web@PROD"
|
||||
client_version: "1.5.15"
|
||||
|
||||
player_info:
|
||||
drm: { widevine: "L3" }
|
||||
platform: "desktop"
|
||||
app: { type: "browser", name: "Firefox", version: "146.0" }
|
||||
Loading…
x
Reference in New Issue
Block a user