mirror of https://github.com/devine-dl/devine.git (synced 2025-04-29 17:49:44 +00:00)

Compare commits
19 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 09eda16882 | |
| | a95d32de9e | |
| | 221cd145c4 | |
| | 0310646cb2 | |
| | 3426fc145f | |
| | e57d755837 | |
| | 03f3fec5cc | |
| | 2acee30e54 | |
| | 2e697d93fc | |
| | f08402d795 | |
| | 5ef95e942a | |
| | dde55fd708 | |
| | 345cc5aba6 | |
| | 145e7a6c17 | |
| | 5706bb1417 | |
| | 85246ab419 | |
| | 71a3a4e2c4 | |
| | 06d414975c | |
| | f419e04fad | |
.gitignore (vendored), 2 additions:

@@ -1,4 +1,6 @@
 # devine
 devine.yaml
+devine.yml
 *.mkv
 *.mp4
+*.exe
@@ -343,6 +343,8 @@ Please refrain from spam or asking for questions that infringe upon a Service's
 <a href="https://github.com/Shivelight"><img src="https://images.weserv.nl/?url=avatars.githubusercontent.com/u/20620780?v=4&h=25&w=25&fit=cover&mask=circle&maxage=7d" alt="Shivelight"/></a>
 <a href="https://github.com/knowhere01"><img src="https://images.weserv.nl/?url=avatars.githubusercontent.com/u/113712042?v=4&h=25&w=25&fit=cover&mask=circle&maxage=7d" alt="knowhere01"/></a>
 <a href="https://github.com/retouching"><img src="https://images.weserv.nl/?url=avatars.githubusercontent.com/u/33735357?v=4&h=25&w=25&fit=cover&mask=circle&maxage=7d" alt="retouching"/></a>
+<a href="https://github.com/pandamoon21"><img src="https://images.weserv.nl/?url=avatars.githubusercontent.com/u/33972938?v=4&h=25&w=25&fit=cover&mask=circle&maxage=7d" alt="pandamoon21"/></a>
+<a href="https://github.com/adbbbb"><img src="https://images.weserv.nl/?url=avatars.githubusercontent.com/u/56319336?v=4&h=25&w=25&fit=cover&mask=circle&maxage=7d" alt="adbbbb"/></a>

 ## Licensing
@@ -178,6 +178,7 @@ class dl:
         except ValueError as e:
             self.log.error(f"Failed to load Widevine CDM, {e}")
             sys.exit(1)
+        if self.cdm:
             self.log.info(
                 f"Loaded {self.cdm.__class__.__name__} Widevine CDM: {self.cdm.system_id} (L{self.cdm.security_level})"
             )
@@ -701,16 +702,22 @@ class dl:
         ):
             for task_id, task_tracks in multiplex_tasks:
                 progress.start_task(task_id)  # TODO: Needed?
-                muxed_path, return_code = task_tracks.mux(
+                muxed_path, return_code, errors = task_tracks.mux(
                     str(title),
                     progress=partial(progress.update, task_id=task_id),
                     delete=False
                 )
                 muxed_paths.append(muxed_path)
-                if return_code == 1:
-                    self.log.warning("mkvmerge had at least one warning, will continue anyway...")
-                elif return_code >= 2:
-                    self.log.error(f"Failed to Mux video to Matroska file ({return_code})")
+                if return_code >= 2:
+                    self.log.error(f"Failed to Mux video to Matroska file ({return_code}):")
+                elif return_code == 1 or errors:
+                    self.log.warning("mkvmerge had at least one warning or error, continuing anyway...")
+                for line in errors:
+                    if line.startswith("#GUI#error"):
+                        self.log.error(line)
+                    else:
+                        self.log.warning(line)
                 if return_code >= 2:
                     sys.exit(1)
                 for video_track in task_tracks.videos:
                     video_track.delete()
@@ -930,21 +937,21 @@ class dl:
         return Credential.loads(credentials)  # type: ignore

     @staticmethod
-    def get_cdm(service: str, profile: Optional[str] = None) -> WidevineCdm:
+    def get_cdm(service: str, profile: Optional[str] = None) -> Optional[WidevineCdm]:
         """
         Get CDM for a specified service (either Local or Remote CDM).
         Raises a ValueError if there's a problem getting a CDM.
         """
         cdm_name = config.cdm.get(service) or config.cdm.get("default")
         if not cdm_name:
-            raise ValueError("A CDM to use wasn't listed in the config")
+            return None

         if isinstance(cdm_name, dict):
             if not profile:
-                raise ValueError("CDM config is mapped for profiles, but no profile was chosen")
+                return None
             cdm_name = cdm_name.get(profile) or config.cdm.get("default")
             if not cdm_name:
-                raise ValueError(f"A CDM to use was not mapped for the profile {profile}")
+                return None

         cdm_api = next(iter(x for x in config.remote_cdm if x["name"] == cdm_name), None)
         if cdm_api:
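The change above swaps the `raise ValueError(...)` paths for `return None`, so a missing CDM is no longer fatal at lookup time; the caller decides (hence the `if self.cdm:` guard added around the load log earlier). A minimal, self-contained sketch of the new lookup behaviour, with an illustrative config shape and function name (not devine's exact code):

```python
# Sketch: resolve a CDM name from a config mapping, returning None rather
# than raising when nothing is configured. Names/values are illustrative.
from typing import Optional, Union

config_cdm: dict[str, Union[str, dict[str, str]]] = {
    "default": "generic_l3",
    "EXAMPLE": {"john": "johns_l1"},  # per-profile mapping for one service
}

def resolve_cdm_name(service: str, profile: Optional[str] = None) -> Optional[str]:
    cdm_name = config_cdm.get(service) or config_cdm.get("default")
    if not cdm_name:
        return None  # previously: raise ValueError(...)
    if isinstance(cdm_name, dict):
        if not profile:
            return None  # profile-mapped config, but no profile was chosen
        cdm_name = cdm_name.get(profile) or config_cdm.get("default")
    return cdm_name or None

assert resolve_cdm_name("EXAMPLE", "john") == "johns_l1"
assert resolve_cdm_name("OTHER") == "generic_l3"
```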
@@ -26,6 +26,7 @@ ShakaPackager = find(
     "shaka-packager",
     "packager",
     f"packager-{__shaka_platform}",
+    f"packager-{__shaka_platform}-arm64",
     f"packager-{__shaka_platform}-x64"
 )
 Aria2 = find("aria2c", "aria2")
@@ -7,7 +7,7 @@ from typing import Optional, Union
 from urllib.parse import urljoin

 from Cryptodome.Cipher import AES
-from Cryptodome.Util.Padding import pad, unpad
+from Cryptodome.Util.Padding import unpad
 from m3u8.model import Key
 from requests import Session
@@ -43,7 +43,7 @@ class ClearKey:

         decrypted = AES. \
             new(self.key, AES.MODE_CBC, self.iv). \
-            decrypt(pad(path.read_bytes(), AES.block_size))
+            decrypt(path.read_bytes())

         try:
             decrypted = unpad(decrypted, AES.block_size)
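The fix is an ordering one: PKCS#7 padding belongs to the plaintext and is added at encryption time, so the old code, which padded the *ciphertext* before decrypting, corrupted the final block. Decrypt the raw bytes first, then `unpad` the result (with the `try` tolerating streams that were never padded). A self-contained round-trip demonstrating the order (requires pycryptodomex):

```python
# Why unpad comes after decrypt for AES-CBC + PKCS#7, the pattern the fix adopts.
from Cryptodome.Cipher import AES
from Cryptodome.Random import get_random_bytes
from Cryptodome.Util.Padding import pad, unpad

key, iv = get_random_bytes(16), get_random_bytes(16)
plaintext = b"not a multiple of sixteen bytes!!"  # 33 bytes

# Encrypt side: pad the *plaintext* up to the block size, then encrypt.
ciphertext = AES.new(key, AES.MODE_CBC, iv).encrypt(pad(plaintext, AES.block_size))

# Decrypt side (as in the fixed code): decrypt the raw bytes first...
decrypted = AES.new(key, AES.MODE_CBC, iv).decrypt(ciphertext)
# ...then strip the padding; a try/except handles streams that were never
# padded and are already an exact multiple of the block size.
try:
    decrypted = unpad(decrypted, AES.block_size)
except ValueError:
    pass  # no/invalid padding: keep the raw decrypted bytes

assert decrypted == plaintext
```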
@@ -292,6 +292,7 @@ class DASH:
         if segment_template is not None:
             segment_template = copy(segment_template)
             start_number = int(segment_template.get("startNumber") or 1)
+            end_number = int(segment_template.get("endNumber") or 0) or None
             segment_timeline = segment_template.find("SegmentTimeline")
             segment_timescale = float(segment_template.get("timescale") or 1)
@@ -328,9 +329,11 @@ class DASH:
                 for _ in range(1 + (int(s.get("r") or 0))):
                     segment_durations.append(current_time)
                     current_time += int(s.get("d"))
-                seg_num_list = list(range(start_number, len(segment_durations) + start_number))

-                for t, n in zip(segment_durations, seg_num_list):
+                if not end_number:
+                    end_number = len(segment_durations)
+
+                for t, n in zip(segment_durations, range(start_number, end_number + 1)):
                     segments.append((
                         DASH.replace_fields(
                             segment_template.get("media"),
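For context on the loop this hunk touches: per the DASH spec, each `<S>` element in a `SegmentTimeline` contributes `1 + r` segments of duration `d` (`r` is the repeat count), and `t`, when present, resets the running clock. A self-contained sketch of that expansion, with illustrative values:

```python
# How a DASH <SegmentTimeline> expands into per-segment start times,
# mirroring the loop above.
timeline = [
    {"t": 0, "d": 5000, "r": 2},  # 3 segments: starts 0, 5000, 10000
    {"d": 4000},                  # 1 segment: start 15000
]

current_time = 0
segment_start_times: list[int] = []
for s in timeline:
    if "t" in s:
        current_time = s["t"]
    for _ in range(1 + s.get("r", 0)):
        segment_start_times.append(current_time)
        current_time += s["d"]

assert segment_start_times == [0, 5000, 10000, 15000]
# With startNumber=1 and no endNumber, the code above numbers these
# segments 1..len(segment_start_times).
```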
@@ -345,9 +348,11 @@ class DASH:
                     raise ValueError("Duration of the Period was unable to be determined.")
                 period_duration = DASH.pt_to_sec(period_duration)
                 segment_duration = float(segment_template.get("duration")) or 1
-                total_segments = math.ceil(period_duration / (segment_duration / segment_timescale))

-                for s in range(start_number, start_number + total_segments):
+                if not end_number:
+                    end_number = math.ceil(period_duration / (segment_duration / segment_timescale))
+
+                for s in range(start_number, end_number + 1):
                     segments.append((
                         DASH.replace_fields(
                             segment_template.get("media"),
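In this fixed-duration branch there is no timeline, so the segment count falls out of arithmetic: each segment covers `duration / timescale` seconds of the Period, rounded up to cover the whole duration. A worked example with illustrative numbers:

```python
# Worked example of the branch above: segments = period length divided by
# the per-segment length in seconds (duration/timescale), rounded up.
import math

period_duration = 600.0    # seconds (after pt_to_sec on e.g. "PT10M")
segment_duration = 96256   # @duration, in timescale units
segment_timescale = 48000  # @timescale, so each segment is ~2.005 s
start_number = 1

end_number = math.ceil(period_duration / (segment_duration / segment_timescale))
segment_numbers = range(start_number, end_number + 1)

assert end_number == 300  # 600 / 2.0053... = 299.2, ceil -> 300
assert len(segment_numbers) == 300
```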
@@ -467,6 +472,7 @@ class DASH:
         if downloader.__name__ == "aria2c" and any(bytes_range is not None for url, bytes_range in segments):
             # aria2(c) is shit and doesn't support the Range header, fallback to the requests downloader
             downloader = requests_downloader
+            log.warning("Falling back to the requests downloader as aria2(c) doesn't support the Range header")

         for status_update in downloader(
             urls=[
@@ -254,6 +254,12 @@ class HLS:
         progress(total=total_segments)

+        downloader = track.downloader
+        if (
+            downloader.__name__ == "aria2c" and
+            any(x.byterange for x in master.segments if x not in unwanted_segments)
+        ):
+            downloader = requests_downloader
+            log.warning("Falling back to the requests downloader as aria2(c) doesn't support the Range header")
+
         urls: list[dict[str, Any]] = []
         segment_durations: list[int] = []
@@ -266,9 +272,6 @@ class HLS:
             segment_durations.append(int(segment.duration))

             if segment.byterange:
-                if downloader.__name__ == "aria2c":
-                    # aria2(c) is shit and doesn't support the Range header, fallback to the requests downloader
-                    downloader = requests_downloader
                 byte_range = HLS.calculate_byte_range(segment.byterange, range_offset)
                 range_offset = byte_range.split("-")[0]
             else:
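The byterange handling is why the fallback exists at all: per the HLS spec, `EXT-X-BYTERANGE` is `<n>[@<o>]` (a length plus an optional offset, defaulting to the end of the previous sub-range), and fetching such a sub-range needs an HTTP `Range: bytes=start-end` header. A sketch of that conversion follows; the helper name and body are illustrative, not necessarily the exact implementation of `HLS.calculate_byte_range`:

```python
# EXT-X-BYTERANGE ("<n>[@<o>]") -> inclusive HTTP Range value ("start-end").
def byterange_to_http_range(byterange: str, previous_end: int = 0) -> str:
    if "@" in byterange:
        length, offset = (int(x) for x in byterange.split("@", 1))
    else:
        length, offset = int(byterange), previous_end
    return f"{offset}-{offset + length - 1}"  # used as "Range: bytes=0-1023"

first = byterange_to_http_range("1024@0")       # "0-1023"
second = byterange_to_http_range("1024", 1024)  # "1024-2047"
assert (first, second) == ("0-1023", "1024-2047")
```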
@@ -384,15 +387,27 @@ class HLS:
         elif len(files) != range_len:
             raise ValueError(f"Missing {range_len - len(files)} segment files for {segment_range}...")

+        if isinstance(drm, Widevine):
+            # with widevine we can merge all segments and decrypt once
             merge(
                 to=merged_path,
                 via=files,
                 delete=True,
                 include_map_data=True
             )

             drm.decrypt(merged_path)
             merged_path.rename(decrypted_path)
+        else:
+            # with other drm we must decrypt separately and then merge them
+            # for aes this is because each segment likely has 16-byte padding
+            for file in files:
+                drm.decrypt(file)
+            merge(
+                to=merged_path,
+                via=files,
+                delete=True,
+                include_map_data=True
+            )

         events.emit(
             events.Types.TRACK_DECRYPTED,
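The comment in the else-branch is worth unpacking: each AES-encrypted HLS segment is padded to the block size independently, so concatenating ciphertexts and unpadding once would leave the interior padding bytes embedded in the output. A self-contained illustration (simplified: a real playlist would use a distinct IV per segment):

```python
# Why segments must be decrypted before merging for AES: per-segment PKCS#7
# padding survives a merged decrypt.
from Cryptodome.Cipher import AES
from Cryptodome.Random import get_random_bytes
from Cryptodome.Util.Padding import pad, unpad

key, iv = get_random_bytes(16), get_random_bytes(16)
segments = [b"segment one payload", b"segment two"]

encrypted = [
    AES.new(key, AES.MODE_CBC, iv).encrypt(pad(s, AES.block_size))
    for s in segments
]

# Correct order (decrypt each segment, then merge):
good = b"".join(
    unpad(AES.new(key, AES.MODE_CBC, iv).decrypt(e), AES.block_size)
    for e in encrypted
)
assert good == b"".join(segments)

# Merging first and unpadding once keeps segment one's 13 padding bytes:
merged = AES.new(key, AES.MODE_CBC, iv).decrypt(encrypted[0]) + \
         AES.new(key, AES.MODE_CBC, iv).decrypt(encrypted[1])
bad = unpad(merged, AES.block_size)
assert bad[:32] == b"segment one payload" + b"\x0d" * 13
```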
@@ -37,7 +37,7 @@ class Attachment:
             mime_type = {
                 ".ttf": "application/x-truetype-font",
                 ".otf": "application/vnd.ms-opentype"
-            }.get(path.suffix, mimetypes.guess_type(path)[0])
+            }.get(path.suffix.lower(), mimetypes.guess_type(path)[0])
             if not mime_type:
                 raise ValueError("The attachment mime-type could not be automatically detected.")
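The one-call fix matters because `Path.suffix` preserves case, so a file named `Font.TTF` previously fell through the dict lookup and onto `mimetypes`, which does not know these font types on every platform. A quick demonstration:

```python
# Why the .lower() matters: an upper-case extension missed the dict keys.
from pathlib import Path

mime_map = {
    ".ttf": "application/x-truetype-font",
    ".otf": "application/vnd.ms-opentype",
}

path = Path("Font.TTF")
assert mime_map.get(path.suffix) is None  # old lookup: ".TTF" misses ".ttf"
assert mime_map.get(path.suffix.lower()) == "application/x-truetype-font"
```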
@@ -206,12 +206,14 @@ class Subtitle(Track):
         elif self.codec == Subtitle.Codec.WebVTT:
             text = self.path.read_text("utf8")
             if self.descriptor == Track.Descriptor.DASH:
+                if len(self.data["dash"]["segment_durations"]) > 1:
                     text = merge_segmented_webvtt(
                         text,
                         segment_durations=self.data["dash"]["segment_durations"],
                         timescale=self.data["dash"]["timescale"]
                     )
             elif self.descriptor == Track.Descriptor.HLS:
+                if len(self.data["hls"]["segment_durations"]) > 1:
                     text = merge_segmented_webvtt(
                         text,
                         segment_durations=self.data["hls"]["segment_durations"],
@@ -4,6 +4,7 @@ import logging
 import re
 import shutil
 import subprocess
+from collections import defaultdict
 from copy import copy
 from enum import Enum
 from functools import partial
@@ -42,7 +43,7 @@ class Track:
         drm: Optional[Iterable[DRM_T]] = None,
         edition: Optional[str] = None,
         downloader: Optional[Callable] = None,
-        data: Optional[dict] = None,
+        data: Optional[Union[dict, defaultdict]] = None,
         id_: Optional[str] = None,
     ) -> None:
         if not isinstance(url, (str, list)):
@@ -63,8 +64,8 @@ class Track:
             raise TypeError(f"Expected edition to be a {str}, not {type(edition)}")
         if not isinstance(downloader, (Callable, type(None))):
             raise TypeError(f"Expected downloader to be a {Callable}, not {type(downloader)}")
-        if not isinstance(data, (dict, type(None))):
-            raise TypeError(f"Expected data to be a {dict}, not {type(data)}")
+        if not isinstance(data, (dict, defaultdict, type(None))):
+            raise TypeError(f"Expected data to be a {dict} or {defaultdict}, not {type(data)}")

         invalid_urls = ", ".join(set(type(x) for x in url if not isinstance(x, str)))
         if invalid_urls:
@@ -93,6 +94,7 @@ class Track:
         self.drm = drm
         self.edition: str = edition
         self.downloader = downloader
+        self._data: defaultdict[Any, Any] = defaultdict(dict)
         self.data = data or {}

         if self.name is None:
@@ -132,6 +134,42 @@ class Track:
     def __eq__(self, other: Any) -> bool:
         return isinstance(other, Track) and self.id == other.id

+    @property
+    def data(self) -> defaultdict[Any, Any]:
+        """
+        Arbitrary track data dictionary.
+
+        A defaultdict is used with a dict as the factory for easier
+        nested saving and safer exists-checks.
+
+        Reserved keys:
+
+        - "hls" used by the HLS class.
+          - playlist: m3u8.model.Playlist - The primary track information.
+          - media: m3u8.model.Media - The audio/subtitle track information.
+          - segment_durations: list[int] - A list of each segment's duration.
+        - "dash" used by the DASH class.
+          - manifest: lxml.ElementTree - DASH MPD manifest.
+          - period: lxml.Element - The period of this track.
+          - adaptation_set: lxml.Element - The adaptation set of this track.
+          - representation: lxml.Element - The representation of this track.
+          - timescale: int - The timescale of the track's segments.
+          - segment_durations: list[int] - A list of each segment's duration.
+
+        You should not add, change, or remove any data within reserved keys.
+        You may use their data but do note that the values of them may change
+        or be removed at any point.
+        """
+        return self._data
+
+    @data.setter
+    def data(self, value: Union[dict, defaultdict]) -> None:
+        if not isinstance(value, (dict, defaultdict)):
+            raise TypeError(f"Expected data to be a {dict} or {defaultdict}, not {type(value)}")
+        if isinstance(value, dict):
+            value = defaultdict(dict, **value)
+        self._data = value
+
     def download(
         self,
         session: Session,
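With `defaultdict(dict)` as the backing store, nested writes like `track.data["dash"]["timescale"] = ...` no longer need the outer key to exist first, and reads of absent keys return an empty dict rather than raising, exactly the "easier nested saving and safer exists-checks" the docstring promises. A standalone sketch of that behaviour (no devine import needed):

```python
# The behaviour the new property guarantees.
from collections import defaultdict

data: defaultdict = defaultdict(dict)

data["dash"]["timescale"] = 48000       # no KeyError; "dash" auto-creates as {}
assert data["hls"] == {}                # absent reserved key reads as empty dict
assert not data["hls"].get("playlist")  # safe exists-check, no exception

# The setter above upgrades a plain dict the same way:
plain = {"dash": {"timescale": 48000}}
upgraded = defaultdict(dict, **plain)
assert upgraded["anything"] == {}
```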
@@ -504,6 +542,7 @@ class Track:
             else:
                 raise

         original_path.unlink()
+        self.path = output_path
@@ -316,7 +316,7 @@ class Tracks:
             ][:per_language or None])
         return selected

-    def mux(self, title: str, delete: bool = True, progress: Optional[partial] = None) -> tuple[Path, int]:
+    def mux(self, title: str, delete: bool = True, progress: Optional[partial] = None) -> tuple[Path, int, list[str]]:
         """
         Multiplex all the Tracks into a Matroska Container file.
@@ -410,15 +410,18 @@ class Tracks:

         # let potential failures go to caller, caller should handle
         try:
+            errors = []
             p = subprocess.Popen([
                 *cl,
                 "--output", str(output_path),
                 "--gui-mode"
             ], text=True, stdout=subprocess.PIPE)
             for line in iter(p.stdout.readline, ""):
+                if line.startswith("#GUI#error") or line.startswith("#GUI#warning"):
+                    errors.append(line)
                 if "progress" in line:
                     progress(total=100, completed=int(line.strip()[14:-1]))
-            return output_path, p.wait()
+            return output_path, p.wait(), errors
         finally:
             if chapters_path:
                 # regardless of delete param, we delete as it's a file we made during muxing
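For context on the parsing above: mkvmerge's `--gui-mode` prefixes machine-readable lines with `#GUI#`, and the `line.strip()[14:-1]` slice extracts the percentage from a progress line because the prefix `"#GUI#progress "` is exactly 14 characters and the `[-1]` bound drops the trailing `%`. A self-contained sketch over illustrative sample output:

```python
# Slicing mkvmerge --gui-mode output the way the code above does.
sample_output = [
    "#GUI#progress 0%\n",
    "#GUI#warning the chapter file is empty\n",
    "#GUI#progress 42%\n",
    "#GUI#error could not write to destination\n",
    "#GUI#progress 100%\n",
]

errors = []
for line in sample_output:
    if line.startswith("#GUI#error") or line.startswith("#GUI#warning"):
        errors.append(line)
    if "progress" in line:
        completed = int(line.strip()[14:-1])  # "#GUI#progress 42%" -> 42
        print(f"mux progress: {completed}%")

assert len(errors) == 2
```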
@@ -123,18 +123,18 @@ def get_boxes(data: bytes, box_type: bytes, as_bytes: bool = False) -> Box:
     # since it doesn't care what child box the wanted box is from, this works fine.
     if not isinstance(data, (bytes, bytearray)):
         raise ValueError("data must be bytes")

+    offset = 0
     while True:
         try:
-            index = data.index(box_type)
+            index = data[offset:].index(box_type)
         except ValueError:
             break
-        if index < 0:
-            break
         if index > 4:
             index -= 4  # size is before box type and is 4 bytes long
-        data = data[index:]
         try:
-            box = Box.parse(data)
+            box = Box.parse(data[offset:][index:])
         except IOError:
             # since get_init_segment might cut off unexpectedly, pymp4 may be unable to read
             # the expected amounts of data and complain, so let's just end the function here
@@ -147,6 +147,7 @@ def get_boxes(data: bytes, box_type: bytes, as_bytes: bool = False) -> Box:
             raise e
         if as_bytes:
             box = Box.build(box)
+        offset += index + len(Box.build(box))
         yield box
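The bug this pair of hunks fixes: searching `data.index(box_type)` from the start of the buffer on every iteration meant the generator could keep finding the same box, and the removed `if index < 0` check was dead code anyway, since `bytes.index()` raises `ValueError` on no match rather than returning `-1` (that is `find()`'s behaviour). The fix threads an explicit `offset` forward past each parsed box. A self-contained sketch of the scanning pattern with a generic needle (not pymp4 boxes):

```python
# The fixed pattern: keep an explicit offset and advance it past each match,
# instead of re-searching the buffer from the start.
def find_all(data: bytes, needle: bytes):
    offset = 0
    while True:
        try:
            index = data[offset:].index(needle)
        except ValueError:
            break  # no further occurrences; .index() never returns -1
        yield offset + index
        offset += index + len(needle)  # continue searching after this match

data = b"..moov....moof..moov.."
assert list(find_all(data, b"moov")) == [2, 16]
```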