Concatenating + splitting at the same time is now possible. Track entries are created only once, not for each new output file.

Moritz Bunkus 2005-03-06 14:28:26 +00:00
parent 93ba4adb2b
commit a5f837fb44
7 changed files with 66 additions and 34 deletions
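
The core of the change shows up in the source hunks below: the global kax_tracks stops being a pointer into a per-file KaxSegment and becomes a single object that outlives every output file, so the same track entries are written into each split file instead of being recreated. A minimal sketch of the pattern, using stand-in types rather than the real libmatroska classes:

    #include <iostream>
    #include <vector>

    // Stand-ins for KaxTracks / KaxTrackEntry; the real libmatroska
    // classes carry far more state.
    struct TrackEntry { int number; };

    struct Tracks {
      std::vector<TrackEntry> entries;
      void render(std::ostream &out) const {
        for (const TrackEntry &e : entries)
          out << "track " << e.number << "\n";
      }
    };

    // A single object with static storage duration, filled exactly once
    // and re-rendered into every split file, as kax_tracks is after
    // this commit.
    static Tracks tracks;

    void render_headers(std::ostream &out, bool first_file) {
      if (first_file) {                // create the entries only once...
        tracks.entries.push_back(TrackEntry{1});
        tracks.entries.push_back(TrackEntry{2});
      }
      tracks.render(out);              // ...but write them into each file
    }

    int main() {
      render_headers(std::cout, true);   // first split file
      render_headers(std::cout, false);  // second file reuses the entries
    }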

View File

@@ -1,3 +1,9 @@
+2005-03-06  Moritz Bunkus  <moritz@bunkus.org>
+
+  * mkvmerge: bug fix: Appending + splitting was segfaulting if used
+    together and at least one split occurred after a track had been
+    appended.
+
 2005-03-02  Moritz Bunkus  <moritz@bunkus.org>

   * mkvmerge: Added more descriptive error messages if two tracks

View File

@@ -139,7 +139,7 @@ bool identifying = false, identify_verbose = false;
 cluster_helper_c *cluster_helper = NULL;
 KaxSegment *kax_segment;
-KaxTracks *kax_tracks;
+KaxTracks kax_tracks;
 KaxTrackEntry *kax_last_entry;
 KaxCues *kax_cues;
 KaxSeekHead *kax_sh_main = NULL, *kax_sh_cues = NULL;
@@ -621,30 +621,30 @@ render_headers(mm_io_c *rout) {
   if (write_meta_seek_for_clusters)
     kax_sh_cues = new KaxSeekHead();

-  kax_tracks = &GetChild<KaxTracks>(*kax_segment);
-  kax_last_entry = NULL;
-
-  for (i = 0; i < track_order.size(); i++)
-    if ((track_order[i].file_id >= 0) &&
-        (track_order[i].file_id < files.size()) &&
-        !files[track_order[i].file_id].appending)
-      files[track_order[i].file_id].reader->
-        set_headers_for_track(track_order[i].track_id);
-  for (i = 0; i < files.size(); i++)
-    if (!files[i].appending)
-      files[i].reader->set_headers();
-  set_timecode_scale();
-  for (i = 0; i < packetizers.size(); i++)
-    if (packetizers[i].packetizer != NULL)
-      packetizers[i].packetizer->fix_headers();
+  if (first_file) {
+    kax_last_entry = NULL;
+
+    for (i = 0; i < track_order.size(); i++)
+      if ((track_order[i].file_id >= 0) &&
+          (track_order[i].file_id < files.size()) &&
+          !files[track_order[i].file_id].appending)
+        files[track_order[i].file_id].reader->
+          set_headers_for_track(track_order[i].track_id);
+    for (i = 0; i < files.size(); i++)
+      if (!files[i].appending)
+        files[i].reader->set_headers();
+    set_timecode_scale();
+    for (i = 0; i < packetizers.size(); i++)
+      if (packetizers[i].packetizer != NULL)
+        packetizers[i].packetizer->fix_headers();
+  }

   kax_infos->Render(*rout, true);
   kax_sh_main->IndexThis(*kax_infos, *kax_segment);

   if (packetizers.size() > 0) {
-    kax_tracks->Render(*rout,
-                       !hack_engaged(ENGAGE_NO_DEFAULT_HEADER_VALUES));
-    kax_sh_main->IndexThis(*kax_tracks, *kax_segment);
+    kax_segment->PushElement(kax_tracks);
+    kax_tracks.Render(*rout, !hack_engaged(ENGAGE_NO_DEFAULT_HEADER_VALUES));
+    kax_sh_main->IndexThis(kax_tracks, *kax_segment);

     // Reserve some small amount of space for header changes by the
     // packetizers.
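
Each output file still gets a fresh KaxSegment, but kax_tracks is now attached to it with PushElement() instead of being created inside it by GetChild<KaxTracks>(). A rough sketch of that arrangement with hypothetical stand-in types (note that, unlike this Segment, libebml's master elements delete their children on destruction, the hazard finish_file() deals with further down):

    #include <cstddef>
    #include <vector>

    struct Element { virtual ~Element() {} };
    struct TracksElement : Element {};

    // Hypothetical master element; non-owning in this sketch, whereas
    // the real EbmlMaster frees its children when destroyed.
    struct Segment {
      std::vector<Element *> children;
      void push_element(Element &e) { children.push_back(&e); }
    };

    TracksElement shared_tracks;           // outlives every output file

    Segment *start_new_file() {
      Segment *segment = new Segment;      // fresh segment per split file
      segment->push_element(shared_tracks);  // re-attach the shared tracks
      return segment;
    }

    int main() {
      Segment *first  = start_new_file();
      Segment *second = start_new_file();  // same tracks, new segment
      delete first;
      delete second;
    }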
@@ -668,13 +668,12 @@ void
 rerender_track_headers() {
   int64_t new_void_size;

-  kax_tracks->UpdateSize(!hack_engaged(ENGAGE_NO_DEFAULT_HEADER_VALUES));
+  kax_tracks.UpdateSize(!hack_engaged(ENGAGE_NO_DEFAULT_HEADER_VALUES));
   new_void_size = void_after_track_headers->GetElementPosition() +
     void_after_track_headers->GetSize() -
-    kax_tracks->GetElementPosition() -
-    kax_tracks->ElementSize();
-  out->save_pos(kax_tracks->GetElementPosition());
-  kax_tracks->Render(*out, !hack_engaged(ENGAGE_NO_DEFAULT_HEADER_VALUES));
+    kax_tracks.GetElementPosition() - kax_tracks.ElementSize();
+  out->save_pos(kax_tracks.GetElementPosition());
+  kax_tracks.Render(*out, !hack_engaged(ENGAGE_NO_DEFAULT_HEADER_VALUES));
   delete void_after_track_headers;
   void_after_track_headers = new EbmlVoid;
   void_after_track_headers->SetSize(new_void_size);
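
The void-size arithmetic above keeps the file layout stable: after the tracks element is rewritten in place it may have grown or shrunk, and the EbmlVoid filler following it must absorb the difference so everything after it keeps its offset. The calculation in isolation, with a hypothetical Span type:

    #include <cstdint>

    // Hypothetical stand-in for an element's position and size on disk.
    struct Span {
      std::int64_t pos;
      std::int64_t size;
      std::int64_t end() const { return pos + size; }
    };

    // new void size = old end of the void filler - new end of the tracks,
    // i.e. void.pos + void.size - tracks.pos - tracks.size as in the diff.
    std::int64_t new_void_size(const Span &old_void, const Span &new_tracks) {
      return old_void.end() - new_tracks.end();
    }

    int main() {
      Span old_void   = {1000, 200};  // filler as originally rendered
      Span new_tracks = {800, 180};   // tracks shrank by 20 bytes...
      // ...so the void grows from 200 to 220 to cover the gap.
      return new_void_size(old_void, new_tracks) == 220 ? 0 : 1;
    }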
@@ -1589,6 +1588,13 @@ finish_file(bool last_file) {
   kax_segment->OverwriteHead(*out);
   delete out;

+  // The tracks element must not be deleted.
+  for (i = 0; i < kax_segment->ListSize(); ++i)
+    if (NULL == dynamic_cast<KaxTracks *>((*kax_segment)[i]))
+      delete (*kax_segment)[i];
+  kax_segment->RemoveAll();
+
   delete kax_segment;
   delete kax_cues;
   delete kax_sh_void;
@@ -1596,9 +1602,6 @@ finish_file(bool last_file) {
   delete void_after_track_headers;
   if (kax_sh_cues != NULL)
     delete kax_sh_cues;
-
-  for (i = 0; i < packetizers.size(); i++)
-    packetizers[i].packetizer->reset();
 }

 static void establish_deferred_connections(filelist_t &file);
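
The finish_file() changes are the other half of the ownership story: because the segment would otherwise free all of its children, the new loop deletes every child except the KaxTracks and then calls RemoveAll() so nothing is left for the segment's destructor to double-free. The same idea, again with self-contained stand-in types:

    #include <cstddef>
    #include <vector>

    struct Element { virtual ~Element() {} };
    struct TracksElement : Element {};
    struct CuesElement : Element {};       // any per-file child

    struct Segment {
      std::vector<Element *> children;
      std::size_t list_size() const { return children.size(); }
      Element *operator[](std::size_t i) { return children[i]; }
      void remove_all() { children.clear(); }
      ~Segment() {                         // owning, like libebml's masters
        for (std::size_t i = 0; i < children.size(); ++i)
          delete children[i];
      }
    };

    void finish_file(Segment *segment) {
      // Free every child except the shared tracks element, which must
      // survive into the next split file...
      for (std::size_t i = 0; i < segment->list_size(); ++i)
        if (NULL == dynamic_cast<TracksElement *>((*segment)[i]))
          delete (*segment)[i];
      // ...then detach everything so ~Segment() cannot double-free.
      segment->remove_all();
      delete segment;
    }

    int main() {
      static TracksElement shared_tracks;  // the long-lived kax_tracks
      Segment *segment = new Segment;
      segment->children.push_back(&shared_tracks);
      segment->children.push_back(new CuesElement);  // per-file, heap-owned
      finish_file(segment);  // frees the cues, spares the tracks
    }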

View File

@@ -145,7 +145,7 @@ extern string segmentinfo_file_name;
 extern KaxTags *tags_from_cue_chapters;

 extern KaxSegment *kax_segment;
-extern KaxTracks *kax_tracks;
+extern KaxTracks kax_tracks;
 extern KaxTrackEntry *kax_last_entry;
 extern KaxCues *kax_cues;
 extern KaxSeekHead *kax_sh_main, *kax_sh_cues;

View File

@@ -567,10 +567,10 @@ generic_packetizer_c::set_headers() {
   if (track_entry == NULL) {
     if (kax_last_entry == NULL)
-      track_entry = &GetChild<KaxTrackEntry>(*kax_tracks);
+      track_entry = &GetChild<KaxTrackEntry>(kax_tracks);
     else
       track_entry =
-        &GetNextChild<KaxTrackEntry>(*kax_tracks, *kax_last_entry);
+        &GetNextChild<KaxTrackEntry>(kax_tracks, *kax_last_entry);
     kax_last_entry = track_entry;

     track_entry->SetGlobalTimecodeScale((int64_t)timecode_scale);
   }
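
With reset() removed (see the header change below), a packetizer's cached track_entry stays valid across output files, so the track_entry == NULL branch that creates the entry via GetChild/GetNextChild runs only for the very first file. A simplified sketch of the create-once caching; append() merely approximates the get-or-create semantics of the real calls:

    #include <memory>
    #include <vector>

    struct TrackEntry { int number; };

    // Stand-in for the KaxTracks master element.
    struct Tracks {
      std::vector<std::unique_ptr<TrackEntry>> entries;
      TrackEntry &append() {
        entries.push_back(std::make_unique<TrackEntry>());
        return *entries.back();
      }
    };

    Tracks tracks;                       // the single kax_tracks instance
    TrackEntry *last_entry = nullptr;    // mirrors kax_last_entry

    struct Packetizer {
      TrackEntry *track_entry = nullptr; // cached across output files

      void set_headers() {
        if (track_entry == nullptr) {    // taken only for the first file
          track_entry = &tracks.append();
          last_entry  = track_entry;
        }
        track_entry->number = 1;         // (re)fill the header fields
      }
    };

    int main() {
      Packetizer video, audio;
      video.set_headers();               // first file: two entries created
      audio.set_headers();
      video.set_headers();               // later files reuse the cache;
      audio.set_headers();               // tracks.entries.size() is still 2
    }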

View File

@@ -496,9 +496,6 @@ public:
   virtual file_status_e read() {
     return reader->read(this);
   }
-  virtual void reset() {
-    track_entry = NULL;
-  }

   virtual void add_packet(memory_c &mem, int64_t timecode,
                           int64_t duration, bool duration_mandatory = false,

View File

@@ -54,3 +54,4 @@ T_204wavpack_without_correctiondata:0d01f5162a71fedc934d7e8a675a0004:passed:2005
 T_205X_cuesheets:3b00b00c7d185137e30d7e95e3123d33-b3bb67d316e20da12926d5c1d628f6e5:passed:20050210-211853
 T_206X_vobsub:1871f1e7e83dc90ba918a1337bc8eb30-287d40b353664d122f12dfeae3c84888-115f77a674d29a530d6aefc386ed5e85-a6c19d7405f1f9cb8227ac53a3ad23f8-24836c79a8fea4e0da5081e5ea8a643f:passed:20050211-231728
 T_207segmentinfo:6f78ae296efa17b704ceca71de9ff728:passed:20050211-234856
+T_208cat_and_splitting:55c3d406e2b4f793f7d0e1f0547d66b3-2d5670d74da87a4ed48e00720fd8c16f:new:20050306-152640

View File

@@ -0,0 +1,25 @@
+#!/usr/bin/ruby -w
+
+class T_208cat_and_splitting < Test
+  def description
+    return "mkvmerge / concatenation and splitting at the same time"
+  end
+
+  def run
+    merge(tmp + "-%03d ", "--split-max-files 2 --split 4m data/avi/v.avi " +
+           "+data/avi/v.avi")
+    if (!FileTest.exist?(tmp + "-001"))
+      error("First split file does not exist.")
+    end
+    if (!FileTest.exist?(tmp + "-002"))
+      File.unlink(tmp + "-001")
+      error("Second split file does not exist.")
+    end
+
+    hash = hash_file(tmp + "-001") + "-" + hash_file(tmp + "-002")
+    File.unlink(tmp + "-001")
+    File.unlink(tmp + "-002")
+
+    return hash
+  end
+end