/**
 * @file
 * @brief Source file for Clip class
 * @author Jonathan Thomas <jonathan@openshot.org>
 *
 * @ref License
 */
// Copyright (c) 2008-2019 OpenShot Studios, LLC
//
// SPDX-License-Identifier: LGPL-3.0-or-later

#include "Clip.h"

#include "AudioResampler.h"
#include "Exceptions.h"
#include "FFmpegReader.h"
#include "FrameMapper.h"
#include "QtImageReader.h"
#include "ChunkReader.h"
#include "DummyReader.h"
#include "Timeline.h"
#include "ZmqLogger.h"

#include <algorithm>
#include <cmath>
#include <sstream>

#ifdef USE_IMAGEMAGICK
    #include "MagickUtilities.h"
    #include "ImageReader.h"
    #include "TextReader.h"
#endif

#include <Qt>

using namespace openshot;

namespace {
    struct CompositeChoice { const char* name; CompositeType value; };
    const CompositeChoice composite_choices[] = {
        {"Normal", COMPOSITE_SOURCE_OVER},

        // Darken group
        {"Darken", COMPOSITE_DARKEN},
        {"Multiply", COMPOSITE_MULTIPLY},
        {"Color Burn", COMPOSITE_COLOR_BURN},

        // Lighten group
        {"Lighten", COMPOSITE_LIGHTEN},
        {"Screen", COMPOSITE_SCREEN},
        {"Color Dodge", COMPOSITE_COLOR_DODGE},
        {"Add", COMPOSITE_PLUS},

        // Contrast group
        {"Overlay", COMPOSITE_OVERLAY},
        {"Soft Light", COMPOSITE_SOFT_LIGHT},
        {"Hard Light", COMPOSITE_HARD_LIGHT},

        // Compare
        {"Difference", COMPOSITE_DIFFERENCE},
        {"Exclusion", COMPOSITE_EXCLUSION},
    };
    const int composite_choices_count = sizeof(composite_choices)/sizeof(CompositeChoice);
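
    // NOTE: these CompositeType values are cast directly to
    // QPainter::CompositionMode when compositing (see apply_background()
    // and apply_keyframes() below).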
}

// Init default settings for a clip
void Clip::init_settings()
{
    // Init clip settings
    Position(0.0);
    Layer(0);
    Start(0.0);
    ClipBase::End(0.0);
    gravity = GRAVITY_CENTER;
    scale = SCALE_FIT;
    anchor = ANCHOR_CANVAS;
    display = FRAME_DISPLAY_NONE;
    mixing = VOLUME_MIX_NONE;
    composite = COMPOSITE_SOURCE_OVER;
    waveform = false;
    previous_properties = "";
    parentObjectId = "";

    // Init scale curves
    scale_x = Keyframe(1.0);
    scale_y = Keyframe(1.0);

    // Init location curves
    location_x = Keyframe(0.0);
    location_y = Keyframe(0.0);

    // Init alpha
    alpha = Keyframe(1.0);

    // Init time & volume
    time = Keyframe(1.0);
    volume = Keyframe(1.0);

    // Init audio waveform color
    wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255);

    // Init shear and perspective curves
    shear_x = Keyframe(0.0);
    shear_y = Keyframe(0.0);
    origin_x = Keyframe(0.5);
    origin_y = Keyframe(0.5);
    perspective_c1_x = Keyframe(-1.0);
    perspective_c1_y = Keyframe(-1.0);
    perspective_c2_x = Keyframe(-1.0);
    perspective_c2_y = Keyframe(-1.0);
    perspective_c3_x = Keyframe(-1.0);
    perspective_c3_y = Keyframe(-1.0);
    perspective_c4_x = Keyframe(-1.0);
    perspective_c4_y = Keyframe(-1.0);

    // Init audio channel filter and mappings
    channel_filter = Keyframe(-1.0);
    channel_mapping = Keyframe(-1.0);

    // Init audio and video overrides
    has_audio = Keyframe(-1.0);
    has_video = Keyframe(-1.0);
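    // NOTE: -1 acts as the "Auto" sentinel for these audio/video overrides,
    // matching the Auto (-1) / Off (0) / On (1) dropdown choices exposed in
    // PropertiesJSON() below.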

    // Initialize the attached object and attached clip as null pointers
    parentTrackedObject = nullptr;
    parentClipObject = NULL;

    // Init reader info struct
    init_reader_settings();
}

// Init reader info details
void Clip::init_reader_settings() {
    if (reader) {
        // Init rotation (if any)
        init_reader_rotation();

        // Initialize info struct
        info = reader->info;

        // Init cache
        final_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
    }
}

// Update rotation from reader metadata (if any)
void Clip::init_reader_rotation() {
    // Only apply metadata rotation if clip rotation has not been explicitly set.
    if (rotation.GetCount() > 0 || !reader)
        return;

    const auto rotate_meta = reader->info.metadata.find("rotate");
    if (rotate_meta == reader->info.metadata.end()) {
        // Ensure rotation keyframes always start with a default 0° point.
        rotation = Keyframe(0.0f);
        return;
    }

    // strtof() does not throw; it returns 0.0f for invalid metadata,
    // and a zero angle is treated as "no rotation" below.
    float rotate_angle = strtof(rotate_meta->second.c_str(), nullptr);

    rotation = Keyframe(rotate_angle);

    // Do not overwrite user-authored scale curves.
    auto has_default_scale = [](const Keyframe& kf) {
        return kf.GetCount() == 1 && fabs(kf.GetPoint(0).co.Y - 1.0) < 0.00001;
    };
    if (!has_default_scale(scale_x) || !has_default_scale(scale_y))
        return;

    // No need to adjust scaling when the metadata rotation is effectively zero.
    if (fabs(rotate_angle) < 0.0001f)
        return;

    float w = static_cast<float>(reader->info.width);
    float h = static_cast<float>(reader->info.height);
    if (w <= 0.0f || h <= 0.0f)
        return;

    float rad = rotate_angle * static_cast<float>(M_PI) / 180.0f;

    float new_width = fabs(w * cos(rad)) + fabs(h * sin(rad));
    float new_height = fabs(w * sin(rad)) + fabs(h * cos(rad));
    if (new_width <= 0.0f || new_height <= 0.0f)
        return;

    float uniform_scale = std::min(w / new_width, h / new_height);
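
    // Worked example (illustrative): a 1920x1080 source rotated 90° has a
    // 1080x1920 bounding box, so uniform_scale == min(1920/1080, 1080/1920)
    // == 0.5625, shrinking the rotated frame to fit the original canvas.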

    scale_x = Keyframe(uniform_scale);
    scale_y = Keyframe(uniform_scale);
}

// Default Constructor for a clip
Clip::Clip() : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
{
    // Init all default settings
    init_settings();
}

// Constructor with reader
Clip::Clip(ReaderBase* new_reader) : resampler(NULL), reader(new_reader), allocated_reader(NULL), is_open(false)
{
    // Init all default settings
    init_settings();

    // Open and Close the reader (to set the duration of the clip)
    Open();
    Close();

    // Update duration and set parent
    if (reader) {
        ClipBase::End(reader->info.duration);
        reader->ParentClip(this);
        // Init reader info struct
        init_reader_settings();
    }
}

// Constructor with filepath
Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
{
    // Init all default settings
    init_settings();

    // Get file extension (and convert to lower case)
    std::string ext = get_file_extension(path);
    std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);

    // Determine if common video format (or image sequence)
    if (ext=="avi" || ext=="mov" || ext=="mkv" || ext=="mpg" || ext=="mpeg" || ext=="mp3" || ext=="mp4" || ext=="mts" ||
        ext=="ogg" || ext=="wav" || ext=="wmv" || ext=="webm" || ext=="vob" || ext=="gif" || path.find("%") != std::string::npos)
    {
        try
        {
            // Open common video format
            reader = new openshot::FFmpegReader(path);

        } catch(...) { }
    }
    if (ext=="osp")
    {
        try
        {
            // Open OpenShot project file (*.osp)
            reader = new openshot::Timeline(path, true);

        } catch(...) { }
    }

    // If no video found, try each reader
    if (!reader)
    {
        try
        {
            // Try an image reader
            reader = new openshot::QtImageReader(path);

        } catch(...) {
            try
            {
                // Try a video reader
                reader = new openshot::FFmpegReader(path);

            } catch(...) { }
        }
    }

    // Update duration and set parent
    if (reader) {
        ClipBase::End(reader->info.duration);
        reader->ParentClip(this);
        allocated_reader = reader;
        // Init reader info struct
        init_reader_settings();
    }
}

// Destructor
Clip::~Clip()
{
    // Delete the reader if clip created it
    if (allocated_reader) {
        delete allocated_reader;
        allocated_reader = NULL;
        reader = NULL;
    }

    // Close the resampler
    if (resampler) {
        delete resampler;
        resampler = NULL;
    }

    // Close clip
    Close();
}

// Attach clip to bounding box
void Clip::AttachToObject(std::string object_id)
{
    // Search for the tracked object on the timeline
    Timeline* parentTimeline = static_cast<Timeline *>(ParentTimeline());

    if (parentTimeline) {
        // Create a smart pointer to the tracked object from the timeline
        std::shared_ptr<openshot::TrackedObjectBase> trackedObject = parentTimeline->GetTrackedObject(object_id);
        Clip* clipObject = parentTimeline->GetClip(object_id);

        // Check for valid tracked object
        if (trackedObject){
            SetAttachedObject(trackedObject);
            parentClipObject = NULL;
        }
        else if (clipObject) {
            SetAttachedClip(clipObject);
            parentTrackedObject = nullptr;
        }
    }
}

// Set the pointer to the trackedObject this clip is attached to
void Clip::SetAttachedObject(std::shared_ptr<openshot::TrackedObjectBase> trackedObject){
    parentTrackedObject = trackedObject;
}

// Set the pointer to the clip this clip is attached to
void Clip::SetAttachedClip(Clip* clipObject){
    parentClipObject = clipObject;
}

// Set the current reader
void Clip::Reader(ReaderBase* new_reader)
{
    // Delete previously allocated reader (if not related to new reader)
    // FrameMappers that point to the same allocated reader are ignored
    bool is_same_reader = false;
    if (new_reader && allocated_reader) {
        if (new_reader->Name() == "FrameMapper") {
            // Determine if FrameMapper is pointing at the same allocated reader
            FrameMapper* clip_mapped_reader = static_cast<FrameMapper*>(new_reader);
            if (allocated_reader == clip_mapped_reader->Reader()) {
                is_same_reader = true;
            }
        }
    }
    // Clear existing allocated reader (if different)
    if (allocated_reader && !is_same_reader) {
        reader->Close();
        allocated_reader->Close();
        delete allocated_reader;
        reader = NULL;
        allocated_reader = NULL;
    }

    // set reader pointer
    reader = new_reader;

    // set parent
    if (reader) {
        reader->ParentClip(this);

        // Init reader info struct
        init_reader_settings();
    }
}

// Get the current reader
ReaderBase* Clip::Reader()
{
    if (reader)
        return reader;
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}

// Open the internal reader
void Clip::Open()
{
    if (reader)
    {
        // Open the reader
        reader->Open();
        is_open = true;

        // Copy Reader info to Clip
        info = reader->info;

        // Set some clip properties from the file reader
        if (end == 0.0)
            ClipBase::End(reader->info.duration);
    }
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}

// Close the internal reader
void Clip::Close()
{
    if (is_open && reader) {
        ZmqLogger::Instance()->AppendDebugMethod("Clip::Close");

        // Close the reader
        reader->Close();
    }

    // Clear cache
    final_cache.Clear();
    is_open = false;
}

// Get end position of clip (trim end of video), which can be affected by the time curve.
float Clip::End() const
{
    // if a time curve is present, use its length
    if (time.GetCount() > 1)
    {
        // Determine the FPS of this clip
        float fps = 24.0;
        if (reader)
            // file reader
            fps = reader->info.fps.ToFloat();
        else
            // Throw error if reader not initialized
            throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

        return float(time.GetLength()) / fps;
    }
    else
        // just use the duration (as detected by the reader)
        return end;
}
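
// e.g. (illustrative) a time curve spanning 48 frames on a 24 fps clip makes
// End() report 2.0 seconds, regardless of the reader's detected duration.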

// Override End() position
void Clip::End(float value) {
    ClipBase::End(value);
}

// Set associated Timeline pointer
void Clip::ParentTimeline(openshot::TimelineBase* new_timeline) {
    timeline = new_timeline;

    // Clear cache (it might have changed)
    final_cache.Clear();
}

// Create an openshot::Frame object for a specific frame number of this reader.
std::shared_ptr<Frame> Clip::GetFrame(int64_t clip_frame_number)
{
    // Call override of GetFrame
    return GetFrame(NULL, clip_frame_number, NULL);
}

// Create an openshot::Frame object for a specific frame number of this reader.
// NOTE: background_frame is ignored in this method (this method is only used by Effect classes)
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t clip_frame_number)
{
    // Call override of GetFrame
    return GetFrame(background_frame, clip_frame_number, NULL);
}

// Use an existing openshot::Frame object and draw this Clip's frame onto it
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t clip_frame_number, openshot::TimelineInfoStruct* options)
{
    // Check for open reader (or throw exception)
    if (!is_open)
        throw ReaderClosed("The Clip is closed. Call Open() before calling this method.");

    if (reader)
    {
        // Get frame object
        std::shared_ptr<Frame> frame = NULL;

        // Check cache
        frame = final_cache.GetFrame(clip_frame_number);
        if (!frame) {
            // Generate clip frame
            frame = GetOrCreateFrame(clip_frame_number);

            // Get frame size and frame #
            int64_t timeline_frame_number = clip_frame_number;
            QSize timeline_size(frame->GetWidth(), frame->GetHeight());
            if (background_frame) {
                // If a background frame is provided, use it instead
                timeline_frame_number = background_frame->number;
                timeline_size.setWidth(background_frame->GetWidth());
                timeline_size.setHeight(background_frame->GetHeight());
            }

            // Get time mapped frame object (used to increase speed, change direction, etc...)
            apply_timemapping(frame);

            // Apply waveform image (if any)
            apply_waveform(frame, timeline_size);

            // Apply effects BEFORE applying keyframes (if any local or global effects are used)
            apply_effects(frame, timeline_frame_number, options, true);

            // Apply keyframe / transforms to current clip image
            apply_keyframes(frame, timeline_size);

            // Apply effects AFTER applying keyframes (if any local or global effects are used)
            apply_effects(frame, timeline_frame_number, options, false);

            // Add final frame to cache (before flattening into background_frame)
            final_cache.Add(frame);
        }

        if (!background_frame) {
            // Create missing background_frame w/ transparent color (if needed)
            background_frame = std::make_shared<Frame>(frame->number, frame->GetWidth(), frame->GetHeight(),
                                                       "#00000000", frame->GetAudioSamplesCount(),
                                                       frame->GetAudioChannelsCount());
        }

        // Apply background canvas (i.e. flatten this image onto previous layer image)
        apply_background(frame, background_frame);

        // Return processed 'frame'
        return frame;
    }
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}

// Look up an effect by ID
openshot::EffectBase* Clip::GetEffect(const std::string& id)
{
    // Find the matching effect (if any)
    for (const auto& effect : effects) {
        if (effect->Id() == id) {
            return effect;
        }
    }
    return nullptr;
}

// Return the associated ParentClip (if any)
openshot::Clip* Clip::GetParentClip() {
    if (!parentObjectId.empty() && (!parentClipObject && !parentTrackedObject)) {
        // Attach parent clip OR object to this clip
        AttachToObject(parentObjectId);
    }
    return parentClipObject;
}

// Return the associated Parent Tracked Object (if any)
std::shared_ptr<openshot::TrackedObjectBase> Clip::GetParentTrackedObject() {
    if (!parentObjectId.empty() && (!parentClipObject && !parentTrackedObject)) {
        // Attach parent clip OR object to this clip
        AttachToObject(parentObjectId);
    }
    return parentTrackedObject;
}

// Get file extension
std::string Clip::get_file_extension(std::string path)
{
    // Return last part of path safely (handle filenames without a dot)
    const auto dot_pos = path.find_last_of('.');
    if (dot_pos == std::string::npos || dot_pos + 1 >= path.size()) {
        return std::string();
    }

    return path.substr(dot_pos + 1);
}
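
// e.g. get_file_extension("video.MP4") == "MP4" (callers lower-case it),
// "archive.tar.gz" yields "gz", and "noext" yields "".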

// Adjust the audio and image of a time mapped frame
void Clip::apply_timemapping(std::shared_ptr<Frame> frame)
{
    // Check for valid reader
    if (!reader)
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

    // Check for a valid time map curve
    if (time.GetLength() > 1)
    {
        const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);

        int64_t clip_frame_number = frame->number;
        int64_t new_frame_number = adjust_frame_number_minimum(time.GetLong(clip_frame_number));

        // create buffer
        juce::AudioBuffer<float> *source_samples = nullptr;

        // Get delta (difference from this frame to the next time mapped frame: Y value)
        double delta = time.GetDelta(clip_frame_number + 1);
        const bool prev_is_increasing = time.IsIncreasing(clip_frame_number);
        const bool is_increasing = time.IsIncreasing(clip_frame_number + 1);

        // Determine length of source audio (in samples)
        // A delta of 1.0 == normal expected samples
        // A delta of 0.5 == 50% of normal expected samples
        // A delta of 2.0 == 200% of normal expected samples
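        // Illustrative example (assumed values): at 48 kHz / 25 fps a frame
        // normally carries 1920 samples, so delta == 2.0 (2x playback) pulls
        // ~3840 source samples, which are resampled back down to 1920 below.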
        int target_sample_count = Frame::GetSamplesPerFrame(adjust_timeline_framenumber(clip_frame_number), Reader()->info.fps,
                                                            Reader()->info.sample_rate,
                                                            Reader()->info.channels);
        int source_sample_count = round(target_sample_count * fabs(delta));

        // Determine starting audio location
        AudioLocation location;
        if (previous_location.frame == 0 || abs(new_frame_number - previous_location.frame) > 2 || prev_is_increasing != is_increasing) {
            // No previous location OR gap detected
            location.frame = new_frame_number;
            location.sample_start = 0;

            // Create / Reset resampler
            // We don't want to interpolate between unrelated audio data
            if (resampler) {
                delete resampler;
                resampler = nullptr;
            }
            // Init resampler with # channels from Reader (should match the timeline)
            resampler = new AudioResampler(Reader()->info.channels);

            // Allocate buffer of silence to initialize some data inside the resampler
            // To prevent it from becoming input limited
            juce::AudioBuffer<float> init_samples(Reader()->info.channels, 64);
            init_samples.clear();
            resampler->SetBuffer(&init_samples, 1.0);
            resampler->GetResampledBuffer();

        } else {
            // Use previous location
            location = previous_location;
        }

        if (source_sample_count <= 0) {
            // Add silence and bail (we don't need any samples)
            frame->AddAudioSilence(target_sample_count);
            return;
        }

        // Allocate a new sample buffer for these delta frames
        source_samples = new juce::AudioBuffer<float>(Reader()->info.channels, source_sample_count);
        source_samples->clear();

        // Determine ending audio location
        int remaining_samples = source_sample_count;
        int source_pos = 0;
        while (remaining_samples > 0) {
            std::shared_ptr<Frame> source_frame = GetOrCreateFrame(location.frame, false);
            int frame_sample_count = source_frame->GetAudioSamplesCount() - location.sample_start;

            // Inform FrameMapper of the direction for THIS mapper frame
            if (auto *fm = dynamic_cast<FrameMapper*>(reader)) {
                fm->SetDirectionHint(is_increasing);
            }
            source_frame->SetAudioDirection(is_increasing);

            if (frame_sample_count == 0) {
                // No samples found in source frame (fill with silence)
                if (is_increasing) {
                    location.frame++;
                } else {
                    location.frame--;
                }
                location.sample_start = 0;
                break;
            }
            if (remaining_samples - frame_sample_count >= 0) {
                // Use all frame samples & increment location
                for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
                    source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.sample_start, frame_sample_count, 1.0f);
                }
                if (is_increasing) {
                    location.frame++;
                } else {
                    location.frame--;
                }
                location.sample_start = 0;
                remaining_samples -= frame_sample_count;
                source_pos += frame_sample_count;

            } else {
                // Use just what is needed (and advance the sample start)
                for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
                    source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.sample_start, remaining_samples, 1.0f);
                }
                location.sample_start += remaining_samples;
                source_pos += remaining_samples;
                remaining_samples = 0;
            }

        }

        // Resize audio for the current frame object and fill it with silence;
        // this is about to be overwritten with actual audio data (possibly resampled)
        frame->AddAudioSilence(target_sample_count);

        if (source_sample_count != target_sample_count) {
            // Resample audio (if needed)
            double resample_ratio = double(source_sample_count) / double(target_sample_count);
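            // e.g. (illustrative) 3840 source samples for a 1920-sample target
            // gives resample_ratio == 2.0 (audio plays back at double speed)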
            resampler->SetBuffer(source_samples, resample_ratio);

            // Resample the data
            juce::AudioBuffer<float> *resampled_buffer = resampler->GetResampledBuffer();

            // Fill the frame with resampled data
            for (int channel = 0; channel < Reader()->info.channels; channel++) {
                // Add the resampled samples to the frame object
                frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, 0), std::min(resampled_buffer->getNumSamples(), target_sample_count), 1.0f);
            }
        } else {
            // Fill the frame with the source samples (1:1, no resampling needed)
            for (int channel = 0; channel < Reader()->info.channels; channel++) {
                frame->AddAudio(true, channel, 0, source_samples->getReadPointer(channel, 0), target_sample_count, 1.0f);
            }
        }

        // Clean up
        delete source_samples;

        // Set previous location
        previous_location = location;
    }
}

// Adjust frame number minimum value
int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
{
    // Never return a frame number 0 or below
    if (frame_number < 1)
        return 1;
    else
        return frame_number;
}

// Get or generate a blank frame
std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number, bool enable_time)
{
    try {
        // Init to requested frame
        int64_t clip_frame_number = adjust_frame_number_minimum(number);
        bool is_increasing = true;

        // Adjust for time-mapping (if any)
        if (enable_time && time.GetLength() > 1) {
            is_increasing = time.IsIncreasing(clip_frame_number + 1);
            const int64_t time_frame_number = adjust_frame_number_minimum(time.GetLong(clip_frame_number));
            if (auto *fm = dynamic_cast<FrameMapper*>(reader)) {
                // Inform FrameMapper which direction this mapper frame is being requested
                fm->SetDirectionHint(is_increasing);
            }
            clip_frame_number = time_frame_number;
        }

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod(
            "Clip::GetOrCreateFrame (from reader)",
            "number", number, "clip_frame_number", clip_frame_number);

        // Attempt to get a frame (but this could fail if a reader has just been closed)
        auto reader_frame = reader->GetFrame(clip_frame_number);
        if (reader_frame) {
            // Override frame # (due to time-mapping might change it)
            reader_frame->number = number;
            reader_frame->SetAudioDirection(is_increasing);

            // Create and return a new copy of the reader frame.
            // This allows a clip to modify the pixels and audio of this frame without
            // changing the underlying reader's frame data
            auto reader_copy = std::make_shared<Frame>(*reader_frame.get());
            if (has_video.GetInt(number) == 0) {
                // No video, so add transparent pixels
                reader_copy->AddColor(QColor(Qt::transparent));
            }
            if (has_audio.GetInt(number) == 0 || number > reader->info.video_length) {
                // No audio, so include silence (also, mute audio if past end of reader)
                reader_copy->AddAudioSilence(reader_copy->GetAudioSamplesCount());
            }
            return reader_copy;
        }

    } catch (const ReaderClosed & e) {
        // ...
    } catch (const OutOfBoundsFrame & e) {
        // ...
    }

    // Estimate # of samples needed for this frame
    int estimated_samples_in_frame = Frame::GetSamplesPerFrame(number, reader->info.fps, reader->info.sample_rate, reader->info.channels);

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod(
        "Clip::GetOrCreateFrame (create blank)",
        "number", number,
        "estimated_samples_in_frame", estimated_samples_in_frame);

    // Create blank frame
    auto new_frame = std::make_shared<Frame>(
        number, reader->info.width, reader->info.height,
        "#000000", estimated_samples_in_frame, reader->info.channels);
    new_frame->SampleRate(reader->info.sample_rate);
    new_frame->ChannelsLayout(reader->info.channel_layout);
    new_frame->AddAudioSilence(estimated_samples_in_frame);
    return new_frame;
}

// Generate JSON string of this object
std::string Clip::Json() const {

    // Return formatted string
    return JsonValue().toStyledString();
}

// Get all properties for a specific frame
std::string Clip::PropertiesJSON(int64_t requested_frame) const {

    // Generate JSON properties list
    Json::Value root;
    root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
    root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
    root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
    root["gravity"] = add_property_json("Gravity", gravity, "int", "", NULL, 0, 8, false, requested_frame);
    root["scale"] = add_property_json("Scale", scale, "int", "", NULL, 0, 3, false, requested_frame);
    root["display"] = add_property_json("Frame Number", display, "int", "", NULL, 0, 3, false, requested_frame);
    root["mixing"] = add_property_json("Volume Mixing", mixing, "int", "", NULL, 0, 2, false, requested_frame);
    root["composite"] = add_property_json("Composite", composite, "int", "", NULL, 0, composite_choices_count - 1, false, requested_frame);
    root["waveform"] = add_property_json("Waveform", waveform, "int", "", NULL, 0, 1, false, requested_frame);
    root["parentObjectId"] = add_property_json("Parent", 0.0, "string", parentObjectId, NULL, -1, -1, false, requested_frame);

    // Add gravity choices (dropdown style)
    root["gravity"]["choices"].append(add_property_choice_json("Top Left", GRAVITY_TOP_LEFT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Top Center", GRAVITY_TOP, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Top Right", GRAVITY_TOP_RIGHT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Left", GRAVITY_LEFT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Center", GRAVITY_CENTER, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Right", GRAVITY_RIGHT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Bottom Left", GRAVITY_BOTTOM_LEFT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Bottom Center", GRAVITY_BOTTOM, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Bottom Right", GRAVITY_BOTTOM_RIGHT, gravity));

    // Add scale choices (dropdown style)
    root["scale"]["choices"].append(add_property_choice_json("Crop", SCALE_CROP, scale));
    root["scale"]["choices"].append(add_property_choice_json("Best Fit", SCALE_FIT, scale));
    root["scale"]["choices"].append(add_property_choice_json("Stretch", SCALE_STRETCH, scale));
    root["scale"]["choices"].append(add_property_choice_json("None", SCALE_NONE, scale));

    // Add frame number display choices (dropdown style)
    root["display"]["choices"].append(add_property_choice_json("None", FRAME_DISPLAY_NONE, display));
    root["display"]["choices"].append(add_property_choice_json("Clip", FRAME_DISPLAY_CLIP, display));
    root["display"]["choices"].append(add_property_choice_json("Timeline", FRAME_DISPLAY_TIMELINE, display));
    root["display"]["choices"].append(add_property_choice_json("Both", FRAME_DISPLAY_BOTH, display));

    // Add volume mixing choices (dropdown style)
    root["mixing"]["choices"].append(add_property_choice_json("None", VOLUME_MIX_NONE, mixing));
    root["mixing"]["choices"].append(add_property_choice_json("Average", VOLUME_MIX_AVERAGE, mixing));
    root["mixing"]["choices"].append(add_property_choice_json("Reduce", VOLUME_MIX_REDUCE, mixing));

    // Add composite choices (dropdown style)
    for (int i = 0; i < composite_choices_count; ++i)
        root["composite"]["choices"].append(add_property_choice_json(composite_choices[i].name, composite_choices[i].value, composite));

    // Add waveform choices (dropdown style)
    root["waveform"]["choices"].append(add_property_choice_json("Yes", true, waveform));
    root["waveform"]["choices"].append(add_property_choice_json("No", false, waveform));

    // Add the parentClipObject's properties
    if (parentClipObject)
    {
        // Convert Clip's frame position to Timeline's frame position
        long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
        long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
        double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;
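        // e.g. (illustrative) Position() == 10s and Start() == 2s at 30 fps:
        // clip_start_position == 301 and clip_start_frame == 61, so requested
        // clip frame 1 maps to timeline frame 241.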

        // Correct the parent Clip Object properties by the clip's reference system
        float parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
        float parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
        float parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
        float parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
        float parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
        float parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
        float parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);

        // Add the parent Clip Object properties to JSON
        root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
        root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
        root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
        root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
        root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
        root["shear_x"] = add_property_json("Shear X", parentObject_shear_x, "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
        root["shear_y"] = add_property_json("Shear Y", parentObject_shear_y, "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
    }
    else
    {
        // Add this clip's own properties to JSON
        root["location_x"] = add_property_json("Location X", location_x.GetValue(requested_frame), "float", "", &location_x, -1.0, 1.0, false, requested_frame);
        root["location_y"] = add_property_json("Location Y", location_y.GetValue(requested_frame), "float", "", &location_y, -1.0, 1.0, false, requested_frame);
        root["scale_x"] = add_property_json("Scale X", scale_x.GetValue(requested_frame), "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
        root["scale_y"] = add_property_json("Scale Y", scale_y.GetValue(requested_frame), "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
        root["rotation"] = add_property_json("Rotation", rotation.GetValue(requested_frame), "float", "", &rotation, -360, 360, false, requested_frame);
        root["shear_x"] = add_property_json("Shear X", shear_x.GetValue(requested_frame), "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
        root["shear_y"] = add_property_json("Shear Y", shear_y.GetValue(requested_frame), "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
    }

    // Keyframes
    root["alpha"] = add_property_json("Alpha", alpha.GetValue(requested_frame), "float", "", &alpha, 0.0, 1.0, false, requested_frame);
    root["origin_x"] = add_property_json("Origin X", origin_x.GetValue(requested_frame), "float", "", &origin_x, 0.0, 1.0, false, requested_frame);
    root["origin_y"] = add_property_json("Origin Y", origin_y.GetValue(requested_frame), "float", "", &origin_y, 0.0, 1.0, false, requested_frame);
    root["volume"] = add_property_json("Volume", volume.GetValue(requested_frame), "float", "", &volume, 0.0, 1.0, false, requested_frame);
    root["time"] = add_property_json("Time", time.GetValue(requested_frame), "float", "", &time, 0.0, 30 * 60 * 60 * 48, false, requested_frame);
    root["channel_filter"] = add_property_json("Channel Filter", channel_filter.GetValue(requested_frame), "int", "", &channel_filter, -1, 10, false, requested_frame);
    root["channel_mapping"] = add_property_json("Channel Mapping", channel_mapping.GetValue(requested_frame), "int", "", &channel_mapping, -1, 10, false, requested_frame);
    root["has_audio"] = add_property_json("Enable Audio", has_audio.GetValue(requested_frame), "int", "", &has_audio, -1, 1, false, requested_frame);
    root["has_video"] = add_property_json("Enable Video", has_video.GetValue(requested_frame), "int", "", &has_video, -1, 1, false, requested_frame);

    // Add enable audio/video choices (dropdown style)
    root["has_audio"]["choices"].append(add_property_choice_json("Auto", -1, has_audio.GetValue(requested_frame)));
    root["has_audio"]["choices"].append(add_property_choice_json("Off", 0, has_audio.GetValue(requested_frame)));
    root["has_audio"]["choices"].append(add_property_choice_json("On", 1, has_audio.GetValue(requested_frame)));
    root["has_video"]["choices"].append(add_property_choice_json("Auto", -1, has_video.GetValue(requested_frame)));
    root["has_video"]["choices"].append(add_property_choice_json("Off", 0, has_video.GetValue(requested_frame)));
    root["has_video"]["choices"].append(add_property_choice_json("On", 1, has_video.GetValue(requested_frame)));

    root["wave_color"] = add_property_json("Wave Color", 0.0, "color", "", &wave_color.red, 0, 255, false, requested_frame);
    root["wave_color"]["red"] = add_property_json("Red", wave_color.red.GetValue(requested_frame), "float", "", &wave_color.red, 0, 255, false, requested_frame);
    root["wave_color"]["blue"] = add_property_json("Blue", wave_color.blue.GetValue(requested_frame), "float", "", &wave_color.blue, 0, 255, false, requested_frame);
    root["wave_color"]["green"] = add_property_json("Green", wave_color.green.GetValue(requested_frame), "float", "", &wave_color.green, 0, 255, false, requested_frame);
    root["wave_color"]["alpha"] = add_property_json("Alpha", wave_color.alpha.GetValue(requested_frame), "float", "", &wave_color.alpha, 0, 255, false, requested_frame);

    // Return formatted string
    return root.toStyledString();
}

// Generate Json::Value for this object
Json::Value Clip::JsonValue() const {

    // Create root json object
    Json::Value root = ClipBase::JsonValue(); // get parent properties
    root["parentObjectId"] = parentObjectId;
    root["gravity"] = gravity;
    root["scale"] = scale;
    root["anchor"] = anchor;
    root["display"] = display;
    root["mixing"] = mixing;
    root["composite"] = composite;
    root["waveform"] = waveform;
    root["scale_x"] = scale_x.JsonValue();
    root["scale_y"] = scale_y.JsonValue();
    root["location_x"] = location_x.JsonValue();
    root["location_y"] = location_y.JsonValue();
    root["alpha"] = alpha.JsonValue();
    root["rotation"] = rotation.JsonValue();
    root["time"] = time.JsonValue();
    root["volume"] = volume.JsonValue();
    root["wave_color"] = wave_color.JsonValue();
    root["shear_x"] = shear_x.JsonValue();
    root["shear_y"] = shear_y.JsonValue();
    root["origin_x"] = origin_x.JsonValue();
    root["origin_y"] = origin_y.JsonValue();
    root["channel_filter"] = channel_filter.JsonValue();
    root["channel_mapping"] = channel_mapping.JsonValue();
    root["has_audio"] = has_audio.JsonValue();
    root["has_video"] = has_video.JsonValue();
    root["perspective_c1_x"] = perspective_c1_x.JsonValue();
    root["perspective_c1_y"] = perspective_c1_y.JsonValue();
    root["perspective_c2_x"] = perspective_c2_x.JsonValue();
    root["perspective_c2_y"] = perspective_c2_y.JsonValue();
    root["perspective_c3_x"] = perspective_c3_x.JsonValue();
    root["perspective_c3_y"] = perspective_c3_y.JsonValue();
    root["perspective_c4_x"] = perspective_c4_x.JsonValue();
    root["perspective_c4_y"] = perspective_c4_y.JsonValue();

    // Add array of effects
    root["effects"] = Json::Value(Json::arrayValue);

    // loop through effects
    for (auto existing_effect : effects)
    {
        root["effects"].append(existing_effect->JsonValue());
    }

    if (reader)
        root["reader"] = reader->JsonValue();
    else
        root["reader"] = Json::Value(Json::objectValue);

    // return JsonValue
    return root;
}

// Load JSON string into this object
void Clip::SetJson(const std::string value) {

    // Parse JSON string into JSON objects
    try
    {
        const Json::Value root = openshot::stringToJson(value);
        // Set all values that match
        SetJsonValue(root);
    }
    catch (const std::exception& e)
    {
        // Error parsing JSON (or missing keys)
        throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
    }
}

// Load Json::Value into this object
void Clip::SetJsonValue(const Json::Value root) {

    // Set parent data
    ClipBase::SetJsonValue(root);

    // Set data from Json (if key is found)
    if (!root["parentObjectId"].isNull()){
        parentObjectId = root["parentObjectId"].asString();
        if (!parentObjectId.empty()){
            AttachToObject(parentObjectId);
        } else{
            parentTrackedObject = nullptr;
            parentClipObject = NULL;
        }
    }
    if (!root["gravity"].isNull())
        gravity = (GravityType) root["gravity"].asInt();
    if (!root["scale"].isNull())
        scale = (ScaleType) root["scale"].asInt();
    if (!root["anchor"].isNull())
        anchor = (AnchorType) root["anchor"].asInt();
    if (!root["display"].isNull())
        display = (FrameDisplayType) root["display"].asInt();
    if (!root["mixing"].isNull())
        mixing = (VolumeMixType) root["mixing"].asInt();
    if (!root["composite"].isNull())
        composite = (CompositeType) root["composite"].asInt();
    if (!root["waveform"].isNull())
        waveform = root["waveform"].asBool();
    if (!root["scale_x"].isNull())
        scale_x.SetJsonValue(root["scale_x"]);
    if (!root["scale_y"].isNull())
        scale_y.SetJsonValue(root["scale_y"]);
    if (!root["location_x"].isNull())
        location_x.SetJsonValue(root["location_x"]);
    if (!root["location_y"].isNull())
        location_y.SetJsonValue(root["location_y"]);
    if (!root["alpha"].isNull())
        alpha.SetJsonValue(root["alpha"]);
    if (!root["rotation"].isNull())
        rotation.SetJsonValue(root["rotation"]);
    if (!root["time"].isNull())
        time.SetJsonValue(root["time"]);
    if (!root["volume"].isNull())
        volume.SetJsonValue(root["volume"]);
    if (!root["wave_color"].isNull())
        wave_color.SetJsonValue(root["wave_color"]);
    if (!root["shear_x"].isNull())
        shear_x.SetJsonValue(root["shear_x"]);
    if (!root["shear_y"].isNull())
        shear_y.SetJsonValue(root["shear_y"]);
    if (!root["origin_x"].isNull())
        origin_x.SetJsonValue(root["origin_x"]);
    if (!root["origin_y"].isNull())
        origin_y.SetJsonValue(root["origin_y"]);
    if (!root["channel_filter"].isNull())
        channel_filter.SetJsonValue(root["channel_filter"]);
    if (!root["channel_mapping"].isNull())
        channel_mapping.SetJsonValue(root["channel_mapping"]);
    if (!root["has_audio"].isNull())
        has_audio.SetJsonValue(root["has_audio"]);
    if (!root["has_video"].isNull())
        has_video.SetJsonValue(root["has_video"]);
    if (!root["perspective_c1_x"].isNull())
        perspective_c1_x.SetJsonValue(root["perspective_c1_x"]);
    if (!root["perspective_c1_y"].isNull())
        perspective_c1_y.SetJsonValue(root["perspective_c1_y"]);
    if (!root["perspective_c2_x"].isNull())
        perspective_c2_x.SetJsonValue(root["perspective_c2_x"]);
    if (!root["perspective_c2_y"].isNull())
        perspective_c2_y.SetJsonValue(root["perspective_c2_y"]);
    if (!root["perspective_c3_x"].isNull())
        perspective_c3_x.SetJsonValue(root["perspective_c3_x"]);
    if (!root["perspective_c3_y"].isNull())
        perspective_c3_y.SetJsonValue(root["perspective_c3_y"]);
    if (!root["perspective_c4_x"].isNull())
        perspective_c4_x.SetJsonValue(root["perspective_c4_x"]);
    if (!root["perspective_c4_y"].isNull())
        perspective_c4_y.SetJsonValue(root["perspective_c4_y"]);
    if (!root["effects"].isNull()) {

        // Clear existing effects
        effects.clear();

        // loop through effects
        for (const auto& existing_effect : root["effects"]) {
            // Skip NULL nodes
            if (existing_effect.isNull()) {
                continue;
            }

            // Create Effect
            EffectBase *e = NULL;
            if (!existing_effect["type"].isNull()) {

                // Create instance of effect
                if ( (e = EffectInfo().CreateEffect(existing_effect["type"].asString()))) {

                    // Load Json into Effect
                    e->SetJsonValue(existing_effect);

                    // Add Effect to Timeline
                    AddEffect(e);
                }
            }
        }
    }
    if (!root["reader"].isNull()) // does Json contain a reader?
    {
        if (!root["reader"]["type"].isNull()) // does the reader Json contain a 'type'?
        {
            // Close previous reader (if any)
            bool already_open = false;
            if (reader)
            {
                // Track if reader was open
                already_open = reader->IsOpen();

                // Close and delete existing allocated reader (if any)
                Reader(NULL);
            }

            // Create new reader (and load properties)
            std::string type = root["reader"]["type"].asString();

            if (type == "FFmpegReader") {

                // Create new reader
                reader = new openshot::FFmpegReader(root["reader"]["path"].asString(), false);
                reader->SetJsonValue(root["reader"]);

            } else if (type == "QtImageReader") {

                // Create new reader
                reader = new openshot::QtImageReader(root["reader"]["path"].asString(), false);
                reader->SetJsonValue(root["reader"]);

#ifdef USE_IMAGEMAGICK
            } else if (type == "ImageReader") {

                // Create new reader
                reader = new ImageReader(root["reader"]["path"].asString(), false);
                reader->SetJsonValue(root["reader"]);

            } else if (type == "TextReader") {

                // Create new reader
                reader = new TextReader();
                reader->SetJsonValue(root["reader"]);
#endif

            } else if (type == "ChunkReader") {

                // Create new reader
                reader = new openshot::ChunkReader(root["reader"]["path"].asString(), (ChunkVersion) root["reader"]["chunk_version"].asInt());
                reader->SetJsonValue(root["reader"]);

            } else if (type == "DummyReader") {

                // Create new reader
                reader = new openshot::DummyReader();
                reader->SetJsonValue(root["reader"]);

            } else if (type == "Timeline") {

                // Create new reader (always load from file again)
                // This prevents FrameMappers from being loaded on accident
                reader = new openshot::Timeline(root["reader"]["path"].asString(), true);
            }

            // mark as managed reader and set parent
            if (reader) {
                reader->ParentClip(this);
                allocated_reader = reader;
            }

            // Re-Open reader (if needed, and if a known reader type was created)
            if (reader && already_open) {
                reader->Open();
            }
        }
    }

    // Clear cache (it might have changed)
    final_cache.Clear();
}

// Sort effects by order
void Clip::sort_effects()
{
    // sort effects
    effects.sort(CompareClipEffects());
}

// Add an effect to the clip
void Clip::AddEffect(EffectBase* effect)
{
    // Set parent clip pointer
    effect->ParentClip(this);

    // Add effect to list
    effects.push_back(effect);

    // Sort effects
    sort_effects();

    // Get the parent timeline of this clip
    Timeline* parentTimeline = static_cast<Timeline *>(ParentTimeline());

    if (parentTimeline)
        effect->ParentTimeline(parentTimeline);

#ifdef USE_OPENCV
    // Add Tracked Object to Timeline
    if (effect->info.has_tracked_object){

        // Check if this clip has a parent timeline
        if (parentTimeline){

            effect->ParentTimeline(parentTimeline);

            // Iterate through effect's vector of Tracked Objects
            for (auto const& trackedObject : effect->trackedObjects){

                // Cast the Tracked Object as TrackedObjectBBox
                std::shared_ptr<TrackedObjectBBox> trackedObjectBBox = std::static_pointer_cast<TrackedObjectBBox>(trackedObject.second);

                // Set the Tracked Object's parent clip to this
                trackedObjectBBox->ParentClip(this);

                // Add the Tracked Object to the timeline
                parentTimeline->AddTrackedObject(trackedObjectBBox);
            }
        }
    }
#endif

    // Clear cache (it might have changed)
    final_cache.Clear();
}

// Remove an effect from the clip
void Clip::RemoveEffect(EffectBase* effect)
{
    effects.remove(effect);

    // Clear cache (it might have changed)
    final_cache.Clear();
}

// Apply background image to the current clip image (i.e. flatten this image onto previous layer)
void Clip::apply_background(std::shared_ptr<openshot::Frame> frame, std::shared_ptr<openshot::Frame> background_frame) {
    // Add background canvas
    std::shared_ptr<QImage> background_canvas = background_frame->GetImage();
    QPainter painter(background_canvas.get());

    // Composite a new layer onto the image
    painter.setCompositionMode(static_cast<QPainter::CompositionMode>(composite));
    painter.drawImage(0, 0, *frame->GetImage());
    painter.end();

    // Add new QImage to frame
    frame->AddImage(background_canvas);
}

// Apply effects to the source frame (if any)
void Clip::apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number, TimelineInfoStruct* options, bool before_keyframes)
{
    for (auto effect : effects)
    {
        // Apply the effect to this frame
        if (effect->info.apply_before_clip && before_keyframes) {
            effect->GetFrame(frame, frame->number);
        } else if (!effect->info.apply_before_clip && !before_keyframes) {
            effect->GetFrame(frame, frame->number);
        }
    }

    if (timeline != NULL && options != NULL) {
        // Apply global timeline effects (i.e. transitions & masks... if any)
        Timeline* timeline_instance = static_cast<Timeline*>(timeline);
        options->is_before_clip_keyframes = before_keyframes;
        timeline_instance->apply_effects(frame, timeline_frame_number, Layer(), options);
    }
}

// Compare 2 floating point numbers for equality
bool Clip::isNear(double a, double b)
{
    return fabs(a - b) < 0.000001;
}
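
// NOTE: this 1e-6 tolerance lets get_transform() below skip translate/rotate/
// shear steps whose keyframe values are effectively zero, keeping the
// QTransform an identity whenever possible.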

// Apply keyframes to the source frame (if any)
void Clip::apply_keyframes(std::shared_ptr<Frame> frame, QSize timeline_size) {
    // Skip processing if video is disabled or this is an audio-only frame (no waveform in use)
    if (!frame->has_image_data) {
        // Skip the rest of the image processing for performance reasons
        return;
    }

    // Get image from clip, and create transparent background image
    std::shared_ptr<QImage> source_image = frame->GetImage();
    std::shared_ptr<QImage> background_canvas = std::make_shared<QImage>(timeline_size.width(),
                                                                         timeline_size.height(),
                                                                         QImage::Format_RGBA8888_Premultiplied);
    background_canvas->fill(QColor(Qt::transparent));

    // Get transform from clip's keyframes
    QTransform transform = get_transform(frame, background_canvas->width(), background_canvas->height());

    // Load timeline's new frame image into a QPainter
    QPainter painter(background_canvas.get());
    painter.setRenderHint(QPainter::TextAntialiasing, true);
    if (!transform.isIdentity()) {
        painter.setRenderHint(QPainter::SmoothPixmapTransform, true);
    }
    // Apply transform (translate, rotate, scale)
    painter.setTransform(transform);

    // Composite a new layer onto the image
    painter.setCompositionMode(static_cast<QPainter::CompositionMode>(composite));

    // Apply opacity via painter instead of per-pixel alpha manipulation
    const float alpha_value = alpha.GetValue(frame->number);
    if (alpha_value != 1.0f) {
        painter.setOpacity(alpha_value);
        painter.drawImage(0, 0, *source_image);
        // Reset so any subsequent drawing (e.g., overlays) isn't faded
        painter.setOpacity(1.0);
    } else {
        painter.drawImage(0, 0, *source_image);
    }

    if (timeline) {
        Timeline *t = static_cast<Timeline *>(timeline);

        // Draw frame #'s on top of image (if needed)
        if (display != FRAME_DISPLAY_NONE) {
            std::stringstream frame_number_str;
            switch (display) {
                case (FRAME_DISPLAY_NONE):
                    // This is only here to prevent unused-enum warnings
                    break;

                case (FRAME_DISPLAY_CLIP):
                    frame_number_str << frame->number;
                    break;

                case (FRAME_DISPLAY_TIMELINE):
                    frame_number_str << round((Position() - Start()) * t->info.fps.ToFloat()) + frame->number;
                    break;

                case (FRAME_DISPLAY_BOTH):
                    frame_number_str << round((Position() - Start()) * t->info.fps.ToFloat()) + frame->number << " (" << frame->number << ")";
                    break;
            }

            // Draw frame number on top of image
            painter.setPen(QColor("#ffffff"));
            painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
        }
    }
    painter.end();

    // Add new QImage to frame
    frame->AddImage(background_canvas);
}

// Apply waveform image to the source frame (if any)
void Clip::apply_waveform(std::shared_ptr<Frame> frame, QSize timeline_size) {

    if (!Waveform()) {
        // Exit if no waveform is needed
        return;
    }

    // Get image from clip
    std::shared_ptr<QImage> source_image = frame->GetImage();

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_waveform (Generate Waveform Image)",
        "frame->number", frame->number,
        "Waveform()", Waveform(),
        "width", timeline_size.width(),
        "height", timeline_size.height());

    // Get the color of the waveform
    int red = wave_color.red.GetInt(frame->number);
    int green = wave_color.green.GetInt(frame->number);
    int blue = wave_color.blue.GetInt(frame->number);
    int alpha = wave_color.alpha.GetInt(frame->number);

    // Generate Waveform Dynamically (the size of the timeline)
    source_image = frame->GetWaveform(timeline_size.width(), timeline_size.height(), red, green, blue, alpha);
    frame->AddImage(source_image);
}

// Scale a source size to a target size (given a specific scale-type)
QSize Clip::scale_size(QSize source_size, ScaleType source_scale, int target_width, int target_height) {
    switch (source_scale)
    {
        case (SCALE_FIT): {
            source_size.scale(target_width, target_height, Qt::KeepAspectRatio);
            break;
        }
        case (SCALE_STRETCH): {
            source_size.scale(target_width, target_height, Qt::IgnoreAspectRatio);
            break;
        }
        case (SCALE_CROP): {
            source_size.scale(target_width, target_height, Qt::KeepAspectRatioByExpanding);
            break;
        }
        case (SCALE_NONE): {
            // Leave source size untouched (also prevents unused-enum warnings)
            break;
        }
    }

    return source_size;
}
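
// e.g. (illustrative) scale_size(QSize(640, 480), SCALE_FIT, 1920, 1080)
// returns 1440x1080: the 4:3 source is height-limited inside the 16:9 target.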
1432 
1433 // Get QTransform from keyframes
1434 QTransform Clip::get_transform(std::shared_ptr<Frame> frame, int width, int height)
1435 {
1436  // Get image from clip
1437  std::shared_ptr<QImage> source_image = frame->GetImage();
1438 
1439  /* RESIZE SOURCE IMAGE - based on scale type */
1440  QSize source_size = scale_size(source_image->size(), scale, width, height);
1441 
1442  // Initialize parent object's properties (Clip or Tracked Object)
1443  float parentObject_location_x = 0.0;
1444  float parentObject_location_y = 0.0;
1445  float parentObject_scale_x = 1.0;
1446  float parentObject_scale_y = 1.0;
1447  float parentObject_shear_x = 0.0;
1448  float parentObject_shear_y = 0.0;
1449  float parentObject_rotation = 0.0;
1450 
1451  // Get the parentClipObject properties
1452  if (GetParentClip()){
1453  // Get the start trim position of the parent clip
1454  long parent_start_offset = parentClipObject->Start() * info.fps.ToDouble();
1455  long parent_frame_number = frame->number + parent_start_offset;
1456 
1457  // Get parent object's properties (Clip)
1458  parentObject_location_x = parentClipObject->location_x.GetValue(parent_frame_number);
1459  parentObject_location_y = parentClipObject->location_y.GetValue(parent_frame_number);
1460  parentObject_scale_x = parentClipObject->scale_x.GetValue(parent_frame_number);
1461  parentObject_scale_y = parentClipObject->scale_y.GetValue(parent_frame_number);
1462  parentObject_shear_x = parentClipObject->shear_x.GetValue(parent_frame_number);
1463  parentObject_shear_y = parentClipObject->shear_y.GetValue(parent_frame_number);
1464  parentObject_rotation = parentClipObject->rotation.GetValue(parent_frame_number);
1465  }
1466 
1467  // Get the parentTrackedObject properties
1468  if (GetParentTrackedObject()){
1469  // Get the attached object's parent clip's properties
1470  Clip* parentClip = (Clip*) parentTrackedObject->ParentClip();
1471  if (parentClip)
1472  {
1473  // Get the start trim position of the parent clip
1474  long parent_start_offset = parentClip->Start() * info.fps.ToDouble();
1475  long parent_frame_number = frame->number + parent_start_offset;
1476 
1477  // Access the parentTrackedObject's properties
1478  std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parent_frame_number);
1479 
1480  // Get actual scaled parent size
1481  QSize parent_size = scale_size(QSize(parentClip->info.width, parentClip->info.height),
1482  parentClip->scale, width, height);
1483 
1484  // Get actual scaled tracked object size
1485  int trackedWidth = trackedObjectProperties["w"] * trackedObjectProperties["sx"] * parent_size.width() *
1486  parentClip->scale_x.GetValue(parent_frame_number);
1487  int trackedHeight = trackedObjectProperties["h"] * trackedObjectProperties["sy"] * parent_size.height() *
1488  parentClip->scale_y.GetValue(parent_frame_number);
1489 
1490  // Scale the clip source_size based on the actual tracked object size
1491  source_size = scale_size(source_size, scale, trackedWidth, trackedHeight);
1492 
1493  // Update parentObject's properties based on the tracked object's properties and parent clip's scale
1494  parentObject_location_x = parentClip->location_x.GetValue(parent_frame_number) + ((trackedObjectProperties["cx"] - 0.5) * parentClip->scale_x.GetValue(parent_frame_number));
1495  parentObject_location_y = parentClip->location_y.GetValue(parent_frame_number) + ((trackedObjectProperties["cy"] - 0.5) * parentClip->scale_y.GetValue(parent_frame_number));
1496  parentObject_rotation = trackedObjectProperties["r"] + parentClip->rotation.GetValue(parent_frame_number);
1497  }
1498  }
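  // Note (editorial): as used above, the GetBoxValues() map supplies the
  // tracked box as normalized values keyed "cx"/"cy" (center), "w"/"h"
  // (size), "sx"/"sy" (scale), and "r" (rotation in degrees).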
1499 
1500  /* GRAVITY LOCATION - Initialize X & Y to the correct values (before applying location curves) */
1501  float x = 0.0; // left
1502  float y = 0.0; // top
1503 
1504  // Adjust size for scale x and scale y
1505  float sx = scale_x.GetValue(frame->number); // percentage X scale
1506  float sy = scale_y.GetValue(frame->number); // percentage Y scale
1507 
1508  // Combine the clip's scale with the parentObject's scale
1509  if (parentObject_scale_x != 0.0 && parentObject_scale_y != 0.0) {
1510  sx *= parentObject_scale_x;
1511  sy *= parentObject_scale_y;
1512  }
1513 
1514  float scaled_source_width = source_size.width() * sx;
1515  float scaled_source_height = source_size.height() * sy;
1516 
1517  switch (gravity)
1518  {
1519  case (GRAVITY_TOP_LEFT):
1520  // This is only here to prevent unused-enum warnings
1521  break;
1522  case (GRAVITY_TOP):
1523  x = (width - scaled_source_width) / 2.0; // center
1524  break;
1525  case (GRAVITY_TOP_RIGHT):
1526  x = width - scaled_source_width; // right
1527  break;
1528  case (GRAVITY_LEFT):
1529  y = (height - scaled_source_height) / 2.0; // center
1530  break;
1531  case (GRAVITY_CENTER):
1532  x = (width - scaled_source_width) / 2.0; // center
1533  y = (height - scaled_source_height) / 2.0; // center
1534  break;
1535  case (GRAVITY_RIGHT):
1536  x = width - scaled_source_width; // right
1537  y = (height - scaled_source_height) / 2.0; // center
1538  break;
1539  case (GRAVITY_BOTTOM_LEFT):
1540  y = (height - scaled_source_height); // bottom
1541  break;
1542  case (GRAVITY_BOTTOM):
1543  x = (width - scaled_source_width) / 2.0; // center
1544  y = (height - scaled_source_height); // bottom
1545  break;
1546  case (GRAVITY_BOTTOM_RIGHT):
1547  x = width - scaled_source_width; // right
1548  y = (height - scaled_source_height); // bottom
1549  break;
1550  }
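  // Worked example (illustrative only): on a 1920x1080 canvas with a
  // 1280x720 scaled source, GRAVITY_CENTER gives x = (1920-1280)/2 = 320
  // and y = (1080-720)/2 = 180, while GRAVITY_BOTTOM_RIGHT gives
  // x = 1920-1280 = 640 and y = 1080-720 = 360.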
1551 
1552  // Debug output
1553  ZmqLogger::Instance()->AppendDebugMethod(
1554  "Clip::get_transform (Gravity)",
1555  "frame->number", frame->number,
1556  "source_clip->gravity", gravity,
1557  "scaled_source_width", scaled_source_width,
1558  "scaled_source_height", scaled_source_height);
1559 
1560  QTransform transform;
1561 
1562  /* LOCATION, ROTATION, AND SCALE */
1563  float r = rotation.GetValue(frame->number) + parentObject_rotation; // rotate in degrees
1564  x += width * (location_x.GetValue(frame->number) + parentObject_location_x); // move in percentage of final width
1565  y += height * (location_y.GetValue(frame->number) + parentObject_location_y); // move in percentage of final height
1566  float shear_x_value = shear_x.GetValue(frame->number) + parentObject_shear_x;
1567  float shear_y_value = shear_y.GetValue(frame->number) + parentObject_shear_y;
1568  float origin_x_value = origin_x.GetValue(frame->number);
1569  float origin_y_value = origin_y.GetValue(frame->number);
1570 
1571  // Transform source image (if needed)
1572  ZmqLogger::Instance()->AppendDebugMethod(
1573  "Clip::get_transform (Build QTransform - if needed)",
1574  "frame->number", frame->number,
1575  "x", x, "y", y,
1576  "r", r,
1577  "sx", sx, "sy", sy);
1578 
1579  if (!isNear(x, 0) || !isNear(y, 0)) {
1580  // TRANSLATE/MOVE CLIP
1581  transform.translate(x, y);
1582  }
1583  if (!isNear(r, 0) || !isNear(shear_x_value, 0) || !isNear(shear_y_value, 0)) {
1584  // ROTATE CLIP (around origin_x, origin_y)
1585  float origin_x_offset = (scaled_source_width * origin_x_value);
1586  float origin_y_offset = (scaled_source_height * origin_y_value);
1587  transform.translate(origin_x_offset, origin_y_offset);
1588  transform.rotate(r);
1589  transform.shear(shear_x_value, shear_y_value);
1590  transform.translate(-origin_x_offset, -origin_y_offset);
1591  }
1592  // SCALE CLIP (if needed)
1593  float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
1594  float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;
1595  if (!isNear(source_width_scale, 1.0) || !isNear(source_height_scale, 1.0)) {
1596  transform.scale(source_width_scale, source_height_scale);
1597  }
1598 
1599  return transform;
1600 }
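
// --- Editorial sketch, not part of Clip.cpp: the QTransform composition
// order used by get_transform() above. QTransform's member calls compose so
// the last-applied operation hits the point first: scale source pixels,
// shift by -origin, shear/rotate, shift back, then translate into place.
// All values below are hypothetical.
static QTransform transform_demo()
{
    const float x = 320.0f, y = 180.0f;               // gravity + location offset
    const float origin_x = 640.0f, origin_y = 360.0f; // rotation origin (scaled pixels)
    QTransform t;
    t.translate(x, y);                // position the clip on the canvas
    t.translate(origin_x, origin_y);  // rotate/shear around the origin point
    t.rotate(45.0);
    t.shear(0.1, 0.0);
    t.translate(-origin_x, -origin_y);
    t.scale(0.5, 0.5);                // source -> display scale
    return t;                         // apply with t.map(QPointF(...))
}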
1601 
1602 // Adjust frame number for Clip position and start (which can result in a different number)
1603 int64_t Clip::adjust_timeline_framenumber(int64_t clip_frame_number) {
1604 
1605  // Get clip position from parent clip (if any)
1606  float position = 0.0;
1607  float start = 0.0;
1608  Clip *parent = static_cast<Clip *>(ParentClip());
1609  if (parent) {
1610  position = parent->Position();
1611  start = parent->Start();
1612  }
1613 
1614  // Adjust start frame and position based on the parent clip.
1615  // This ensures mapped readers and clips use the same frame number
1616  // when calculating samples per frame, preventing gaps and
1617  // mismatches in the number of samples.
1618  int64_t clip_start_frame = (start * info.fps.ToDouble()) + 1;
1619  int64_t clip_start_position = round(position * info.fps.ToDouble()) + 1;
1620  int64_t frame_number = clip_frame_number + clip_start_position - clip_start_frame;
1621 
1622  return frame_number;
1623 }
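
// Worked example (illustrative only): at 30 fps, a parent clip with
// Position() == 2.0 s and Start() == 0.5 s yields
//   clip_start_frame    = (0.5 * 30) + 1       = 16
//   clip_start_position = round(2.0 * 30) + 1  = 61
// so clip frame N maps to timeline frame N + 61 - 16 = N + 45.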