OpenShot Library | libopenshot  0.1.2
Clip.cpp
/**
 * @file
 * @brief Source file for Clip class
 * @author Jonathan Thomas <jonathan@openshot.org>
 *
 * @section LICENSE
 *
 * Copyright (c) 2008-2014 OpenShot Studios, LLC
 * <http://www.openshotstudios.com/>. This file is part of
 * OpenShot Library (libopenshot), an open-source project dedicated to
 * delivering high quality video editing and animation solutions to the
 * world. For more information visit <http://www.openshot.org/>.
 *
 * OpenShot Library (libopenshot) is free software: you can redistribute it
 * and/or modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * OpenShot Library (libopenshot) is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
 */

#include "../include/Clip.h"

using namespace openshot;

// Init default settings for a clip
void Clip::init_settings()
{
	// Init clip settings
	Position(0.0);
	Layer(0);
	Start(0.0);
	End(0.0);
	gravity = GRAVITY_CENTER;
	scale = SCALE_FIT;
	anchor = ANCHOR_CANVAS;
	waveform = false;
	previous_properties = "";

	// Init scale curves
	scale_x = Keyframe(1.0);
	scale_y = Keyframe(1.0);

	// Init location curves
	location_x = Keyframe(0.0);
	location_y = Keyframe(0.0);

	// Init alpha & rotation
	alpha = Keyframe(1.0);
	rotation = Keyframe(0.0);

	// Init time & volume
	time = Keyframe(0.0);
	volume = Keyframe(1.0);

	// Init audio waveform color
	wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255);

	// Init crop settings
	crop_gravity = GRAVITY_TOP_LEFT;
	crop_width = Keyframe(-1.0);
	crop_height = Keyframe(-1.0);
	crop_x = Keyframe(0.0);
	crop_y = Keyframe(0.0);

	// Init shear and perspective curves
	shear_x = Keyframe(0.0);
	shear_y = Keyframe(0.0);
	perspective_c1_x = Keyframe(-1.0);
	perspective_c1_y = Keyframe(-1.0);
	perspective_c2_x = Keyframe(-1.0);
	perspective_c2_y = Keyframe(-1.0);
	perspective_c3_x = Keyframe(-1.0);
	perspective_c3_y = Keyframe(-1.0);
	perspective_c4_x = Keyframe(-1.0);
	perspective_c4_y = Keyframe(-1.0);

	// Init audio channel filter and mappings
	channel_filter = Keyframe(-1.0);
	channel_mapping = Keyframe(-1.0);

	// Init audio and video overrides
	has_audio = Keyframe(-1.0);
	has_video = Keyframe(-1.0);

	// Default pointers
	reader = NULL;
	resampler = NULL;
	audio_cache = NULL;
	manage_reader = false;
}
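
// Note: has_audio and has_video above default to -1.0, an "undefined" sentinel
// meaning "inherit the reader's own has_audio / has_video flags" (see the
// override logic in GetFrame below).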

// Default Constructor for a clip
Clip::Clip()
{
	// Init all default settings
	init_settings();
}

// Constructor with reader
Clip::Clip(ReaderBase* new_reader)
{
	// Init all default settings
	init_settings();

	// Set the reader
	reader = new_reader;

	// Open and Close the reader (to set the duration of the clip)
	Open();
	Close();

	// Update duration
	End(reader->info.duration);
}

// Constructor with filepath
Clip::Clip(string path)
{
	// Init all default settings
	init_settings();

	// Get file extension (and convert to lower case)
	string ext = get_file_extension(path);
	transform(ext.begin(), ext.end(), ext.begin(), ::tolower);

	// Determine if this is a common video format
	if (ext=="avi" || ext=="mov" || ext=="mkv" || ext=="mpg" || ext=="mpeg" || ext=="mp3" || ext=="mp4" || ext=="mts" ||
		ext=="ogg" || ext=="wav" || ext=="wmv" || ext=="webm" || ext=="vob")
	{
		try
		{
			// Open common video format
			reader = new FFmpegReader(path);

		} catch(...) { }
	}

	// If no video found, try each reader
	if (!reader)
	{
		try
		{
			// Try an image reader
			reader = new QtImageReader(path);

		} catch(...) {
			try
			{
				// Try a video reader
				reader = new FFmpegReader(path);

			} catch(...) { }
		}
	}

	// Update duration
	if (reader) {
		End(reader->info.duration);
		manage_reader = true;
	}
}
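
// Illustrative usage (sketch, file names are placeholders):
//
//   Clip a("video.mp4");      // clip creates and owns its reader (manage_reader == true)
//   FFmpegReader r("b.mp4");
//   Clip b(&r);               // caller keeps ownership; the clip will not delete 'r'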

// Destructor
Clip::~Clip()
{
	// Delete the reader if clip created it
	if (manage_reader && reader) {
		delete reader;
		reader = NULL;
	}

	// Close the resampler
	if (resampler) {
		delete resampler;
		resampler = NULL;
	}
}

/// Set the current reader
void Clip::Reader(ReaderBase* new_reader)
{
	// set reader pointer
	reader = new_reader;
}

/// Get the current reader
ReaderBase* Clip::Reader() throw(ReaderClosed)
{
	if (reader)
		return reader;
	else
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");
}

// Open the internal reader
void Clip::Open() throw(InvalidFile, ReaderClosed)
{
	if (reader)
	{
		// Open the reader
		reader->Open();

		// Set some clip properties from the file reader
		if (end == 0.0)
			End(reader->info.duration);
	}
	else
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");
}

// Close the internal reader
void Clip::Close() throw(ReaderClosed)
{
	if (reader) {
		ZmqLogger::Instance()->AppendDebugMethod("Clip::Close", "", -1, "", -1, "", -1, "", -1, "", -1, "", -1);

		// Close the reader
		reader->Close();
	}
	else
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");
}

// Get end position of clip (trim end of video), which can be affected by the time curve
float Clip::End() throw(ReaderClosed)
{
	// if a time curve is present, use its length
	if (time.Points.size() > 1)
	{
		// Determine the FPS of this clip
		float fps = 24.0;
		if (reader)
			// file reader
			fps = reader->info.fps.ToFloat();
		else
			// Throw error if reader not initialized
			throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");

		return float(time.GetLength()) / fps;
	}
	else
		// just use the duration (as detected by the reader)
		return end;
}
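
// Worked example (illustrative): if the time curve spans 480 frames and the
// reader reports 24 fps, End() returns 480 / 24.0 = 20.0 seconds, regardless
// of the reader's own duration.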

// Get an openshot::Frame object for a specific frame number of this reader.
tr1::shared_ptr<Frame> Clip::GetFrame(long int requested_frame) throw(ReaderClosed)
{
	if (reader)
	{
		// Adjust out of bounds frame number
		requested_frame = adjust_frame_number_minimum(requested_frame);

		// Adjust has_video and has_audio overrides
		int enabled_audio = has_audio.GetInt(requested_frame);
		if (enabled_audio == -1 && reader && reader->info.has_audio)
			enabled_audio = 1;
		else if (enabled_audio == -1 && reader && !reader->info.has_audio)
			enabled_audio = 0;
		int enabled_video = has_video.GetInt(requested_frame);
		if (enabled_video == -1 && reader && reader->info.has_video)
			enabled_video = 1;
		else if (enabled_video == -1 && reader && !reader->info.has_video)
			enabled_video = 0;

		// Adjust parent reader with same settings (for performance gains)
		if (reader) {
			// Override parent reader
			reader->info.has_audio = enabled_audio;
			reader->info.has_video = enabled_video;
		}

		// Is a time map detected?
		long int new_frame_number = requested_frame;
		if (time.Values.size() > 1)
			new_frame_number = time.GetLong(requested_frame);

		// Now that we have re-mapped what frame number is needed, go and get the frame pointer
		tr1::shared_ptr<Frame> original_frame = GetOrCreateFrame(new_frame_number);

		// Create a new frame
		tr1::shared_ptr<Frame> frame(new Frame(new_frame_number, 1, 1, "#000000", original_frame->GetAudioSamplesCount(), original_frame->GetAudioChannelsCount()));
		frame->SampleRate(original_frame->SampleRate());
		frame->ChannelsLayout(original_frame->ChannelsLayout());

		// Copy the image from the original frame
		if (enabled_video)
			frame->AddImage(tr1::shared_ptr<QImage>(new QImage(*original_frame->GetImage())));

		// Loop through each channel, add audio
		if (enabled_audio && reader->info.has_audio)
			for (int channel = 0; channel < original_frame->GetAudioChannelsCount(); channel++)
				frame->AddAudio(true, channel, 0, original_frame->GetAudioSamples(channel), original_frame->GetAudioSamplesCount(), 1.0);

		// Get time mapped frame number (used to increase speed, change direction, etc...)
		tr1::shared_ptr<Frame> new_frame = get_time_mapped_frame(frame, requested_frame);

		// Apply effects to the frame (if any)
		apply_effects(new_frame);

		// Return processed 'frame'
		return new_frame;
	}
	else
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");
}
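
// Illustrative usage (sketch): fetch the first processed frame of a clip.
//
//   Clip clip("video.mp4");
//   clip.Open();
//   tr1::shared_ptr<Frame> f = clip.GetFrame(1);  // image + audio, effects applied
//   clip.Close();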
319 
320 // Get file extension
321 string Clip::get_file_extension(string path)
322 {
323  // return last part of path
324  return path.substr(path.find_last_of(".") + 1);
325 }
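
// Example (illustrative): "clips/video.mp4" yields "mp4". Note that when the
// path contains no '.', find_last_of() returns string::npos, npos + 1 wraps
// around to 0, and the whole path is returned unchanged.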

// Reverse an audio buffer
void Clip::reverse_buffer(juce::AudioSampleBuffer* buffer)
{
	int number_of_samples = buffer->getNumSamples();
	int channels = buffer->getNumChannels();

	// Reverse array (create new buffer to hold the reversed version)
	AudioSampleBuffer *reversed = new juce::AudioSampleBuffer(channels, number_of_samples);
	reversed->clear();

	for (int channel = 0; channel < channels; channel++)
	{
		int n = 0;
		for (int s = number_of_samples - 1; s >= 0; s--, n++)
			reversed->getWritePointer(channel)[n] = buffer->getReadPointer(channel)[s];
	}

	// Copy the samples back to the original array
	buffer->clear();
	// Loop through channels, and get audio samples
	for (int channel = 0; channel < channels; channel++)
		// Get the audio samples for this channel
		buffer->addFrom(channel, 0, reversed->getReadPointer(channel), number_of_samples, 1.0f);

	delete reversed;
	reversed = NULL;
}
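
// Example (illustrative): for a stereo buffer whose left channel holds
// [0.1, 0.2, 0.3], reverse_buffer() rewrites it in place as [0.3, 0.2, 0.1];
// each channel is reversed independently.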

// Adjust the audio and image of a time mapped frame
tr1::shared_ptr<Frame> Clip::get_time_mapped_frame(tr1::shared_ptr<Frame> frame, long int frame_number) throw(ReaderClosed)
{
	// Check for valid reader
	if (!reader)
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");

	// Check for a valid time map curve
	if (time.Values.size() > 1)
	{
		tr1::shared_ptr<Frame> new_frame;

		// create buffer and resampler
		juce::AudioSampleBuffer *samples = NULL;
		if (!resampler)
			resampler = new AudioResampler();

		// Get new frame number
		int new_frame_number = round(time.GetValue(frame_number));

		// Create a new frame
		int samples_in_frame = Frame::GetSamplesPerFrame(new_frame_number, reader->info.fps, reader->info.sample_rate, frame->GetAudioChannelsCount());
		new_frame = tr1::shared_ptr<Frame>(new Frame(new_frame_number, 1, 1, "#000000", samples_in_frame, frame->GetAudioChannelsCount()));

		// Copy the image from the mapped frame
		new_frame->AddImage(GetOrCreateFrame(new_frame_number)->GetImage());

		// Get delta (difference from the previous Y value)
		int delta = int(round(time.GetDelta(frame_number)));

		// Init audio vars
		int sample_rate = reader->info.sample_rate;
		int channels = reader->info.channels;
		int number_of_samples = GetOrCreateFrame(new_frame_number)->GetAudioSamplesCount();

		// Only resample audio if needed
		if (reader->info.has_audio) {
			// Determine if we are speeding up or slowing down
			if (time.GetRepeatFraction(frame_number).den > 1) {
				// SLOWING DOWN AUDIO
				// Resample data, and return new buffer pointer
				AudioSampleBuffer *resampled_buffer = NULL;
				int resampled_buffer_size = 0;

				// SLOW DOWN audio (split audio)
				samples = new juce::AudioSampleBuffer(channels, number_of_samples);
				samples->clear();

				// Loop through channels, and get audio samples
				for (int channel = 0; channel < channels; channel++)
					// Get the audio samples for this channel
					samples->addFrom(channel, 0, GetOrCreateFrame(new_frame_number)->GetAudioSamples(channel),
									 number_of_samples, 1.0f);

				// Reverse the samples (if needed)
				if (!time.IsIncreasing(frame_number))
					reverse_buffer(samples);

				// Resample audio to be X times slower (where X is the denominator of the repeat fraction)
				resampler->SetBuffer(samples, 1.0 / time.GetRepeatFraction(frame_number).den);

				// Resample the data (since it's the 1st slice)
				resampled_buffer = resampler->GetResampledBuffer();

				// Get the length of the resampled buffer (if one exists)
				resampled_buffer_size = resampled_buffer->getNumSamples();

				// Just take the samples we need for the requested frame
				int start = (number_of_samples * (time.GetRepeatFraction(frame_number).num - 1));
				if (start > 0)
					start -= 1;
				for (int channel = 0; channel < channels; channel++)
					// Add new (slower) samples to the frame object
					new_frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, start),
										number_of_samples, 1.0f);

				// Clean up
				resampled_buffer = NULL;

			}
			else if (abs(delta) > 1 && abs(delta) < 100) {
				int start = 0;
				if (delta > 0) {
					// SPEED UP (multiple frames of audio), as long as it's not more than X frames
					int total_delta_samples = 0;
					for (int delta_frame = new_frame_number - (delta - 1);
						 delta_frame <= new_frame_number; delta_frame++)
						total_delta_samples += Frame::GetSamplesPerFrame(delta_frame, reader->info.fps,
																		 reader->info.sample_rate,
																		 reader->info.channels);

					// Allocate a new sample buffer for these delta frames
					samples = new juce::AudioSampleBuffer(channels, total_delta_samples);
					samples->clear();

					// Loop through each frame in this delta
					for (int delta_frame = new_frame_number - (delta - 1);
						 delta_frame <= new_frame_number; delta_frame++) {
						// buffer to hold delta samples
						int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
						AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels,
																					   number_of_delta_samples);
						delta_samples->clear();

						for (int channel = 0; channel < channels; channel++)
							delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel),
												   number_of_delta_samples, 1.0f);

						// Reverse the samples (if needed)
						if (!time.IsIncreasing(frame_number))
							reverse_buffer(delta_samples);

						// Copy the samples into the combined buffer
						for (int channel = 0; channel < channels; channel++)
							// Get the audio samples for this channel
							samples->addFrom(channel, start, delta_samples->getReadPointer(channel),
											 number_of_delta_samples, 1.0f);

						// Clean up
						delete delta_samples;
						delta_samples = NULL;

						// Increment start position
						start += number_of_delta_samples;
					}
				}
				else {
					// SPEED UP (multiple frames of audio), as long as it's not more than X frames
					int total_delta_samples = 0;
					for (int delta_frame = new_frame_number - (delta + 1);
						 delta_frame >= new_frame_number; delta_frame--)
						total_delta_samples += Frame::GetSamplesPerFrame(delta_frame, reader->info.fps,
																		 reader->info.sample_rate,
																		 reader->info.channels);

					// Allocate a new sample buffer for these delta frames
					samples = new juce::AudioSampleBuffer(channels, total_delta_samples);
					samples->clear();

					// Loop through each frame in this delta
					for (int delta_frame = new_frame_number - (delta + 1);
						 delta_frame >= new_frame_number; delta_frame--) {
						// buffer to hold delta samples
						int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
						AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels,
																					   number_of_delta_samples);
						delta_samples->clear();

						for (int channel = 0; channel < channels; channel++)
							delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel),
												   number_of_delta_samples, 1.0f);

						// Reverse the samples (if needed)
						if (!time.IsIncreasing(frame_number))
							reverse_buffer(delta_samples);

						// Copy the samples into the combined buffer
						for (int channel = 0; channel < channels; channel++)
							// Get the audio samples for this channel
							samples->addFrom(channel, start, delta_samples->getReadPointer(channel),
											 number_of_delta_samples, 1.0f);

						// Clean up
						delete delta_samples;
						delta_samples = NULL;

						// Increment start position
						start += number_of_delta_samples;
					}
				}

				// Resample audio to be X times faster (where X is the number of frames in the delta)
				resampler->SetBuffer(samples, float(start) / float(number_of_samples));

				// Resample data, and return new buffer pointer
				AudioSampleBuffer *buffer = resampler->GetResampledBuffer();
				int resampled_buffer_size = buffer->getNumSamples();

				// Add the newly resized audio samples to the current frame
				for (int channel = 0; channel < channels; channel++)
					// Add new (faster) samples to the frame object
					new_frame->AddAudio(true, channel, 0, buffer->getReadPointer(channel), number_of_samples, 1.0f);

				// Clean up
				buffer = NULL;
			}
			else {
				// Use the samples on this frame (but maybe reverse them if needed)
				samples = new juce::AudioSampleBuffer(channels, number_of_samples);
				samples->clear();

				// Loop through channels, and get audio samples
				for (int channel = 0; channel < channels; channel++)
					// Get the audio samples for this channel
					samples->addFrom(channel, 0, frame->GetAudioSamples(channel), number_of_samples, 1.0f);

				// reverse the samples
				if (!time.IsIncreasing(frame_number))
					reverse_buffer(samples);

				// Add reversed samples to the frame object
				for (int channel = 0; channel < channels; channel++)
					new_frame->AddAudio(true, channel, 0, samples->getReadPointer(channel), number_of_samples, 1.0f);
			}

			delete samples;
			samples = NULL;
		}

		// Return new time mapped frame
		return new_frame;

	} else
		// Use original frame
		return frame;
}
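
// Illustrative note on the slow-down math above: with a time curve playing at
// half speed, each source frame repeats twice, so GetRepeatFraction() returns
// 1/2 then 2/2 for the two output frames. SetBuffer(samples, 1.0/den)
// stretches one frame of audio across 'den' output frames, and the 'start'
// offset selects the slice belonging to repeat number 'num'.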

// Adjust frame number minimum value
long int Clip::adjust_frame_number_minimum(long int frame_number)
{
	// Never return a frame number 0 or below
	if (frame_number < 1)
		return 1;
	else
		return frame_number;
}

// Get or generate a blank frame
tr1::shared_ptr<Frame> Clip::GetOrCreateFrame(long int number)
{
	tr1::shared_ptr<Frame> new_frame;

	// Init some basic properties about this frame
	int samples_in_frame = Frame::GetSamplesPerFrame(number, reader->info.fps, reader->info.sample_rate, reader->info.channels);

	try {
		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (from reader)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);

		// Attempt to get a frame (but this could fail if a reader has just been closed)
		new_frame = reader->GetFrame(number);

		// Return real frame
		return new_frame;

	} catch (const ReaderClosed & e) {
		// ...
	} catch (const TooManySeeks & e) {
		// ...
	} catch (const OutOfBoundsFrame & e) {
		// ...
	}

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (create blank)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);

	// Create blank frame
	new_frame = tr1::shared_ptr<Frame>(new Frame(number, reader->info.width, reader->info.height, "#000000", samples_in_frame, reader->info.channels));
	new_frame->SampleRate(reader->info.sample_rate);
	new_frame->ChannelsLayout(reader->info.channel_layout);
	return new_frame;
}

// Generate JSON string of this object
string Clip::Json() {

	// Return formatted string
	return JsonValue().toStyledString();
}

// Get all properties for a specific frame
string Clip::PropertiesJSON(long int requested_frame) {

	// Requested Point
	Point requested_point(requested_frame, requested_frame);

	// Generate JSON properties list
	Json::Value root;
	root["id"] = add_property_json("ID", 0.0, "string", Id(), false, 0, -1, -1, CONSTANT, -1, true);
	root["position"] = add_property_json("Position", Position(), "float", "", false, 0, 0, 30 * 60 * 60 * 48, CONSTANT, -1, false);
	root["layer"] = add_property_json("Track", Layer(), "int", "", false, 0, 0, 20, CONSTANT, -1, false);
	root["start"] = add_property_json("Start", Start(), "float", "", false, 0, 0, 30 * 60 * 60 * 48, CONSTANT, -1, false);
	root["end"] = add_property_json("End", End(), "float", "", false, 0, 0, 30 * 60 * 60 * 48, CONSTANT, -1, false);
	root["duration"] = add_property_json("Duration", Duration(), "float", "", false, 0, 0, 30 * 60 * 60 * 48, CONSTANT, -1, true);
	root["gravity"] = add_property_json("Gravity", gravity, "int", "", false, 0, 0, 8, CONSTANT, -1, false);
	root["scale"] = add_property_json("Scale", scale, "int", "", false, 0, 0, 3, CONSTANT, -1, false);
	root["anchor"] = add_property_json("Anchor", anchor, "int", "", false, 0, 0, 1, CONSTANT, -1, false);
	root["waveform"] = add_property_json("Waveform", waveform, "int", "", false, 0, 0, 1, CONSTANT, -1, false);

	// Add gravity choices (dropdown style)
	root["gravity"]["choices"].append(add_property_choice_json("Top Left", GRAVITY_TOP_LEFT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Top Center", GRAVITY_TOP, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Top Right", GRAVITY_TOP_RIGHT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Left", GRAVITY_LEFT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Center", GRAVITY_CENTER, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Right", GRAVITY_RIGHT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Bottom Left", GRAVITY_BOTTOM_LEFT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Bottom Center", GRAVITY_BOTTOM, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Bottom Right", GRAVITY_BOTTOM_RIGHT, gravity));

	// Add scale choices (dropdown style)
	root["scale"]["choices"].append(add_property_choice_json("Crop", SCALE_CROP, scale));
	root["scale"]["choices"].append(add_property_choice_json("Best Fit", SCALE_FIT, scale));
	root["scale"]["choices"].append(add_property_choice_json("Stretch", SCALE_STRETCH, scale));
	root["scale"]["choices"].append(add_property_choice_json("None", SCALE_NONE, scale));

	// Add anchor choices (dropdown style)
	root["anchor"]["choices"].append(add_property_choice_json("Canvas", ANCHOR_CANVAS, anchor));
	root["anchor"]["choices"].append(add_property_choice_json("Viewport", ANCHOR_VIEWPORT, anchor));

	// Add waveform choices (dropdown style)
	root["waveform"]["choices"].append(add_property_choice_json("Yes", true, waveform));
	root["waveform"]["choices"].append(add_property_choice_json("No", false, waveform));

	// Keyframes
	root["location_x"] = add_property_json("Location X", location_x.GetValue(requested_frame), "float", "", location_x.Contains(requested_point), location_x.GetCount(), -1.0, 1.0, location_x.GetClosestPoint(requested_point).interpolation, location_x.GetClosestPoint(requested_point).co.X, false);
	root["location_y"] = add_property_json("Location Y", location_y.GetValue(requested_frame), "float", "", location_y.Contains(requested_point), location_y.GetCount(), -1.0, 1.0, location_y.GetClosestPoint(requested_point).interpolation, location_y.GetClosestPoint(requested_point).co.X, false);
	root["scale_x"] = add_property_json("Scale X", scale_x.GetValue(requested_frame), "float", "", scale_x.Contains(requested_point), scale_x.GetCount(), 0.0, 1.0, scale_x.GetClosestPoint(requested_point).interpolation, scale_x.GetClosestPoint(requested_point).co.X, false);
	root["scale_y"] = add_property_json("Scale Y", scale_y.GetValue(requested_frame), "float", "", scale_y.Contains(requested_point), scale_y.GetCount(), 0.0, 1.0, scale_y.GetClosestPoint(requested_point).interpolation, scale_y.GetClosestPoint(requested_point).co.X, false);
	root["alpha"] = add_property_json("Alpha", alpha.GetValue(requested_frame), "float", "", alpha.Contains(requested_point), alpha.GetCount(), 0.0, 1.0, alpha.GetClosestPoint(requested_point).interpolation, alpha.GetClosestPoint(requested_point).co.X, false);
	root["rotation"] = add_property_json("Rotation", rotation.GetValue(requested_frame), "float", "", rotation.Contains(requested_point), rotation.GetCount(), -360, 360, rotation.GetClosestPoint(requested_point).interpolation, rotation.GetClosestPoint(requested_point).co.X, false);
	root["volume"] = add_property_json("Volume", volume.GetValue(requested_frame), "float", "", volume.Contains(requested_point), volume.GetCount(), 0.0, 1.0, volume.GetClosestPoint(requested_point).interpolation, volume.GetClosestPoint(requested_point).co.X, false);
	root["time"] = add_property_json("Time", time.GetValue(requested_frame), "float", "", time.Contains(requested_point), time.GetCount(), 0.0, 30 * 60 * 60 * 48, time.GetClosestPoint(requested_point).interpolation, time.GetClosestPoint(requested_point).co.X, false);
	root["channel_filter"] = add_property_json("Channel Filter", channel_filter.GetValue(requested_frame), "int", "", channel_filter.Contains(requested_point), channel_filter.GetCount(), -1, 10, channel_filter.GetClosestPoint(requested_point).interpolation, channel_filter.GetClosestPoint(requested_point).co.X, false);
	root["channel_mapping"] = add_property_json("Channel Mapping", channel_mapping.GetValue(requested_frame), "int", "", channel_mapping.Contains(requested_point), channel_mapping.GetCount(), -1, 10, channel_mapping.GetClosestPoint(requested_point).interpolation, channel_mapping.GetClosestPoint(requested_point).co.X, false);
	root["has_audio"] = add_property_json("Enable Audio", has_audio.GetValue(requested_frame), "int", "", has_audio.Contains(requested_point), has_audio.GetCount(), -1, 1.0, has_audio.GetClosestPoint(requested_point).interpolation, has_audio.GetClosestPoint(requested_point).co.X, false);
	root["has_video"] = add_property_json("Enable Video", has_video.GetValue(requested_frame), "int", "", has_video.Contains(requested_point), has_video.GetCount(), -1, 1.0, has_video.GetClosestPoint(requested_point).interpolation, has_video.GetClosestPoint(requested_point).co.X, false);

	root["wave_color"] = add_property_json("Wave Color", 0.0, "color", "", wave_color.red.Contains(requested_point), wave_color.red.GetCount(), 0, 255, wave_color.red.GetClosestPoint(requested_point).interpolation, wave_color.red.GetClosestPoint(requested_point).co.X, false);
	root["wave_color"]["red"] = add_property_json("Red", wave_color.red.GetValue(requested_frame), "float", "", wave_color.red.Contains(requested_point), wave_color.red.GetCount(), 0, 255, wave_color.red.GetClosestPoint(requested_point).interpolation, wave_color.red.GetClosestPoint(requested_point).co.X, false);
	root["wave_color"]["blue"] = add_property_json("Blue", wave_color.blue.GetValue(requested_frame), "float", "", wave_color.blue.Contains(requested_point), wave_color.blue.GetCount(), 0, 255, wave_color.blue.GetClosestPoint(requested_point).interpolation, wave_color.blue.GetClosestPoint(requested_point).co.X, false);
	root["wave_color"]["green"] = add_property_json("Green", wave_color.green.GetValue(requested_frame), "float", "", wave_color.green.Contains(requested_point), wave_color.green.GetCount(), 0, 255, wave_color.green.GetClosestPoint(requested_point).interpolation, wave_color.green.GetClosestPoint(requested_point).co.X, false);

	// Return formatted string
	return root.toStyledString();
}

// Generate Json::JsonValue for this object
Json::Value Clip::JsonValue() {

	// Create root json object
	Json::Value root = ClipBase::JsonValue(); // get parent properties
	root["gravity"] = gravity;
	root["scale"] = scale;
	root["anchor"] = anchor;
	root["waveform"] = waveform;
	root["scale_x"] = scale_x.JsonValue();
	root["scale_y"] = scale_y.JsonValue();
	root["location_x"] = location_x.JsonValue();
	root["location_y"] = location_y.JsonValue();
	root["alpha"] = alpha.JsonValue();
	root["rotation"] = rotation.JsonValue();
	root["time"] = time.JsonValue();
	root["volume"] = volume.JsonValue();
	root["wave_color"] = wave_color.JsonValue();
	root["crop_width"] = crop_width.JsonValue();
	root["crop_height"] = crop_height.JsonValue();
	root["crop_x"] = crop_x.JsonValue();
	root["crop_y"] = crop_y.JsonValue();
	root["shear_x"] = shear_x.JsonValue();
	root["shear_y"] = shear_y.JsonValue();
	root["channel_filter"] = channel_filter.JsonValue();
	root["channel_mapping"] = channel_mapping.JsonValue();
	root["has_audio"] = has_audio.JsonValue();
	root["has_video"] = has_video.JsonValue();
	root["perspective_c1_x"] = perspective_c1_x.JsonValue();
	root["perspective_c1_y"] = perspective_c1_y.JsonValue();
	root["perspective_c2_x"] = perspective_c2_x.JsonValue();
	root["perspective_c2_y"] = perspective_c2_y.JsonValue();
	root["perspective_c3_x"] = perspective_c3_x.JsonValue();
	root["perspective_c3_y"] = perspective_c3_y.JsonValue();
	root["perspective_c4_x"] = perspective_c4_x.JsonValue();
	root["perspective_c4_y"] = perspective_c4_y.JsonValue();

	// Add array of effects
	root["effects"] = Json::Value(Json::arrayValue);

	// loop through effects
	list<EffectBase*>::iterator effect_itr;
	for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
	{
		// Get effect object from the iterator
		EffectBase *existing_effect = (*effect_itr);
		root["effects"].append(existing_effect->JsonValue());
	}

	if (reader)
		root["reader"] = reader->JsonValue();

	// return JsonValue
	return root;
}

// Load JSON string into this object
void Clip::SetJson(string value) throw(InvalidJSON) {

	// Parse JSON string into JSON objects
	Json::Value root;
	Json::Reader reader;
	bool success = reader.parse( value, root );
	if (!success)
		// Raise exception
		throw InvalidJSON("JSON could not be parsed (or is invalid)", "");

	try
	{
		// Set all values that match
		SetJsonValue(root);
	}
	catch (const exception& e)
	{
		// Error parsing JSON (or missing keys)
		throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
	}
}
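
// Illustrative usage (sketch): keys follow the shape produced by JsonValue().
//
//   Clip clip("video.mp4");
//   clip.SetJson("{ \"scale\": 1, \"waveform\": false }");  // 1 == SCALE_FIT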

// Load Json::JsonValue into this object
void Clip::SetJsonValue(Json::Value root) {

	// Set parent data
	ClipBase::SetJsonValue(root);

	// Set data from Json (if key is found)
	if (!root["gravity"].isNull())
		gravity = (GravityType) root["gravity"].asInt();
	if (!root["scale"].isNull())
		scale = (ScaleType) root["scale"].asInt();
	if (!root["anchor"].isNull())
		anchor = (AnchorType) root["anchor"].asInt();
	if (!root["waveform"].isNull())
		waveform = root["waveform"].asBool();
	if (!root["scale_x"].isNull())
		scale_x.SetJsonValue(root["scale_x"]);
	if (!root["scale_y"].isNull())
		scale_y.SetJsonValue(root["scale_y"]);
	if (!root["location_x"].isNull())
		location_x.SetJsonValue(root["location_x"]);
	if (!root["location_y"].isNull())
		location_y.SetJsonValue(root["location_y"]);
	if (!root["alpha"].isNull())
		alpha.SetJsonValue(root["alpha"]);
	if (!root["rotation"].isNull())
		rotation.SetJsonValue(root["rotation"]);
	if (!root["time"].isNull())
		time.SetJsonValue(root["time"]);
	if (!root["volume"].isNull())
		volume.SetJsonValue(root["volume"]);
	if (!root["wave_color"].isNull())
		wave_color.SetJsonValue(root["wave_color"]);
	if (!root["crop_width"].isNull())
		crop_width.SetJsonValue(root["crop_width"]);
	if (!root["crop_height"].isNull())
		crop_height.SetJsonValue(root["crop_height"]);
	if (!root["crop_x"].isNull())
		crop_x.SetJsonValue(root["crop_x"]);
	if (!root["crop_y"].isNull())
		crop_y.SetJsonValue(root["crop_y"]);
	if (!root["shear_x"].isNull())
		shear_x.SetJsonValue(root["shear_x"]);
	if (!root["shear_y"].isNull())
		shear_y.SetJsonValue(root["shear_y"]);
	if (!root["channel_filter"].isNull())
		channel_filter.SetJsonValue(root["channel_filter"]);
	if (!root["channel_mapping"].isNull())
		channel_mapping.SetJsonValue(root["channel_mapping"]);
	if (!root["has_audio"].isNull())
		has_audio.SetJsonValue(root["has_audio"]);
	if (!root["has_video"].isNull())
		has_video.SetJsonValue(root["has_video"]);
	if (!root["perspective_c1_x"].isNull())
		perspective_c1_x.SetJsonValue(root["perspective_c1_x"]);
	if (!root["perspective_c1_y"].isNull())
		perspective_c1_y.SetJsonValue(root["perspective_c1_y"]);
	if (!root["perspective_c2_x"].isNull())
		perspective_c2_x.SetJsonValue(root["perspective_c2_x"]);
	if (!root["perspective_c2_y"].isNull())
		perspective_c2_y.SetJsonValue(root["perspective_c2_y"]);
	if (!root["perspective_c3_x"].isNull())
		perspective_c3_x.SetJsonValue(root["perspective_c3_x"]);
	if (!root["perspective_c3_y"].isNull())
		perspective_c3_y.SetJsonValue(root["perspective_c3_y"]);
	if (!root["perspective_c4_x"].isNull())
		perspective_c4_x.SetJsonValue(root["perspective_c4_x"]);
	if (!root["perspective_c4_y"].isNull())
		perspective_c4_y.SetJsonValue(root["perspective_c4_y"]);
	if (!root["effects"].isNull()) {

		// Clear existing effects
		effects.clear();

		// loop through effects
		for (int x = 0; x < root["effects"].size(); x++) {
			// Get each effect
			Json::Value existing_effect = root["effects"][x];

			// Create Effect
			EffectBase *e = NULL;

			if (!existing_effect["type"].isNull()) {
				// Create instance of effect
				e = EffectInfo().CreateEffect(existing_effect["type"].asString());

				// Load Json into Effect
				e->SetJsonValue(existing_effect);

				// Add Effect to Timeline
				AddEffect(e);
			}
		}
	}
	if (!root["reader"].isNull()) // does Json contain a reader?
	{
		if (!root["reader"]["type"].isNull()) // does the reader Json contain a 'type'?
		{
			// Close previous reader (if any)
			bool already_open = false;
			if (reader)
			{
				// Track if reader was open
				already_open = reader->IsOpen();

				// Close and delete existing reader (if any)
				reader->Close();
				delete reader;
				reader = NULL;
			}

			// Create new reader (and load properties)
			string type = root["reader"]["type"].asString();

			if (type == "FFmpegReader") {

				// Create new reader
				reader = new FFmpegReader(root["reader"]["path"].asString());
				reader->SetJsonValue(root["reader"]);

			} else if (type == "QtImageReader") {

				// Create new reader
				reader = new QtImageReader(root["reader"]["path"].asString());
				reader->SetJsonValue(root["reader"]);

#ifdef USE_IMAGEMAGICK
			} else if (type == "ImageReader") {

				// Create new reader
				reader = new ImageReader(root["reader"]["path"].asString());
				reader->SetJsonValue(root["reader"]);

			} else if (type == "TextReader") {

				// Create new reader
				reader = new TextReader();
				reader->SetJsonValue(root["reader"]);
#endif

			} else if (type == "ChunkReader") {

				// Create new reader
				reader = new ChunkReader(root["reader"]["path"].asString(), (ChunkVersion) root["reader"]["chunk_version"].asInt());
				reader->SetJsonValue(root["reader"]);

			} else if (type == "DummyReader") {

				// Create new reader
				reader = new DummyReader();
				reader->SetJsonValue(root["reader"]);
			}

			// mark as managed reader
			if (reader)
				manage_reader = true;

			// Re-Open reader (if needed)
			if (already_open)
				reader->Open();

		}
	}
}
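
// Example (illustrative) of reader JSON accepted above; replacing the reader
// makes the clip take ownership of it (manage_reader is set to true):
//
//   { "reader": { "type": "FFmpegReader", "path": "video.mp4" } }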

// Sort effects by order
void Clip::sort_effects()
{
	// sort effects
	effects.sort(CompareClipEffects());
}

// Add an effect to the clip
void Clip::AddEffect(EffectBase* effect)
{
	// Add effect to list
	effects.push_back(effect);

	// Sort effects
	sort_effects();
}

// Remove an effect from the clip
void Clip::RemoveEffect(EffectBase* effect)
{
	effects.remove(effect);
}

// Apply effects to the source frame (if any)
tr1::shared_ptr<Frame> Clip::apply_effects(tr1::shared_ptr<Frame> frame)
{
	// Loop through each effect on this clip
	list<EffectBase*>::iterator effect_itr;
	for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
	{
		// Get effect object from the iterator
		EffectBase *effect = (*effect_itr);

		// Apply the effect to this frame
		frame = effect->GetFrame(frame, frame->number);

	} // end effect loop

	// Return modified frame
	return frame;
}
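
// Illustrative end-to-end usage (sketch, file name is a placeholder):
//
//   Clip clip("video.mp4");   // reader auto-selected from the file extension
//   clip.Position(2.0);       // place the clip 2 seconds into the timeline
//   clip.Open();
//   tr1::shared_ptr<Frame> f = clip.GetFrame(1);
//   clip.Close();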