#ifdef USE_IMAGEMAGICK
wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255);

parentTrackedObject = nullptr;
parentClipObject = NULL;
// Use the reader's "rotate" metadata (if present) to set the default rotation
if (reader && reader->info.metadata.count("rotate") > 0) {

    float rotate_metadata = strtof(reader->info.metadata["rotate"].c_str(), 0);

}
catch (const std::exception& e) {}
// Default constructor
Clip::Clip() : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)

// Constructor with an existing reader
Clip::Clip(ReaderBase* new_reader) : resampler(NULL), reader(new_reader), allocated_reader(NULL), is_open(false)

// Constructor with a file path (auto-detects which reader to use)
Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
std::string ext = get_file_extension(path);
std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);

if (ext=="avi" || ext=="mov" || ext=="mkv" || ext=="mpg" || ext=="mpeg" || ext=="mp3" || ext=="mp4" || ext=="mts" ||
    ext=="ogg" || ext=="wav" || ext=="wmv" || ext=="webm" || ext=="vob")
allocated_reader = reader;

if (allocated_reader) {
    delete allocated_reader;
    allocated_reader = NULL;
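A minimal usage sketch (not part of Clip.cpp), assuming the libopenshot headers and a hypothetical media file at "video.mp4" — the extension match above would select an FFmpegReader for it:

    openshot::Clip clip("video.mp4");                           // reader auto-detected from the extension
    clip.Open();                                                // open the internal reader
    std::shared_ptr<openshot::Frame> frame = clip.GetFrame(1);  // fetch the first frame
    clip.Close();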
if (parentTimeline) {

    std::shared_ptr<openshot::TrackedObjectBase> trackedObject = parentTimeline->GetTrackedObject(object_id);
    Clip* clipObject = parentTimeline->GetClip(object_id);

    else if (clipObject) {

    parentTrackedObject = trackedObject;

    parentClipObject = clipObject;
throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

throw ReaderClosed("The Clip is closed. Call Open() before calling this method.");

frame_number = adjust_frame_number_minimum(frame_number);

std::shared_ptr<Frame> original_frame = GetOrCreateFrame(frame_number);
return GetFrame(original_frame, frame_number, NULL);

throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t frame_number)

throw ReaderClosed("The Clip is closed. Call Open() before calling this method.");

frame_number = adjust_frame_number_minimum(frame_number);

std::shared_ptr<Frame> original_frame = GetOrCreateFrame(frame_number);
return GetFrame(original_frame, frame_number, NULL);

throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

throw ReaderClosed("The Clip is closed. Call Open() before calling this method.");
frame_number = adjust_frame_number_minimum(frame_number);

// Apply the "time" curve (if any) to remap the requested frame number
int64_t new_frame_number = frame_number;
int64_t time_mapped_number = adjust_frame_number_minimum(time.GetLong(frame_number));
new_frame_number = time_mapped_number;

std::shared_ptr<Frame> original_frame = GetOrCreateFrame(new_frame_number);

get_time_mapped_frame(original_frame, new_frame_number);

// Apply this clip's own effects (if any)
apply_effects(original_frame);

// Apply global/timeline effects (if any)
if (timeline != NULL && options != NULL) {

original_frame = timeline_instance->apply_effects(original_frame, background_frame->number, Layer());

// Composite this clip onto the background frame using keyframes
apply_keyframes(original_frame, background_frame->GetImage());

return original_frame;
throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

for (const auto& effect : effects) {
    if (effect->Id() == id) {
std::string Clip::get_file_extension(std::string path)
{
    return path.substr(path.find_last_of(".") + 1);
void Clip::reverse_buffer(juce::AudioSampleBuffer* buffer)
{
    int number_of_samples = buffer->getNumSamples();
    int channels = buffer->getNumChannels();

    // Allocate a scratch buffer of the same size
    juce::AudioSampleBuffer *reversed = new juce::AudioSampleBuffer(channels, number_of_samples);

    // Copy the samples into the scratch buffer in reverse order
    for (int channel = 0; channel < channels; channel++)
    {
        int n = 0;
        for (int s = number_of_samples - 1; s >= 0; s--, n++)
            reversed->getWritePointer(channel)[n] = buffer->getWritePointer(channel)[s];
    }

    // Copy the reversed samples back into the original buffer
    for (int channel = 0; channel < channels; channel++)
        buffer->addFrom(channel, 0, reversed->getReadPointer(channel), number_of_samples, 1.0f);
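Worked example (illustration only): for a single channel holding samples [s0, s1, s2, s3], the inner copy loop above writes [s3, s2, s1, s0] into the scratch buffer, and addFrom() with a gain of 1.0f then copies the reversed data back over the original buffer.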
void Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, int64_t frame_number)

throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

juce::AudioSampleBuffer *samples = NULL;

int new_frame_number = frame->number;

int number_of_samples = GetOrCreateFrame(new_frame_number)->GetAudioSamplesCount();

juce::AudioSampleBuffer *resampled_buffer = NULL;

samples = new juce::AudioSampleBuffer(channels, number_of_samples);

for (int channel = 0; channel < channels; channel++)
    samples->addFrom(channel, 0, GetOrCreateFrame(new_frame_number)->GetAudioSamples(channel),
                     number_of_samples, 1.0f);

reverse_buffer(samples);

for (int channel = 0; channel < channels; channel++)
    frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, start),
                    number_of_samples, 1.0f);

resampled_buffer = NULL;
else if (abs(delta) > 1 && abs(delta) < 100) {

    int total_delta_samples = 0;
    for (int delta_frame = new_frame_number - (delta - 1);
         delta_frame <= new_frame_number; delta_frame++)

    samples = new juce::AudioSampleBuffer(channels, total_delta_samples);

    for (int delta_frame = new_frame_number - (delta - 1);
         delta_frame <= new_frame_number; delta_frame++) {

        int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
        juce::AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels, number_of_delta_samples);
        delta_samples->clear();

        for (int channel = 0; channel < channels; channel++)
            delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel),
                                   number_of_delta_samples, 1.0f);

        reverse_buffer(delta_samples);

        for (int channel = 0; channel < channels; channel++)
            samples->addFrom(channel, start, delta_samples->getReadPointer(channel),
                             number_of_delta_samples, 1.0f);

        delete delta_samples;
        delta_samples = NULL;

        start += number_of_delta_samples;
    int total_delta_samples = 0;
    for (int delta_frame = new_frame_number - (delta + 1);
         delta_frame >= new_frame_number; delta_frame--)

    samples = new juce::AudioSampleBuffer(channels, total_delta_samples);

    for (int delta_frame = new_frame_number - (delta + 1);
         delta_frame >= new_frame_number; delta_frame--) {

        int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
        juce::AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels, number_of_delta_samples);
        delta_samples->clear();

        for (int channel = 0; channel < channels; channel++)
            delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel),
                                   number_of_delta_samples, 1.0f);

        reverse_buffer(delta_samples);

        for (int channel = 0; channel < channels; channel++)
            samples->addFrom(channel, start, delta_samples->getReadPointer(channel),
                             number_of_delta_samples, 1.0f);

        delete delta_samples;
        delta_samples = NULL;

        start += number_of_delta_samples;
resampler->SetBuffer(samples, float(start) / float(number_of_samples));

for (int channel = 0; channel < channels; channel++)
    frame->AddAudio(true, channel, 0, buffer->getReadPointer(channel), number_of_samples, 1.0f);

samples = new juce::AudioSampleBuffer(channels, number_of_samples);

for (int channel = 0; channel < channels; channel++)
    samples->addFrom(channel, 0, frame->GetAudioSamples(channel), number_of_samples, 1.0f);

reverse_buffer(samples);

for (int channel = 0; channel < channels; channel++)
    frame->AddAudio(true, channel, 0, samples->getReadPointer(channel), number_of_samples, 1.0f);
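For context, the "time" keyframe drives this whole method: it maps output frame numbers to source frame numbers, and a decreasing curve is what produces the negative-delta / reverse_buffer() paths above. A hedged sketch (not part of Clip.cpp; the path is hypothetical):

    openshot::Clip clip("video.mp4");
    clip.time.AddPoint(1, 100);    // output frame 1 plays source frame 100
    clip.time.AddPoint(100, 1);    // output frame 100 plays source frame 1 (reverse playback)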
// Adjust a frame number so it is never below 1
int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
{
    if (frame_number < 1)

std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number)

auto reader_frame = reader->GetFrame(number);

// Copy the frame, so the reader's cached frame is not modified
auto reader_copy = std::make_shared<Frame>(*reader_frame.get());
reader_copy->SampleRate(reader_frame->SampleRate());
reader_copy->ChannelsLayout(reader_frame->ChannelsLayout());
// Otherwise create a blank frame with silent audio
auto new_frame = std::make_shared<Frame>(
    number, reader->info.width, reader->info.height,
    "#000000", estimated_samples_in_frame, reader->info.channels);

new_frame->AddAudioSilence(estimated_samples_in_frame);
root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);

root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);

root["waveform"] = add_property_json("Waveform", waveform, "int", "", NULL, 0, 1, false, requested_frame);
if (!parentObjectId.empty()) {
    root["parentObjectId"] = add_property_json("Parent", 0.0, "string", parentObjectId, NULL, -1, -1, false, requested_frame);
} else {
    root["parentObjectId"] = add_property_json("Parent", 0.0, "string", "", NULL, -1, -1, false, requested_frame);
if (parentTrackedObject)
{
    double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;

    std::map<std::string, float> trackedObjectParentClipProperties = parentTrackedObject->GetParentClipProperties(timeline_frame_number);
    double parentObject_frame_number = trackedObjectParentClipProperties["frame_number"];

    std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parentObject_frame_number);

    float parentObject_location_x = trackedObjectProperties["cx"] - 0.5 + trackedObjectParentClipProperties["cx"];
    float parentObject_location_y = trackedObjectProperties["cy"] - 0.5 + trackedObjectParentClipProperties["cy"];
    float parentObject_scale_x = trackedObjectProperties["w"] * trackedObjectProperties["sx"];
    float parentObject_scale_y = trackedObjectProperties["h"] * trackedObjectProperties["sy"];
    float parentObject_rotation = trackedObjectProperties["r"] + trackedObjectParentClipProperties["r"];
    root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
    root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
    root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
    root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
    root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
else if (parentClipObject)
{
    double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;

    float parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
    float parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
    float parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
    float parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
    float parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
    float parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
    float parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);
    root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
    root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
    root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
    root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
    root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
    root["shear_x"] = add_property_json("Shear X", parentObject_shear_x, "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
    root["shear_y"] = add_property_json("Shear Y", parentObject_shear_y, "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
return root.toStyledString();
root["parentObjectId"] = parentObjectId;

root["scale"] = scale;

root["waveform"] = waveform;

root["effects"] = Json::Value(Json::arrayValue);

for (auto existing_effect : effects)

root["effects"].append(existing_effect->JsonValue());

root["reader"] = Json::Value(Json::objectValue);
catch (const std::exception& e)
{
    throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
if (!root["parentObjectId"].isNull()) {
    parentObjectId = root["parentObjectId"].asString();
    if (parentObjectId.size() > 0 && parentObjectId != "") {

    parentTrackedObject = nullptr;
    parentClipObject = NULL;
if (!root["gravity"].isNull())
if (!root["scale"].isNull())
if (!root["anchor"].isNull())
if (!root["display"].isNull())
if (!root["mixing"].isNull())
if (!root["waveform"].isNull())
    waveform = root["waveform"].asBool();
if (!root["scale_x"].isNull())
if (!root["scale_y"].isNull())
if (!root["location_x"].isNull())
if (!root["location_y"].isNull())
if (!root["alpha"].isNull())
if (!root["rotation"].isNull())
if (!root["time"].isNull())
if (!root["volume"].isNull())
if (!root["wave_color"].isNull())
if (!root["shear_x"].isNull())
if (!root["shear_y"].isNull())
if (!root["origin_x"].isNull())
if (!root["origin_y"].isNull())
if (!root["channel_filter"].isNull())
if (!root["channel_mapping"].isNull())
if (!root["has_audio"].isNull())
if (!root["has_video"].isNull())
if (!root["perspective_c1_x"].isNull())
if (!root["perspective_c1_y"].isNull())
if (!root["perspective_c2_x"].isNull())
if (!root["perspective_c2_y"].isNull())
if (!root["perspective_c3_x"].isNull())
if (!root["perspective_c3_y"].isNull())
if (!root["perspective_c4_x"].isNull())
if (!root["perspective_c4_y"].isNull())
if (!root["effects"].isNull()) {
for (const auto existing_effect : root["effects"]) {

    if (!existing_effect["type"].isNull()) {

        if ( (e = EffectInfo().CreateEffect(existing_effect["type"].asString()))) {
if (!root["reader"].isNull())

if (!root["reader"]["type"].isNull())

bool already_open = false;

already_open = reader->IsOpen();

std::string type = root["reader"]["type"].asString();

if (type == "FFmpegReader") {

} else if (type == "QtImageReader") {

#ifdef USE_IMAGEMAGICK
} else if (type == "ImageReader") {

    reader = new ImageReader(root["reader"]["path"].asString(), false);

} else if (type == "TextReader") {

} else if (type == "ChunkReader") {

} else if (type == "DummyReader") {

} else if (type == "Timeline") {

allocated_reader = reader;
void Clip::sort_effects()

effects.push_back(effect);

if (parentTimeline) {

std::shared_ptr<TrackedObjectBBox> trackedObjectBBox = std::static_pointer_cast<TrackedObjectBBox>(trackedObject.second);

trackedObjectBBox->ParentClip(this);

effects.remove(effect);

void Clip::apply_effects(std::shared_ptr<Frame> frame)

for (auto effect : effects)

    frame = effect->GetFrame(frame, frame->number);

bool Clip::isEqual(double a, double b)
{
    return fabs(a - b) < 0.000001;
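Note (illustration only): the epsilon comparison above exists because exact float equality is unreliable; e.g. 0.1 + 0.2 == 0.3 is false in IEEE-754 doubles, while isEqual(0.1 + 0.2, 0.3) returns true, since the actual difference (about 5.6e-17) is far below the 0.000001 tolerance.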
void Clip::apply_keyframes(std::shared_ptr<Frame> frame, std::shared_ptr<QImage> background_canvas) {

    // Get the clip's source image
    std::shared_ptr<QImage> source_image = frame->GetImage();

    // Or render an audio waveform in place of the image
    source_image = frame->GetWaveform(background_canvas->width(), background_canvas->height(), red, green, blue, alpha);

    int width = background_canvas->width();
    int height = background_canvas->height();

    // Build the transform (scale, gravity, rotation, shear, etc.)
    QTransform transform = get_transform(frame, width, height);

    // Composite the transformed source image onto the background canvas
    QPainter painter(background_canvas.get());
    painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);

    painter.setTransform(transform);

    painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
    painter.drawImage(0, 0, *source_image);

    // Optionally draw the frame number on top
    std::stringstream frame_number_str;

    frame_number_str << frame->number;

    painter.setPen(QColor("#ffffff"));
    painter.drawText(20, 20, QString(frame_number_str.str().c_str()));

    frame->AddImage(background_canvas);
QTransform Clip::get_transform(std::shared_ptr<Frame> frame, int width, int height)
{
    std::shared_ptr<QImage> source_image = frame->GetImage();

    // Apply the alpha keyframe by scaling every RGBA byte of the image
    unsigned char *pixels = source_image->bits();

    for (int pixel = 0, byte_index = 0; pixel < source_image->width() * source_image->height(); pixel++, byte_index += 4)
    {
        pixels[byte_index + 0] *= alpha_value;
        pixels[byte_index + 1] *= alpha_value;
        pixels[byte_index + 2] *= alpha_value;
        pixels[byte_index + 3] *= alpha_value;
    }
QSize source_size = source_image->size();

if (parentTrackedObject) {

source_size.scale(width, height, Qt::KeepAspectRatio);

ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_FIT)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());

source_size.scale(width, height, Qt::IgnoreAspectRatio);

ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_STRETCH)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());

source_size.scale(width, height, Qt::KeepAspectRatioByExpanding);

ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_CROP)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());

ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_NONE)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
float parentObject_location_x = 0.0;
float parentObject_location_y = 0.0;
float parentObject_scale_x = 1.0;
float parentObject_scale_y = 1.0;
float parentObject_shear_x = 0.0;
float parentObject_shear_y = 0.0;
float parentObject_rotation = 0.0;
if (parentClipObject) {

    double timeline_frame_number = frame->number + clip_start_position - clip_start_frame;

    parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
    parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
    parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
    parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
    parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
    parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
    parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);
if (parentTrackedObject) {

    double timeline_frame_number = frame->number + clip_start_position - clip_start_frame;

    std::map<std::string, float> trackedObjectParentClipProperties = parentTrackedObject->GetParentClipProperties(timeline_frame_number);

    if (!trackedObjectParentClipProperties.empty())
    {
        float parentObject_frame_number = trackedObjectParentClipProperties["frame_number"];

        std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parentObject_frame_number);

        parentObject_location_x = trackedObjectProperties["cx"] - 0.5 + trackedObjectParentClipProperties["location_x"];
        parentObject_location_y = trackedObjectProperties["cy"] - 0.5 + trackedObjectParentClipProperties["location_y"];
        parentObject_scale_x = trackedObjectProperties["w"] * trackedObjectProperties["sx"];
        parentObject_scale_y = trackedObjectProperties["h"] * trackedObjectProperties["sy"];
        parentObject_rotation = trackedObjectProperties["r"] + trackedObjectParentClipProperties["rotation"];
    }
    else
    {
        std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(timeline_frame_number);

        parentObject_location_x = trackedObjectProperties["cx"] - 0.5;
        parentObject_location_y = trackedObjectProperties["cy"] - 0.5;
        parentObject_scale_x = trackedObjectProperties["w"] * trackedObjectProperties["sx"];
        parentObject_scale_y = trackedObjectProperties["h"] * trackedObjectProperties["sy"];
        parentObject_rotation = trackedObjectProperties["r"];
if (parentObject_scale_x != 0.0 && parentObject_scale_y != 0.0) {
    sx *= parentObject_scale_x;
    sy *= parentObject_scale_y;

float scaled_source_width = source_size.width() * sx;
float scaled_source_height = source_size.height() * sy;
// GRAVITY_TOP
x = (width - scaled_source_width) / 2.0;

// GRAVITY_TOP_RIGHT
x = width - scaled_source_width;

// GRAVITY_LEFT
y = (height - scaled_source_height) / 2.0;

// GRAVITY_CENTER
x = (width - scaled_source_width) / 2.0;
y = (height - scaled_source_height) / 2.0;

// GRAVITY_RIGHT
x = width - scaled_source_width;
y = (height - scaled_source_height) / 2.0;

// GRAVITY_BOTTOM_LEFT
y = (height - scaled_source_height);

// GRAVITY_BOTTOM
x = (width - scaled_source_width) / 2.0;
y = (height - scaled_source_height);

// GRAVITY_BOTTOM_RIGHT
x = width - scaled_source_width;
y = (height - scaled_source_height);
ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Gravity)", "frame->number", frame->number, "source_clip->gravity", gravity, "scaled_source_width", scaled_source_width, "scaled_source_height", scaled_source_height);
QTransform transform;

float shear_x_value = shear_x.GetValue(frame->number) + parentObject_shear_x;
float shear_y_value = shear_y.GetValue(frame->number) + parentObject_shear_y;

ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Build QTransform - if needed)", "frame->number", frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);

// Translate to the gravity position (if needed)
if (!isEqual(x, 0) || !isEqual(y, 0)) {
    transform.translate(x, y);

// Rotate and shear around the origin point (if needed)
if (!isEqual(r, 0) || !isEqual(shear_x_value, 0) || !isEqual(shear_y_value, 0)) {
    float origin_x_offset = (scaled_source_width * origin_x_value);
    float origin_y_offset = (scaled_source_height * origin_y_value);
    transform.translate(origin_x_offset, origin_y_offset);
    transform.rotate(r);
    transform.shear(shear_x_value, shear_y_value);
    transform.translate(-origin_x_offset, -origin_y_offset);

// Scale the source image to its final size (if needed)
float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;
if (!isEqual(source_width_scale, 1.0) || !isEqual(source_height_scale, 1.0)) {
    transform.scale(source_width_scale, source_height_scale);
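A minimal sketch (not part of Clip.cpp) of the QTransform composition order used above, assuming only Qt: operations specified later apply to the source point first, so a point is scaled, then rotated, then translated — matching the translate/rotate/shear/scale chain in get_transform().

    #include <QTransform>
    #include <QPointF>
    #include <cstdio>

    int main() {
        QTransform t;
        t.translate(100, 50);   // gravity offset (applied to the point last)
        t.rotate(90);           // clockwise in Qt's y-down coordinates
        t.scale(2.0, 2.0);      // applied to the point first
        QPointF p = t.map(QPointF(1, 0));  // (1,0) -> scale -> (2,0) -> rotate -> (0,2) -> translate -> (100,52)
        std::printf("%.1f %.1f\n", p.x(), p.y());
        return 0;
    }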