Libmv: Replace region tracker with autotracker in Blender

The title actually tells it all: this commit switches Blender to use the new
autotrack API from Libmv.

From the user's point of view it means that the prediction model is now used
when tracking, which gives really nice results.

All the other changes are not really visible to users; those are just frame
accessors, caches and so on for the new API.
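
For reference, the operator code now drives tracking roughly like this (a
minimal sketch distilled from the track_markers_exec() changes below; frame
clamping, job handling and error checks are omitted):

struct AutoTrackContext *context =
        BKE_autotrack_context_new(clip, user, backwards, sequence);
while (framenr != efra) {
    if (!BKE_autotrack_context_step(context))
        break;
    framenr += backwards ? -1 : 1;
}
BKE_autotrack_context_sync(context);
BKE_autotrack_context_finish(context);
BKE_autotrack_context_free(context);

The lifecycle mirrors the old BKE_tracking_context_* API, which keeps the
operator-level changes mechanical.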
Sergey Sharybin 2014-06-22 20:19:48 +06:00
parent 7013d55580
commit 606329d0f8
9 changed files with 1066 additions and 511 deletions

View File

@ -67,21 +67,12 @@ int libmv_autoTrackMarker(libmv_AutoTrack* libmv_autotrack,
libmv_apiMarkerToMarker(*libmv_tracked_marker, &tracked_marker);
libmv_configureTrackRegionOptions(*libmv_options,
&options);
bool tracking_result
= ((AutoTrack*) libmv_autotrack)->TrackMarker(&tracked_marker,
&result,
&options);
(((AutoTrack*) libmv_autotrack)->TrackMarker(&tracked_marker,
&result,
&options));
libmv_markerToApiMarker(tracked_marker, libmv_tracked_marker);
libmv_regionTrackergetResult(result, libmv_result);
// TODO(keir): Update the termination string with failure details.
if (result.termination == TrackRegionResult::CONVERGENCE ||
result.termination == TrackRegionResult::NO_CONVERGENCE) {
tracking_result = true;
}
return tracking_result;
return result.is_usable();
}
void libmv_autoTrackAddMarker(libmv_AutoTrack* libmv_autotrack,

View File

@ -210,16 +210,18 @@ void BKE_tracking_disable_channels(struct ImBuf *ibuf, bool disable_red, bool di
bool disable_blue, bool grayscale);
/* **** 2D tracking **** */
struct MovieTrackingContext *BKE_tracking_context_new(struct MovieClip *clip, struct MovieClipUser *user,
const bool backwards, const bool sequence);
void BKE_tracking_context_free(struct MovieTrackingContext *context);
void BKE_tracking_context_sync(struct MovieTrackingContext *context);
void BKE_tracking_context_sync_user(const struct MovieTrackingContext *context, struct MovieClipUser *user);
bool BKE_tracking_context_step(struct MovieTrackingContext *context);
void BKE_tracking_context_finish(struct MovieTrackingContext *context);
void BKE_tracking_refine_marker(struct MovieClip *clip, struct MovieTrackingTrack *track, struct MovieTrackingMarker *marker, bool backwards);
/* *** 2D auto track *** */
struct AutoTrackContext *BKE_autotrack_context_new(struct MovieClip *clip, struct MovieClipUser *user,
const bool backwards, const bool sequence);
bool BKE_autotrack_context_step(struct AutoTrackContext *context);
void BKE_autotrack_context_sync(struct AutoTrackContext *context);
void BKE_autotrack_context_sync_user(struct AutoTrackContext *context, struct MovieClipUser *user);
void BKE_autotrack_context_finish(struct AutoTrackContext *context);
void BKE_autotrack_context_free(struct AutoTrackContext *context);
/* **** Plane tracking **** */
void BKE_tracking_track_plane_from_existing_motion(struct MovieTrackingPlaneTrack *plane_track, int start_frame);

View File

@ -158,6 +158,7 @@ set(SRC
intern/text.c
intern/texture.c
intern/tracking.c
intern/tracking_auto.c
intern/tracking_detect.c
intern/tracking_plane_tracker.c
intern/tracking_region_tracker.c

View File

@ -0,0 +1,563 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
* Keir Mierle
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/blenkernel/intern/tracking_auto.c
* \ingroup bke
*/
#include <stdlib.h>
#include "MEM_guardedalloc.h"
#include "DNA_movieclip_types.h"
#include "DNA_object_types.h" /* SELECT */
#include "BLI_threads.h"
#include "BLI_utildefines.h"
#include "BLI_math.h"
#include "BKE_movieclip.h"
#include "BKE_tracking.h"
#include "libmv-capi.h"
#include "tracking_private.h"
typedef struct AutoTrackOptions {
int clip_index; /* Index of the clip this track belongs to. */
int track_index; /* Index of the track in AutoTrack tracks structure. */
MovieTrackingTrack *track; /* Pointer to an original track. */
libmv_TrackRegionOptions track_region_options; /* Options for the region
tracker. */
bool use_keyframe_match; /* Keyframe pattern matching. */
/* TODO(sergey): A bit awkward to keep it in here, only used to
* place a disabled marker once the tracking fails.
* Either find a more clear way to do it or call it track context
* or state, not options.
*/
bool is_failed;
int failed_frame;
} AutoTrackOptions;
typedef struct AutoTrackContext {
MovieClip *clips[MAX_ACCESSOR_CLIP];
int num_clips;
MovieClipUser user;
int frame_width, frame_height;
struct libmv_AutoTrack *autotrack;
TrackingImageAccessor *image_accessor;
int num_tracks; /* Number of tracks being tracked. */
AutoTrackOptions *options; /* Per-tracking track options. */
bool backwards;
bool sequence;
int first_frame;
int sync_frame;
bool first_sync;
SpinLock spin_lock;
} AutoTrackContext;
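/* Coordinate conversion helpers below: Blender stores marker positions
* normalized to the frame size, while Libmv works in pixel space with
* pixel centers at integer coordinates, hence the 0.5 pixel offset.
* For example, a normalized position of (0.5, 0.5) in a 1920x1080 frame
* becomes (959.5, 539.5) in Libmv frame space.
*/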
static void normalized_to_libmv_frame(const float normalized[2],
const int frame_dimensions[2],
float result[2])
{
result[0] = normalized[0] * frame_dimensions[0] - 0.5f;
result[1] = normalized[1] * frame_dimensions[1] - 0.5f;
}
static void normalized_relative_to_libmv_frame(const float normalized[2],
const float origin[2],
const int frame_dimensions[2],
float result[2])
{
result[0] = (normalized[0] + origin[0]) * frame_dimensions[0] - 0.5f;
result[1] = (normalized[1] + origin[1]) * frame_dimensions[1] - 0.5f;
}
static void libmv_frame_to_normalized(const float frame_coord[2],
const int frame_dimensions[2],
float result[2])
{
result[0] = (frame_coord[0] + 0.5f) / frame_dimensions[0];
result[1] = (frame_coord[1] + 0.5f) / frame_dimensions[1];
}
static void libmv_frame_to_normalized_relative(const float frame_coord[2],
const float origin[2],
const int frame_dimensions[2],
float result[2])
{
result[0] = (frame_coord[0] - origin[0]) / frame_dimensions[0];
result[1] = (frame_coord[1] - origin[1]) / frame_dimensions[1];
}
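/* Convert a marker from Blender's DNA representation into a Libmv marker,
* mapping coordinates into Libmv frame space and picking the reference
* frame according to the track's pattern matching mode.
*/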
static void dna_marker_to_libmv_marker(/*const*/ MovieTrackingTrack *track,
/*const*/ MovieTrackingMarker *marker,
int clip,
int track_index,
int frame_width,
int frame_height,
bool backwards,
libmv_Marker *libmv_marker)
{
const int frame_dimensions[2] = {frame_width, frame_height};
int i;
libmv_marker->clip = clip;
libmv_marker->frame = marker->framenr;
libmv_marker->track = track_index;
normalized_to_libmv_frame(marker->pos,
frame_dimensions,
libmv_marker->center);
for (i = 0; i < 4; ++i) {
normalized_relative_to_libmv_frame(marker->pattern_corners[i],
marker->pos,
frame_dimensions,
libmv_marker->patch[i]);
}
normalized_relative_to_libmv_frame(marker->search_min,
marker->pos,
frame_dimensions,
libmv_marker->search_region_min);
normalized_relative_to_libmv_frame(marker->search_max,
marker->pos,
frame_dimensions,
libmv_marker->search_region_max);
/* TODO(sergey): All the markers do have 1.0 weight. */
libmv_marker->weight = 1.0f;
if (marker->flag & MARKER_TRACKED) {
libmv_marker->source = LIBMV_MARKER_SOURCE_TRACKED;
}
else {
libmv_marker->source = LIBMV_MARKER_SOURCE_MANUAL;
}
libmv_marker->status = LIBMV_MARKER_STATUS_UNKNOWN;
libmv_marker->model_type = LIBMV_MARKER_MODEL_TYPE_POINT;
libmv_marker->model_id = 0;
/* TODO(sergey): We currently don't support a reference marker from
* a different clip.
*/
libmv_marker->reference_clip = clip;
if (track->pattern_match == TRACK_MATCH_KEYFRAME) {
MovieTrackingMarker *keyframe_marker =
tracking_get_keyframed_marker(track,
marker->framenr,
backwards);
libmv_marker->reference_frame = keyframe_marker->framenr;
}
else {
libmv_marker->reference_frame = backwards ?
marker->framenr - 1 :
marker->framenr;
}
}
static void libmv_marker_to_dna_marker(libmv_Marker *libmv_marker,
int frame_width,
int frame_height,
MovieTrackingMarker *marker)
{
const int frame_dimensions[2] = {frame_width, frame_height};
int i;
marker->framenr = libmv_marker->frame;
libmv_frame_to_normalized(libmv_marker->center,
frame_dimensions,
marker->pos);
for (i = 0; i < 4; ++i) {
libmv_frame_to_normalized_relative(libmv_marker->patch[i],
libmv_marker->center,
frame_dimensions,
marker->pattern_corners[i]);
}
libmv_frame_to_normalized_relative(libmv_marker->search_region_min,
libmv_marker->center,
frame_dimensions,
marker->search_min);
libmv_frame_to_normalized_relative(libmv_marker->search_region_max,
libmv_marker->center,
frame_dimensions,
marker->search_max);
marker->flag = 0;
if (libmv_marker->source == LIBMV_MARKER_SOURCE_TRACKED) {
marker->flag |= MARKER_TRACKED;
}
else {
marker->flag &= ~MARKER_TRACKED;
}
}
static bool check_track_trackable(MovieClip *clip,
MovieTrackingTrack *track,
MovieClipUser *user)
{
if (TRACK_SELECTED(track) &&
(track->flag & (TRACK_LOCKED | TRACK_HIDDEN)) == 0)
{
MovieTrackingMarker *marker;
int frame;
frame = BKE_movieclip_remap_scene_to_clip_frame(clip, user->framenr);
marker = BKE_tracking_marker_get(track, frame);
return (marker->flag & MARKER_DISABLED) == 0;
}
return false;
}
/* Returns false if marker crossed margin area from frame bounds. */
static bool tracking_check_marker_margin(libmv_Marker *libmv_marker,
int margin,
int frame_width,
int frame_height)
{
float patch_min[2], patch_max[2];
float margin_left, margin_top, margin_right, margin_bottom;
INIT_MINMAX2(patch_min, patch_max);
minmax_v2v2_v2(patch_min, patch_max, libmv_marker->patch[0]);
minmax_v2v2_v2(patch_min, patch_max, libmv_marker->patch[1]);
minmax_v2v2_v2(patch_min, patch_max, libmv_marker->patch[2]);
minmax_v2v2_v2(patch_min, patch_max, libmv_marker->patch[3]);
margin_left = max_ff(libmv_marker->center[0] - patch_min[0], margin);
margin_top = max_ff(patch_max[1] - libmv_marker->center[1], margin);
margin_right = max_ff(patch_max[0] - libmv_marker->center[0], margin);
margin_bottom = max_ff(libmv_marker->center[1] - patch_min[1], margin);
if (libmv_marker->center[0] < margin_left ||
libmv_marker->center[0] > frame_width - margin_right ||
libmv_marker->center[1] < margin_bottom ||
libmv_marker->center[1] > frame_height - margin_top)
{
return false;
}
return true;
}
AutoTrackContext *BKE_autotrack_context_new(MovieClip *clip,
MovieClipUser *user,
const bool backwards,
const bool sequence)
{
AutoTrackContext *context = MEM_callocN(sizeof(AutoTrackContext),
"autotrack context");
MovieTracking *tracking = &clip->tracking;
MovieTrackingTrack *track;
ListBase *tracksbase = BKE_tracking_get_active_tracks(tracking);
int i, track_index, frame_width, frame_height;
BKE_movieclip_get_size(clip, user, &frame_width, &frame_height);
/* TODO(sergey): Currently using only a single clip. */
context->clips[0] = clip;
context->num_clips = 1;
context->user = *user;
context->user.render_size = MCLIP_PROXY_RENDER_SIZE_FULL;
context->user.render_flag = 0;
context->frame_width = frame_width;
context->frame_height = frame_height;
context->backwards = backwards;
context->sequence = sequence;
context->first_frame = user->framenr;
context->sync_frame = user->framenr;
context->first_sync = true;
BLI_spin_init(&context->spin_lock);
context->image_accessor =
tracking_image_accessor_new(context->clips, 1, user->framenr);
context->autotrack =
libmv_autoTrackNew(context->image_accessor->libmv_accessor);
/* Fill in Autotrack with all markers we know. */
track_index = 0;
for (track = tracksbase->first;
track;
track = track->next)
{
if (check_track_trackable(clip, track, user)) {
context->num_tracks++;
}
for (i = 0; i < track->markersnr; ++i) {
MovieTrackingMarker *marker = track->markers + i;
if ((marker->flag & MARKER_DISABLED) == 0) {
libmv_Marker libmv_marker;
dna_marker_to_libmv_marker(track,
marker,
0,
track_index,
frame_width,
frame_height,
backwards,
&libmv_marker);
libmv_autoTrackAddMarker(context->autotrack,
&libmv_marker);
}
}
track_index++;
}
/* Create per-track tracking options. */
context->options =
MEM_callocN(sizeof(AutoTrackOptions) * context->num_tracks,
"auto track options");
i = track_index = 0;
for (track = tracksbase->first;
track;
track = track->next)
{
if (check_track_trackable(clip, track, user)) {
AutoTrackOptions *options = &context->options[i++];
/* TODO(sergey): Single clip only for now. */
options->clip_index = 0;
options->track_index = track_index;
options->track = track;
tracking_configure_tracker(track,
NULL,
&options->track_region_options);
options->use_keyframe_match =
track->pattern_match == TRACK_MATCH_KEYFRAME;
}
++track_index;
}
return context;
}
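/* Track all the trackable markers of the context one frame further
* (forwards or backwards), running the per-track region trackers in parallel.
* Returns false if there was nothing to track on this step.
*/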
bool BKE_autotrack_context_step(AutoTrackContext *context)
{
int frame_delta = context->backwards ? -1 : 1;
bool ok = false;
int track;
#pragma omp parallel for if(context->num_tracks > 1)
for (track = 0; track < context->num_tracks; ++track) {
AutoTrackOptions *options = &context->options[track];
libmv_Marker libmv_current_marker,
libmv_reference_marker,
libmv_tracked_marker;
libmv_TrackRegionResult libmv_result;
int frame = BKE_movieclip_remap_scene_to_clip_frame(
context->clips[options->clip_index],
context->user.framenr);
if (libmv_autoTrackGetMarker(context->autotrack,
options->clip_index,
frame,
options->track_index,
&libmv_current_marker))
{
if (!tracking_check_marker_margin(&libmv_current_marker,
options->track->margin,
context->frame_width,
context->frame_height))
{
continue;
}
libmv_tracked_marker = libmv_current_marker;
libmv_tracked_marker.frame = frame + frame_delta;
if (options->use_keyframe_match) {
libmv_tracked_marker.reference_frame =
libmv_current_marker.reference_frame;
libmv_autoTrackGetMarker(context->autotrack,
options->clip_index,
libmv_tracked_marker.reference_frame,
options->track_index,
&libmv_reference_marker);
}
else {
libmv_tracked_marker.reference_frame = frame;
libmv_reference_marker = libmv_current_marker;
}
if (libmv_autoTrackMarker(context->autotrack,
&options->track_region_options,
&libmv_tracked_marker,
&libmv_result))
{
BLI_spin_lock(&context->spin_lock);
libmv_autoTrackAddMarker(context->autotrack,
&libmv_tracked_marker);
BLI_spin_unlock(&context->spin_lock);
}
else {
options->is_failed = true;
options->failed_frame = frame;
}
ok = true;
}
}
BLI_spin_lock(&context->spin_lock);
context->user.framenr += frame_delta;
BLI_spin_unlock(&context->spin_lock);
return ok;
}
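/* Copy markers tracked since the last sync from the AutoTrack structure
* back into the clip's DNA tracks, surrounding tracked segments with
* disabled markers and inserting a disabled marker after a failed frame.
*/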
void BKE_autotrack_context_sync(AutoTrackContext *context)
{
int newframe = context->user.framenr,
frame_delta = context->backwards ? -1 : 1;
int clip, frame;
BLI_spin_lock(&context->spin_lock);
for (frame = context->sync_frame;
frame != (context->backwards ? newframe - 1 : newframe + 1);
frame += frame_delta)
{
MovieTrackingMarker marker;
libmv_Marker libmv_marker;
int clip = 0;
int track;
for (track = 0; track < context->num_tracks; ++track) {
AutoTrackOptions *options = &context->options[track];
int track_frame = BKE_movieclip_remap_scene_to_clip_frame(
context->clips[options->clip_index], frame);
if (options->is_failed) {
if (options->failed_frame == track_frame) {
MovieTrackingMarker *prev_marker =
BKE_tracking_marker_get_exact(
options->track,
frame);
if (prev_marker) {
marker = *prev_marker;
marker.framenr = context->backwards ?
track_frame - 1 :
track_frame + 1;
marker.flag |= MARKER_DISABLED;
BKE_tracking_marker_insert(options->track, &marker);
}
}
continue;
}
if (libmv_autoTrackGetMarker(context->autotrack,
clip,
track_frame,
options->track_index,
&libmv_marker))
{
libmv_marker_to_dna_marker(&libmv_marker,
context->frame_width,
context->frame_height,
&marker);
if (context->first_sync && frame == context->sync_frame) {
tracking_marker_insert_disabled(options->track,
&marker,
!context->backwards,
false);
}
BKE_tracking_marker_insert(options->track, &marker);
tracking_marker_insert_disabled(options->track,
&marker,
context->backwards,
false);
}
}
}
BLI_spin_unlock(&context->spin_lock);
for (clip = 0; clip < context->num_clips; ++clip) {
MovieTracking *tracking = &context->clips[clip]->tracking;
BKE_tracking_dopesheet_tag_update(tracking);
}
context->sync_frame = newframe;
context->first_sync = false;
}
void BKE_autotrack_context_sync_user(AutoTrackContext *context,
MovieClipUser *user)
{
user->framenr = context->sync_frame;
}
void BKE_autotrack_context_finish(AutoTrackContext *context)
{
int clip_index;
for (clip_index = 0; clip_index < context->num_clips; ++clip_index) {
MovieClip *clip = context->clips[clip_index];
ListBase *plane_tracks_base =
BKE_tracking_get_active_plane_tracks(&clip->tracking);
MovieTrackingPlaneTrack *plane_track;
for (plane_track = plane_tracks_base->first;
plane_track;
plane_track = plane_track->next)
{
if ((plane_track->flag & PLANE_TRACK_AUTOKEY) == 0) {
int track;
for (track = 0; track < context->num_tracks; ++track) {
MovieTrackingTrack *old_track;
bool do_update = false;
int j;
old_track = context->options[track].track;
for (j = 0; j < plane_track->point_tracksnr; j++) {
if (plane_track->point_tracks[j] == old_track) {
do_update = true;
break;
}
}
if (do_update) {
BKE_tracking_track_plane_from_existing_motion(
plane_track,
context->first_frame);
break;
}
}
}
}
}
}
void BKE_autotrack_context_free(AutoTrackContext *context)
{
libmv_autoTrackDestroy(context->autotrack);
tracking_image_accessor_destroy(context->image_accessor);
MEM_freeN(context->options);
BLI_spin_end(&context->spin_lock);
MEM_freeN(context);
}

View File

@ -51,170 +51,6 @@
#include "libmv-capi.h"
#include "tracking_private.h"
typedef struct TrackContext {
/* the reference marker and cutout search area */
MovieTrackingMarker reference_marker;
/* keyframed patch. This is the search area */
float *search_area;
int search_area_height;
int search_area_width;
int framenr;
float *mask;
} TrackContext;
typedef struct MovieTrackingContext {
MovieClipUser user;
MovieClip *clip;
int clip_flag;
int frames, first_frame;
bool first_time;
MovieTrackingSettings settings;
TracksMap *tracks_map;
bool backwards, sequence;
int sync_frame;
} MovieTrackingContext;
static void track_context_free(void *customdata)
{
TrackContext *track_context = (TrackContext *)customdata;
if (track_context->search_area)
MEM_freeN(track_context->search_area);
if (track_context->mask)
MEM_freeN(track_context->mask);
}
/* Create context for motion 2D tracking, copies all data needed
* for thread-safe tracking, allowing clip modifications during
* tracking.
*/
MovieTrackingContext *BKE_tracking_context_new(MovieClip *clip, MovieClipUser *user,
const bool backwards, const bool sequence)
{
MovieTrackingContext *context = MEM_callocN(sizeof(MovieTrackingContext), "trackingContext");
MovieTracking *tracking = &clip->tracking;
MovieTrackingSettings *settings = &tracking->settings;
ListBase *tracksbase = BKE_tracking_get_active_tracks(tracking);
MovieTrackingTrack *track;
MovieTrackingObject *object = BKE_tracking_object_get_active(tracking);
int num_tracks = 0;
context->clip = clip;
context->settings = *settings;
context->backwards = backwards;
context->sync_frame = user->framenr;
context->first_time = true;
context->first_frame = user->framenr;
context->sequence = sequence;
/* count */
track = tracksbase->first;
while (track) {
if (TRACK_SELECTED(track) && (track->flag & (TRACK_LOCKED | TRACK_HIDDEN)) == 0) {
int framenr = BKE_movieclip_remap_scene_to_clip_frame(clip, user->framenr);
MovieTrackingMarker *marker = BKE_tracking_marker_get(track, framenr);
if ((marker->flag & MARKER_DISABLED) == 0)
num_tracks++;
}
track = track->next;
}
/* create tracking contexts for all tracks which would be tracked */
if (num_tracks) {
int width, height;
context->tracks_map = tracks_map_new(object->name, object->flag & TRACKING_OBJECT_CAMERA,
num_tracks, sizeof(TrackContext));
BKE_movieclip_get_size(clip, user, &width, &height);
/* create tracking data */
track = tracksbase->first;
while (track) {
if (TRACK_SELECTED(track) && (track->flag & (TRACK_HIDDEN | TRACK_LOCKED)) == 0) {
int framenr = BKE_movieclip_remap_scene_to_clip_frame(clip, user->framenr);
MovieTrackingMarker *marker = BKE_tracking_marker_get(track, framenr);
if ((marker->flag & MARKER_DISABLED) == 0) {
TrackContext track_context;
memset(&track_context, 0, sizeof(TrackContext));
tracks_map_insert(context->tracks_map, track, &track_context);
}
}
track = track->next;
}
}
/* store needed clip flags passing to get_buffer functions
* - MCLIP_USE_PROXY is needed because timecode affects the movie clip
* only in case the Proxy/Timecode flag is set, so store this flag to use
* timecodes properly but reset render size to SIZE_FULL so correct resolution
* would be used for images
* - MCLIP_USE_PROXY_CUSTOM_DIR is needed because proxy/timecode files might
* be stored in a different location
* ignore all the rest possible flags for now
*/
context->clip_flag = clip->flag & MCLIP_TIMECODE_FLAGS;
context->user = *user;
context->user.render_size = MCLIP_PROXY_RENDER_SIZE_FULL;
context->user.render_flag = 0;
if (!sequence)
BLI_begin_threaded_malloc();
return context;
}
/* Free context used for tracking. */
void BKE_tracking_context_free(MovieTrackingContext *context)
{
if (!context->sequence)
BLI_end_threaded_malloc();
tracks_map_free(context->tracks_map, track_context_free);
MEM_freeN(context);
}
/* Synchronize tracks between clip editor and tracking context,
* by merging them together so all newly created tracks and tracked
* ones are present in the movie clip.
*/
void BKE_tracking_context_sync(MovieTrackingContext *context)
{
MovieTracking *tracking = &context->clip->tracking;
int newframe;
tracks_map_merge(context->tracks_map, tracking);
if (context->backwards)
newframe = context->user.framenr + 1;
else
newframe = context->user.framenr - 1;
context->sync_frame = newframe;
BKE_tracking_dopesheet_tag_update(tracking);
}
/* Synchronize clip user's frame number with a frame number from tracking context,
* used to update current frame displayed in the clip editor while tracking.
*/
void BKE_tracking_context_sync_user(const MovieTrackingContext *context, MovieClipUser *user)
{
user->framenr = context->sync_frame;
}
/* **** utility functions for tracking **** */
/* convert from float and byte RGBA to grayscale. Supports different coefficients for RGB. */
@ -296,51 +132,6 @@ static ImBuf *tracking_context_get_frame_ibuf(MovieClip *clip, MovieClipUser *us
return ibuf;
}
/* Get previous keyframed marker. */
static MovieTrackingMarker *tracking_context_get_keyframed_marker(MovieTrackingTrack *track,
int curfra, bool backwards)
{
MovieTrackingMarker *marker_keyed = NULL;
MovieTrackingMarker *marker_keyed_fallback = NULL;
int a = BKE_tracking_marker_get(track, curfra) - track->markers;
while (a >= 0 && a < track->markersnr) {
int next = backwards ? a + 1 : a - 1;
bool is_keyframed = false;
MovieTrackingMarker *cur_marker = &track->markers[a];
MovieTrackingMarker *next_marker = NULL;
if (next >= 0 && next < track->markersnr)
next_marker = &track->markers[next];
if ((cur_marker->flag & MARKER_DISABLED) == 0) {
/* If we end up not finding a real keyframe marker,
* fall back to the first marker in the current tracked segment
* as a keyframe.
*/
if (next_marker && next_marker->flag & MARKER_DISABLED) {
if (marker_keyed_fallback == NULL)
marker_keyed_fallback = cur_marker;
}
is_keyframed |= (cur_marker->flag & MARKER_TRACKED) == 0;
}
if (is_keyframed) {
marker_keyed = cur_marker;
break;
}
a = next;
}
if (marker_keyed == NULL)
marker_keyed = marker_keyed_fallback;
return marker_keyed;
}
/* Get image buffer for previous marker's keyframe. */
static ImBuf *tracking_context_get_keyframed_ibuf(MovieClip *clip, MovieClipUser *user, int clip_flag,
MovieTrackingTrack *track, int curfra, bool backwards,
@ -349,7 +140,7 @@ static ImBuf *tracking_context_get_keyframed_ibuf(MovieClip *clip, MovieClipUser
MovieTrackingMarker *marker_keyed;
int keyed_framenr;
marker_keyed = tracking_context_get_keyframed_marker(track, curfra, backwards);
marker_keyed = tracking_get_keyframed_marker(track, curfra, backwards);
if (marker_keyed == NULL) {
return NULL;
}
@ -381,50 +172,9 @@ static ImBuf *tracking_context_get_reference_ibuf(MovieClip *clip, MovieClipUser
return ibuf;
}
/* Update track's reference patch (patch from which track is tracking from)
*
* Returns false if reference image buffer failed to load.
*/
static bool track_context_update_reference(MovieTrackingContext *context, TrackContext *track_context,
MovieTrackingTrack *track, MovieTrackingMarker *marker, int curfra,
int frame_width, int frame_height)
{
MovieTrackingMarker *reference_marker = NULL;
ImBuf *reference_ibuf = NULL;
int width, height;
/* calculate patch for keyframed position */
reference_ibuf = tracking_context_get_reference_ibuf(context->clip, &context->user, context->clip_flag,
track, curfra, context->backwards, &reference_marker);
if (!reference_ibuf)
return false;
track_context->reference_marker = *reference_marker;
if (track_context->search_area) {
MEM_freeN(track_context->search_area);
}
track_context->search_area = track_get_search_floatbuf(reference_ibuf, track, reference_marker, &width, &height);
track_context->search_area_height = height;
track_context->search_area_width = width;
if ((track->algorithm_flag & TRACK_ALGORITHM_FLAG_USE_MASK) != 0) {
if (track_context->mask)
MEM_freeN(track_context->mask);
track_context->mask = BKE_tracking_track_get_mask(frame_width, frame_height, track, marker);
}
IMB_freeImBuf(reference_ibuf);
return true;
}
/* Fill in libmv tracker options structure with settings needed to perform the track. */
static void tracking_configure_tracker(const MovieTrackingTrack *track, float *mask,
libmv_TrackRegionOptions *options)
void tracking_configure_tracker(const MovieTrackingTrack *track, float *mask,
libmv_TrackRegionOptions *options)
{
options->motion_model = track->motion_model;
@ -442,102 +192,6 @@ static void tracking_configure_tracker(const MovieTrackingTrack *track, float *m
options->image1_mask = NULL;
}
/* returns false if marker crossed margin area from frame bounds */
static bool tracking_check_marker_margin(MovieTrackingTrack *track, MovieTrackingMarker *marker,
int frame_width, int frame_height)
{
float pat_min[2], pat_max[2];
float margin_left, margin_top, margin_right, margin_bottom;
float normalized_track_margin[2];
/* margin from frame boundaries */
BKE_tracking_marker_pattern_minmax(marker, pat_min, pat_max);
normalized_track_margin[0] = (float)track->margin / frame_width;
normalized_track_margin[1] = (float)track->margin / frame_height;
margin_left = max_ff(-pat_min[0], normalized_track_margin[0]);
margin_top = max_ff( pat_max[1], normalized_track_margin[1]);
margin_right = max_ff( pat_max[0], normalized_track_margin[0]);
margin_bottom = max_ff(-pat_min[1], normalized_track_margin[1]);
/* do not track markers which are too close to boundary */
if (marker->pos[0] < margin_left || marker->pos[0] > 1.0f - margin_right ||
marker->pos[1] < margin_bottom || marker->pos[1] > 1.0f - margin_top)
{
return false;
}
return true;
}
/* Scale search area of marker based on scale changes of pattern area,
*
* TODO(sergey): currently based on pattern bounding box scale change,
* smarter approach here is welcome.
*/
static void tracking_scale_marker_search(const MovieTrackingMarker *old_marker, MovieTrackingMarker *new_marker)
{
float old_pat_min[2], old_pat_max[2];
float new_pat_min[2], new_pat_max[2];
float scale_x, scale_y;
BKE_tracking_marker_pattern_minmax(old_marker, old_pat_min, old_pat_max);
BKE_tracking_marker_pattern_minmax(new_marker, new_pat_min, new_pat_max);
scale_x = (new_pat_max[0] - new_pat_min[0]) / (old_pat_max[0] - old_pat_min[0]);
scale_y = (new_pat_max[1] - new_pat_min[1]) / (old_pat_max[1] - old_pat_min[1]);
new_marker->search_min[0] *= scale_x;
new_marker->search_min[1] *= scale_y;
new_marker->search_max[0] *= scale_x;
new_marker->search_max[1] *= scale_y;
}
/* Insert new marker which was tracked from old_marker to a new image,
* will also ensure tracked segment is surrounded by disabled markers.
*/
static void tracking_insert_new_marker(MovieTrackingContext *context, MovieTrackingTrack *track,
const MovieTrackingMarker *old_marker, int curfra, bool tracked,
int frame_width, int frame_height,
const double dst_pixel_x[5], const double dst_pixel_y[5])
{
MovieTrackingMarker new_marker;
int frame_delta = context->backwards ? -1 : 1;
int nextfra = curfra + frame_delta;
new_marker = *old_marker;
if (tracked) {
tracking_set_marker_coords_from_tracking(frame_width, frame_height, &new_marker, dst_pixel_x, dst_pixel_y);
new_marker.flag |= MARKER_TRACKED;
new_marker.framenr = nextfra;
tracking_scale_marker_search(old_marker, &new_marker);
if (context->first_time) {
/* check if there's no keyframe/tracked markers before tracking marker.
* if so -- create disabled marker before currently tracking "segment"
*/
tracking_marker_insert_disabled(track, old_marker, !context->backwards, false);
}
/* insert currently tracked marker */
BKE_tracking_marker_insert(track, &new_marker);
/* make currently tracked segment be finished with disabled marker */
tracking_marker_insert_disabled(track, &new_marker, context->backwards, false);
}
else {
new_marker.framenr = nextfra;
new_marker.flag |= MARKER_DISABLED;
BKE_tracking_marker_insert(track, &new_marker);
}
}
/* Perform tracking from a reference_marker to destination_ibuf.
* Uses marker as an initial position guess.
*
@ -601,130 +255,6 @@ static bool configure_and_run_tracker(ImBuf *destination_ibuf, MovieTrackingTrac
return tracked;
}
/* Track all the tracks from context one more frame,
* returns false if nothing was tracked.
*/
bool BKE_tracking_context_step(MovieTrackingContext *context)
{
ImBuf *destination_ibuf;
int frame_delta = context->backwards ? -1 : 1;
int curfra = BKE_movieclip_remap_scene_to_clip_frame(context->clip, context->user.framenr);
int a, map_size;
bool ok = false;
int frame_width, frame_height;
map_size = tracks_map_get_size(context->tracks_map);
/* Nothing to track, avoid unneeded frames reading to save time and memory. */
if (!map_size)
return false;
/* Get an image buffer for frame we're tracking to. */
context->user.framenr += frame_delta;
destination_ibuf = BKE_movieclip_get_ibuf_flag(context->clip, &context->user,
context->clip_flag, MOVIECLIP_CACHE_SKIP);
if (!destination_ibuf)
return false;
frame_width = destination_ibuf->x;
frame_height = destination_ibuf->y;
#pragma omp parallel for private(a) shared(destination_ibuf, ok) if (map_size > 1)
for (a = 0; a < map_size; a++) {
TrackContext *track_context = NULL;
MovieTrackingTrack *track;
MovieTrackingMarker *marker;
tracks_map_get_indexed_element(context->tracks_map, a, &track, (void **)&track_context);
marker = BKE_tracking_marker_get_exact(track, curfra);
if (marker && (marker->flag & MARKER_DISABLED) == 0) {
bool tracked = false, need_readjust;
double dst_pixel_x[5], dst_pixel_y[5];
if (track->pattern_match == TRACK_MATCH_KEYFRAME)
need_readjust = context->first_time;
else
need_readjust = true;
/* do not track markers which are too close to boundary */
if (tracking_check_marker_margin(track, marker, frame_width, frame_height)) {
if (need_readjust) {
if (track_context_update_reference(context, track_context, track, marker,
curfra, frame_width, frame_height) == false)
{
/* happens when reference frame fails to be loaded */
continue;
}
}
tracked = configure_and_run_tracker(destination_ibuf, track,
&track_context->reference_marker, marker,
track_context->search_area,
track_context->search_area_width,
track_context->search_area_height,
track_context->mask,
dst_pixel_x, dst_pixel_y);
}
BLI_spin_lock(&context->tracks_map->spin_lock);
tracking_insert_new_marker(context, track, marker, curfra, tracked,
frame_width, frame_height, dst_pixel_x, dst_pixel_y);
BLI_spin_unlock(&context->tracks_map->spin_lock);
ok = true;
}
}
IMB_freeImBuf(destination_ibuf);
context->first_time = false;
context->frames++;
return ok;
}
void BKE_tracking_context_finish(MovieTrackingContext *context)
{
MovieClip *clip = context->clip;
ListBase *plane_tracks_base = BKE_tracking_get_active_plane_tracks(&clip->tracking);
MovieTrackingPlaneTrack *plane_track;
int map_size = tracks_map_get_size(context->tracks_map);
for (plane_track = plane_tracks_base->first;
plane_track;
plane_track = plane_track->next)
{
if ((plane_track->flag & PLANE_TRACK_AUTOKEY) == 0) {
int i;
for (i = 0; i < map_size; i++) {
TrackContext *track_context = NULL;
MovieTrackingTrack *track, *old_track;
bool do_update = false;
int j;
tracks_map_get_indexed_element(context->tracks_map, i, &track, (void **)&track_context);
old_track = BLI_ghash_lookup(context->tracks_map->hash, track);
for (j = 0; j < plane_track->point_tracksnr; j++) {
if (plane_track->point_tracks[j] == old_track) {
do_update = true;
break;
}
}
if (do_update) {
BKE_tracking_track_plane_from_existing_motion(plane_track, context->first_frame);
break;
}
}
}
}
}
static bool refine_marker_reference_frame_get(MovieTrackingTrack *track,
MovieTrackingMarker *marker,
bool backwards,

View File

@ -47,8 +47,13 @@
#include "BLF_translation.h"
#include "BKE_movieclip.h"
#include "BKE_tracking.h"
#include "IMB_imbuf_types.h"
#include "IMB_imbuf.h"
#include "IMB_moviecache.h"
#include "tracking_private.h"
#include "libmv-capi.h"
@ -390,8 +395,7 @@ void tracking_marker_insert_disabled(MovieTrackingTrack *track, const MovieTrack
}
/* Fill in Libmv C-API camera intrinsics options from tracking structure.
*/
/* Fill in Libmv C-API camera intrinsics options from tracking structure. */
void tracking_cameraIntrinscisOptionsFromTracking(MovieTracking *tracking,
int calibration_width, int calibration_height,
libmv_CameraIntrinsicsOptions *camera_intrinsics_options)
@ -453,3 +457,439 @@ void tracking_trackingCameraFromIntrinscisOptions(MovieTracking *tracking,
BLI_assert(!"Unknown distortion model");
}
}
/* Get previous keyframed marker. */
MovieTrackingMarker *tracking_get_keyframed_marker(MovieTrackingTrack *track,
int current_frame,
bool backwards)
{
MovieTrackingMarker *marker_keyed = NULL;
MovieTrackingMarker *marker_keyed_fallback = NULL;
int a = BKE_tracking_marker_get(track, current_frame) - track->markers;
while (a >= 0 && a < track->markersnr) {
int next = backwards ? a + 1 : a - 1;
bool is_keyframed = false;
MovieTrackingMarker *cur_marker = &track->markers[a];
MovieTrackingMarker *next_marker = NULL;
if (next >= 0 && next < track->markersnr)
next_marker = &track->markers[next];
if ((cur_marker->flag & MARKER_DISABLED) == 0) {
/* If we end up not finding a real keyframe marker,
* fall back to the first marker in the current tracked segment
* as a keyframe.
*/
if (next_marker && next_marker->flag & MARKER_DISABLED) {
if (marker_keyed_fallback == NULL)
marker_keyed_fallback = cur_marker;
}
is_keyframed |= (cur_marker->flag & MARKER_TRACKED) == 0;
}
if (is_keyframed) {
marker_keyed = cur_marker;
break;
}
a = next;
}
if (marker_keyed == NULL)
marker_keyed = marker_keyed_fallback;
return marker_keyed;
}
/*********************** Frame accessor *************************/
typedef struct AccessCacheKey {
int clip_index;
int frame;
int downscale;
libmv_InputMode input_mode;
int64_t transform_key;
} AccessCacheKey;
static unsigned int accesscache_hashhash(const void *key_v)
{
const AccessCacheKey *key = (const AccessCacheKey *) key_v;
/* TODO(sergey): Need better hashing here for faster frame access. */
return key->clip_index << 16 | key->frame;
}
static bool accesscache_hashcmp(const void *a_v, const void *b_v)
{
const AccessCacheKey *a = (const AccessCacheKey *) a_v;
const AccessCacheKey *b = (const AccessCacheKey *) b_v;
#define COMPARE_FIELD(field) \
{ \
if (a->field != b->field) { \
return false; \
} \
} (void) 0
COMPARE_FIELD(clip_index);
COMPARE_FIELD(frame);
COMPARE_FIELD(downscale);
COMPARE_FIELD(input_mode);
COMPARE_FIELD(transform_key);
#undef COMPARE_FIELD
return true;
}
static void accesscache_put(TrackingImageAccessor *accessor,
int clip_index,
int frame,
libmv_InputMode input_mode,
int downscale,
int64_t transform_key,
ImBuf *ibuf)
{
AccessCacheKey key;
key.clip_index = clip_index;
key.frame = frame;
key.input_mode = input_mode;
key.downscale = downscale;
key.transform_key = transform_key;
IMB_moviecache_put(accessor->cache, &key, ibuf);
}
static ImBuf *accesscache_get(TrackingImageAccessor *accessor,
int clip_index,
int frame,
libmv_InputMode input_mode,
int downscale,
int64_t transform_key)
{
AccessCacheKey key;
key.clip_index = clip_index;
key.frame = frame;
key.input_mode = input_mode;
key.downscale = downscale;
key.transform_key = transform_key;
return IMB_moviecache_get(accessor->cache, &key);
}
static ImBuf *accessor_get_preprocessed_ibuf(TrackingImageAccessor *accessor,
int clip_index,
int frame)
{
MovieClip *clip;
MovieClipUser user;
ImBuf *ibuf;
int scene_frame;
BLI_assert(clip_index < accessor->num_clips);
clip = accessor->clips[clip_index];
scene_frame = BKE_movieclip_remap_clip_to_scene_frame(clip, frame);
BKE_movieclip_user_set_frame(&user, scene_frame);
user.render_size = MCLIP_PROXY_RENDER_SIZE_FULL;
user.render_flag = 0;
ibuf = BKE_movieclip_get_ibuf(clip, &user);
return ibuf;
}
static ImBuf *make_grayscale_ibuf_copy(ImBuf *ibuf)
{
ImBuf *grayscale = IMB_allocImBuf(ibuf->x, ibuf->y, 32, 0);
size_t size;
int i;
BLI_assert(ibuf->channels == 3 || ibuf->channels == 4);
/* TODO(sergey): Bummer, currently IMB API only allows creating a 4-channel
* float buffer, so we do it manually here.
*
* Will generalize it later.
*/
size = (size_t)grayscale->x * (size_t)grayscale->y * sizeof(float);
grayscale->channels = 1;
if ((grayscale->rect_float = MEM_mapallocN(size, "tracking grayscale image"))) {
grayscale->mall |= IB_rectfloat;
grayscale->flags |= IB_rectfloat;
}
for (i = 0; i < grayscale->x * grayscale->y; ++i) {
const float *pixel = ibuf->rect_float + ibuf->channels * i;
grayscale->rect_float[i] = 0.2126f * pixel[0] +
0.7152f * pixel[1] +
0.0722f * pixel[2];
}
return grayscale;
}
static void ibuf_to_float_image(const ImBuf *ibuf, libmv_FloatImage *float_image)
{
BLI_assert(ibuf->rect_float != NULL);
float_image->buffer = ibuf->rect_float;
float_image->width = ibuf->x;
float_image->height = ibuf->y;
float_image->channels = ibuf->channels;
}
static ImBuf *float_image_to_ibuf(libmv_FloatImage *float_image)
{
ImBuf *ibuf = IMB_allocImBuf(float_image->width, float_image->height, 32, 0);
size_t size = (size_t)ibuf->x * (size_t)ibuf->y *
float_image->channels * sizeof(float);
ibuf->channels = float_image->channels;
if ((ibuf->rect_float = MEM_mapallocN(size, "tracking grayscale image"))) {
ibuf->mall |= IB_rectfloat;
ibuf->flags |= IB_rectfloat;
}
memcpy(ibuf->rect_float, float_image->buffer, size);
return ibuf;
}
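/* Return an ImBuf for the given clip and frame, post-processed as requested
* by Libmv's frame accessor: optionally cropped to a region, downscaled,
* converted to grayscale and/or run through a frame transform.
*/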
static ImBuf *accessor_get_ibuf(TrackingImageAccessor *accessor,
int clip_index,
int frame,
libmv_InputMode input_mode,
int downscale,
const libmv_Region *region,
const libmv_FrameTransform *transform)
{
ImBuf *ibuf, *orig_ibuf, *final_ibuf;
int64_t transform_key = 0;
if (transform != NULL) {
transform_key = libmv_frameAccessorgetTransformKey(transform);
}
/* First try to get fully processed image from the cache. */
ibuf = accesscache_get(accessor,
clip_index,
frame,
input_mode,
downscale,
transform_key);
if (ibuf != NULL) {
return ibuf;
}
/* And now we do postprocessing of the original frame. */
orig_ibuf = accessor_get_preprocessed_ibuf(accessor, clip_index, frame);
if (orig_ibuf == NULL) {
return NULL;
}
if (region != NULL) {
int width = region->max[0] - region->min[0],
height = region->max[1] - region->min[1];
/* If the requested region goes outside of the actual frame we still
* return the requested region size, but only partially fill it with
* the data we can.
*/
int clamped_origin_x = max_ii((int)region->min[0], 0),
clamped_origin_y = max_ii((int)region->min[1], 0);
int dst_offset_x = clamped_origin_x - (int)region->min[0],
dst_offset_y = clamped_origin_y - (int)region->min[1];
int clamped_width = width - dst_offset_x,
clamped_height = height - dst_offset_y;
clamped_width = min_ii(clamped_width, orig_ibuf->x - clamped_origin_x);
clamped_height = min_ii(clamped_height, orig_ibuf->y - clamped_origin_y);
final_ibuf = IMB_allocImBuf(width, height, 32, IB_rectfloat);
if (orig_ibuf->rect_float != NULL) {
IMB_rectcpy(final_ibuf, orig_ibuf,
dst_offset_x, dst_offset_y,
clamped_origin_x, clamped_origin_y,
clamped_width, clamped_height);
}
else {
int y;
/* TODO(sergey): We don't do any color space or alpha conversion
* here. It would probably be better for Libmv to work in the linear space,
* but keep sRGB space here for compatibility for now.
*/
for (y = 0; y < clamped_height; ++y) {
int x;
for (x = 0; x < clamped_width; ++x) {
int src_x = x + clamped_origin_x,
src_y = y + clamped_origin_y;
int dst_x = x + dst_offset_x,
dst_y = y + dst_offset_y;
int dst_index = (dst_y * width + dst_x) * 4,
src_index = (src_y * orig_ibuf->x + src_x) * 4;
rgba_uchar_to_float(final_ibuf->rect_float + dst_index,
(unsigned char *)orig_ibuf->rect +
src_index);
}
}
}
}
else {
/* Libmv only works with float images,
*
* This would likely make it so loads of float buffers are being stored
* in the cache which is nice on the one hand (faster re-use of the
* frames) but on the other hand it bumps the memory usage up.
*/
BLI_lock_thread(LOCK_MOVIECLIP);
IMB_float_from_rect(orig_ibuf);
BLI_unlock_thread(LOCK_MOVIECLIP);
final_ibuf = orig_ibuf;
}
if (downscale > 0) {
if (final_ibuf == orig_ibuf) {
final_ibuf = IMB_dupImBuf(orig_ibuf);
}
IMB_scaleImBuf(final_ibuf,
final_ibuf->x / (1 << downscale),
final_ibuf->y / (1 << downscale));
}
if (input_mode == LIBMV_IMAGE_MODE_RGBA) {
BLI_assert(final_ibuf->channels == 3 || final_ibuf->channels == 4);
/* pass */
}
else /* if (input_mode == LIBMV_IMAGE_MODE_MONO) */ {
ImBuf *grayscale_ibuf = make_grayscale_ibuf_copy(final_ibuf);
if (final_ibuf != orig_ibuf) {
/* We dereference original frame later. */
IMB_freeImBuf(final_ibuf);
}
final_ibuf = grayscale_ibuf;
}
if (transform != NULL) {
libmv_FloatImage input_image, output_image;
ibuf_to_float_image(final_ibuf, &input_image);
libmv_frameAccessorgetTransformRun(transform,
&input_image,
&output_image);
if (final_ibuf != orig_ibuf) {
IMB_freeImBuf(final_ibuf);
}
final_ibuf = float_image_to_ibuf(&output_image);
libmv_floatImageDestroy(&output_image);
}
/* It's possible processing still didn't happen at this point,
* but we really need a copy of the buffer to be transformed
* and to be put into the cache.
*/
if (final_ibuf == orig_ibuf) {
final_ibuf = IMB_dupImBuf(orig_ibuf);
}
IMB_freeImBuf(orig_ibuf);
/* We always put the postprocessed frame into the cache for now,
* not the smartest thing in the world, but who cares at this point.
*/
/* TODO(sergey): Disable cache for now, because we don't store region
* in the cache key and can't check whether cached version is usable for
* us or not.
*
* Need to think better about what to cache and when.
*/
if (false) {
accesscache_put(accessor,
clip_index,
frame,
input_mode,
downscale,
transform_key,
final_ibuf);
}
return final_ibuf;
}
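/* Callback invoked by Libmv's frame accessor to get pixels for a frame.
* The returned ImBuf pointer doubles as the cache key which is later
* passed to accessor_release_image_callback() to free the buffer.
*/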
static libmv_CacheKey accessor_get_image_callback(
struct libmv_FrameAccessorUserData *user_data,
int clip_index,
int frame,
libmv_InputMode input_mode,
int downscale,
const libmv_Region *region,
const libmv_FrameTransform *transform,
float **destination,
int *width,
int *height,
int *channels)
{
TrackingImageAccessor *accessor = (TrackingImageAccessor *) user_data;
ImBuf *ibuf;
BLI_assert(clip_index >= 0 && clip_index < accessor->num_clips);
ibuf = accessor_get_ibuf(accessor,
clip_index,
frame,
input_mode,
downscale,
region,
transform);
if (ibuf) {
*destination = ibuf->rect_float;
*width = ibuf->x;
*height = ibuf->y;
*channels = ibuf->channels;
}
else {
*destination = NULL;
*width = 0;
*height = 0;
*channels = 0;
}
return ibuf;
}
static void accessor_release_image_callback(libmv_CacheKey cache_key)
{
ImBuf *ibuf = (ImBuf *) cache_key;
IMB_freeImBuf(ibuf);
}
TrackingImageAccessor *tracking_image_accessor_new(MovieClip *clips[MAX_ACCESSOR_CLIP],
int num_clips,
int start_frame)
{
TrackingImageAccessor *accessor =
MEM_callocN(sizeof(TrackingImageAccessor), "tracking image accessor");
BLI_assert(num_clips <= MAX_ACCESSOR_CLIP);
accessor->cache = IMB_moviecache_create("frame access cache",
sizeof(AccessCacheKey),
accesscache_hashhash,
accesscache_hashcmp);
memcpy(accessor->clips, clips, num_clips * sizeof(MovieClip*));
accessor->num_clips = num_clips;
accessor->start_frame = start_frame;
accessor->libmv_accessor =
libmv_FrameAccessorNew((libmv_FrameAccessorUserData *) accessor,
accessor_get_image_callback,
accessor_release_image_callback);
return accessor;
}
void tracking_image_accessor_destroy(TrackingImageAccessor *accessor)
{
IMB_moviecache_free(accessor->cache);
libmv_FrameAccessorDestroy(accessor->libmv_accessor);
MEM_freeN(accessor);
}

View File

@ -95,4 +95,32 @@ void tracking_cameraIntrinscisOptionsFromTracking(struct MovieTracking *tracking
void tracking_trackingCameraFromIntrinscisOptions(struct MovieTracking *tracking,
const struct libmv_CameraIntrinsicsOptions *camera_intrinsics_options);
struct libmv_TrackRegionOptions;
void tracking_configure_tracker(const MovieTrackingTrack *track, float *mask,
struct libmv_TrackRegionOptions *options);
struct MovieTrackingMarker *tracking_get_keyframed_marker(
struct MovieTrackingTrack *track,
int current_frame,
bool backwards);
/*********************** Frame accessor *************************/
struct libmv_FrameAccessor;
#define MAX_ACCESSOR_CLIP 64
typedef struct TrackingImageAccessor {
struct MovieCache *cache;
struct MovieClip *clips[MAX_ACCESSOR_CLIP];
int num_clips;
int start_frame;
struct libmv_FrameAccessor *libmv_accessor;
} TrackingImageAccessor;
TrackingImageAccessor *tracking_image_accessor_new(MovieClip *clips[MAX_ACCESSOR_CLIP],
int num_clips,
int start_frame);
void tracking_image_accessor_destroy(TrackingImageAccessor *accessor);
#endif /* __TRACKING_PRIVATE_H__ */

View File

@ -1159,7 +1159,7 @@ static void clip_main_area_draw(const bContext *C, ARegion *ar)
/* if tracking is in progress, we should synchronize framenr from clipuser
* so latest tracked frame would be shown */
if (clip && clip->tracking_context)
BKE_tracking_context_sync_user(clip->tracking_context, &sc->user);
BKE_autotrack_context_sync_user(clip->tracking_context, &sc->user);
if (sc->flag & SC_LOCK_SELECTION) {
ImBuf *tmpibuf = NULL;

View File

@ -1083,7 +1083,7 @@ void CLIP_OT_slide_marker(wmOperatorType *ot)
/********************** track operator *********************/
typedef struct TrackMarkersJob {
struct MovieTrackingContext *context; /* tracking context */
struct AutoTrackContext *context; /* tracking context */
int sfra, efra, lastfra; /* Start, end and recently tracked frames */
int backwards; /* Backwards tracking flag */
MovieClip *clip; /* Clip which is tracking */
@ -1231,7 +1231,7 @@ static int track_markers_initjob(bContext *C, TrackMarkersJob *tmj, int backward
tmj->delay /= 2;
}
tmj->context = BKE_tracking_context_new(clip, &sc->user, backwards, 1);
tmj->context = BKE_autotrack_context_new(clip, &sc->user, backwards, 1);
clip->tracking_context = tmj->context;
@ -1265,14 +1265,14 @@ static void track_markers_startjob(void *tmv, short *stop, short *do_update, flo
double start_time = PIL_check_seconds_timer(), exec_time;
if (!BKE_tracking_context_step(tmj->context))
if (!BKE_autotrack_context_step(tmj->context))
break;
exec_time = PIL_check_seconds_timer() - start_time;
if (tmj->delay > (float)exec_time)
PIL_sleep_ms(tmj->delay - (float)exec_time);
}
else if (!BKE_tracking_context_step(tmj->context))
else if (!BKE_autotrack_context_step(tmj->context))
break;
*do_update = true;
@ -1296,7 +1296,7 @@ static void track_markers_updatejob(void *tmv)
{
TrackMarkersJob *tmj = (TrackMarkersJob *)tmv;
BKE_tracking_context_sync(tmj->context);
BKE_autotrack_context_sync(tmj->context);
}
static void track_markers_endjob(void *tmv)
@ -1310,8 +1310,8 @@ static void track_markers_endjob(void *tmv)
ED_update_for_newframe(tmj->main, tmj->scene, 0);
}
BKE_tracking_context_sync(tmj->context);
BKE_tracking_context_finish(tmj->context);
BKE_autotrack_context_sync(tmj->context);
BKE_autotrack_context_finish(tmj->context);
WM_main_add_notifier(NC_SCENE | ND_FRAME, tmj->scene);
}
@ -1319,7 +1319,7 @@ static void track_markers_endjob(void *tmv)
static void track_markers_freejob(void *tmv)
{
TrackMarkersJob *tmj = (TrackMarkersJob *)tmv;
BKE_tracking_context_free(tmj->context);
BKE_autotrack_context_free(tmj->context);
MEM_freeN(tmj);
}
@ -1328,7 +1328,7 @@ static int track_markers_exec(bContext *C, wmOperator *op)
SpaceClip *sc;
MovieClip *clip;
Scene *scene = CTX_data_scene(C);
struct MovieTrackingContext *context;
struct AutoTrackContext *context;
MovieClipUser *user, fake_user = {0};
int framenr, sfra, efra;
const bool backwards = RNA_boolean_get(op->ptr, "backwards");
@ -1388,10 +1388,10 @@ static int track_markers_exec(bContext *C, wmOperator *op)
return OPERATOR_CANCELLED;
/* do not disable tracks due to threshold when tracking frame-by-frame */
context = BKE_tracking_context_new(clip, user, backwards, sequence);
context = BKE_autotrack_context_new(clip, user, backwards, sequence);
while (framenr != efra) {
if (!BKE_tracking_context_step(context))
if (!BKE_autotrack_context_step(context))
break;
if (backwards) framenr--;
@ -1401,9 +1401,9 @@ static int track_markers_exec(bContext *C, wmOperator *op)
break;
}
BKE_tracking_context_sync(context);
BKE_tracking_context_finish(context);
BKE_tracking_context_free(context);
BKE_autotrack_context_sync(context);
BKE_autotrack_context_finish(context);
BKE_autotrack_context_free(context);
/* update scene current frame to the latest tracked frame */
scene->r.cfra = BKE_movieclip_remap_clip_to_scene_frame(clip, framenr);